/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is the producer, it rings the doorbell for command
 *    and endpoint rings.  If the HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
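
/*
 * Editorial sketch, not used by the driver: the consumer side of the cycle
 * bit rules above reduces to comparing a TRB's cycle bit against the ring's
 * cycle state, and the empty check (ring behavior rule 1) is a pointer
 * comparison.  These helper names are hypothetical; only the xhci.h
 * definitions included above are assumed.
 */
static inline bool example_trb_owned_by_consumer(struct xhci_ring *ring,
                union xhci_trb *trb)
{
        /* Cycle bit == ring cycle state means this TRB belongs to the consumer */
        return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
                        (ring->cycle_state & TRB_CYCLE);
}

static inline bool example_ring_is_empty(struct xhci_ring *ring)
{
        /* Ring behavior rule 1 above: a ring is empty if enqueue == dequeue */
        return ring->enqueue == ring->dequeue;
}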

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
                        (seg->next == xhci->event_ring->first_seg);
        else
                return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return trb == &seg->trbs[TRBS_PER_SEGMENT];
        else
                return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
        struct xhci_link_trb *link = &ring->enqueue->link;
        return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (last_trb(xhci, ring, *seg, *trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
        union xhci_trb *next = ++(ring->dequeue);
        unsigned long long addr;

        ring->deq_updates++;
        /* Update the dequeue pointer further if that was a link TRB or we're at
         * the end of an event ring segment (which doesn't have link TRBs)
         */
        while (last_trb(xhci, ring, ring->deq_seg, next)) {
                if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
                        ring->cycle_state = (ring->cycle_state ? 0 : 1);
                        if (!in_interrupt())
                                xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
                                                ring,
                                                (unsigned int) ring->cycle_state);
                }
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                next = ring->dequeue;
        }
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 *                      prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                bool consumer, bool more_trbs_coming)
{
        u32 chain;
        union xhci_trb *next;
        unsigned long long addr;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
        next = ++(ring->enqueue);

        ring->enq_updates++;
        /* Update the enqueue pointer further if that was a link TRB or we're at
         * the end of an event ring segment (which doesn't have link TRBs)
         */
        while (last_trb(xhci, ring, ring->enq_seg, next)) {
                if (!consumer) {
                        if (ring != xhci->event_ring) {
                                /*
                                 * If the caller doesn't plan on enqueueing more
                                 * TDs before ringing the doorbell, then we
                                 * don't want to give the link TRB to the
                                 * hardware just yet.  We'll give the link TRB
                                 * back in prepare_ring() just before we enqueue
                                 * the TD at the top of the ring.
                                 */
                                if (!chain && !more_trbs_coming)
                                        break;

                                /* If we're not dealing with 0.95 hardware,
                                 * carry over the chain bit of the previous TRB
                                 * (which may mean the chain bit is cleared).
                                 */
                                if (!xhci_link_trb_quirk(xhci)) {
                                        next->link.control &=
                                                cpu_to_le32(~TRB_CHAIN);
                                        next->link.control |=
                                                cpu_to_le32(chain);
                                }
                                /* Give this link TRB to the hardware */
                                wmb();
                                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                        }
                        /* Toggle the cycle bit after the last ring segment. */
                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
                                ring->cycle_state = (ring->cycle_state ? 0 : 1);
                                if (!in_interrupt())
                                        xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
                                                        ring,
                                                        (unsigned int) ring->cycle_state);
                        }
                }
                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int i;
        union xhci_trb *enq = ring->enqueue;
        struct xhci_segment *enq_seg = ring->enq_seg;
        struct xhci_segment *cur_seg;
        unsigned int left_on_ring;

        /* If we are currently pointing to a link TRB, advance the
         * enqueue pointer before checking for space */
        while (last_trb(xhci, ring, enq_seg, enq)) {
                enq_seg = enq_seg->next;
                enq = enq_seg->trbs;
        }

        /* Check if ring is empty */
        if (enq == ring->dequeue) {
                /* Can't use link trbs */
                left_on_ring = TRBS_PER_SEGMENT - 1;
                for (cur_seg = enq_seg->next; cur_seg != enq_seg;
                                cur_seg = cur_seg->next)
                        left_on_ring += TRBS_PER_SEGMENT - 1;

                /* Always need one TRB free in the ring. */
                left_on_ring -= 1;
                if (num_trbs > left_on_ring) {
                        xhci_warn(xhci, "Not enough room on ring; "
                                        "need %u TRBs, %u TRBs left\n",
                                        num_trbs, left_on_ring);
                        return 0;
                }
                return 1;
        }
        /* Make sure there's an extra empty TRB available */
        for (i = 0; i <= num_trbs; ++i) {
                if (enq == ring->dequeue)
                        return 0;
                enq++;
                while (last_trb(xhci, ring, enq_seg, enq)) {
                        enq_seg = enq_seg->next;
                        enq = enq_seg->trbs;
                }
        }
        return 1;
}
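
/*
 * Editorial sketch of the simpler accounting the FIXME above suggests:
 * if the ring kept a running free-TRB count, the room check would reduce
 * to a single comparison.  The num_trbs_free field is hypothetical (this
 * version of the driver does not track it), so the sketch is compiled out.
 */
#if 0
static int example_room_on_ring(struct xhci_ring *ring, unsigned int num_trbs)
{
        /* Decremented on each enqueue, incremented as the consumer advances */
        return ring->num_trbs_free >= num_trbs;
}
#endif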

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        xhci_dbg(xhci, "// Ding dong!\n");
        xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index,
                unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         * FIXME - check all the stream rings for pending cancellations.
         */
        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
            (ep_state & EP_HALTED))
                return;
        xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
        /* The CPU has better things to do at this point than wait for a
         * write-posting flush.  It'll get there soon enough.
         */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (!(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                        stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;
                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                                stream_id);
        }
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
                struct xhci_segment *start_seg,
                union xhci_trb  *trb, int *cycle_state)
{
        struct xhci_segment *cur_seg = start_seg;
        struct xhci_generic_trb *generic_trb;

        while (cur_seg->trbs > trb ||
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
                if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
                        *cycle_state ^= 0x1;
                cur_seg = cur_seg->next;
                if (cur_seg == start_seg)
                        /* Looped over the entire list.  Oops! */
                        return NULL;
        }
        return cur_seg;
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
                struct urb *urb)
{
        return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
                xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *cur_td,
                struct xhci_dequeue_state *state)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_ring *ep_ring;
        struct xhci_generic_trb *trb;
        struct xhci_ep_ctx *ep_ctx;
        dma_addr_t addr;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue state "
                                "for invalid stream ID %u.\n",
                                stream_id);
                return;
        }
        state->new_cycle_state = 0;
        xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        dev->eps[ep_index].stopped_trb,
                        &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }

        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg(xhci, "Finding endpoint context\n");
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

        state->new_deq_ptr = cur_td->last_trb;
        xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }

        trb = &state->new_deq_ptr->generic;
        if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
            (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

        /*
         * If there is only one segment in a ring, find_trb_seg()'s while loop
         * will not run, and it will return before it has a chance to see if it
         * needs to toggle the cycle bit.  It can't tell if the stalled transfer
         * ended just before the link TRB on a one-segment ring, or if the TD
         * wrapped around the top of the ring, because it doesn't have the TD in
         * question.  Look for the one-segment case where stalled TRB's address
         * is greater than the new dequeue pointer address.
         */
        if (ep_ring->first_seg == ep_ring->first_seg->next &&
                        state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
                state->new_cycle_state ^= 0x1;
        xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

        /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
                        state->new_deq_seg);
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
                        (unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                struct xhci_td *cur_td, bool flip_cycle)
{
        struct xhci_segment *cur_seg;
        union xhci_trb *cur_trb;

        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
                        true;
                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
                if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
                        /* Unchain any chained Link TRBs, but
                         * leave the pointers intact.
                         */
                        cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
                        /* Flip the cycle bit (link TRBs can't be the first
                         * or last TRB).
                         */
                        if (flip_cycle)
                                cur_trb->generic.field[3] ^=
                                        cpu_to_le32(TRB_CYCLE);
                        xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
                        xhci_dbg(xhci, "Address = %p (0x%llx dma); "
                                        "in seg %p (0x%llx dma)\n",
                                        cur_trb,
                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
                                        cur_seg,
                                        (unsigned long long)cur_seg->dma);
                } else {
                        cur_trb->generic.field[0] = 0;
                        cur_trb->generic.field[1] = 0;
                        cur_trb->generic.field[2] = 0;
                        /* Preserve only the cycle bit of this TRB */
                        cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
                        /* Flip the cycle bit except on the first or last TRB */
                        if (flip_cycle && cur_trb != cur_td->first_trb &&
                                        cur_trb != cur_td->last_trb)
                                cur_trb->generic.field[3] ^=
                                        cpu_to_le32(TRB_CYCLE);
                        cur_trb->generic.field[3] |= cpu_to_le32(
                                TRB_TYPE(TRB_TR_NOOP));
                        xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
                                        "in seg %p (0x%llx dma)\n",
                                        cur_trb,
                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
                                        cur_seg,
                                        (unsigned long long)cur_seg->dma);
                }
                if (cur_trb == cur_td->last_trb)
                        break;
        }
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id,
                struct xhci_segment *deq_seg,
                union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id,
                struct xhci_dequeue_state *deq_state)
{
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

        xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
                        "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
                        deq_state->new_deq_seg,
                        (unsigned long long)deq_state->new_deq_seg->dma,
                        deq_state->new_deq_ptr,
                        (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
                        deq_state->new_cycle_state);
        queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
                        deq_state->new_deq_seg,
                        deq_state->new_deq_ptr,
                        (u32) deq_state->new_cycle_state);
        /* Stop the TD queueing code from ringing the doorbell until
         * this command completes.  The HC won't set the dequeue pointer
         * if the ring is running, and ringing the doorbell starts the
         * ring running.
         */
        ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
{
        ep->ep_state &= ~EP_HALT_PENDING;
        /* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
         * timer is running on another CPU, we don't decrement stop_cmds_pending
         * (since we didn't successfully stop the watchdog timer).
         */
        if (del_timer(&ep->stop_cmd_timer))
                ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                struct xhci_td *cur_td, int status, char *adjective)
{
        struct usb_hcd *hcd;
        struct urb      *urb;
        struct urb_priv *urb_priv;

        urb = cur_td->urb;
        urb_priv = urb->hcpriv;
        urb_priv->td_cnt++;
        hcd = bus_to_hcd(urb->dev->bus);

        /* Only give the URB back when this is the last TD in the URB */
        if (urb_priv->td_cnt == urb_priv->length) {
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                        usb_amd_quirk_pll_enable();
                        }
                }
                usb_hcd_unlink_urb_from_ep(hcd, urb);

                spin_unlock(&xhci->lock);
                usb_hcd_giveback_urb(hcd, urb, status);
                xhci_urb_free_priv(xhci, urb_priv);
                spin_lock(&xhci->lock);
        }
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                union xhci_trb *trb, struct xhci_event_cmd *event)
{
        unsigned int slot_id;
        unsigned int ep_index;
        struct xhci_virt_device *virt_dev;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct list_head *entry;
        struct xhci_td *cur_td = NULL;
        struct xhci_td *last_unlinked_td;

        struct xhci_dequeue_state deq_state;

        if (unlikely(TRB_TO_SUSPEND_PORT(
                             le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
                slot_id = TRB_TO_SLOT_ID(
                        le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
                virt_dev = xhci->devs[slot_id];
                if (virt_dev)
                        handle_cmd_in_cmd_wait_list(xhci, virt_dev,
                                event);
                else
                        xhci_warn(xhci, "Stop endpoint command "
                                "completion for disabled slot %u\n",
                                slot_id);
                return;
        }

        memset(&deq_state, 0, sizeof(deq_state));
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = &xhci->devs[slot_id]->eps[ep_index];

        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ep->stopped_td = NULL;
                ep->stopped_trb = NULL;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }

        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
         * We have the xHCI lock, so nothing can modify this list until we drop
         * it.  We're also in the event handler, so we can't get re-interrupted
         * if another Stop Endpoint command completes.
         */
        list_for_each(entry, &ep->cancelled_td_list) {
                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
                xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
                                cur_td->first_trb,
                                (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                if (!ep_ring) {
                        /* This shouldn't happen unless a driver is mucking
                         * with the stream ID after submission.  This will
                         * leave the TD on the hardware ring, and the hardware
                         * will try to execute it, and may access a buffer
                         * that has already been freed.  In the best case, the
                         * hardware will execute it, and the event handler will
                         * ignore the completion event for that TD, since it was
                         * removed from the td_list for that endpoint.  In
                         * short, don't muck with the stream ID after
                         * submission.
                         */
                        xhci_warn(xhci, "WARN Cancelled URB %p "
                                        "has invalid stream ID %u.\n",
                                        cur_td->urb,
                                        cur_td->urb->stream_id);
                        goto remove_finished_td;
                }
                /*
                 * If we stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                if (cur_td == ep->stopped_td)
                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                                        cur_td->urb->stream_id,
                                        cur_td, &deq_state);
                else
                        td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
                /*
                 * The event handler won't see a completion for this TD anymore,
                 * so remove it from the endpoint ring's TD list.  Keep it in
                 * the cancelled TD list for URB completion later.
                 */
                list_del_init(&cur_td->td_list);
        }
        last_unlinked_td = cur_td;
        xhci_stop_watchdog_timer_in_irq(xhci, ep);

        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                xhci_queue_new_dequeue_state(xhci,
                                slot_id, ep_index,
                                ep->stopped_td->urb->stream_id,
                                &deq_state);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
        ep->stopped_td = NULL;
        ep->stopped_trb = NULL;

        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
         * New TDs to be cancelled might be added to the end of the list before
         * we can complete all the URBs for the TDs we already unlinked.
         * So stop when we've completed the URB for the last TD we unlinked.
         */
        do {
                cur_td = list_entry(ep->cancelled_td_list.next,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);

                /* Clean up the cancelled URB */
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
                xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

                /* Stop processing the cancelled list if the watchdog timer is
                 * running.
                 */
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        } while (cur_td != last_unlinked_td);

        /* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_ep *ep;
        struct xhci_virt_ep *temp_ep;
        struct xhci_ring *ring;
        struct xhci_td *cur_td;
        int ret, i, j;

        ep = (struct xhci_virt_ep *) arg;
        xhci = ep->xhci;

        spin_lock(&xhci->lock);

        ep->stop_cmds_pending--;
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.\n");
                spin_unlock(&xhci->lock);
                return;
        }
        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
                                "exiting.\n");
                spin_unlock(&xhci->lock);
                return;
        }

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
        /* Oops, HC is dead or dying or at least not responding to the stop
         * endpoint command.
         */
        xhci->xhc_state |= XHCI_STATE_DYING;
        /* Disable interrupts from the host controller and start halting it */
        xhci_quiesce(xhci);
        spin_unlock(&xhci->lock);

        ret = xhci_halt(xhci);

        spin_lock(&xhci->lock);
        if (ret < 0) {
                /* This is bad; the host is not responding to commands and it's
                 * not allowing itself to be halted.  At least interrupts are
                 * disabled. If we call usb_hc_died(), it will attempt to
                 * disconnect all device drivers under this host.  Those
                 * disconnect() methods will wait for all URBs to be unlinked,
                 * so we must complete them.
                 */
                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
                xhci_warn(xhci, "Completing active URBs anyway.\n");
                /* We could turn all TDs on the rings to no-ops.  This won't
                 * help if the host has cached part of the ring, and is slow if
                 * we want to preserve the cycle bit.  Skip it and hope the host
                 * doesn't touch the memory.
                 */
        }
        for (i = 0; i < MAX_HC_SLOTS; i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++) {
                        temp_ep = &xhci->devs[i]->eps[j];
                        ring = temp_ep->ring;
                        if (!ring)
                                continue;
                        xhci_dbg(xhci, "Killing URBs for slot ID %u, "
                                        "ep index %u\n", i, j);
                        while (!list_empty(&ring->td_list)) {
                                cur_td = list_first_entry(&ring->td_list,
                                                struct xhci_td,
                                                td_list);
                                list_del_init(&cur_td->td_list);
                                if (!list_empty(&cur_td->cancelled_td_list))
                                        list_del_init(&cur_td->cancelled_td_list);
                                xhci_giveback_urb_in_irq(xhci, cur_td,
                                                -ESHUTDOWN, "killed");
                        }
                        while (!list_empty(&temp_ep->cancelled_td_list)) {
                                cur_td = list_first_entry(
                                                &temp_ep->cancelled_td_list,
                                                struct xhci_td,
                                                cancelled_td_list);
                                list_del_init(&cur_td->cancelled_td_list);
                                xhci_giveback_urb_in_irq(xhci, cur_td,
                                                -ESHUTDOWN, "killed");
                        }
                }
        }
        spin_unlock(&xhci->lock);
        xhci_dbg(xhci, "Calling usb_hc_died()\n");
        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
        xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
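
/*
 * For reference, a sketch of how this watchdog is armed when a stop endpoint
 * command is queued (this mirrors the URB cancellation path in xhci.c; shown
 * here only to make the flag-plus-counter scheme described above concrete):
 *
 *      ep->ep_state |= EP_HALT_PENDING;
 *      ep->stop_cmds_pending++;
 *      ep->stop_cmd_timer.expires = jiffies +
 *              XHCI_STOP_EP_CMD_TIMEOUT * HZ;
 *      add_timer(&ep->stop_cmd_timer);
 */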

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event,
                union xhci_trb *trb)
{
        unsigned int slot_id;
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        dev = xhci->devs[slot_id];

        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for "
                                "freed stream ID %u\n",
                                stream_id);
                /* XXX: Harmless??? */
                dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
                return;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

        if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
                case COMP_TRB_ERR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
                                        "of stream ID configuration\n");
                        break;
                case COMP_CTX_STATE:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                                        "to incorrect slot or ep state.\n");
                        ep_state = le32_to_cpu(ep_ctx->ep_info);
                        ep_state &= EP_STATE_MASK;
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                                        slot_state, ep_state);
                        break;
                case COMP_EBADSLT:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
                                        "slot %u was not enabled.\n", slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
                                        "completion code of %u.\n",
                                  GET_COMP_CODE(le32_to_cpu(event->status)));
                        break;
                }
                /* OK what do we do now?  The endpoint state is hosed, and we
                 * should never get to this point if the synchronization between
                 * queueing and endpoint state is correct.  This might happen
                 * if the device gets disconnected after we've finished
                 * cancelling URBs, which might not be an error...
                 */
975         } else {
976                 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
977                          le64_to_cpu(ep_ctx->deq));
978                 if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
979                                          dev->eps[ep_index].queued_deq_ptr) ==
980                     (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
981                         /* Update the ring's dequeue segment and dequeue pointer
982                          * to reflect the new position.
983                          */
984                         ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
985                         ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
986                 } else {
987                         xhci_warn(xhci, "Mismatch between completed Set TR Deq "
988                                         "Ptr command & xHCI internal state.\n");
989                         xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
990                                         dev->eps[ep_index].queued_deq_seg,
991                                         dev->eps[ep_index].queued_deq_ptr);
992                 }
993         }
994
995         dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
996         dev->eps[ep_index].queued_deq_seg = NULL;
997         dev->eps[ep_index].queued_deq_ptr = NULL;
998         /* Restart any rings with pending URBs */
999         ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1000 }
1001
1002 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
1003                 struct xhci_event_cmd *event,
1004                 union xhci_trb *trb)
1005 {
1006         int slot_id;
1007         unsigned int ep_index;
1008
1009         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
1010         ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1011         /* This command will only fail if the endpoint wasn't halted,
1012          * but we don't care.
1013          */
1014         xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
1015                  GET_COMP_CODE(le32_to_cpu(event->status)));
1016
1017         /* HW with the reset endpoint quirk needs to have a configure endpoint
1018          * command complete before the endpoint can be used.  Queue that here
1019          * because the HW can't handle two commands being queued in a row.
1020          */
1021         if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1022                 xhci_dbg(xhci, "Queueing configure endpoint command\n");
1023                 xhci_queue_configure_endpoint(xhci,
1024                                 xhci->devs[slot_id]->in_ctx->dma, slot_id,
1025                                 false);
1026                 xhci_ring_cmd_db(xhci);
1027         } else {
1028                 /* Clear our internal halted state and restart the ring(s) */
1029                 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1030                 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1031         }
1032 }
1033
1034 /* Check to see if a command in the device's command queue matches this one.
1035  * Signal the completion or free the command, and return 1.  Return 0 if the
1036  * completed command isn't at the head of the command list.
1037  */
1038 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1039                 struct xhci_virt_device *virt_dev,
1040                 struct xhci_event_cmd *event)
1041 {
1042         struct xhci_command *command;
1043
1044         if (list_empty(&virt_dev->cmd_list))
1045                 return 0;
1046
1047         command = list_entry(virt_dev->cmd_list.next,
1048                         struct xhci_command, cmd_list);
1049         if (xhci->cmd_ring->dequeue != command->command_trb)
1050                 return 0;
1051
1052         command->status = GET_COMP_CODE(le32_to_cpu(event->status));
1053         list_del(&command->cmd_list);
1054         if (command->completion)
1055                 complete(command->completion);
1056         else
1057                 xhci_free_command(xhci, command);
1058         return 1;
1059 }
1060
1061 static void handle_cmd_completion(struct xhci_hcd *xhci,
1062                 struct xhci_event_cmd *event)
1063 {
1064         int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1065         u64 cmd_dma;
1066         dma_addr_t cmd_dequeue_dma;
1067         struct xhci_input_control_ctx *ctrl_ctx;
1068         struct xhci_virt_device *virt_dev;
1069         unsigned int ep_index;
1070         struct xhci_ring *ep_ring;
1071         unsigned int ep_state;
1072
1073         cmd_dma = le64_to_cpu(event->cmd_trb);
1074         cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1075                         xhci->cmd_ring->dequeue);
1076         /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1077         if (cmd_dequeue_dma == 0) {
1078                 xhci->error_bitmask |= 1 << 4;
1079                 return;
1080         }
1081         /* Does the DMA address match our internal dequeue pointer address? */
1082         if (cmd_dma != (u64) cmd_dequeue_dma) {
1083                 xhci->error_bitmask |= 1 << 5;
1084                 return;
1085         }
1086         switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1087                 & TRB_TYPE_BITMASK) {
1088         case TRB_TYPE(TRB_ENABLE_SLOT):
1089                 if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
1090                         xhci->slot_id = slot_id;
1091                 else
1092                         xhci->slot_id = 0;
1093                 complete(&xhci->addr_dev);
1094                 break;
1095         case TRB_TYPE(TRB_DISABLE_SLOT):
1096                 if (xhci->devs[slot_id]) {
1097                         if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1098                                 /* Delete default control endpoint resources */
1099                                 xhci_free_device_endpoint_resources(xhci,
1100                                                 xhci->devs[slot_id], true);
1101                         xhci_free_virt_device(xhci, slot_id);
1102                 }
1103                 break;
1104         case TRB_TYPE(TRB_CONFIG_EP):
1105                 virt_dev = xhci->devs[slot_id];
1106                 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1107                         break;
1108                 /*
1109                  * Configure endpoint commands can come from the USB core
1110                  * configuration or alt setting changes, or because the HW
1111                  * needed an extra configure endpoint command after a reset
1112                  * endpoint command or streams were being configured.
1113                  * If the command was for a halted endpoint, the xHCI driver
1114                  * is not waiting on the configure endpoint command.
1115                  */
1116                 ctrl_ctx = xhci_get_input_control_ctx(xhci,
1117                                 virt_dev->in_ctx);
1118                 /* Input ctx add_flags are the endpoint index plus one */
1119                 ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
1120                 /* A usb_set_interface() call directly after clearing a halted
1121                  * condition may race on this quirky hardware.  Not worth
1122                  * worrying about, since this is prototype hardware.  Not sure
1123                  * if this will work for streams, but streams support was
1124                  * untested on this prototype.
1125                  */
1126                 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1127                                 ep_index != (unsigned int) -1 &&
1128                     le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
1129                     le32_to_cpu(ctrl_ctx->drop_flags)) {
1130                         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1131                         ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1132                         if (!(ep_state & EP_HALTED))
1133                                 goto bandwidth_change;
1134                         xhci_dbg(xhci, "Completed config ep cmd - "
1135                                         "last ep index = %d, state = %d\n",
1136                                         ep_index, ep_state);
1137                         /* Clear internal halted state and restart ring(s) */
1138                         xhci->devs[slot_id]->eps[ep_index].ep_state &=
1139                                 ~EP_HALTED;
1140                         ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1141                         break;
1142                 }
1143 bandwidth_change:
1144                 xhci_dbg(xhci, "Completed config ep cmd\n");
1145                 xhci->devs[slot_id]->cmd_status =
1146                         GET_COMP_CODE(le32_to_cpu(event->status));
1147                 complete(&xhci->devs[slot_id]->cmd_completion);
1148                 break;
1149         case TRB_TYPE(TRB_EVAL_CONTEXT):
1150                 virt_dev = xhci->devs[slot_id];
1151                 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1152                         break;
1153                 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1154                 complete(&xhci->devs[slot_id]->cmd_completion);
1155                 break;
1156         case TRB_TYPE(TRB_ADDR_DEV):
1157                 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1158                 complete(&xhci->addr_dev);
1159                 break;
1160         case TRB_TYPE(TRB_STOP_RING):
1161                 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
1162                 break;
1163         case TRB_TYPE(TRB_SET_DEQ):
1164                 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
1165                 break;
1166         case TRB_TYPE(TRB_CMD_NOOP):
1167                 break;
1168         case TRB_TYPE(TRB_RESET_EP):
1169                 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
1170                 break;
1171         case TRB_TYPE(TRB_RESET_DEV):
1172                 xhci_dbg(xhci, "Completed reset device command.\n");
1173                 slot_id = TRB_TO_SLOT_ID(
1174                         le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
1175                 virt_dev = xhci->devs[slot_id];
1176                 if (virt_dev)
1177                         handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1178                 else
1179                         xhci_warn(xhci, "Reset device command completion "
1180                                         "for disabled slot %u\n", slot_id);
1181                 break;
1182         case TRB_TYPE(TRB_NEC_GET_FW):
1183                 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1184                         xhci->error_bitmask |= 1 << 6;
1185                         break;
1186                 }
1187                 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1188                          NEC_FW_MAJOR(le32_to_cpu(event->status)),
1189                          NEC_FW_MINOR(le32_to_cpu(event->status)));
1190                 break;
1191         default:
1192                 /* Skip over unknown commands on the event ring */
1193                 xhci->error_bitmask |= 1 << 6;
1194                 break;
1195         }
1196         inc_deq(xhci, xhci->cmd_ring, false);
1197 }
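
/*
 * Illustrative sketch, not driver code: per the xHCI spec, a command
 * completion event carries the 64-bit DMA address of the command TRB it
 * completes, so a handler can sanity-check that address against its own
 * software dequeue pointer before acting on the event.  The wrapper name
 * below is hypothetical; the fields and accessors are the ones used in
 * this file.
 */
static inline bool cmd_event_matches_dequeue(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
{
        dma_addr_t cmd_dma = le64_to_cpu(event->cmd_trb);
        dma_addr_t deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                        xhci->cmd_ring->dequeue);

        return cmd_dma == deq_dma;
}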
1198
1199 static void handle_vendor_event(struct xhci_hcd *xhci,
1200                 union xhci_trb *event)
1201 {
1202         u32 trb_type;
1203
1204         trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1205         xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1206         if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1207                 handle_cmd_completion(xhci, &event->event_cmd);
1208 }
1209
1210 /* @port_id: the one-based port ID from the hardware (indexed from array of all
1211  * port registers -- USB 3.0 and USB 2.0).
1212  *
1213  * Returns a zero-based port number, which is suitable for indexing into each of
1214  * the split roothubs' port arrays and bus state arrays.
1215  */
1216 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1217                 struct xhci_hcd *xhci, u32 port_id)
1218 {
1219         unsigned int i;
1220         unsigned int num_similar_speed_ports = 0;
1221
1222         /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1223          * and usb2_ports are 0-based indexes.  Count the number of similar
1224          * speed ports, up to 1 port before this port.
1225          */
1226         for (i = 0; i < (port_id - 1); i++) {
1227                 u8 port_speed = xhci->port_array[i];
1228
1229                 /*
1230                  * Skip ports that don't have known speeds, or have duplicate
1231                  * Extended Capabilities port speed entries.
1232                  */
1233                 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1234                         continue;
1235
1236                 /*
1237                  * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1238                  * 1.1 ports are under the USB 2.0 hub.  If the port speed
1239                  * matches the device speed, it's a similar speed port.
1240                  */
1241                 if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1242                         num_similar_speed_ports++;
1243         }
1244         return num_similar_speed_ports;
1245 }
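
/*
 * Worked example (illustrative): with xhci->port_array[] = { 0x02, 0x03,
 * 0x02 } -- a USB 2.0 port, a USB 3.0 port, then another USB 2.0 port --
 * an event for one-based port_id 3, handled by the USB 2.0 roothub, counts
 * one similar-speed port (index 0) and returns 1, i.e. the second entry of
 * usb2_ports[].
 */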
1246
1247 static void handle_port_status(struct xhci_hcd *xhci,
1248                 union xhci_trb *event)
1249 {
1250         struct usb_hcd *hcd;
1251         u32 port_id;
1252         u32 temp, temp1;
1253         int max_ports;
1254         int slot_id;
1255         unsigned int faked_port_index;
1256         u8 major_revision;
1257         struct xhci_bus_state *bus_state;
1258         __le32 __iomem **port_array;
1259         bool bogus_port_status = false;
1260
1261         /* Port status change events always have a successful completion code */
1262         if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1263                 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1264                 xhci->error_bitmask |= 1 << 8;
1265         }
1266         port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1267         xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1268
1269         max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1270         if ((port_id <= 0) || (port_id > max_ports)) {
1271                 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1272                 bogus_port_status = true;
1273                 goto cleanup;
1274         }
1275
1276         /* Figure out which usb_hcd this port is attached to:
1277          * is it a USB 3.0 port or a USB 2.0/1.1 port?
1278          */
1279         major_revision = xhci->port_array[port_id - 1];
1280         if (major_revision == 0) {
1281                 xhci_warn(xhci, "Event for port %u not in "
1282                                 "Extended Capabilities, ignoring.\n",
1283                                 port_id);
1284                 bogus_port_status = true;
1285                 goto cleanup;
1286         }
1287         if (major_revision == DUPLICATE_ENTRY) {
1288                 xhci_warn(xhci, "Event for port %u duplicated in "
1289                                 "Extended Capabilities, ignoring.\n",
1290                                 port_id);
1291                 bogus_port_status = true;
1292                 goto cleanup;
1293         }
1294
1295         /*
1296          * Hardware port IDs reported by a Port Status Change Event include USB
1297          * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1298          * resume event, but we first need to translate the hardware port ID
1299          * into the index into the ports on the correct split roothub, and the
1300          * correct bus_state structure.
1301          */
1302         /* Find the right roothub. */
1303         hcd = xhci_to_hcd(xhci);
1304         if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1305                 hcd = xhci->shared_hcd;
1306         bus_state = &xhci->bus_state[hcd_index(hcd)];
1307         if (hcd->speed == HCD_USB3)
1308                 port_array = xhci->usb3_ports;
1309         else
1310                 port_array = xhci->usb2_ports;
1311         /* Find the faked port hub number */
1312         faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1313                         port_id);
1314
1315         temp = xhci_readl(xhci, port_array[faked_port_index]);
1316         if (hcd->state == HC_STATE_SUSPENDED) {
1317                 xhci_dbg(xhci, "resume root hub\n");
1318                 usb_hcd_resume_root_hub(hcd);
1319         }
1320
1321         if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1322                 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1323
1324                 temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1325                 if (!(temp1 & CMD_RUN)) {
1326                         xhci_warn(xhci, "xHC is not running.\n");
1327                         goto cleanup;
1328                 }
1329
1330                 if (DEV_SUPERSPEED(temp)) {
1331                         xhci_dbg(xhci, "resume SS port %d\n", port_id);
1332                         temp = xhci_port_state_to_neutral(temp);
1333                         temp &= ~PORT_PLS_MASK;
1334                         temp |= PORT_LINK_STROBE | XDEV_U0;
1335                         xhci_writel(xhci, temp, port_array[faked_port_index]);
1336                         slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1337                                         faked_port_index);
1338                         if (!slot_id) {
1339                                 xhci_dbg(xhci, "slot_id is zero\n");
1340                                 goto cleanup;
1341                         }
1342                         xhci_ring_device(xhci, slot_id);
1343                         xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1344                         /* Clear PORT_PLC */
1345                         temp = xhci_readl(xhci, port_array[faked_port_index]);
1346                         temp = xhci_port_state_to_neutral(temp);
1347                         temp |= PORT_PLC;
1348                         xhci_writel(xhci, temp, port_array[faked_port_index]);
1349                 } else {
1350                         xhci_dbg(xhci, "resume HS port %d\n", port_id);
1351                         bus_state->resume_done[faked_port_index] = jiffies +
1352                                 msecs_to_jiffies(20);
1353                         mod_timer(&hcd->rh_timer,
1354                                   bus_state->resume_done[faked_port_index]);
1355                         /* Do the rest in GetPortStatus */
1356                 }
1357         }
1358
1359 cleanup:
1360         /* Update event ring dequeue pointer before dropping the lock */
1361         inc_deq(xhci, xhci->event_ring, true);
1362
1363         /* Don't make the USB core poll the roothub if we got a bad port status
1364          * change event.  Besides, at that point we can't tell which roothub
1365          * (USB 2.0 or USB 3.0) to kick.
1366          */
1367         if (bogus_port_status)
1368                 return;
1369
1370         spin_unlock(&xhci->lock);
1371         /* Pass this up to the core */
1372         usb_hcd_poll_rh_status(hcd);
1373         spin_lock(&xhci->lock);
1374 }
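
/*
 * Illustrative sketch (this helper is not part of the driver): the
 * read/neutralize/modify/write pattern used on PORTSC above.
 * xhci_port_state_to_neutral() masks out the write-1-to-clear change bits,
 * so writing the value back does not accidentally acknowledge a status
 * change that has not been handled yet.
 */
static inline void sketch_set_port_link_state(struct xhci_hcd *xhci,
                __le32 __iomem *addr, u32 link_state)
{
        u32 portsc = xhci_readl(xhci, addr);

        portsc = xhci_port_state_to_neutral(portsc);
        portsc &= ~PORT_PLS_MASK;
        portsc |= PORT_LINK_STROBE | link_state;
        xhci_writel(xhci, portsc, addr);
}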
1375
1376 /*
1377  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1378  * at end_trb, which may be in another segment.  If the suspect DMA address is a
1379  * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1380  * returns NULL.
1381  */
1382 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1383                 union xhci_trb  *start_trb,
1384                 union xhci_trb  *end_trb,
1385                 dma_addr_t      suspect_dma)
1386 {
1387         dma_addr_t start_dma;
1388         dma_addr_t end_seg_dma;
1389         dma_addr_t end_trb_dma;
1390         struct xhci_segment *cur_seg;
1391
1392         start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1393         cur_seg = start_seg;
1394
1395         do {
1396                 if (start_dma == 0)
1397                         return NULL;
1398                 /* We may get an event for a Link TRB in the middle of a TD */
1399                 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1400                                 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1401                 /* If the end TRB isn't in this segment, this is set to 0 */
1402                 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1403
1404                 if (end_trb_dma > 0) {
1405                         /* The end TRB is in this segment, so suspect should be here */
1406                         if (start_dma <= end_trb_dma) {
1407                                 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1408                                         return cur_seg;
1409                         } else {
1410                                 /* Case for one segment with
1411                                  * a TD wrapped around to the top
1412                                  */
1413                                 if ((suspect_dma >= start_dma &&
1414                                                         suspect_dma <= end_seg_dma) ||
1415                                                 (suspect_dma >= cur_seg->dma &&
1416                                                  suspect_dma <= end_trb_dma))
1417                                         return cur_seg;
1418                         }
1419                         return NULL;
1420                 } else {
1421                         /* Might still be somewhere in this segment */
1422                         if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1423                                 return cur_seg;
1424                 }
1425                 cur_seg = cur_seg->next;
1426                 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1427         } while (cur_seg != start_seg);
1428
1429         return NULL;
1430 }
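
/*
 * Usage sketch (the wrapper is hypothetical, the symbols are real): this is
 * essentially how handle_tx_event() below asks whether a transfer event's
 * DMA pointer falls inside the TD at the head of the endpoint ring.
 */
static inline bool event_dma_in_current_td(struct xhci_ring *ep_ring,
                struct xhci_td *td, dma_addr_t event_dma)
{
        return trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                        td->last_trb, event_dma) != NULL;
}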
1431
1432 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1433                 unsigned int slot_id, unsigned int ep_index,
1434                 unsigned int stream_id,
1435                 struct xhci_td *td, union xhci_trb *event_trb)
1436 {
1437         struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1438         ep->ep_state |= EP_HALTED;
1439         ep->stopped_td = td;
1440         ep->stopped_trb = event_trb;
1441         ep->stopped_stream = stream_id;
1442
1443         xhci_queue_reset_ep(xhci, slot_id, ep_index);
1444         xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1445
1446         ep->stopped_td = NULL;
1447         ep->stopped_trb = NULL;
1448         ep->stopped_stream = 0;
1449
1450         xhci_ring_cmd_db(xhci);
1451 }
1452
1453 /* Check if an error has halted the endpoint ring.  The class driver will
1454  * clean up the halt for a non-default control endpoint if we indicate a stall.
1455  * However, a babble and other errors also halt the endpoint ring, and the class
1456  * driver won't clear the halt in that case, so we need to issue a Set Transfer
1457  * Ring Dequeue Pointer command manually.
1458  */
1459 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1460                 struct xhci_ep_ctx *ep_ctx,
1461                 unsigned int trb_comp_code)
1462 {
1463         /* TRB completion codes that may require a manual halt cleanup */
1464         if (trb_comp_code == COMP_TX_ERR ||
1465                         trb_comp_code == COMP_BABBLE ||
1466                         trb_comp_code == COMP_SPLIT_ERR)
1467                 /* The 0.95 spec says a babbling control endpoint
1468                  * is not halted. The 0.96 spec says it is.  Some HW
1469                  * claims to be 0.95 compliant, but it halts the control
1470                  * endpoint anyway.  Check if a babble halted the
1471                  * endpoint.
1472                  */
1473                 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1474                     cpu_to_le32(EP_STATE_HALTED))
1475                         return 1;
1476
1477         return 0;
1478 }
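
/*
 * Illustrative sketch (hypothetical helper): extracting the endpoint state
 * from an output endpoint context, as done above.  Contexts live in
 * little-endian memory shared with the xHC, hence the byte swap.
 */
static inline u32 sketch_ep_ctx_state(struct xhci_ep_ctx *ep_ctx)
{
        return le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK;
}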
1479
1480 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1481 {
1482         if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1483                 /* Vendor defined "informational" completion code,
1484                  * treat as not-an-error.
1485                  */
1486                 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1487                                 trb_comp_code);
1488                 xhci_dbg(xhci, "Treating code as success.\n");
1489                 return 1;
1490         }
1491         return 0;
1492 }
1493
1494 /*
1495  * Finish TD processing: remove the TD from the endpoint's TD list.
1496  * Returns 1 if the URB can be given back.
1497  */
1498 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1499         union xhci_trb *event_trb, struct xhci_transfer_event *event,
1500         struct xhci_virt_ep *ep, int *status, bool skip)
1501 {
1502         struct xhci_virt_device *xdev;
1503         struct xhci_ring *ep_ring;
1504         unsigned int slot_id;
1505         int ep_index;
1506         struct urb *urb = NULL;
1507         struct xhci_ep_ctx *ep_ctx;
1508         int ret = 0;
1509         struct urb_priv *urb_priv;
1510         u32 trb_comp_code;
1511
1512         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1513         xdev = xhci->devs[slot_id];
1514         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1515         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1516         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1517         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1518
1519         if (skip)
1520                 goto td_cleanup;
1521
1522         if (trb_comp_code == COMP_STOP_INVAL ||
1523                         trb_comp_code == COMP_STOP) {
1524                 /* The Endpoint Stop Command completion will take care of any
1525                  * stopped TDs.  A stopped TD may be restarted, so don't update
1526                  * the ring dequeue pointer or take this TD off any lists yet.
1527                  */
1528                 ep->stopped_td = td;
1529                 ep->stopped_trb = event_trb;
1530                 return 0;
1531         } else {
1532                 if (trb_comp_code == COMP_STALL) {
1533                         /* The transfer is completed from the driver's
1534                          * perspective, but we need to issue a set dequeue
1535                          * command for this stalled endpoint to move the dequeue
1536                          * pointer past the TD.  We can't do that here because
1537                          * the halt condition must be cleared first.  Let the
1538                          * USB class driver clear the stall later.
1539                          */
1540                         ep->stopped_td = td;
1541                         ep->stopped_trb = event_trb;
1542                         ep->stopped_stream = ep_ring->stream_id;
1543                 } else if (xhci_requires_manual_halt_cleanup(xhci,
1544                                         ep_ctx, trb_comp_code)) {
1545                         /* Other types of errors halt the endpoint, but the
1546                          * class driver doesn't call usb_reset_endpoint() unless
1547                          * the error is -EPIPE.  Clear the halted status in the
1548                          * xHCI hardware manually.
1549                          */
1550                         xhci_cleanup_halted_endpoint(xhci,
1551                                         slot_id, ep_index, ep_ring->stream_id,
1552                                         td, event_trb);
1553                 } else {
1554                         /* Update ring dequeue pointer */
1555                         while (ep_ring->dequeue != td->last_trb)
1556                                 inc_deq(xhci, ep_ring, false);
1557                         inc_deq(xhci, ep_ring, false);
1558                 }
1559
1560 td_cleanup:
1561                 /* Clean up the endpoint's TD list */
1562                 urb = td->urb;
1563                 urb_priv = urb->hcpriv;
1564
1565                 /* Do one last check of the actual transfer length.
1566                  * If the host controller said we transferred more data than
1567                  * the buffer length, urb->actual_length will be a very big
1568                  * number (since it's unsigned).  Play it safe and say we didn't
1569                  * transfer anything.
1570                  */
1571                 if (urb->actual_length > urb->transfer_buffer_length) {
1572                         xhci_warn(xhci, "URB transfer length is wrong, "
1573                                         "xHC issue? req. len = %u, "
1574                                         "act. len = %u\n",
1575                                         urb->transfer_buffer_length,
1576                                         urb->actual_length);
1577                         urb->actual_length = 0;
1578                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1579                                 *status = -EREMOTEIO;
1580                         else
1581                                 *status = 0;
1582                 }
1583                 list_del_init(&td->td_list);
1584                 /* Was this TD slated to be cancelled but completed anyway? */
1585                 if (!list_empty(&td->cancelled_td_list))
1586                         list_del_init(&td->cancelled_td_list);
1587
1588                 urb_priv->td_cnt++;
1589                 /* Give back the URB once all of its TDs have completed */
1590                 if (urb_priv->td_cnt == urb_priv->length) {
1591                         ret = 1;
1592                         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1593                                 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1594                                 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
1595                                         == 0) {
1596                                         if (xhci->quirks & XHCI_AMD_PLL_FIX)
1597                                                 usb_amd_quirk_pll_enable();
1598                                 }
1599                         }
1600                 }
1601         }
1602
1603         return ret;
1604 }
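
/*
 * Illustrative sketch (hypothetical helper): the "advance past this TD"
 * idiom used by finish_td() above and skip_isoc_td() below.  The dequeue
 * pointer walks over every TRB in the TD, then steps past last_trb so the
 * ring points at the first TRB of the next TD.
 */
static inline void sketch_advance_past_td(struct xhci_hcd *xhci,
                struct xhci_ring *ep_ring, struct xhci_td *td)
{
        while (ep_ring->dequeue != td->last_trb)
                inc_deq(xhci, ep_ring, false);
        inc_deq(xhci, ep_ring, false);
}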
1605
1606 /*
1607  * Process control TDs, updating the URB's status and actual_length.
1608  */
1609 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1610         union xhci_trb *event_trb, struct xhci_transfer_event *event,
1611         struct xhci_virt_ep *ep, int *status)
1612 {
1613         struct xhci_virt_device *xdev;
1614         struct xhci_ring *ep_ring;
1615         unsigned int slot_id;
1616         int ep_index;
1617         struct xhci_ep_ctx *ep_ctx;
1618         u32 trb_comp_code;
1619
1620         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1621         xdev = xhci->devs[slot_id];
1622         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1623         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1624         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1625         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1626
1627         xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1628         switch (trb_comp_code) {
1629         case COMP_SUCCESS:
1630                 if (event_trb == ep_ring->dequeue) {
1631                         xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1632                                         "without IOC set??\n");
1633                         *status = -ESHUTDOWN;
1634                 } else if (event_trb != td->last_trb) {
1635                         xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1636                                         "without IOC set??\n");
1637                         *status = -ESHUTDOWN;
1638                 } else {
1639                         *status = 0;
1640                 }
1641                 break;
1642         case COMP_SHORT_TX:
1643                 xhci_warn(xhci, "WARN: short transfer on control ep\n");
1644                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1645                         *status = -EREMOTEIO;
1646                 else
1647                         *status = 0;
1648                 break;
1649         case COMP_STOP_INVAL:
1650         case COMP_STOP:
1651                 return finish_td(xhci, td, event_trb, event, ep, status, false);
1652         default:
1653                 if (!xhci_requires_manual_halt_cleanup(xhci,
1654                                         ep_ctx, trb_comp_code))
1655                         break;
1656                 xhci_dbg(xhci, "TRB error code %u, "
1657                                 "halted endpoint index = %u\n",
1658                                 trb_comp_code, ep_index);
1659                 /* else fall through */
1660         case COMP_STALL:
1661                 /* Did we transfer part of the data (middle) phase? */
1662                 if (event_trb != ep_ring->dequeue &&
1663                                 event_trb != td->last_trb)
1664                         td->urb->actual_length =
1665                                 td->urb->transfer_buffer_length
1666                                 - TRB_LEN(le32_to_cpu(event->transfer_len));
1667                 else
1668                         td->urb->actual_length = 0;
1669
1670                 xhci_cleanup_halted_endpoint(xhci,
1671                         slot_id, ep_index, 0, td, event_trb);
1672                 return finish_td(xhci, td, event_trb, event, ep, status, true);
1673         }
1674         /*
1675          * Did we transfer any data, despite the errors that might have
1676          * happened?  I.e. did we get past the setup stage?
1677          */
1678         if (event_trb != ep_ring->dequeue) {
1679                 /* The event was for the status stage */
1680                 if (event_trb == td->last_trb) {
1681                         if (td->urb->actual_length != 0) {
1682                                 /* Don't overwrite a previously set error code
1683                                  */
1684                                 if ((*status == -EINPROGRESS || *status == 0) &&
1685                                                 (td->urb->transfer_flags
1686                                                  & URB_SHORT_NOT_OK))
1687                                         /* Did we already see a short data
1688                                          * stage? */
1689                                         *status = -EREMOTEIO;
1690                         } else {
1691                                 td->urb->actual_length =
1692                                         td->urb->transfer_buffer_length;
1693                         }
1694                 } else {
1695                 /* Maybe the event was for the data stage? */
1696                         td->urb->actual_length =
1697                                 td->urb->transfer_buffer_length -
1698                                 TRB_LEN(le32_to_cpu(event->transfer_len));
1699                         xhci_dbg(xhci, "Waiting for status "
1700                                         "stage event\n");
1701                         return 0;
1702                 }
1703         }
1704
1705         return finish_td(xhci, td, event_trb, event, ep, status, false);
1706 }
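
/*
 * Worked example (illustrative): a control IN transfer with a data stage is
 * queued as one TD of <setup TRB><data TRB><status TRB>.  In
 * process_ctrl_td() above, an event TRB equal to ep_ring->dequeue points at
 * the setup stage, one equal to td->last_trb points at the status stage,
 * and anything in between is the data stage.
 */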
1707
1708 /*
1709  * Process isochronous TDs, updating the URB's packet status and actual_length.
1710  */
1711 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1712         union xhci_trb *event_trb, struct xhci_transfer_event *event,
1713         struct xhci_virt_ep *ep, int *status)
1714 {
1715         struct xhci_ring *ep_ring;
1716         struct urb_priv *urb_priv;
1717         int idx;
1718         int len = 0;
1719         union xhci_trb *cur_trb;
1720         struct xhci_segment *cur_seg;
1721         struct usb_iso_packet_descriptor *frame;
1722         u32 trb_comp_code;
1723         bool skip_td = false;
1724
1725         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1726         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1727         urb_priv = td->urb->hcpriv;
1728         idx = urb_priv->td_cnt;
1729         frame = &td->urb->iso_frame_desc[idx];
1730
1731         /* handle completion code */
1732         switch (trb_comp_code) {
1733         case COMP_SUCCESS:
1734                 frame->status = 0;
1735                 break;
1736         case COMP_SHORT_TX:
1737                 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
1738                                 -EREMOTEIO : 0;
1739                 break;
1740         case COMP_BW_OVER:
1741                 frame->status = -ECOMM;
1742                 skip_td = true;
1743                 break;
1744         case COMP_BUFF_OVER:
1745         case COMP_BABBLE:
1746                 frame->status = -EOVERFLOW;
1747                 skip_td = true;
1748                 break;
1749         case COMP_DEV_ERR:
1750         case COMP_STALL:
1751                 frame->status = -EPROTO;
1752                 skip_td = true;
1753                 break;
1754         case COMP_STOP:
1755         case COMP_STOP_INVAL:
1756                 break;
1757         default:
1758                 frame->status = -1;
1759                 break;
1760         }
1761
1762         if (trb_comp_code == COMP_SUCCESS || skip_td) {
1763                 frame->actual_length = frame->length;
1764                 td->urb->actual_length += frame->length;
1765         } else {
1766                 for (cur_trb = ep_ring->dequeue,
1767                      cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
1768                      next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1769                         if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
1770                             !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
1771                                 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1772                 }
1773                 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1774                         TRB_LEN(le32_to_cpu(event->transfer_len));
1775
1776                 if (trb_comp_code != COMP_STOP_INVAL) {
1777                         frame->actual_length = len;
1778                         td->urb->actual_length += len;
1779                 }
1780         }
1781
1782         return finish_td(xhci, td, event_trb, event, ep, status, false);
1783 }
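
/*
 * Worked example (illustrative numbers): for an isoc TD built from two
 * 1024-byte normal TRBs, where the event lands on the second TRB reporting
 * 200 bytes untransferred, the loop above computes
 * len = 1024 + (1024 - 200) = 1848, which becomes frame->actual_length.
 */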
1784
1785 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1786                         struct xhci_transfer_event *event,
1787                         struct xhci_virt_ep *ep, int *status)
1788 {
1789         struct xhci_ring *ep_ring;
1790         struct urb_priv *urb_priv;
1791         struct usb_iso_packet_descriptor *frame;
1792         int idx;
1793
1794         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1795         urb_priv = td->urb->hcpriv;
1796         idx = urb_priv->td_cnt;
1797         frame = &td->urb->iso_frame_desc[idx];
1798
1799         /* The transfer is partly done. */
1800         frame->status = -EXDEV;
1801
1802         /* calc actual length */
1803         frame->actual_length = 0;
1804
1805         /* Update ring dequeue pointer */
1806         while (ep_ring->dequeue != td->last_trb)
1807                 inc_deq(xhci, ep_ring, false);
1808         inc_deq(xhci, ep_ring, false);
1809
1810         return finish_td(xhci, td, NULL, event, ep, status, true);
1811 }
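
/*
 * Illustrative note: after a missed service interval error,
 * handle_tx_event() below walks the TD list with ep->skip set, completing
 * each missed isoc TD through skip_isoc_td() (frame status -EXDEV,
 * actual_length 0) until it reaches the TD the transfer event points at.
 */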
1812
1813 /*
1814  * Process bulk and interrupt TDs, updating the URB's status and actual_length.
1815  */
1816 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1817         union xhci_trb *event_trb, struct xhci_transfer_event *event,
1818         struct xhci_virt_ep *ep, int *status)
1819 {
1820         struct xhci_ring *ep_ring;
1821         union xhci_trb *cur_trb;
1822         struct xhci_segment *cur_seg;
1823         u32 trb_comp_code;
1824
1825         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1826         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1827
1828         switch (trb_comp_code) {
1829         case COMP_SUCCESS:
1830                 /* Double check that the HW transferred everything. */
1831                 if (event_trb != td->last_trb) {
1832                         xhci_warn(xhci, "WARN Successful completion "
1833                                         "on short TX\n");
1834                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1835                                 *status = -EREMOTEIO;
1836                         else
1837                                 *status = 0;
1838                 } else {
1839                         *status = 0;
1840                 }
1841                 break;
1842         case COMP_SHORT_TX:
1843                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1844                         *status = -EREMOTEIO;
1845                 else
1846                         *status = 0;
1847                 break;
1848         default:
1849                 /* Others already handled above */
1850                 break;
1851         }
1852         if (trb_comp_code == COMP_SHORT_TX)
1853                 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
1854                                 "%d bytes untransferred\n",
1855                                 td->urb->ep->desc.bEndpointAddress,
1856                                 td->urb->transfer_buffer_length,
1857                                 TRB_LEN(le32_to_cpu(event->transfer_len)));
1858         /* Fast path - was this the last TRB in the TD for this URB? */
1859         if (event_trb == td->last_trb) {
1860                 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
1861                         td->urb->actual_length =
1862                                 td->urb->transfer_buffer_length -
1863                                 TRB_LEN(le32_to_cpu(event->transfer_len));
1864                         if (td->urb->transfer_buffer_length <
1865                                         td->urb->actual_length) {
1866                                 xhci_warn(xhci, "HC gave bad length "
1867                                                 "of %d bytes left\n",
1868                                           TRB_LEN(le32_to_cpu(event->transfer_len)));
1869                                 td->urb->actual_length = 0;
1870                                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1871                                         *status = -EREMOTEIO;
1872                                 else
1873                                         *status = 0;
1874                         }
1875                         /* Don't overwrite a previously set error code */
1876                         if (*status == -EINPROGRESS) {
1877                                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1878                                         *status = -EREMOTEIO;
1879                                 else
1880                                         *status = 0;
1881                         }
1882                 } else {
1883                         td->urb->actual_length =
1884                                 td->urb->transfer_buffer_length;
1885                         /* Ignore a short packet completion if the
1886                          * untransferred length was zero.
1887                          */
1888                         if (*status == -EREMOTEIO)
1889                                 *status = 0;
1890                 }
1891         } else {
1892                 /* Slow path - walk the list, starting from the dequeue
1893                  * pointer, to get the actual length transferred.
1894                  */
1895                 td->urb->actual_length = 0;
1896                 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1897                                 cur_trb != event_trb;
1898                                 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1899                         if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
1900                             !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
1901                                 td->urb->actual_length +=
1902                                         TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1903                 }
1904                 /* If the ring didn't stop on a Link or No-op TRB, add
1905                  * in the actual bytes transferred from the Normal TRB
1906                  */
1907                 if (trb_comp_code != COMP_STOP_INVAL)
1908                         td->urb->actual_length +=
1909                                 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1910                                 TRB_LEN(le32_to_cpu(event->transfer_len));
1911         }
1912
1913         return finish_td(xhci, td, event_trb, event, ep, status, false);
1914 }
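
/*
 * Worked example (illustrative numbers): a 4096-byte bulk URB whose event
 * lands on td->last_trb with 512 bytes untransferred takes the fast path
 * above: actual_length = 4096 - 512 = 3584, and URB_SHORT_NOT_OK then
 * decides whether *status becomes -EREMOTEIO or 0.
 */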
1915
1916 /*
1917  * If this function returns an error condition, it means it got a Transfer
1918  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
1919  * At this point, the host controller is probably hosed and should be reset.
1920  */
1921 static int handle_tx_event(struct xhci_hcd *xhci,
1922                 struct xhci_transfer_event *event)
1923 {
1924         struct xhci_virt_device *xdev;
1925         struct xhci_virt_ep *ep;
1926         struct xhci_ring *ep_ring;
1927         unsigned int slot_id;
1928         int ep_index;
1929         struct xhci_td *td = NULL;
1930         dma_addr_t event_dma;
1931         struct xhci_segment *event_seg;
1932         union xhci_trb *event_trb;
1933         struct urb *urb = NULL;
1934         int status = -EINPROGRESS;
1935         struct urb_priv *urb_priv;
1936         struct xhci_ep_ctx *ep_ctx;
1937         u32 trb_comp_code;
1938         int ret = 0;
1939
1940         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1941         xdev = xhci->devs[slot_id];
1942         if (!xdev) {
1943                 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
1944                 return -ENODEV;
1945         }
1946
1947         /* Endpoint ID is 1 based, our index is zero based */
1948         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1949         ep = &xdev->eps[ep_index];
1950         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1951         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1952         if (!ep_ring ||
1953             (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
1954             EP_STATE_DISABLED) {
1955                 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1956                                 "or incorrect stream ring\n");
1957                 return -ENODEV;
1958         }
1959
1960         event_dma = le64_to_cpu(event->buffer);
1961         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1962         /* Look for common error cases */
1963         switch (trb_comp_code) {
1964         /* Skip codes that require special handling depending on
1965          * transfer type
1966          */
1967         case COMP_SUCCESS:
1968         case COMP_SHORT_TX:
1969                 break;
1970         case COMP_STOP:
1971                 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
1972                 break;
1973         case COMP_STOP_INVAL:
1974                 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
1975                 break;
1976         case COMP_STALL:
1977                 xhci_warn(xhci, "WARN: Stalled endpoint\n");
1978                 ep->ep_state |= EP_HALTED;
1979                 status = -EPIPE;
1980                 break;
1981         case COMP_TRB_ERR:
1982                 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1983                 status = -EILSEQ;
1984                 break;
1985         case COMP_SPLIT_ERR:
1986         case COMP_TX_ERR:
1987                 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1988                 status = -EPROTO;
1989                 break;
1990         case COMP_BABBLE:
1991                 xhci_warn(xhci, "WARN: babble error on endpoint\n");
1992                 status = -EOVERFLOW;
1993                 break;
1994         case COMP_DB_ERR:
1995                 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
1996                 status = -ENOSR;
1997                 break;
1998         case COMP_BW_OVER:
1999                 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2000                 break;
2001         case COMP_BUFF_OVER:
2002                 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2003                 break;
2004         case COMP_UNDERRUN:
2005                 /*
2006                  * When the Isoch ring is empty, the xHC will generate
2007                  * a Ring Overrun Event for IN Isoch endpoint or Ring
2008                  * Underrun Event for OUT Isoch endpoint.
2009                  */
2010                 xhci_dbg(xhci, "underrun event on endpoint\n");
2011                 if (!list_empty(&ep_ring->td_list))
2012                         xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2013                                         "still with TDs queued?\n",
2014                                  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2015                                  ep_index);
2016                 goto cleanup;
2017         case COMP_OVERRUN:
2018                 xhci_dbg(xhci, "overrun event on endpoint\n");
2019                 if (!list_empty(&ep_ring->td_list))
2020                         xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2021                                         "still with TDs queued?\n",
2022                                  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2023                                  ep_index);
2024                 goto cleanup;
2025         case COMP_DEV_ERR:
2026                 xhci_warn(xhci, "WARN: detected an incompatible device\n");
2027                 status = -EPROTO;
2028                 break;
2029         case COMP_MISSED_INT:
2030                 /*
2031                  * When a missed service error occurs, the xHC may have
2032                  * skipped one or more isoc TDs.  Set the skip flag on the
2033                  * endpoint; the missed TDs will be completed as short
2034                  * transfers the next time the endpoint ring is processed.
2035                  */
2036                 ep->skip = true;
2037                 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2038                 goto cleanup;
2039         default:
2040                 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2041                         status = 0;
2042                         break;
2043                 }
2044                 xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2045                                 "busted\n");
2046                 goto cleanup;
2047         }
2048
2049         do {
2050                 /* This TRB should be in the TD at the head of this ring's
2051                  * TD list.
2052                  */
2053                 if (list_empty(&ep_ring->td_list)) {
2054                         xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
2055                                         "with no TDs queued?\n",
2056                                   TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2057                                   ep_index);
2058                         xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2059                                  (le32_to_cpu(event->flags) &
2060                                   TRB_TYPE_BITMASK) >> 10);
2061                         xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2062                         if (ep->skip) {
2063                                 ep->skip = false;
2064                                 xhci_dbg(xhci, "td_list is empty while skip "
2065                                                 "flag set. Clear skip flag.\n");
2066                         }
2067                         ret = 0;
2068                         goto cleanup;
2069                 }
2070
2071                 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2072
2073                 /* Is this a TRB in the currently executing TD? */
2074                 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2075                                 td->last_trb, event_dma);
2076
2077                 /*
2078                  * Skip the Force Stopped Event.  The event_trb (event_dma) of an
2079                  * FSE is not in the current TD pointed to by ep_ring->dequeue,
2080                  * because the hardware dequeue pointer is still at the previous
2081                  * TRB of the current TD.  That previous TRB may be a Link TRB or
2082                  * the last TRB of the previous TD.  The command completion
2083                  * handler will take care of the rest.
2084                  */
2085                 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2086                         ret = 0;
2087                         goto cleanup;
2088                 }
2089
2090                 if (!event_seg) {
2091                         if (!ep->skip ||
2092                             !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2093                                 /* Some host controllers give a spurious
2094                                  * successful event after a short transfer.
2095                                  * Ignore it.
2096                                  */
2097                                 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2098                                                 ep_ring->last_td_was_short) {
2099                                         ep_ring->last_td_was_short = false;
2100                                         ret = 0;
2101                                         goto cleanup;
2102                                 }
2103                                 /* HC is busted, give up! */
2104                                 xhci_err(xhci,
2105                                         "ERROR Transfer event TRB DMA ptr not "
2106                                         "part of current TD\n");
2107                                 return -ESHUTDOWN;
2108                         }
2109
2110                         ret = skip_isoc_td(xhci, td, event, ep, &status);
2111                         goto cleanup;
2112                 }
2113                 if (trb_comp_code == COMP_SHORT_TX)
2114                         ep_ring->last_td_was_short = true;
2115                 else
2116                         ep_ring->last_td_was_short = false;
2117
2118                 if (ep->skip) {
2119                         xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2120                         ep->skip = false;
2121                 }
2122
2123                 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2124                                                 sizeof(*event_trb)];
2125                 /*
2126                  * No-op TRB should not trigger interrupts.
2127                  * If event_trb is a no-op TRB, it means the
2128                  * corresponding TD has been cancelled. Just ignore
2129                  * the TD.
2130                  */
2131                 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2132                         xhci_dbg(xhci,
2133                                  "event_trb is a no-op TRB. Skip it\n");
2134                         goto cleanup;
2135                 }
2136
2137                 /* Now update the urb's actual_length and give back to
2138                  * the core
2139                  */
2140                 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2141                         ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2142                                                  &status);
2143                 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2144                         ret = process_isoc_td(xhci, td, event_trb, event, ep,
2145                                                  &status);
2146                 else
2147                         ret = process_bulk_intr_td(xhci, td, event_trb, event,
2148                                                  ep, &status);
2149
2150 cleanup:
2151                 /*
2152                  * Do not update event ring dequeue pointer if ep->skip is set.
2153                  * We will come back around to process the missed TDs.
2154                  */
2155                 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2156                         inc_deq(xhci, xhci->event_ring, true);
2157                 }
2158
2159                 if (ret) {
2160                         urb = td->urb;
2161                         urb_priv = urb->hcpriv;
2162                         /* Leave the TD around for the reset endpoint function
2163                          * to use (but only if it's not a control endpoint,
2164                          * since we already queued the Set TR dequeue pointer
2165                          * command for stalled control endpoints).
2166                          */
2167                         if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2168                                 (trb_comp_code != COMP_STALL &&
2169                                         trb_comp_code != COMP_BABBLE))
2170                                 xhci_urb_free_priv(xhci, urb_priv);
2171
2172                         usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2173                         if ((urb->actual_length != urb->transfer_buffer_length &&
2174                                                 (urb->transfer_flags &
2175                                                  URB_SHORT_NOT_OK)) ||
2176                                         status != 0)
2177                                 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2178                                                 "expected = %x, status = %d\n",
2179                                                 urb, urb->actual_length,
2180                                                 urb->transfer_buffer_length,
2181                                                 status);
2182                         spin_unlock(&xhci->lock);
2183                         /* EHCI, UHCI, and OHCI always unconditionally set the
2184                          * urb->status of an isochronous endpoint to 0.
2185                          */
2186                         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2187                                 status = 0;
2188                         usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2189                         spin_lock(&xhci->lock);
2190                 }
2191
2192         /*
2193          * If ep->skip is set, there are missed TDs on the endpoint ring
2194          * that still need to be taken care of.  Process them as short
2195          * transfers until we reach the TD pointed to by the event.
2197          */
2198         } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2199
2200         return 0;
2201 }
2202
2203 /*
2204  * This function handles all OS-owned events on the event ring.  It may drop
2205  * xhci->lock between event processing (e.g. to pass up port status changes).
2206  * Returns >0 for "possibly more events to process" (caller should call again),
2207  * otherwise 0 if done.  In future, <0 returns should indicate error code.
2208  */
2209 static int xhci_handle_event(struct xhci_hcd *xhci)
2210 {
2211         union xhci_trb *event;
2212         int update_ptrs = 1;
2213         int ret;
2214
2215         if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2216                 xhci->error_bitmask |= 1 << 1;
2217                 return 0;
2218         }
2219
2220         event = xhci->event_ring->dequeue;
2221         /* Does the HC or OS own the TRB? */
2222         if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2223             xhci->event_ring->cycle_state) {
2224                 xhci->error_bitmask |= 1 << 2;
2225                 return 0;
2226         }
2227
2228         /*
2229          * Barrier between reading the TRB_CYCLE (valid) flag above and any
2230          * speculative reads of the event's flags/data below.
2231          */
2232         rmb();
2233         /* FIXME: Handle more event types. */
2234         switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2235         case TRB_TYPE(TRB_COMPLETION):
2236                 handle_cmd_completion(xhci, &event->event_cmd);
2237                 break;
2238         case TRB_TYPE(TRB_PORT_STATUS):
2239                 handle_port_status(xhci, event);
2240                 update_ptrs = 0;
2241                 break;
2242         case TRB_TYPE(TRB_TRANSFER):
2243                 ret = handle_tx_event(xhci, &event->trans_event);
2244                 if (ret < 0)
2245                         xhci->error_bitmask |= 1 << 9;
2246                 else
2247                         update_ptrs = 0;
2248                 break;
2249         default:
2250                 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2251                     TRB_TYPE(48))
2252                         handle_vendor_event(xhci, event);
2253                 else
2254                         xhci->error_bitmask |= 1 << 3;
2255         }
2256         /* Any of the above functions may drop and re-acquire the lock, so check
2257          * to make sure a watchdog timer didn't mark the host as non-responsive.
2258          */
2259         if (xhci->xhc_state & XHCI_STATE_DYING) {
2260                 xhci_dbg(xhci, "xHCI host dying, returning from "
2261                                 "event handler.\n");
2262                 return 0;
2263         }
2264
2265         if (update_ptrs)
2266                 /* Update SW event ring dequeue pointer */
2267                 inc_deq(xhci, xhci->event_ring, true);
2268
2269         /* Are there more items on the event ring?  Caller will call us again to
2270          * check.
2271          */
2272         return 1;
2273 }
2274
2275 /*
2276  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2277  * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2278  * indicators of an event TRB error, but we check the status *first* to be safe.
2279  */
2280 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2281 {
2282         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2283         u32 status;
2284         union xhci_trb *trb;
2285         u64 temp_64;
2286         union xhci_trb *event_ring_deq;
2287         dma_addr_t deq;
2288
2289         spin_lock(&xhci->lock);
2290         trb = xhci->event_ring->dequeue;
2291         /* Check if the xHC generated the interrupt, or the irq is shared */
2292         status = xhci_readl(xhci, &xhci->op_regs->status);
2293         if (status == 0xffffffff)
2294                 goto hw_died;
2295
2296         if (!(status & STS_EINT)) {
2297                 spin_unlock(&xhci->lock);
2298                 return IRQ_NONE;
2299         }
2300         if (status & STS_FATAL) {
2301                 xhci_warn(xhci, "WARNING: Host System Error\n");
2302                 xhci_halt(xhci);
2303 hw_died:
2304                 spin_unlock(&xhci->lock);
2305                 return -ESHUTDOWN;
2306         }
2307
2308         /*
2309          * Clear the op reg interrupt status first,
2310          * so we can receive interrupts from other MSI-X interrupters.
2311          * Write 1 to clear the interrupt status.
2312          */
2313         status |= STS_EINT;
2314         xhci_writel(xhci, status, &xhci->op_regs->status);
2315         /* FIXME when MSI-X is supported and there are multiple vectors */
2316         /* Clear the MSI-X event interrupt status */
2317
2318         if (hcd->irq != -1) {
2319                 u32 irq_pending;
2320                 /* Acknowledge the PCI interrupt */
2321                 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2322                 irq_pending |= 0x3;
2323                 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2324         }
2325
2326         if (xhci->xhc_state & XHCI_STATE_DYING) {
2327                 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2328                                 "Shouldn't IRQs be disabled?\n");
2329                 /* Clear the event handler busy flag (RW1C);
2330                  * the event ring should be empty.
2331                  */
2332                 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2333                 xhci_write_64(xhci, temp_64 | ERST_EHB,
2334                                 &xhci->ir_set->erst_dequeue);
2335                 spin_unlock(&xhci->lock);
2336
2337                 return IRQ_HANDLED;
2338         }
2339
2340         event_ring_deq = xhci->event_ring->dequeue;
2341         /* FIXME this should be a delayed service routine
2342          * that clears the EHB.
2343          */
2344         while (xhci_handle_event(xhci) > 0) {}
2345
2346         temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2347         /* If necessary, update the HW's version of the event ring deq ptr. */
2348         if (event_ring_deq != xhci->event_ring->dequeue) {
2349                 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2350                                 xhci->event_ring->dequeue);
2351                 if (deq == 0)
2352                         xhci_warn(xhci, "WARN something wrong with SW event "
2353                                         "ring dequeue ptr.\n");
2354                 /* Update HC event ring dequeue pointer */
2355                 temp_64 &= ERST_PTR_MASK;
2356                 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2357         }
2358
2359         /* Clear the event handler busy flag (RW1C); event ring is empty. */
2360         temp_64 |= ERST_EHB;
2361         xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2362
2363         spin_unlock(&xhci->lock);
2364
2365         return IRQ_HANDLED;
2366 }
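
/*
 * Illustrative sketch (not part of the driver): how xhci_irq() assembles the
 * value written back to the ERST dequeue register.  The low bits of the
 * register hold flags (including the RW1C event handler busy bit, ERST_EHB);
 * the rest is the event ring dequeue pointer.  Hypothetical helper, assuming
 * ERST_PTR_MASK covers only the flag bits, as in this file.
 */
static u64 __maybe_unused example_pack_erst_dequeue(u64 old_reg, u64 deq_dma)
{
        u64 val;

        val = old_reg & ERST_PTR_MASK;          /* keep the flag bits */
        val |= deq_dma & ~(u64) ERST_PTR_MASK;  /* install the new pointer */
        val |= ERST_EHB;                        /* write 1 to clear busy */
        return val;
}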
2367
2368 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2369 {
2370         irqreturn_t ret;
2371         struct xhci_hcd *xhci;
2372
2373         xhci = hcd_to_xhci(hcd);
2374         set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
2375         if (xhci->shared_hcd)
2376                 set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
2377
2378         ret = xhci_irq(hcd);
2379
2380         return ret;
2381 }
2382
2383 /****           Endpoint Ring Operations        ****/
2384
2385 /*
2386  * Generic function for queueing a TRB on a ring.
2387  * The caller must have checked to make sure there's room on the ring.
2388  *
2389  * @more_trbs_coming:   Will you enqueue more TRBs before calling
2390  *                      prepare_transfer()?
2391  */
2392 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2393                 bool consumer, bool more_trbs_coming,
2394                 u32 field1, u32 field2, u32 field3, u32 field4)
2395 {
2396         struct xhci_generic_trb *trb;
2397
2398         trb = &ring->enqueue->generic;
2399         trb->field[0] = cpu_to_le32(field1);
2400         trb->field[1] = cpu_to_le32(field2);
2401         trb->field[2] = cpu_to_le32(field3);
2402         trb->field[3] = cpu_to_le32(field4);
2403         inc_enq(xhci, ring, consumer, more_trbs_coming);
2404 }
2405
/*
 * Do various checks on the endpoint ring and make it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
2410 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2411                 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2412 {
2413         /* Make sure the endpoint has been added to xHC schedule */
2414         switch (ep_state) {
2415         case EP_STATE_DISABLED:
2416                 /*
2417                  * USB core changed config/interfaces without notifying us,
2418                  * or hardware is reporting the wrong state.
2419                  */
2420                 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2421                 return -ENOENT;
2422         case EP_STATE_ERROR:
2423                 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2424                 /* FIXME event handling code for error needs to clear it */
2425                 /* XXX not sure if this should be -ENOENT or not */
2426                 return -EINVAL;
2427         case EP_STATE_HALTED:
2428                 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2429         case EP_STATE_STOPPED:
2430         case EP_STATE_RUNNING:
2431                 break;
2432         default:
2433                 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2434                 /*
2435                  * FIXME issue Configure Endpoint command to try to get the HC
2436                  * back into a known state.
2437                  */
2438                 return -EINVAL;
2439         }
2440         if (!room_on_ring(xhci, ep_ring, num_trbs)) {
2441                 /* FIXME allocate more room */
2442                 xhci_err(xhci, "ERROR no room on ep ring\n");
2443                 return -ENOMEM;
2444         }
2445
2446         if (enqueue_is_link_trb(ep_ring)) {
2447                 struct xhci_ring *ring = ep_ring;
2448                 union xhci_trb *next;
2449
2450                 next = ring->enqueue;
2451
2452                 while (last_trb(xhci, ring, ring->enq_seg, next)) {
2453                         /* If we're not dealing with 0.95 hardware,
2454                          * clear the chain bit.
2455                          */
2456                         if (!xhci_link_trb_quirk(xhci))
2457                                 next->link.control &= cpu_to_le32(~TRB_CHAIN);
2458                         else
2459                                 next->link.control |= cpu_to_le32(TRB_CHAIN);
2460
2461                         wmb();
2462                         next->link.control ^= cpu_to_le32(TRB_CYCLE);
2463
2464                         /* Toggle the cycle bit after the last ring segment. */
2465                         if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2466                                 ring->cycle_state = (ring->cycle_state ? 0 : 1);
2467                                 if (!in_interrupt()) {
2468                                         xhci_dbg(xhci, "queue_trb: Toggle cycle "
2469                                                 "state for ring %p = %i\n",
2470                                                 ring, (unsigned int)ring->cycle_state);
2471                                 }
2472                         }
2473                         ring->enq_seg = ring->enq_seg->next;
2474                         ring->enqueue = ring->enq_seg->trbs;
2475                         next = ring->enqueue;
2476                 }
2477         }
2478
2479         return 0;
2480 }
2481
2482 static int prepare_transfer(struct xhci_hcd *xhci,
2483                 struct xhci_virt_device *xdev,
2484                 unsigned int ep_index,
2485                 unsigned int stream_id,
2486                 unsigned int num_trbs,
2487                 struct urb *urb,
2488                 unsigned int td_index,
2489                 gfp_t mem_flags)
2490 {
2491         int ret;
2492         struct urb_priv *urb_priv;
2493         struct xhci_td  *td;
2494         struct xhci_ring *ep_ring;
2495         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2496
2497         ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2498         if (!ep_ring) {
2499                 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2500                                 stream_id);
2501                 return -EINVAL;
2502         }
2503
2504         ret = prepare_ring(xhci, ep_ring,
2505                            le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2506                            num_trbs, mem_flags);
2507         if (ret)
2508                 return ret;
2509
2510         urb_priv = urb->hcpriv;
2511         td = urb_priv->td[td_index];
2512
2513         INIT_LIST_HEAD(&td->td_list);
2514         INIT_LIST_HEAD(&td->cancelled_td_list);
2515
2516         if (td_index == 0) {
2517                 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2518                 if (unlikely(ret))
2519                         return ret;
2520         }
2521
2522         td->urb = urb;
2523         /* Add this TD to the tail of the endpoint ring's TD list */
2524         list_add_tail(&td->td_list, &ep_ring->td_list);
2525         td->start_seg = ep_ring->enq_seg;
2526         td->first_trb = ep_ring->enqueue;
2527
2528         urb_priv->td[td_index] = td;
2529
2530         return 0;
2531 }
2532
2533 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2534 {
2535         int num_sgs, num_trbs, running_total, temp, i;
2536         struct scatterlist *sg;
2537
2538         sg = NULL;
2539         num_sgs = urb->num_sgs;
2540         temp = urb->transfer_buffer_length;
2541
2542         xhci_dbg(xhci, "count sg list trbs: \n");
2543         num_trbs = 0;
2544         for_each_sg(urb->sg, sg, num_sgs, i) {
2545                 unsigned int previous_total_trbs = num_trbs;
2546                 unsigned int len = sg_dma_len(sg);
2547
2548                 /* Scatter gather list entries may cross 64KB boundaries */
2549                 running_total = TRB_MAX_BUFF_SIZE -
2550                         (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2551                 running_total &= TRB_MAX_BUFF_SIZE - 1;
2552                 if (running_total != 0)
2553                         num_trbs++;
2554
2555                 /* How many more 64KB chunks to transfer, how many more TRBs? */
2556                 while (running_total < sg_dma_len(sg) && running_total < temp) {
2557                         num_trbs++;
2558                         running_total += TRB_MAX_BUFF_SIZE;
2559                 }
2560                 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
2561                                 i, (unsigned long long)sg_dma_address(sg),
2562                                 len, len, num_trbs - previous_total_trbs);
2563
2564                 len = min_t(int, len, temp);
2565                 temp -= len;
2566                 if (temp == 0)
2567                         break;
2568         }
2569         xhci_dbg(xhci, "\n");
2570         if (!in_interrupt())
2571                 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
2572                                 "num_trbs = %d\n",
2573                                 urb->ep->desc.bEndpointAddress,
2574                                 urb->transfer_buffer_length,
2575                                 num_trbs);
2576         return num_trbs;
2577 }
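
/*
 * Illustrative sketch (not part of the driver): the same 64KB-boundary
 * counting as above, for a single contiguous buffer.  A buffer needs one TRB
 * for the leading partial chunk (if any) plus one per 64KB chunk after it.
 * Hypothetical helper; assumes TRB_MAX_BUFF_SIZE is a power of two, as in
 * this driver.
 */
static unsigned int __maybe_unused example_count_trbs(u64 addr, u64 len)
{
        unsigned int num_trbs = 0;
        u64 running_total;

        /* Bytes from addr to the next 64KB boundary (0 if addr is aligned) */
        running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
        running_total &= TRB_MAX_BUFF_SIZE - 1;
        if (running_total != 0)
                num_trbs++;
        while (running_total < len) {
                num_trbs++;
                running_total += TRB_MAX_BUFF_SIZE;
        }
        return num_trbs;
}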
2578
2579 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2580 {
2581         if (num_trbs != 0)
2582                 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2583                                 "TRBs, %d left\n", __func__,
2584                                 urb->ep->desc.bEndpointAddress, num_trbs);
2585         if (running_total != urb->transfer_buffer_length)
2586                 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2587                                 "queued %#x (%d), asked for %#x (%d)\n",
2588                                 __func__,
2589                                 urb->ep->desc.bEndpointAddress,
2590                                 running_total, running_total,
2591                                 urb->transfer_buffer_length,
2592                                 urb->transfer_buffer_length);
2593 }
2594
2595 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2596                 unsigned int ep_index, unsigned int stream_id, int start_cycle,
2597                 struct xhci_generic_trb *start_trb)
2598 {
        /*
         * Pass all the TRBs to the hardware at once: the barrier below makes
         * sure the TRB contents written earlier are not reordered past the
         * cycle-bit write that hands the TDs over to the hardware.
         */
2603         wmb();
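        /* start_cycle is 0 or 1, so OR-ing it in sets exactly TRB_CYCLE */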
2604         if (start_cycle)
2605                 start_trb->field[3] |= cpu_to_le32(start_cycle);
2606         else
2607                 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
2608         xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2609 }
2610
2611 /*
2612  * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
2613  * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (made up of sg list entries) can take several service intervals to
2615  * transmit.
2616  */
2617 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2618                 struct urb *urb, int slot_id, unsigned int ep_index)
2619 {
2620         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2621                         xhci->devs[slot_id]->out_ctx, ep_index);
2622         int xhci_interval;
2623         int ep_interval;
2624
2625         xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
2626         ep_interval = urb->interval;
2627         /* Convert to microframes */
2628         if (urb->dev->speed == USB_SPEED_LOW ||
2629                         urb->dev->speed == USB_SPEED_FULL)
2630                 ep_interval *= 8;
2631         /* FIXME change this to a warning and a suggestion to use the new API
2632          * to set the polling interval (once the API is added).
2633          */
2634         if (xhci_interval != ep_interval) {
2635                 if (printk_ratelimit())
2636                         dev_dbg(&urb->dev->dev, "Driver uses different interval"
2637                                         " (%d microframe%s) than xHCI "
2638                                         "(%d microframe%s)\n",
2639                                         ep_interval,
2640                                         ep_interval == 1 ? "" : "s",
2641                                         xhci_interval,
2642                                         xhci_interval == 1 ? "" : "s");
2643                 urb->interval = xhci_interval;
2644                 /* Convert back to frames for LS/FS devices */
2645                 if (urb->dev->speed == USB_SPEED_LOW ||
2646                                 urb->dev->speed == USB_SPEED_FULL)
2647                         urb->interval /= 8;
2648         }
        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
2650 }
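
/*
 * Worked example (illustrative): a full-speed interrupt URB with
 * urb->interval = 4 frames is compared as 4 * 8 = 32 microframes.  If the
 * endpoint context holds 16 microframes instead, urb->interval is set to 16
 * and then converted back to 16 / 8 = 2 frames for the LS/FS device.
 */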
2651
2652 /*
2653  * The TD size is the number of bytes remaining in the TD (including this TRB),
2654  * right shifted by 10.
2655  * It must fit in bits 21:17, so it can't be bigger than 31.
2656  */
2657 static u32 xhci_td_remainder(unsigned int remainder)
2658 {
2659         u32 max = (1 << (21 - 17 + 1)) - 1;
2660
2661         if ((remainder >> 10) >= max)
2662                 return max << 17;
2663         else
2664                 return (remainder >> 10) << 17;
2665 }
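
/*
 * Worked example (illustrative): with 70000 bytes remaining in the TD,
 * 70000 >> 10 = 68 exceeds the field maximum of 31, so the result is clamped
 * to 31 << 17; with 4096 bytes remaining, the result is
 * (4096 >> 10) << 17 = 4 << 17.
 */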
2666
/*
 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
 * the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
2682 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2683                 unsigned int total_packet_count, struct urb *urb)
2684 {
        int packets_transferred;
        u32 td_size;

        /* One TRB with a zero-length data packet. */
        if (running_total == 0 && trb_buff_len == 0)
                return 0;

        /* None of the TRB queueing functions count the current TRB in
         * running_total, so add trb_buff_len before dividing.
         */
        packets_transferred = (running_total + trb_buff_len) /
                le16_to_cpu(urb->ep->desc.wMaxPacketSize);

        /* The TD size here is a packet count, not a byte count, so clamp it
         * to the 5-bit field (bits 21:17) directly rather than reusing the
         * 0.96-style "bytes >> 10" conversion in xhci_td_remainder().
         */
        td_size = total_packet_count - packets_transferred;
        if (td_size > 31)
                td_size = 31;
        return td_size << 17;
2698 }
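
/*
 * Worked example (illustrative): a 3072-byte TD on a 512-byte endpoint has
 * total_packet_count = DIV_ROUND_UP(3072, 512) = 6.  For the TRB covering
 * bytes 1024..2047 (running_total = 1024, trb_buff_len = 1024),
 * packets_transferred = 2048 / 512 = 4, so the TD size field is
 * (6 - 4) << 17 = 2 << 17.
 */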
2699
2700 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2701                 struct urb *urb, int slot_id, unsigned int ep_index)
2702 {
2703         struct xhci_ring *ep_ring;
2704         unsigned int num_trbs;
2705         struct urb_priv *urb_priv;
2706         struct xhci_td *td;
2707         struct scatterlist *sg;
2708         int num_sgs;
2709         int trb_buff_len, this_sg_len, running_total;
2710         unsigned int total_packet_count;
2711         bool first_trb;
2712         u64 addr;
2713         bool more_trbs_coming;
2714
2715         struct xhci_generic_trb *start_trb;
2716         int start_cycle;
2717
2718         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2719         if (!ep_ring)
2720                 return -EINVAL;
2721
2722         num_trbs = count_sg_trbs_needed(xhci, urb);
2723         num_sgs = urb->num_sgs;
        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
                        le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2726
2727         trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2728                         ep_index, urb->stream_id,
2729                         num_trbs, urb, 0, mem_flags);
2730         if (trb_buff_len < 0)
2731                 return trb_buff_len;
2732
2733         urb_priv = urb->hcpriv;
2734         td = urb_priv->td[0];
2735
2736         /*
2737          * Don't give the first TRB to the hardware (by toggling the cycle bit)
2738          * until we've finished creating all the other TRBs.  The ring's cycle
2739          * state may change as we enqueue the other TRBs, so save it too.
2740          */
2741         start_trb = &ep_ring->enqueue->generic;
2742         start_cycle = ep_ring->cycle_state;
2743
2744         running_total = 0;
2745         /*
2746          * How much data is in the first TRB?
2747          *
2748          * There are three forces at work for TRB buffer pointers and lengths:
2749          * 1. We don't want to walk off the end of this sg-list entry buffer.
2750          * 2. The transfer length that the driver requested may be smaller than
2751          *    the amount of memory allocated for this scatter-gather list.
         * 3. TRB buffers can't cross 64KB boundaries.
2753          */
2754         sg = urb->sg;
2755         addr = (u64) sg_dma_address(sg);
2756         this_sg_len = sg_dma_len(sg);
2757         trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2758         trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2759         if (trb_buff_len > urb->transfer_buffer_length)
2760                 trb_buff_len = urb->transfer_buffer_length;
2761         xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
2762                         trb_buff_len);
2763
2764         first_trb = true;
2765         /* Queue the first TRB, even if it's zero-length */
2766         do {
2767                 u32 field = 0;
2768                 u32 length_field = 0;
2769                 u32 remainder = 0;
2770
2771                 /* Don't change the cycle bit of the first TRB until later */
2772                 if (first_trb) {
2773                         first_trb = false;
2774                         if (start_cycle == 0)
2775                                 field |= 0x1;
2776                 } else
2777                         field |= ep_ring->cycle_state;
2778
2779                 /* Chain all the TRBs together; clear the chain bit in the last
2780                  * TRB to indicate it's the last TRB in the chain.
2781                  */
2782                 if (num_trbs > 1) {
2783                         field |= TRB_CHAIN;
2784                 } else {
2785                         /* FIXME - add check for ZERO_PACKET flag before this */
2786                         td->last_trb = ep_ring->enqueue;
2787                         field |= TRB_IOC;
2788                 }
2789
2790                 /* Only set interrupt on short packet for IN endpoints */
2791                 if (usb_urb_dir_in(urb))
2792                         field |= TRB_ISP;
2793
2794                 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
2795                                 "64KB boundary at %#x, end dma = %#x\n",
2796                                 (unsigned int) addr, trb_buff_len, trb_buff_len,
2797                                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2798                                 (unsigned int) addr + trb_buff_len);
2799                 if (TRB_MAX_BUFF_SIZE -
2800                                 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
2801                         xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2802                         xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2803                                         (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2804                                         (unsigned int) addr + trb_buff_len);
2805                 }
2806
2807                 /* Set the TRB length, TD size, and interrupter fields. */
2808                 if (xhci->hci_version < 0x100) {
2809                         remainder = xhci_td_remainder(
2810                                         urb->transfer_buffer_length -
2811                                         running_total);
2812                 } else {
2813                         remainder = xhci_v1_0_td_remainder(running_total,
2814                                         trb_buff_len, total_packet_count, urb);
2815                 }
2816                 length_field = TRB_LEN(trb_buff_len) |
2817                         remainder |
2818                         TRB_INTR_TARGET(0);
2819
2820                 if (num_trbs > 1)
2821                         more_trbs_coming = true;
2822                 else
2823                         more_trbs_coming = false;
2824                 queue_trb(xhci, ep_ring, false, more_trbs_coming,
2825                                 lower_32_bits(addr),
2826                                 upper_32_bits(addr),
2827                                 length_field,
2828                                 field | TRB_TYPE(TRB_NORMAL));
2829                 --num_trbs;
2830                 running_total += trb_buff_len;
2831
2832                 /* Calculate length for next transfer --
2833                  * Are we done queueing all the TRBs for this sg entry?
2834                  */
2835                 this_sg_len -= trb_buff_len;
2836                 if (this_sg_len == 0) {
2837                         --num_sgs;
2838                         if (num_sgs == 0)
2839                                 break;
2840                         sg = sg_next(sg);
2841                         addr = (u64) sg_dma_address(sg);
2842                         this_sg_len = sg_dma_len(sg);
2843                 } else {
2844                         addr += trb_buff_len;
2845                 }
2846
2847                 trb_buff_len = TRB_MAX_BUFF_SIZE -
2848                         (addr & (TRB_MAX_BUFF_SIZE - 1));
2849                 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2850                 if (running_total + trb_buff_len > urb->transfer_buffer_length)
2851                         trb_buff_len =
2852                                 urb->transfer_buffer_length - running_total;
2853         } while (running_total < urb->transfer_buffer_length);
2854
2855         check_trb_math(urb, num_trbs, running_total);
2856         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2857                         start_cycle, start_trb);
2858         return 0;
2859 }
2860
2861 /* This is very similar to what ehci-q.c qtd_fill() does */
2862 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2863                 struct urb *urb, int slot_id, unsigned int ep_index)
2864 {
2865         struct xhci_ring *ep_ring;
2866         struct urb_priv *urb_priv;
2867         struct xhci_td *td;
2868         int num_trbs;
2869         struct xhci_generic_trb *start_trb;
2870         bool first_trb;
2871         bool more_trbs_coming;
2872         int start_cycle;
2873         u32 field, length_field;
2874
2875         int running_total, trb_buff_len, ret;
2876         unsigned int total_packet_count;
2877         u64 addr;
2878
2879         if (urb->num_sgs)
2880                 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
2881
2882         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2883         if (!ep_ring)
2884                 return -EINVAL;
2885
2886         num_trbs = 0;
2887         /* How much data is (potentially) left before the 64KB boundary? */
2888         running_total = TRB_MAX_BUFF_SIZE -
2889                 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2890         running_total &= TRB_MAX_BUFF_SIZE - 1;
2891
2892         /* If there's some data on this 64KB chunk, or we have to send a
2893          * zero-length transfer, we need at least one TRB
2894          */
2895         if (running_total != 0 || urb->transfer_buffer_length == 0)
2896                 num_trbs++;
2897         /* How many more 64KB chunks to transfer, how many more TRBs? */
2898         while (running_total < urb->transfer_buffer_length) {
2899                 num_trbs++;
2900                 running_total += TRB_MAX_BUFF_SIZE;
2901         }
2902         /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
2903
2904         if (!in_interrupt())
2905                 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
2906                                 "addr = %#llx, num_trbs = %d\n",
2907                                 urb->ep->desc.bEndpointAddress,
2908                                 urb->transfer_buffer_length,
2909                                 urb->transfer_buffer_length,
2910                                 (unsigned long long)urb->transfer_dma,
2911                                 num_trbs);
2912
2913         ret = prepare_transfer(xhci, xhci->devs[slot_id],
2914                         ep_index, urb->stream_id,
2915                         num_trbs, urb, 0, mem_flags);
2916         if (ret < 0)
2917                 return ret;
2918
2919         urb_priv = urb->hcpriv;
2920         td = urb_priv->td[0];
2921
2922         /*
2923          * Don't give the first TRB to the hardware (by toggling the cycle bit)
2924          * until we've finished creating all the other TRBs.  The ring's cycle
2925          * state may change as we enqueue the other TRBs, so save it too.
2926          */
2927         start_trb = &ep_ring->enqueue->generic;
2928         start_cycle = ep_ring->cycle_state;
2929
2930         running_total = 0;
        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
                        le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2933         /* How much data is in the first TRB? */
2934         addr = (u64) urb->transfer_dma;
2935         trb_buff_len = TRB_MAX_BUFF_SIZE -
2936                 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2937         if (trb_buff_len > urb->transfer_buffer_length)
2938                 trb_buff_len = urb->transfer_buffer_length;
2939
2940         first_trb = true;
2941
2942         /* Queue the first TRB, even if it's zero-length */
2943         do {
2944                 u32 remainder = 0;
2945                 field = 0;
2946
2947                 /* Don't change the cycle bit of the first TRB until later */
2948                 if (first_trb) {
2949                         first_trb = false;
2950                         if (start_cycle == 0)
2951                                 field |= 0x1;
2952                 } else
2953                         field |= ep_ring->cycle_state;
2954
2955                 /* Chain all the TRBs together; clear the chain bit in the last
2956                  * TRB to indicate it's the last TRB in the chain.
2957                  */
2958                 if (num_trbs > 1) {
2959                         field |= TRB_CHAIN;
2960                 } else {
2961                         /* FIXME - add check for ZERO_PACKET flag before this */
2962                         td->last_trb = ep_ring->enqueue;
2963                         field |= TRB_IOC;
2964                 }
2965
2966                 /* Only set interrupt on short packet for IN endpoints */
2967                 if (usb_urb_dir_in(urb))
2968                         field |= TRB_ISP;
2969
2970                 /* Set the TRB length, TD size, and interrupter fields. */
2971                 if (xhci->hci_version < 0x100) {
2972                         remainder = xhci_td_remainder(
2973                                         urb->transfer_buffer_length -
2974                                         running_total);
2975                 } else {
2976                         remainder = xhci_v1_0_td_remainder(running_total,
2977                                         trb_buff_len, total_packet_count, urb);
2978                 }
2979                 length_field = TRB_LEN(trb_buff_len) |
2980                         remainder |
2981                         TRB_INTR_TARGET(0);
2982
2983                 if (num_trbs > 1)
2984                         more_trbs_coming = true;
2985                 else
2986                         more_trbs_coming = false;
2987                 queue_trb(xhci, ep_ring, false, more_trbs_coming,
2988                                 lower_32_bits(addr),
2989                                 upper_32_bits(addr),
2990                                 length_field,
2991                                 field | TRB_TYPE(TRB_NORMAL));
2992                 --num_trbs;
2993                 running_total += trb_buff_len;
2994
2995                 /* Calculate length for next transfer */
2996                 addr += trb_buff_len;
2997                 trb_buff_len = urb->transfer_buffer_length - running_total;
2998                 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
2999                         trb_buff_len = TRB_MAX_BUFF_SIZE;
3000         } while (running_total < urb->transfer_buffer_length);
3001
3002         check_trb_math(urb, num_trbs, running_total);
3003         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3004                         start_cycle, start_trb);
3005         return 0;
3006 }
3007
3008 /* Caller must have locked xhci->lock */
3009 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3010                 struct urb *urb, int slot_id, unsigned int ep_index)
3011 {
3012         struct xhci_ring *ep_ring;
3013         int num_trbs;
3014         int ret;
3015         struct usb_ctrlrequest *setup;
3016         struct xhci_generic_trb *start_trb;
3017         int start_cycle;
3018         u32 field, length_field;
3019         struct urb_priv *urb_priv;
3020         struct xhci_td *td;
3021
3022         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3023         if (!ep_ring)
3024                 return -EINVAL;
3025
3026         /*
3027          * Need to copy setup packet into setup TRB, so we can't use the setup
3028          * DMA address.
3029          */
3030         if (!urb->setup_packet)
3031                 return -EINVAL;
3032
3033         if (!in_interrupt())
3034                 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
3035                                 slot_id, ep_index);
3036         /* 1 TRB for setup, 1 for status */
3037         num_trbs = 2;
3038         /*
         * Don't need to check if we need additional event data and normal TRBs,
         * since the data stage of a control transfer is at most 65535 bytes
         * (wLength is 16 bits) and so fits in a single data TRB.
3041          * XXX: can we get a buffer that crosses 64KB boundaries?
3042          */
3043         if (urb->transfer_buffer_length > 0)
3044                 num_trbs++;
3045         ret = prepare_transfer(xhci, xhci->devs[slot_id],
3046                         ep_index, urb->stream_id,
3047                         num_trbs, urb, 0, mem_flags);
3048         if (ret < 0)
3049                 return ret;
3050
3051         urb_priv = urb->hcpriv;
3052         td = urb_priv->td[0];
3053
3054         /*
3055          * Don't give the first TRB to the hardware (by toggling the cycle bit)
3056          * until we've finished creating all the other TRBs.  The ring's cycle
3057          * state may change as we enqueue the other TRBs, so save it too.
3058          */
3059         start_trb = &ep_ring->enqueue->generic;
3060         start_cycle = ep_ring->cycle_state;
3061
3062         /* Queue setup TRB - see section 6.4.1.2.1 */
3063         /* FIXME better way to translate setup_packet into two u32 fields? */
3064         setup = (struct usb_ctrlrequest *) urb->setup_packet;
3065         field = 0;
3066         field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3067         if (start_cycle == 0)
3068                 field |= 0x1;
3069
3070         /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3071         if (xhci->hci_version == 0x100) {
3072                 if (urb->transfer_buffer_length > 0) {
3073                         if (setup->bRequestType & USB_DIR_IN)
3074                                 field |= TRB_TX_TYPE(TRB_DATA_IN);
3075                         else
3076                                 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3077                 }
3078         }
3079
3080         queue_trb(xhci, ep_ring, false, true,
3081                   setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3082                   le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3083                   TRB_LEN(8) | TRB_INTR_TARGET(0),
3084                   /* Immediate data in pointer */
3085                   field);
3086
3087         /* If there's data, queue data TRBs */
3088         /* Only set interrupt on short packet for IN endpoints */
3089         if (usb_urb_dir_in(urb))
3090                 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3091         else
3092                 field = TRB_TYPE(TRB_DATA);
3093
3094         length_field = TRB_LEN(urb->transfer_buffer_length) |
3095                 xhci_td_remainder(urb->transfer_buffer_length) |
3096                 TRB_INTR_TARGET(0);
3097         if (urb->transfer_buffer_length > 0) {
3098                 if (setup->bRequestType & USB_DIR_IN)
3099                         field |= TRB_DIR_IN;
3100                 queue_trb(xhci, ep_ring, false, true,
3101                                 lower_32_bits(urb->transfer_dma),
3102                                 upper_32_bits(urb->transfer_dma),
3103                                 length_field,
3104                                 field | ep_ring->cycle_state);
3105         }
3106
3107         /* Save the DMA address of the last TRB in the TD */
3108         td->last_trb = ep_ring->enqueue;
3109
3110         /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3111         /* If the device sent data, the status stage is an OUT transfer */
3112         if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3113                 field = 0;
3114         else
3115                 field = TRB_DIR_IN;
3116         queue_trb(xhci, ep_ring, false, false,
3117                         0,
3118                         0,
3119                         TRB_INTR_TARGET(0),
3120                         /* Event on completion */
3121                         field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3122
3123         giveback_first_trb(xhci, slot_id, ep_index, 0,
3124                         start_cycle, start_trb);
3125         return 0;
3126 }
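
/*
 * Illustrative sketch (not part of the driver): how the 8-byte SETUP packet
 * is packed into the first two dwords of the setup TRB above (immediate
 * data, TRB_IDT).  Hypothetical helper mirroring the packing in
 * xhci_queue_ctrl_tx(); queue_trb() converts the fields to little-endian.
 */
static void __maybe_unused example_pack_setup(struct usb_ctrlrequest *setup,
                u32 *field1, u32 *field2)
{
        *field1 = setup->bRequestType | setup->bRequest << 8 |
                le16_to_cpu(setup->wValue) << 16;
        *field2 = le16_to_cpu(setup->wIndex) |
                le16_to_cpu(setup->wLength) << 16;
}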
3127
3128 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3129                 struct urb *urb, int i)
3130 {
3131         int num_trbs = 0;
3132         u64 addr, td_len;
3133
3134         addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3135         td_len = urb->iso_frame_desc[i].length;
3136
3137         num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3138                         TRB_MAX_BUFF_SIZE);
3139         if (num_trbs == 0)
3140                 num_trbs++;
3141
3142         return num_trbs;
3143 }
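
/*
 * Worked example (illustrative): a 3000-byte frame whose buffer starts 65000
 * bytes into a 64KB chunk crosses one boundary, so it needs
 * DIV_ROUND_UP(3000 + 65000, 65536) = 2 TRBs.  A zero-length frame still
 * needs one TRB.
 */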
3144
3145 /*
3146  * The transfer burst count field of the isochronous TRB defines the number of
3147  * bursts that are required to move all packets in this TD.  Only SuperSpeed
3148  * devices can burst up to bMaxBurst number of packets per service interval.
3149  * This field is zero based, meaning a value of zero in the field means one
3150  * burst.  Basically, for everything but SuperSpeed devices, this field will be
3151  * zero.  Only xHCI 1.0 host controllers support this field.
3152  */
3153 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3154                 struct usb_device *udev,
3155                 struct urb *urb, unsigned int total_packet_count)
3156 {
3157         unsigned int max_burst;
3158
3159         if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3160                 return 0;
3161
3162         max_burst = urb->ep->ss_ep_comp.bMaxBurst;
        return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3164 }
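
/*
 * Worked example (illustrative): a SuperSpeed endpoint with bMaxBurst = 3
 * (up to 4 packets per burst) moving 7 packets needs
 * DIV_ROUND_UP(7, 4) = 2 bursts, so the zero-based field is 2 - 1 = 1.
 */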
3165
3166 /*
3167  * Returns the number of packets in the last "burst" of packets.  This field is
3168  * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3169  * the last burst packet count is equal to the total number of packets in the
3170  * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3171  * must contain (bMaxBurst + 1) number of packets, but the last burst can
3172  * contain 1 to (bMaxBurst + 1) packets.
3173  */
3174 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3175                 struct usb_device *udev,
3176                 struct urb *urb, unsigned int total_packet_count)
3177 {
3178         unsigned int max_burst;
3179         unsigned int residue;
3180
3181         if (xhci->hci_version < 0x100)
3182                 return 0;
3183
3184         switch (udev->speed) {
3185         case USB_SPEED_SUPER:
3186                 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3187                 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3188                 residue = total_packet_count % (max_burst + 1);
3189                 /* If residue is zero, the last burst contains (max_burst + 1)
3190                  * number of packets, but the TLBPC field is zero-based.
3191                  */
3192                 if (residue == 0)
3193                         return max_burst;
3194                 return residue - 1;
3195         default:
3196                 if (total_packet_count == 0)
3197                         return 0;
3198                 return total_packet_count - 1;
3199         }
3200 }
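
/*
 * Worked example (illustrative): with bMaxBurst = 3 and 7 total packets,
 * residue = 7 % 4 = 3, so the last burst holds 3 packets and the zero-based
 * TLBPC value is 3 - 1 = 2.  With 8 packets, residue = 0: the last burst is
 * full and the value is bMaxBurst = 3.
 */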
3201
3202 /* This is for isoc transfer */
3203 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3204                 struct urb *urb, int slot_id, unsigned int ep_index)
3205 {
3206         struct xhci_ring *ep_ring;
3207         struct urb_priv *urb_priv;
3208         struct xhci_td *td;
3209         int num_tds, trbs_per_td;
3210         struct xhci_generic_trb *start_trb;
3211         bool first_trb;
3212         int start_cycle;
3213         u32 field, length_field;
3214         int running_total, trb_buff_len, td_len, td_remain_len, ret;
3215         u64 start_addr, addr;
3216         int i, j;
3217         bool more_trbs_coming;
3218
3219         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3220
3221         num_tds = urb->number_of_packets;
3222         if (num_tds < 1) {
3223                 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3224                 return -EINVAL;
3225         }
3226
3227         if (!in_interrupt())
3228                 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
3229                                 " addr = %#llx, num_tds = %d\n",
3230                                 urb->ep->desc.bEndpointAddress,
3231                                 urb->transfer_buffer_length,
3232                                 urb->transfer_buffer_length,
3233                                 (unsigned long long)urb->transfer_dma,
3234                                 num_tds);
3235
3236         start_addr = (u64) urb->transfer_dma;
3237         start_trb = &ep_ring->enqueue->generic;
3238         start_cycle = ep_ring->cycle_state;
3239
3240         urb_priv = urb->hcpriv;
3241         /* Queue the first TRB, even if it's zero-length */
3242         for (i = 0; i < num_tds; i++) {
3243                 unsigned int total_packet_count;
3244                 unsigned int burst_count;
3245                 unsigned int residue;
3246
3247                 first_trb = true;
3248                 running_total = 0;
3249                 addr = start_addr + urb->iso_frame_desc[i].offset;
3250                 td_len = urb->iso_frame_desc[i].length;
3251                 td_remain_len = td_len;
                total_packet_count = DIV_ROUND_UP(td_len,
                                le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3254                 /* A zero-length transfer still involves at least one packet. */
3255                 if (total_packet_count == 0)
3256                         total_packet_count++;
3257                 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3258                                 total_packet_count);
3259                 residue = xhci_get_last_burst_packet_count(xhci,
3260                                 urb->dev, urb, total_packet_count);
3261
3262                 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3263
3264                 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3265                                 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3266                 if (ret < 0) {
3267                         if (i == 0)
3268                                 return ret;
3269                         goto cleanup;
3270                 }
3271
3272                 td = urb_priv->td[i];
3273                 for (j = 0; j < trbs_per_td; j++) {
3274                         u32 remainder = 0;
3275                         field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
3276
3277                         if (first_trb) {
3278                                 /* Queue the isoc TRB */
3279                                 field |= TRB_TYPE(TRB_ISOC);
3280                                 /* Assume URB_ISO_ASAP is set */
3281                                 field |= TRB_SIA;
3282                                 if (i == 0) {
3283                                         if (start_cycle == 0)
3284                                                 field |= 0x1;
3285                                 } else
3286                                         field |= ep_ring->cycle_state;
3287                                 first_trb = false;
3288                         } else {
3289                                 /* Queue other normal TRBs */
3290                                 field |= TRB_TYPE(TRB_NORMAL);
3291                                 field |= ep_ring->cycle_state;
3292                         }
3293
3294                         /* Only set interrupt on short packet for IN EPs */
3295                         if (usb_urb_dir_in(urb))
3296                                 field |= TRB_ISP;
3297
3298                         /* Chain all the TRBs together; clear the chain bit in
3299                          * the last TRB to indicate it's the last TRB in the
3300                          * chain.
3301                          */
3302                         if (j < trbs_per_td - 1) {
3303                                 field |= TRB_CHAIN;
3304                                 more_trbs_coming = true;
3305                         } else {
3306                                 td->last_trb = ep_ring->enqueue;
3307                                 field |= TRB_IOC;
3308                                 if (xhci->hci_version == 0x100) {
3309                                         /* Set BEI bit except for the last td */
3310                                         if (i < num_tds - 1)
3311                                                 field |= TRB_BEI;
3312                                 }
3313                                 more_trbs_coming = false;
3314                         }
3315
3316                         /* Calculate TRB length */
                        trb_buff_len = TRB_MAX_BUFF_SIZE -
                                (addr & (TRB_MAX_BUFF_SIZE - 1));
3319                         if (trb_buff_len > td_remain_len)
3320                                 trb_buff_len = td_remain_len;
3321
3322                         /* Set the TRB length, TD size, & interrupter fields. */
3323                         if (xhci->hci_version < 0x100) {
3324                                 remainder = xhci_td_remainder(
3325                                                 td_len - running_total);
3326                         } else {
3327                                 remainder = xhci_v1_0_td_remainder(
3328                                                 running_total, trb_buff_len,
3329                                                 total_packet_count, urb);
3330                         }
3331                         length_field = TRB_LEN(trb_buff_len) |
3332                                 remainder |
3333                                 TRB_INTR_TARGET(0);
3334
3335                         queue_trb(xhci, ep_ring, false, more_trbs_coming,
3336                                 lower_32_bits(addr),
3337                                 upper_32_bits(addr),
3338                                 length_field,
3339                                 field);
3340                         running_total += trb_buff_len;
3341
3342                         addr += trb_buff_len;
3343                         td_remain_len -= trb_buff_len;
3344                 }
3345
3346                 /* Check TD length */
                if (running_total != td_len) {
                        xhci_err(xhci, "ISOC TD length mismatch\n");
                        ret = -EINVAL;
                        /* TD i is already on the td_list; clean it up too */
                        i++;
                        goto cleanup;
                }
3351         }
3352
3353         if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3354                 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3355                         usb_amd_quirk_pll_disable();
3356         }
3357         xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3358
3359         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3360                         start_cycle, start_trb);
3361         return 0;
3362 cleanup:
3363         /* Clean up a partially enqueued isoc transfer. */
3364
3365         for (i--; i >= 0; i--)
3366                 list_del_init(&urb_priv->td[i]->td_list);
3367
3368         /* Use the first TD as a temporary variable to turn the TDs we've queued
3369          * into No-ops with a software-owned cycle bit. That way the hardware
3370          * won't accidentally start executing bogus TDs when we partially
3371          * overwrite them.  td->first_trb and td->start_seg are already set.
3372          */
3373         urb_priv->td[0]->last_trb = ep_ring->enqueue;
3374         /* Every TRB except the first & last will have its cycle bit flipped. */
3375         td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3376
3377         /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3378         ep_ring->enqueue = urb_priv->td[0]->first_trb;
3379         ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3380         ep_ring->cycle_state = start_cycle;
3381         usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3382         return ret;
3383 }
3384
3385 /*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  For now, just use the
 * xHCI frame_index to set urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
3391  */
3392 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3393                 struct urb *urb, int slot_id, unsigned int ep_index)
3394 {
3395         struct xhci_virt_device *xdev;
3396         struct xhci_ring *ep_ring;
3397         struct xhci_ep_ctx *ep_ctx;
3398         int start_frame;
3399         int xhci_interval;
3400         int ep_interval;
3401         int num_tds, num_trbs, i;
3402         int ret;
3403
3404         xdev = xhci->devs[slot_id];
3405         ep_ring = xdev->eps[ep_index].ring;
3406         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3407
3408         num_trbs = 0;
3409         num_tds = urb->number_of_packets;
3410         for (i = 0; i < num_tds; i++)
3411                 num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3412
3413         /* Check the ring to guarantee there is enough room for the whole urb.
         * Do not insert any TDs of the URB onto the ring if the check fails.
3415          */
3416         ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3417                            num_trbs, mem_flags);
3418         if (ret)
3419                 return ret;
3420
3421         start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3422         start_frame &= 0x3fff;
3423
3424         urb->start_frame = start_frame;
3425         if (urb->dev->speed == USB_SPEED_LOW ||
3426                         urb->dev->speed == USB_SPEED_FULL)
3427                 urb->start_frame >>= 3;
3428
3429         xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3430         ep_interval = urb->interval;
3431         /* Convert to microframes */
3432         if (urb->dev->speed == USB_SPEED_LOW ||
3433                         urb->dev->speed == USB_SPEED_FULL)
3434                 ep_interval *= 8;
3435         /* FIXME change this to a warning and a suggestion to use the new API
3436          * to set the polling interval (once the API is added).
3437          */
3438         if (xhci_interval != ep_interval) {
3439                 if (printk_ratelimit())
3440                         dev_dbg(&urb->dev->dev, "Driver uses different interval"
3441                                         " (%d microframe%s) than xHCI "
3442                                         "(%d microframe%s)\n",
3443                                         ep_interval,
3444                                         ep_interval == 1 ? "" : "s",
3445                                         xhci_interval,
3446                                         xhci_interval == 1 ? "" : "s");
3447                 urb->interval = xhci_interval;
3448                 /* Convert back to frames for LS/FS devices */
3449                 if (urb->dev->speed == USB_SPEED_LOW ||
3450                                 urb->dev->speed == USB_SPEED_FULL)
3451                         urb->interval /= 8;
3452         }
        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3454 }
3455
3456 /****           Command Ring Operations         ****/
3457
3458 /* Generic function for queueing a command TRB on the command ring.
3459  * Check to make sure there's room on the command ring for one command TRB.
3460  * Also check that there's room reserved for commands that must not fail.
3461  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3462  * then only check for the number of reserved spots.
3463  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3464  * because the command event handler may want to resubmit a failed command.
3465  */
3466 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3467                 u32 field3, u32 field4, bool command_must_succeed)
3468 {
3469         int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3470         int ret;
3471
3472         if (!command_must_succeed)
3473                 reserved_trbs++;
3474
3475         ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3476                         reserved_trbs, GFP_ATOMIC);
3477         if (ret < 0) {
3478                 xhci_err(xhci, "ERR: No room for command on command ring\n");
3479                 if (command_must_succeed)
3480                         xhci_err(xhci, "ERR: Reserved TRB counting for "
3481                                         "unfailable commands failed.\n");
3482                 return ret;
3483         }
3484         queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
3485                         field4 | xhci->cmd_ring->cycle_state);
3486         return 0;
3487 }
3488
3489 /* Queue a slot enable or disable request on the command ring */
3490 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3491 {
3492         return queue_command(xhci, 0, 0, 0,
3493                         TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3494 }
3495
3496 /* Queue an address device command TRB */
3497 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3498                 u32 slot_id)
3499 {
3500         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3501                         upper_32_bits(in_ctx_ptr), 0,
3502                         TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
3503                         false);
3504 }
3505
3506 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3507                 u32 field1, u32 field2, u32 field3, u32 field4)
3508 {
3509         return queue_command(xhci, field1, field2, field3, field4, false);
3510 }
3511
3512 /* Queue a reset device command TRB */
3513 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3514 {
3515         return queue_command(xhci, 0, 0, 0,
3516                         TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3517                         false);
3518 }
3519
3520 /* Queue a configure endpoint command TRB */
3521 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3522                 u32 slot_id, bool command_must_succeed)
3523 {
3524         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3525                         upper_32_bits(in_ctx_ptr), 0,
3526                         TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3527                         command_must_succeed);
3528 }
3529
3530 /* Queue an evaluate context command TRB */
3531 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3532                 u32 slot_id)
3533 {
3534         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3535                         upper_32_bits(in_ctx_ptr), 0,
3536                         TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3537                         false);
3538 }
3539
3540 /*
3541  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3542  * activity on an endpoint that is about to be suspended.
3543  */
3544 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3545                 unsigned int ep_index, int suspend)
3546 {
3547         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3548         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3549         u32 type = TRB_TYPE(TRB_STOP_RING);
3550         u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3551
3552         return queue_command(xhci, 0, 0, 0,
3553                         trb_slot_id | trb_ep_index | type | trb_suspend, false);
3554 }
3555
3556 /* Set Transfer Ring Dequeue Pointer command.
3557  * This should not be used for endpoints that have streams enabled.
3558  */
3559 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3560                 unsigned int ep_index, unsigned int stream_id,
3561                 struct xhci_segment *deq_seg,
3562                 union xhci_trb *deq_ptr, u32 cycle_state)
3563 {
3564         dma_addr_t addr;
3565         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3566         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3567         u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3568         u32 type = TRB_TYPE(TRB_SET_DEQ);
3569         struct xhci_virt_ep *ep;
3570
3571         addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
3572         if (addr == 0) {
3573                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3574                 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3575                                 deq_seg, deq_ptr);
3576                 return 0;
3577         }
3578         ep = &xhci->devs[slot_id]->eps[ep_index];
3579         if ((ep->ep_state & SET_DEQ_PENDING)) {
3580                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3581                 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
3582                 return 0;
3583         }
3584         ep->queued_deq_seg = deq_seg;
3585         ep->queued_deq_ptr = deq_ptr;
3586         return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3587                         upper_32_bits(addr), trb_stream_id,
3588                         trb_slot_id | trb_ep_index | type, false);
3589 }
3590
3591 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3592                 unsigned int ep_index)
3593 {
3594         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3595         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3596         u32 type = TRB_TYPE(TRB_RESET_EP);
3597
3598         return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
3599                         false);
3600 }