/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is the producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include "xhci.h"

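/*
 * Sketch (hypothetical helper, not a driver API): the ownership test
 * from the consumer rules above.  A TRB belongs to the consumer while
 * its cycle bit matches the ring's cycle state.
 */
static inline bool consumer_owns_trb(union xhci_trb *trb, u32 cycle_state)
{
        return (trb->event_cmd.flags & TRB_CYCLE) == cycle_state;
}
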
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}
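
/*
 * Sketch of the inverse mapping (hypothetical helper, not a driver API):
 * recover the virtual TRB pointer for a DMA address that falls inside
 * seg.  handle_tx_event() below open-codes the same arithmetic when it
 * maps an event's TRB DMA pointer back to a virtual pointer.
 */
static inline union xhci_trb *xhci_trb_dma_to_virt(struct xhci_segment *seg,
                dma_addr_t dma)
{
        if (!seg || dma < seg->dma ||
                        dma >= seg->dma + TRBS_PER_SEGMENT * sizeof(union xhci_trb))
                return NULL;
        return &seg->trbs[(dma - seg->dma) / sizeof(union xhci_trb)];
}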

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
                        (seg->next == xhci->event_ring->first_seg);
        else
                return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return trb == &seg->trbs[TRBS_PER_SEGMENT];
        else
                return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (last_trb(xhci, ring, *seg, *trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
        union xhci_trb *next = ++(ring->dequeue);
        unsigned long long addr;

        ring->deq_updates++;
        /* Update the dequeue pointer further if that was a link TRB or we're at
         * the end of an event ring segment (which doesn't have link TRBs)
         */
        while (last_trb(xhci, ring, ring->deq_seg, next)) {
                if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
                        ring->cycle_state = (ring->cycle_state ? 0 : 1);
                        if (!in_interrupt())
                                xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
                                                ring,
                                                (unsigned int) ring->cycle_state);
                }
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                next = ring->dequeue;
        }
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
        if (ring == xhci->event_ring)
                xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
        else if (ring == xhci->cmd_ring)
                xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
        else
                xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
        u32 chain;
        union xhci_trb *next;
        unsigned long long addr;

        chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
        next = ++(ring->enqueue);

        ring->enq_updates++;
        /* Update the enqueue pointer further if that was a link TRB or we're at
         * the end of an event ring segment (which doesn't have link TRBs)
         */
        while (last_trb(xhci, ring, ring->enq_seg, next)) {
                if (!consumer) {
                        if (ring != xhci->event_ring) {
                                /* If we're not dealing with 0.95 hardware,
                                 * carry over the chain bit of the previous TRB
                                 * (which may mean the chain bit is cleared).
                                 */
                                if (!xhci_link_trb_quirk(xhci)) {
                                        next->link.control &= ~TRB_CHAIN;
                                        next->link.control |= chain;
                                }
                                /* Give this link TRB to the hardware */
                                wmb();
                                if (next->link.control & TRB_CYCLE)
                                        next->link.control &= (u32) ~TRB_CYCLE;
                                else
                                        next->link.control |= (u32) TRB_CYCLE;
                        }
                        /* Toggle the cycle bit after the last ring segment. */
                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
                                ring->cycle_state = (ring->cycle_state ? 0 : 1);
                                if (!in_interrupt())
                                        xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
                                                        ring,
                                                        (unsigned int) ring->cycle_state);
                        }
                }
                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        if (ring == xhci->event_ring)
                xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
        else if (ring == xhci->cmd_ring)
                xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
        else
                xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
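
/*
 * Sketch (hypothetical, not driver code): queueing a two-TRB TD through
 * inc_enq().  Every TRB but the last carries TRB_CHAIN, so if inc_enq()
 * crosses a link TRB mid-TD it propagates the chain bit onto that link
 * TRB, as described above.  The transfer queueing code later in this
 * file writes TRB fields and advances the enqueue pointer the same way.
 */
static void example_queue_two_trb_td(struct xhci_hcd *xhci,
                struct xhci_ring *ring, u64 buf, u32 len1, u32 len2)
{
        struct xhci_generic_trb *trb;

        trb = &ring->enqueue->generic;
        trb->field[0] = lower_32_bits(buf);
        trb->field[1] = upper_32_bits(buf);
        trb->field[2] = len1;
        /* Mid-TD, so the chain bit stays set */
        trb->field[3] = TRB_TYPE(TRB_NORMAL) | TRB_CHAIN | ring->cycle_state;
        inc_enq(xhci, ring, false);

        trb = &ring->enqueue->generic;
        trb->field[0] = lower_32_bits(buf + len1);
        trb->field[1] = upper_32_bits(buf + len1);
        trb->field[2] = len2;
        /* Last TRB in the TD: chain clear, interrupt on completion */
        trb->field[3] = TRB_TYPE(TRB_NORMAL) | TRB_IOC | ring->cycle_state;
        inc_enq(xhci, ring, false);
}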

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int i;
        union xhci_trb *enq = ring->enqueue;
        struct xhci_segment *enq_seg = ring->enq_seg;

        /* Check if ring is empty */
        if (enq == ring->dequeue)
                return 1;
        /* Make sure there's an extra empty TRB available */
        for (i = 0; i <= num_trbs; ++i) {
                if (enq == ring->dequeue)
                        return 0;
                enq++;
                while (last_trb(xhci, ring, enq_seg, enq)) {
                        enq_seg = enq_seg->next;
                        enq = enq_seg->trbs;
                }
        }
        return 1;
}
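
/*
 * Sketch (hypothetical, not driver code): the producer-side flow implied
 * by the rules above - reserve room first, write the TRB with the ring's
 * cycle state, then notify the consumer.  For the command ring the
 * notification is the host controller doorbell.
 */
static int example_queue_noop_command(struct xhci_hcd *xhci)
{
        struct xhci_generic_trb *trb;

        if (!room_on_ring(xhci, xhci->cmd_ring, 1))
                return -ENOMEM;
        trb = &xhci->cmd_ring->enqueue->generic;
        trb->field[0] = 0;
        trb->field[1] = 0;
        trb->field[2] = 0;
        trb->field[3] = TRB_TYPE(TRB_CMD_NOOP) | xhci->cmd_ring->cycle_state;
        inc_enq(xhci, xhci->cmd_ring, false);
        xhci_ring_cmd_db(xhci); /* producer rule 3: notify the consumer */
        return 0;
}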

void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
        u64 temp;
        dma_addr_t deq;

        deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
                        xhci->event_ring->dequeue);
        if (deq == 0 && !in_interrupt())
                xhci_warn(xhci, "WARN something wrong with SW event ring "
                                "dequeue ptr.\n");
        /* Update HC event ring dequeue pointer */
        temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp &= ERST_PTR_MASK;
        /* Don't clear the EHB bit (which is RW1C) because
         * there might be more events to service.
         */
        temp &= ~ERST_EHB;
        xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
                        &xhci->ir_set->erst_dequeue);
}
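
/*
 * Sketch (hypothetical, not driver code): the consumer rules above as an
 * event-ring poll loop would apply them - consume TRBs while their cycle
 * bit matches the ring's cycle state, then hand the new dequeue pointer
 * back to the hardware with xhci_set_hc_event_deq().
 */
static void example_drain_event_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring = xhci->event_ring;

        while (consumer_owns_trb(ring->dequeue, ring->cycle_state)) {
                /* ... dispatch on the TRB type of ring->dequeue here ... */
                inc_deq(xhci, ring, true);
        }
        xhci_set_hc_event_deq(xhci);
}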

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        u32 temp;

        xhci_dbg(xhci, "// Ding dong!\n");
        temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
        xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static void ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index)
{
        struct xhci_virt_ep *ep;
        unsigned int ep_state;
        u32 field;
        __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

        ep = &xhci->devs[slot_id]->eps[ep_index];
        ep_state = ep->ep_state;
        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations, because we don't want to interrupt processing.
         */
        if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
                        && !(ep_state & EP_HALTED)) {
                field = xhci_readl(xhci, db_addr) & DB_MASK;
                xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
                /* Flush PCI posted writes - FIXME Matthew Wilcox says this
                 * isn't time-critical and we shouldn't make the CPU wait for
                 * the flush.
                 */
                xhci_readl(xhci, db_addr);
        }
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
                struct xhci_segment *start_seg,
                union xhci_trb *trb, int *cycle_state)
{
        struct xhci_segment *cur_seg = start_seg;
        struct xhci_generic_trb *generic_trb;

        while (cur_seg->trbs > trb ||
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
                if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
                                TRB_TYPE(TRB_LINK) &&
                                (generic_trb->field[3] & LINK_TOGGLE))
                        *cycle_state = ~(*cycle_state) & 0x1;
                cur_seg = cur_seg->next;
                if (cur_seg == start_seg)
                        /* Looped over the entire list.  Oops! */
                        return NULL;
        }
        return cur_seg;
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_td *cur_td, struct xhci_dequeue_state *state)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
        struct xhci_generic_trb *trb;
        struct xhci_ep_ctx *ep_ctx;
        dma_addr_t addr;

        state->new_cycle_state = 0;
        xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        dev->eps[ep_index].stopped_trb,
                        &state->new_cycle_state);
        if (!state->new_deq_seg)
                BUG();
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg(xhci, "Finding endpoint context\n");
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        state->new_cycle_state = 0x1 & ep_ctx->deq;

        state->new_deq_ptr = cur_td->last_trb;
        xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
        if (!state->new_deq_seg)
                BUG();

        trb = &state->new_deq_ptr->generic;
        if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
                        (trb->field[3] & LINK_TOGGLE))
                state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

        /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
                        state->new_deq_seg);
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
                        (unsigned long long) addr);
        xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
        ep_ring->dequeue = state->new_deq_ptr;
        ep_ring->deq_seg = state->new_deq_seg;
}

static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                struct xhci_td *cur_td)
{
        struct xhci_segment *cur_seg;
        union xhci_trb *cur_trb;

        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
                        true;
                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
                if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
                                TRB_TYPE(TRB_LINK)) {
                        /* Unchain any chained Link TRBs, but
                         * leave the pointers intact.
                         */
                        cur_trb->generic.field[3] &= ~TRB_CHAIN;
                        xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
                        xhci_dbg(xhci, "Address = %p (0x%llx dma); "
                                        "in seg %p (0x%llx dma)\n",
                                        cur_trb,
                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
                                        cur_seg,
                                        (unsigned long long)cur_seg->dma);
                } else {
                        cur_trb->generic.field[0] = 0;
                        cur_trb->generic.field[1] = 0;
                        cur_trb->generic.field[2] = 0;
                        /* Preserve only the cycle bit of this TRB */
                        cur_trb->generic.field[3] &= TRB_CYCLE;
                        cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
                        xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
                                        "in seg %p (0x%llx dma)\n",
                                        cur_trb,
                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
                                        cur_seg,
                                        (unsigned long long)cur_seg->dma);
                }
                if (cur_trb == cur_td->last_trb)
                        break;
        }
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, struct xhci_segment *deq_seg,
                union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
{
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

        xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
                        "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
                        deq_state->new_deq_seg,
                        (unsigned long long)deq_state->new_deq_seg->dma,
                        deq_state->new_deq_ptr,
                        (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
                        deq_state->new_cycle_state);
        queue_set_tr_deq(xhci, slot_id, ep_index,
                        deq_state->new_deq_seg,
                        deq_state->new_deq_ptr,
                        (u32) deq_state->new_cycle_state);
        /* Stop the TD queueing code from ringing the doorbell until
         * this command completes.  The HC won't set the dequeue pointer
         * if the ring is running, and ringing the doorbell starts the
         * ring running.
         */
        ep->ep_state |= SET_DEQ_PENDING;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                union xhci_trb *trb)
{
        unsigned int slot_id;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct list_head *entry;
        struct xhci_td *cur_td = NULL;
        struct xhci_td *last_unlinked_td;

        struct xhci_dequeue_state deq_state;
#ifdef CONFIG_USB_HCD_STAT
        ktime_t stop_time = ktime_get();
#endif

        memset(&deq_state, 0, sizeof(deq_state));
        slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
        ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
        ep = &xhci->devs[slot_id]->eps[ep_index];
        ep_ring = ep->ring;

        if (list_empty(&ep->cancelled_td_list))
                return;

        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
         * We have the xHCI lock, so nothing can modify this list until we drop
         * it.  We're also in the event handler, so we can't get re-interrupted
         * if another Stop Endpoint command completes.
         */
        list_for_each(entry, &ep->cancelled_td_list) {
                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
                xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
                                cur_td->first_trb,
                                (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
                /*
                 * If we stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                if (cur_td == ep->stopped_td)
                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                                        &deq_state);
                else
                        td_to_noop(xhci, ep_ring, cur_td);
                /*
                 * The event handler won't see a completion for this TD anymore,
                 * so remove it from the endpoint ring's TD list.  Keep it in
                 * the cancelled TD list for URB completion later.
                 */
                list_del(&cur_td->td_list);
                ep->cancels_pending--;
        }
        last_unlinked_td = cur_td;

        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                xhci_queue_new_dequeue_state(xhci,
                                slot_id, ep_index, &deq_state);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Otherwise just ring the doorbell to restart the ring */
                ring_ep_doorbell(xhci, slot_id, ep_index);
        }

        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
         * New TDs to be cancelled might be added to the end of the list before
         * we can complete all the URBs for the TDs we already unlinked.
         * So stop when we've completed the URB for the last TD we unlinked.
         */
        do {
                cur_td = list_entry(ep->cancelled_td_list.next,
                                struct xhci_td, cancelled_td_list);
                list_del(&cur_td->cancelled_td_list);

                /* Clean up the cancelled URB */
#ifdef CONFIG_USB_HCD_STAT
                hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
                                ktime_sub(stop_time, cur_td->start_time));
#endif
                cur_td->urb->hcpriv = NULL;
                usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);

                xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
                spin_unlock(&xhci->lock);
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
                usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
                kfree(cur_td);

                spin_lock(&xhci->lock);
        } while (cur_td != last_unlinked_td);

        /* Return to the event handler with xhci->lock re-acquired */
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event,
                union xhci_trb *trb)
{
        unsigned int slot_id;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
        ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
        dev = xhci->devs[slot_id];
        ep_ring = dev->eps[ep_index].ring;
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

        if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (GET_COMP_CODE(event->status)) {
                case COMP_TRB_ERR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
                                        "of stream ID configuration\n");
                        break;
                case COMP_CTX_STATE:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                                        "to incorrect slot or ep state.\n");
                        ep_state = ep_ctx->ep_info;
                        ep_state &= EP_STATE_MASK;
                        slot_state = slot_ctx->dev_state;
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                                        slot_state, ep_state);
                        break;
                case COMP_EBADSLT:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
                                        "slot %u was not enabled.\n", slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
                                        "completion code of %u.\n",
                                        GET_COMP_CODE(event->status));
                        break;
                }
                /* OK what do we do now?  The endpoint state is hosed, and we
                 * should never get to this point if the synchronization between
                 * queueing and endpoint state is correct.  This might happen
                 * if the device gets disconnected after we've finished
                 * cancelling URBs, which might not be an error...
                 */
        } else {
                xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
                                ep_ctx->deq);
        }

        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
        ring_ep_doorbell(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event,
                union xhci_trb *trb)
{
        int slot_id;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;

        slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
        ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
                        (unsigned int) GET_COMP_CODE(event->status));

        /* HW with the reset endpoint quirk needs to have a configure endpoint
         * command complete before the endpoint can be used.  Queue that here
         * because the HW can't handle two commands being queued in a row.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
                xhci_dbg(xhci, "Queueing configure endpoint command\n");
                xhci_queue_configure_endpoint(xhci,
                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Clear our internal halted state and restart the ring */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
                ring_ep_doorbell(xhci, slot_id, ep_index);
        }
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct xhci_event_cmd *event)
{
        struct xhci_command *command;

        if (list_empty(&virt_dev->cmd_list))
                return 0;

        command = list_entry(virt_dev->cmd_list.next,
                        struct xhci_command, cmd_list);
        if (xhci->cmd_ring->dequeue != command->command_trb)
                return 0;

        command->status =
                GET_COMP_CODE(event->status);
        list_del(&command->cmd_list);
        if (command->completion)
                complete(command->completion);
        else
                xhci_free_command(xhci, command);
        return 1;
}
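
/*
 * Sketch (hypothetical, locking elided): how a caller waits on a command
 * through this mechanism.  It records the enqueue pointer as the
 * command's TRB, links the command on the device's cmd_list before
 * ringing the doorbell, and sleeps until handle_cmd_in_cmd_wait_list()
 * above signals the completion from the command completion event.
 */
static int example_wait_for_command(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct xhci_command *command)
{
        command->command_trb = xhci->cmd_ring->enqueue;
        list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
        /* ... queue the actual command TRB here ... */
        xhci_ring_cmd_db(xhci);
        /* A real caller drops xhci->lock before sleeping */
        wait_for_completion(command->completion);
        return command->status;
}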

static void handle_cmd_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
{
        int slot_id = TRB_TO_SLOT_ID(event->flags);
        u64 cmd_dma;
        dma_addr_t cmd_dequeue_dma;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_virt_device *virt_dev;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        unsigned int ep_state;

        cmd_dma = event->cmd_trb;
        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                        xhci->cmd_ring->dequeue);
        /* Is the command ring deq ptr out of sync with the deq seg ptr? */
        if (cmd_dequeue_dma == 0) {
                xhci->error_bitmask |= 1 << 4;
                return;
        }
        /* Does the DMA address match our internal dequeue pointer address? */
        if (cmd_dma != (u64) cmd_dequeue_dma) {
                xhci->error_bitmask |= 1 << 5;
                return;
        }
        switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
        case TRB_TYPE(TRB_ENABLE_SLOT):
                if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
                        xhci->slot_id = slot_id;
                else
                        xhci->slot_id = 0;
                complete(&xhci->addr_dev);
                break;
        case TRB_TYPE(TRB_DISABLE_SLOT):
                if (xhci->devs[slot_id])
                        xhci_free_virt_device(xhci, slot_id);
                break;
        case TRB_TYPE(TRB_CONFIG_EP):
                virt_dev = xhci->devs[slot_id];
                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
                        break;
                /*
                 * Configure endpoint commands can come from the USB core
                 * configuration or alt setting changes, or because the HW
                 * needed an extra configure endpoint command after a reset
                 * endpoint command.  In the latter case, the xHCI driver is
                 * not waiting on the configure endpoint command.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci,
                                virt_dev->in_ctx);
                /* Input ctx add_flags are the endpoint index plus one */
                ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
                ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
                if (!ep_ring) {
                        /* This must have been an initial configure endpoint */
                        xhci->devs[slot_id]->cmd_status =
                                GET_COMP_CODE(event->status);
                        complete(&xhci->devs[slot_id]->cmd_completion);
                        break;
                }
                ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
                xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
                                "state = %d\n", ep_index, ep_state);
                if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
                                ep_state & EP_HALTED) {
                        /* Clear our internal halted state and restart ring */
                        xhci->devs[slot_id]->eps[ep_index].ep_state &=
                                ~EP_HALTED;
                        ring_ep_doorbell(xhci, slot_id, ep_index);
                } else {
                        xhci->devs[slot_id]->cmd_status =
                                GET_COMP_CODE(event->status);
                        complete(&xhci->devs[slot_id]->cmd_completion);
                }
                break;
        case TRB_TYPE(TRB_EVAL_CONTEXT):
                virt_dev = xhci->devs[slot_id];
                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
                        break;
                xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
                complete(&xhci->devs[slot_id]->cmd_completion);
                break;
        case TRB_TYPE(TRB_ADDR_DEV):
                xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
                complete(&xhci->addr_dev);
                break;
        case TRB_TYPE(TRB_STOP_RING):
                handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
                break;
        case TRB_TYPE(TRB_SET_DEQ):
                handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
                break;
        case TRB_TYPE(TRB_CMD_NOOP):
                ++xhci->noops_handled;
                break;
        case TRB_TYPE(TRB_RESET_EP):
                handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
                break;
        default:
                /* Skip over unknown commands on the event ring */
                xhci->error_bitmask |= 1 << 6;
                break;
        }
        inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_port_status(struct xhci_hcd *xhci,
                union xhci_trb *event)
{
        u32 port_id;

        /* Port status change events always have a successful completion code */
        if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
                xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
                xhci->error_bitmask |= 1 << 8;
        }
        /* FIXME: core doesn't care about all port link state changes yet */
        port_id = GET_PORT_ID(event->generic.field[0]);
        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

        /* Update event ring dequeue pointer before dropping the lock */
        inc_deq(xhci, xhci->event_ring, true);
        xhci_set_hc_event_deq(xhci);

        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
        spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
static struct xhci_segment *trb_in_td(
                struct xhci_segment *start_seg,
                union xhci_trb *start_trb,
                union xhci_trb *end_trb,
                dma_addr_t suspect_dma)
{
        dma_addr_t start_dma;
        dma_addr_t end_seg_dma;
        dma_addr_t end_trb_dma;
        struct xhci_segment *cur_seg;

        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
        cur_seg = start_seg;

        do {
                /* We may get an event for a Link TRB in the middle of a TD */
                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
                /* If the end TRB isn't in this segment, this is set to 0 */
                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

                if (end_trb_dma > 0) {
                        /* The end TRB is in this segment, so suspect should be here */
                        if (start_dma <= end_trb_dma) {
                                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
                                        return cur_seg;
                        } else {
                                /* Case for one segment with
                                 * a TD wrapped around to the top
                                 */
                                if ((suspect_dma >= start_dma &&
                                                        suspect_dma <= end_seg_dma) ||
                                                (suspect_dma >= cur_seg->dma &&
                                                 suspect_dma <= end_trb_dma))
                                        return cur_seg;
                        }
                        return NULL;
                } else {
                        /* Might still be somewhere in this segment */
                        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
                                return cur_seg;
                }
                cur_seg = cur_seg->next;
                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
        } while (1);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
                struct xhci_transfer_event *event)
{
        struct xhci_virt_device *xdev;
        struct xhci_virt_ep *ep;
        struct xhci_ring *ep_ring;
        unsigned int slot_id;
        int ep_index;
        struct xhci_td *td = NULL;
        dma_addr_t event_dma;
        struct xhci_segment *event_seg;
        union xhci_trb *event_trb;
        struct urb *urb = NULL;
        int status = -EINPROGRESS;
        struct xhci_ep_ctx *ep_ctx;
        u32 trb_comp_code;

        xhci_dbg(xhci, "In %s\n", __func__);
        slot_id = TRB_TO_SLOT_ID(event->flags);
        xdev = xhci->devs[slot_id];
        if (!xdev) {
                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
                return -ENODEV;
        }

        /* Endpoint ID is 1 based, our index is zero based */
        ep_index = TRB_TO_EP_ID(event->flags) - 1;
        xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
        ep = &xdev->eps[ep_index];
        ep_ring = ep->ring;
        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
        if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
                xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
                return -ENODEV;
        }

        event_dma = event->buffer;
        /* This TRB should be in the TD at the head of this ring's TD list */
        xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
        if (list_empty(&ep_ring->td_list)) {
                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
                                TRB_TO_SLOT_ID(event->flags), ep_index);
                xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
                                (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
                xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
                urb = NULL;
                goto cleanup;
        }
        xhci_dbg(xhci, "%s - getting list entry\n", __func__);
        td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

        /* Is this a TRB in the currently executing TD? */
        xhci_dbg(xhci, "%s - looking for TD\n", __func__);
        event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                        td->last_trb, event_dma);
        xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
        if (!event_seg) {
                /* HC is busted, give up! */
                xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
                return -ESHUTDOWN;
        }
        event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
        xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
                        (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
        xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
                        lower_32_bits(event->buffer));
        xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
                        upper_32_bits(event->buffer));
        xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
                        (unsigned int) event->transfer_len);
        xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
                        (unsigned int) event->flags);

        /* Look for common error cases */
        trb_comp_code = GET_COMP_CODE(event->transfer_len);
        switch (trb_comp_code) {
        /* Skip codes that require special handling depending on
         * transfer type
         */
        case COMP_SUCCESS:
        case COMP_SHORT_TX:
                break;
        case COMP_STOP:
                xhci_dbg(xhci, "Stopped on Transfer TRB\n");
                break;
        case COMP_STOP_INVAL:
                xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
                break;
        case COMP_STALL:
                xhci_warn(xhci, "WARN: Stalled endpoint\n");
                ep->ep_state |= EP_HALTED;
                status = -EPIPE;
                break;
        case COMP_TRB_ERR:
                xhci_warn(xhci, "WARN: TRB error on endpoint\n");
                status = -EILSEQ;
                break;
        case COMP_TX_ERR:
                xhci_warn(xhci, "WARN: transfer error on endpoint\n");
                status = -EPROTO;
                break;
        case COMP_BABBLE:
                xhci_warn(xhci, "WARN: babble error on endpoint\n");
                status = -EOVERFLOW;
                break;
        case COMP_DB_ERR:
                xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
                status = -ENOSR;
                break;
        default:
                xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
                urb = NULL;
                goto cleanup;
        }
        /* Now update the urb's actual_length and give back to the core */
        /* Was this a control transfer? */
        if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
                xhci_debug_trb(xhci, xhci->event_ring->dequeue);
                switch (trb_comp_code) {
                case COMP_SUCCESS:
                        if (event_trb == ep_ring->dequeue) {
                                xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
                                status = -ESHUTDOWN;
                        } else if (event_trb != td->last_trb) {
                                xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
                                status = -ESHUTDOWN;
                        } else {
                                xhci_dbg(xhci, "Successful control transfer!\n");
                                status = 0;
                        }
                        break;
                case COMP_SHORT_TX:
                        xhci_warn(xhci, "WARN: short transfer on control ep\n");
                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                                status = -EREMOTEIO;
                        else
                                status = 0;
                        break;
                case COMP_BABBLE:
                        /* The 0.95 spec says a babbling control endpoint
                         * is not halted.  The 0.96 spec says it is.  Some HW
                         * claims to be 0.95 compliant, but it halts the control
                         * endpoint anyway.  Check if a babble halted the
                         * endpoint.
                         */
                        if ((ep_ctx->ep_info & EP_STATE_MASK) != EP_STATE_HALTED)
                                break;
                        /* else fall through */
                case COMP_STALL:
                        /* Did we transfer part of the data (middle) phase? */
                        if (event_trb != ep_ring->dequeue &&
                                        event_trb != td->last_trb)
                                td->urb->actual_length =
                                        td->urb->transfer_buffer_length
                                        - TRB_LEN(event->transfer_len);
                        else
                                td->urb->actual_length = 0;

                        ep->stopped_td = td;
                        ep->stopped_trb = event_trb;
                        xhci_queue_reset_ep(xhci, slot_id, ep_index);
                        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
                        xhci_ring_cmd_db(xhci);
                        goto td_cleanup;
                default:
                        /* Others already handled above */
                        break;
                }
                /*
                 * Did we transfer any data, despite the errors that might have
                 * happened?  I.e. did we get past the setup stage?
                 */
                if (event_trb != ep_ring->dequeue) {
                        /* The event was for the status stage */
                        if (event_trb == td->last_trb) {
                                if (td->urb->actual_length != 0) {
                                        /* Don't overwrite a previously set error code */
                                        if ((status == -EINPROGRESS ||
                                                                status == 0) &&
                                                        (td->urb->transfer_flags
                                                         & URB_SHORT_NOT_OK))
                                                /* Did we already see a short data stage? */
                                                status = -EREMOTEIO;
                                } else {
                                        td->urb->actual_length =
                                                td->urb->transfer_buffer_length;
                                }
                        } else {
                        /* Maybe the event was for the data stage? */
                                if (trb_comp_code != COMP_STOP_INVAL) {
                                        /* We didn't stop on a link TRB in the middle */
                                        td->urb->actual_length =
                                                td->urb->transfer_buffer_length -
                                                TRB_LEN(event->transfer_len);
                                        xhci_dbg(xhci, "Waiting for status stage event\n");
                                        urb = NULL;
                                        goto cleanup;
                                }
                        }
                }
1105         } else {
1106                 switch (trb_comp_code) {
1107                 case COMP_SUCCESS:
1108                         /* Double check that the HW transferred everything. */
1109                         if (event_trb != td->last_trb) {
1110                                 xhci_warn(xhci, "WARN Successful completion "
1111                                                 "on short TX\n");
1112                                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1113                                         status = -EREMOTEIO;
1114                                 else
1115                                         status = 0;
1116                         } else {
1117                                 if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
1118                                         xhci_dbg(xhci, "Successful bulk "
1119                                                         "transfer!\n");
1120                                 else
1121                                         xhci_dbg(xhci, "Successful interrupt "
1122                                                         "transfer!\n");
1123                                 status = 0;
1124                         }
1125                         break;
1126                 case COMP_SHORT_TX:
1127                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1128                                 status = -EREMOTEIO;
1129                         else
1130                                 status = 0;
1131                         break;
1132                 default:
1133                         /* Others already handled above */
1134                         break;
1135                 }
1136                 dev_dbg(&td->urb->dev->dev,
1137                                 "ep %#x - asked for %d bytes, "
1138                                 "%d bytes untransferred\n",
1139                                 td->urb->ep->desc.bEndpointAddress,
1140                                 td->urb->transfer_buffer_length,
1141                                 TRB_LEN(event->transfer_len));
1142                 /* Fast path - was this the last TRB in the TD for this URB? */
1143                 if (event_trb == td->last_trb) {
1144                         if (TRB_LEN(event->transfer_len) != 0) {
1145                                 td->urb->actual_length =
1146                                         td->urb->transfer_buffer_length -
1147                                         TRB_LEN(event->transfer_len);
1148                                 if (td->urb->transfer_buffer_length <
1149                                                 td->urb->actual_length) {
1150                                         xhci_warn(xhci, "HC gave bad length "
1151                                                         "of %d bytes left\n",
1152                                                         TRB_LEN(event->transfer_len));
1153                                         td->urb->actual_length = 0;
1154                                         if (td->urb->transfer_flags &
1155                                                         URB_SHORT_NOT_OK)
1156                                                 status = -EREMOTEIO;
1157                                         else
1158                                                 status = 0;
1159                                 }
1160                                 /* Don't overwrite a previously set error code */
1161                                 if (status == -EINPROGRESS) {
1162                                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1163                                                 status = -EREMOTEIO;
1164                                         else
1165                                                 status = 0;
1166                                 }
1167                         } else {
1168                                 td->urb->actual_length = td->urb->transfer_buffer_length;
1169                                 /* Ignore a short packet completion if the
1170                                  * untransferred length was zero.
1171                                  */
1172                                 if (status == -EREMOTEIO)
1173                                         status = 0;
1174                         }
1175                 } else {
1176                         /* Slow path - walk the list, starting from the dequeue
1177                          * pointer, to get the actual length transferred.
1178                          */
1179                         union xhci_trb *cur_trb;
1180                         struct xhci_segment *cur_seg;
1181
1182                         td->urb->actual_length = 0;
1183                         for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1184                                         cur_trb != event_trb;
1185                                         next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
                                if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK)
                                                != TRB_TYPE(TRB_TR_NOOP) &&
                                                (cur_trb->generic.field[3] & TRB_TYPE_BITMASK)
                                                != TRB_TYPE(TRB_LINK))
1188                                         td->urb->actual_length +=
1189                                                 TRB_LEN(cur_trb->generic.field[2]);
1190                         }
                        /* If the ring didn't stop on a Link or No-op TRB, add
                         * in the actual bytes transferred from the Normal TRB.
                         */
1194                         if (trb_comp_code != COMP_STOP_INVAL)
1195                                 td->urb->actual_length +=
1196                                         TRB_LEN(cur_trb->generic.field[2]) -
1197                                         TRB_LEN(event->transfer_len);
1198                 }
1199         }
1200         if (trb_comp_code == COMP_STOP_INVAL ||
1201                         trb_comp_code == COMP_STOP) {
1202                 /* The Endpoint Stop Command completion will take care of any
1203                  * stopped TDs.  A stopped TD may be restarted, so don't update
1204                  * the ring dequeue pointer or take this TD off any lists yet.
1205                  */
1206                 ep->stopped_td = td;
1207                 ep->stopped_trb = event_trb;
1208         } else {
1209                 if (trb_comp_code == COMP_STALL ||
1210                                 trb_comp_code == COMP_BABBLE) {
1211                         /* The transfer is completed from the driver's
1212                          * perspective, but we need to issue a set dequeue
1213                          * command for this stalled endpoint to move the dequeue
1214                          * pointer past the TD.  We can't do that here because
1215                          * the halt condition must be cleared first.
1216                          */
1217                         ep->stopped_td = td;
1218                         ep->stopped_trb = event_trb;
1219                 } else {
1220                         /* Update ring dequeue pointer */
1221                         while (ep_ring->dequeue != td->last_trb)
1222                                 inc_deq(xhci, ep_ring, false);
1223                         inc_deq(xhci, ep_ring, false);
1224                 }
1225
1226 td_cleanup:
1227                 /* Clean up the endpoint's TD list */
1228                 urb = td->urb;
1229                 /* Do one last check of the actual transfer length.
1230                  * If the host controller said we transferred more data than
1231                  * the buffer length, urb->actual_length will be a very big
1232                  * number (since it's unsigned).  Play it safe and say we didn't
1233                  * transfer anything.
1234                  */
1235                 if (urb->actual_length > urb->transfer_buffer_length) {
1236                         xhci_warn(xhci, "URB transfer length is wrong, "
1237                                         "xHC issue? req. len = %u, "
1238                                         "act. len = %u\n",
1239                                         urb->transfer_buffer_length,
1240                                         urb->actual_length);
1241                         urb->actual_length = 0;
1242                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1243                                 status = -EREMOTEIO;
1244                         else
1245                                 status = 0;
1246                 }
1247                 list_del(&td->td_list);
1248                 /* Was this TD slated to be cancelled but completed anyway? */
1249                 if (!list_empty(&td->cancelled_td_list)) {
1250                         list_del(&td->cancelled_td_list);
1251                         ep->cancels_pending--;
1252                 }
1253                 /* Leave the TD around for the reset endpoint function to use
1254                  * (but only if it's not a control endpoint, since we already
1255                  * queued the Set TR dequeue pointer command for stalled
1256                  * control endpoints).
1257                  */
1258                 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
1259                         (trb_comp_code != COMP_STALL &&
1260                                 trb_comp_code != COMP_BABBLE)) {
1261                         kfree(td);
1262                 }
1263                 urb->hcpriv = NULL;
1264         }
1265 cleanup:
1266         inc_deq(xhci, xhci->event_ring, true);
1267         xhci_set_hc_event_deq(xhci);
1268
        /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
1270         if (urb) {
1271                 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1272                 xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
1273                                 urb, urb->actual_length, status);
1274                 spin_unlock(&xhci->lock);
1275                 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
1276                 spin_lock(&xhci->lock);
1277         }
1278         return 0;
1279 }
1280
1281 /*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock while processing an event (e.g. to pass port status changes up
 * to the USB core).
1284  */
1285 void xhci_handle_event(struct xhci_hcd *xhci)
1286 {
1287         union xhci_trb *event;
1288         int update_ptrs = 1;
1289         int ret;
1290
1291         xhci_dbg(xhci, "In %s\n", __func__);
1292         if (!xhci->event_ring || !xhci->event_ring->dequeue) {
1293                 xhci->error_bitmask |= 1 << 1;
1294                 return;
1295         }
1296
1297         event = xhci->event_ring->dequeue;
1298         /* Does the HC or OS own the TRB? */
1299         if ((event->event_cmd.flags & TRB_CYCLE) !=
1300                         xhci->event_ring->cycle_state) {
1301                 xhci->error_bitmask |= 1 << 2;
1302                 return;
1303         }
1304         xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
1305
1306         /* FIXME: Handle more event types. */
1307         switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
1308         case TRB_TYPE(TRB_COMPLETION):
1309                 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
1310                 handle_cmd_completion(xhci, &event->event_cmd);
1311                 xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
1312                 break;
1313         case TRB_TYPE(TRB_PORT_STATUS):
1314                 xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
1315                 handle_port_status(xhci, event);
1316                 xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
1317                 update_ptrs = 0;
1318                 break;
1319         case TRB_TYPE(TRB_TRANSFER):
1320                 xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
1321                 ret = handle_tx_event(xhci, &event->trans_event);
1322                 xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
1323                 if (ret < 0)
1324                         xhci->error_bitmask |= 1 << 9;
1325                 else
1326                         update_ptrs = 0;
1327                 break;
1328         default:
1329                 xhci->error_bitmask |= 1 << 3;
1330         }
1331
1332         if (update_ptrs) {
1333                 /* Update SW and HC event ring dequeue pointer */
1334                 inc_deq(xhci, xhci->event_ring, true);
1335                 xhci_set_hc_event_deq(xhci);
1336         }
        /* Are there more items on the event ring?  Recurse: the ownership
         * check at the top of this function returns once the HC owns the
         * next event TRB.
         */
1338         xhci_handle_event(xhci);
1339 }
1340
1341 /****           Endpoint Ring Operations        ****/
1342
1343 /*
1344  * Generic function for queueing a TRB on a ring.
1345  * The caller must have checked to make sure there's room on the ring.
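 * The fields are written verbatim, so the caller must include the ring cycle
 * state in field4 (except for a TD's first TRB, whose cycle bit is set last
 * by giveback_first_trb()).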
1346  */
1347 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
1348                 bool consumer,
1349                 u32 field1, u32 field2, u32 field3, u32 field4)
1350 {
1351         struct xhci_generic_trb *trb;
1352
1353         trb = &ring->enqueue->generic;
1354         trb->field[0] = field1;
1355         trb->field[1] = field2;
1356         trb->field[2] = field3;
1357         trb->field[3] = field4;
1358         inc_enq(xhci, ring, consumer);
1359 }
1360
1361 /*
 * Check the endpoint state, and make sure the endpoint ring has room to
 * queue num_trbs TRBs.
1363  * FIXME allocate segments if the ring is full.
1364  */
1365 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
1366                 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
1367 {
1368         /* Make sure the endpoint has been added to xHC schedule */
1369         xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
1370         switch (ep_state) {
1371         case EP_STATE_DISABLED:
1372                 /*
1373                  * USB core changed config/interfaces without notifying us,
1374                  * or hardware is reporting the wrong state.
1375                  */
1376                 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
1377                 return -ENOENT;
1378         case EP_STATE_ERROR:
1379                 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
1380                 /* FIXME event handling code for error needs to clear it */
1381                 /* XXX not sure if this should be -ENOENT or not */
1382                 return -EINVAL;
1383         case EP_STATE_HALTED:
1384                 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
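                /* fall through */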
1385         case EP_STATE_STOPPED:
1386         case EP_STATE_RUNNING:
1387                 break;
1388         default:
1389                 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
1390                 /*
1391                  * FIXME issue Configure Endpoint command to try to get the HC
1392                  * back into a known state.
1393                  */
1394                 return -EINVAL;
1395         }
1396         if (!room_on_ring(xhci, ep_ring, num_trbs)) {
1397                 /* FIXME allocate more room */
1398                 xhci_err(xhci, "ERROR no room on ep ring\n");
1399                 return -ENOMEM;
1400         }
1401         return 0;
1402 }
1403
1404 static int prepare_transfer(struct xhci_hcd *xhci,
1405                 struct xhci_virt_device *xdev,
1406                 unsigned int ep_index,
1407                 unsigned int num_trbs,
1408                 struct urb *urb,
1409                 struct xhci_td **td,
1410                 gfp_t mem_flags)
1411 {
1412         int ret;
1413         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1414         ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
1415                         ep_ctx->ep_info & EP_STATE_MASK,
1416                         num_trbs, mem_flags);
1417         if (ret)
1418                 return ret;
1419         *td = kzalloc(sizeof(struct xhci_td), mem_flags);
1420         if (!*td)
1421                 return -ENOMEM;
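        /* td_list queues this TD on the endpoint ring's list; cancelled_td_list
         * is only used if the URB is unlinked before the TD completes.
         */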
1422         INIT_LIST_HEAD(&(*td)->td_list);
1423         INIT_LIST_HEAD(&(*td)->cancelled_td_list);
1424
1425         ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
1426         if (unlikely(ret)) {
1427                 kfree(*td);
1428                 return ret;
1429         }
1430
1431         (*td)->urb = urb;
1432         urb->hcpriv = (void *) (*td);
1433         /* Add this TD to the tail of the endpoint ring's TD list */
1434         list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
1435         (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
1436         (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
1437
1438         return 0;
1439 }
1440
1441 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
1442 {
1443         int num_sgs, num_trbs, running_total, temp, i;
1444         struct scatterlist *sg;
1445
1446         sg = NULL;
1447         num_sgs = urb->num_sgs;
1448         temp = urb->transfer_buffer_length;
1449
        xhci_dbg(xhci, "count sg list trbs:\n");
1451         num_trbs = 0;
1452         for_each_sg(urb->sg->sg, sg, num_sgs, i) {
1453                 unsigned int previous_total_trbs = num_trbs;
1454                 unsigned int len = sg_dma_len(sg);
1455
1456                 /* Scatter gather list entries may cross 64KB boundaries */
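                /* e.g. a 0x2000-byte entry at DMA 0x0800f000 crosses the 64KB
                 * boundary at 0x08010000, so it needs two TRBs
                 */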
1457                 running_total = TRB_MAX_BUFF_SIZE -
1458                         (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1459                 if (running_total != 0)
1460                         num_trbs++;
1461
1462                 /* How many more 64KB chunks to transfer, how many more TRBs? */
1463                 while (running_total < sg_dma_len(sg)) {
1464                         num_trbs++;
1465                         running_total += TRB_MAX_BUFF_SIZE;
1466                 }
1467                 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
1468                                 i, (unsigned long long)sg_dma_address(sg),
1469                                 len, len, num_trbs - previous_total_trbs);
1470
1471                 len = min_t(int, len, temp);
1472                 temp -= len;
1473                 if (temp == 0)
1474                         break;
1475         }
1476         xhci_dbg(xhci, "\n");
1477         if (!in_interrupt())
1478                 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
1479                                 urb->ep->desc.bEndpointAddress,
1480                                 urb->transfer_buffer_length,
1481                                 num_trbs);
1482         return num_trbs;
1483 }
1484
1485 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
1486 {
1487         if (num_trbs != 0)
1488                 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
1489                                 "TRBs, %d left\n", __func__,
1490                                 urb->ep->desc.bEndpointAddress, num_trbs);
1491         if (running_total != urb->transfer_buffer_length)
1492                 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
1493                                 "queued %#x (%d), asked for %#x (%d)\n",
1494                                 __func__,
1495                                 urb->ep->desc.bEndpointAddress,
1496                                 running_total, running_total,
1497                                 urb->transfer_buffer_length,
1498                                 urb->transfer_buffer_length);
1499 }
1500
1501 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
1502                 unsigned int ep_index, int start_cycle,
1503                 struct xhci_generic_trb *start_trb, struct xhci_td *td)
1504 {
1505         /*
1506          * Pass all the TRBs to the hardware at once and make sure this write
1507          * isn't reordered.
1508          */
1509         wmb();
1510         start_trb->field[3] |= start_cycle;
1511         ring_ep_doorbell(xhci, slot_id, ep_index);
1512 }
1513
1514 /*
1515  * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
1516  * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (composed of sg list entries) can take several service intervals to
1518  * transmit.
1519  */
1520 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1521                 struct urb *urb, int slot_id, unsigned int ep_index)
1522 {
1523         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
1524                         xhci->devs[slot_id]->out_ctx, ep_index);
1525         int xhci_interval;
1526         int ep_interval;
1527
1528         xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
1529         ep_interval = urb->interval;
1530         /* Convert to microframes */
1531         if (urb->dev->speed == USB_SPEED_LOW ||
1532                         urb->dev->speed == USB_SPEED_FULL)
1533                 ep_interval *= 8;
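        /* e.g. a full-speed urb->interval of 4 frames is 32 microframes */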
1534         /* FIXME change this to a warning and a suggestion to use the new API
1535          * to set the polling interval (once the API is added).
1536          */
1537         if (xhci_interval != ep_interval) {
                if (printk_ratelimit())
1539                         dev_dbg(&urb->dev->dev, "Driver uses different interval"
1540                                         " (%d microframe%s) than xHCI "
1541                                         "(%d microframe%s)\n",
1542                                         ep_interval,
1543                                         ep_interval == 1 ? "" : "s",
1544                                         xhci_interval,
1545                                         xhci_interval == 1 ? "" : "s");
1546                 urb->interval = xhci_interval;
1547                 /* Convert back to frames for LS/FS devices */
1548                 if (urb->dev->speed == USB_SPEED_LOW ||
1549                                 urb->dev->speed == USB_SPEED_FULL)
1550                         urb->interval /= 8;
1551         }
        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
1553 }
1554
1555 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1556                 struct urb *urb, int slot_id, unsigned int ep_index)
1557 {
1558         struct xhci_ring *ep_ring;
1559         unsigned int num_trbs;
1560         struct xhci_td *td;
1561         struct scatterlist *sg;
1562         int num_sgs;
        int trb_buff_len, this_sg_len, running_total, ret;
1564         bool first_trb;
1565         u64 addr;
1566
1567         struct xhci_generic_trb *start_trb;
1568         int start_cycle;
1569
1570         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1571         num_trbs = count_sg_trbs_needed(xhci, urb);
1572         num_sgs = urb->num_sgs;
1573
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, num_trbs, urb, &td, mem_flags);
        if (ret < 0)
                return ret;
1578         /*
1579          * Don't give the first TRB to the hardware (by toggling the cycle bit)
1580          * until we've finished creating all the other TRBs.  The ring's cycle
1581          * state may change as we enqueue the other TRBs, so save it too.
1582          */
1583         start_trb = &ep_ring->enqueue->generic;
1584         start_cycle = ep_ring->cycle_state;
1585
1586         running_total = 0;
1587         /*
1588          * How much data is in the first TRB?
1589          *
1590          * There are three forces at work for TRB buffer pointers and lengths:
1591          * 1. We don't want to walk off the end of this sg-list entry buffer.
1592          * 2. The transfer length that the driver requested may be smaller than
1593          *    the amount of memory allocated for this scatter-gather list.
         * 3. TRB buffers can't cross 64KB boundaries.
1595          */
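        /*
         * Example: if addr = 0x1ff00 and this_sg_len = 0x300, the first TRB
         * may carry only 0x100 bytes before the 64KB boundary at 0x20000.
         */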
1596         sg = urb->sg->sg;
1597         addr = (u64) sg_dma_address(sg);
1598         this_sg_len = sg_dma_len(sg);
1599         trb_buff_len = TRB_MAX_BUFF_SIZE -
1600                 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1601         trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
1602         if (trb_buff_len > urb->transfer_buffer_length)
1603                 trb_buff_len = urb->transfer_buffer_length;
1604         xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
1605                         trb_buff_len);
1606
1607         first_trb = true;
1608         /* Queue the first TRB, even if it's zero-length */
1609         do {
1610                 u32 field = 0;
1611                 u32 length_field = 0;
1612
1613                 /* Don't change the cycle bit of the first TRB until later */
1614                 if (first_trb)
1615                         first_trb = false;
1616                 else
1617                         field |= ep_ring->cycle_state;
1618
1619                 /* Chain all the TRBs together; clear the chain bit in the last
1620                  * TRB to indicate it's the last TRB in the chain.
1621                  */
1622                 if (num_trbs > 1) {
1623                         field |= TRB_CHAIN;
1624                 } else {
1625                         /* FIXME - add check for ZERO_PACKET flag before this */
1626                         td->last_trb = ep_ring->enqueue;
1627                         field |= TRB_IOC;
1628                 }
1629                 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
1630                                 "64KB boundary at %#x, end dma = %#x\n",
1631                                 (unsigned int) addr, trb_buff_len, trb_buff_len,
1632                                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1633                                 (unsigned int) addr + trb_buff_len);
1634                 if (TRB_MAX_BUFF_SIZE -
1635                                 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
1636                         xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
1637                         xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
1638                                         (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1639                                         (unsigned int) addr + trb_buff_len);
1640                 }
1641                 length_field = TRB_LEN(trb_buff_len) |
1642                         TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1643                         TRB_INTR_TARGET(0);
1644                 queue_trb(xhci, ep_ring, false,
1645                                 lower_32_bits(addr),
1646                                 upper_32_bits(addr),
1647                                 length_field,
1648                                 /* We always want to know if the TRB was short,
1649                                  * or we won't get an event when it completes.
1650                                  * (Unless we use event data TRBs, which are a
1651                                  * waste of space and HC resources.)
1652                                  */
1653                                 field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
1654                 --num_trbs;
1655                 running_total += trb_buff_len;
1656
1657                 /* Calculate length for next transfer --
1658                  * Are we done queueing all the TRBs for this sg entry?
1659                  */
1660                 this_sg_len -= trb_buff_len;
1661                 if (this_sg_len == 0) {
1662                         --num_sgs;
1663                         if (num_sgs == 0)
1664                                 break;
1665                         sg = sg_next(sg);
1666                         addr = (u64) sg_dma_address(sg);
1667                         this_sg_len = sg_dma_len(sg);
1668                 } else {
1669                         addr += trb_buff_len;
1670                 }
1671
1672                 trb_buff_len = TRB_MAX_BUFF_SIZE -
1673                         (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1674                 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
1675                 if (running_total + trb_buff_len > urb->transfer_buffer_length)
1676                         trb_buff_len =
1677                                 urb->transfer_buffer_length - running_total;
1678         } while (running_total < urb->transfer_buffer_length);
1679
1680         check_trb_math(urb, num_trbs, running_total);
1681         giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
1682         return 0;
1683 }
1684
1685 /* This is very similar to what ehci-q.c qtd_fill() does */
1686 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1687                 struct urb *urb, int slot_id, unsigned int ep_index)
1688 {
1689         struct xhci_ring *ep_ring;
1690         struct xhci_td *td;
1691         int num_trbs;
1692         struct xhci_generic_trb *start_trb;
1693         bool first_trb;
1694         int start_cycle;
1695         u32 field, length_field;
1696
1697         int running_total, trb_buff_len, ret;
1698         u64 addr;
1699
1700         if (urb->sg)
1701                 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
1702
1703         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1704
1705         num_trbs = 0;
1706         /* How much data is (potentially) left before the 64KB boundary? */
1707         running_total = TRB_MAX_BUFF_SIZE -
1708                 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1709
        /* If there's some data on this 64KB chunk, or we have to send a
         * zero-length transfer, we need at least one TRB.
         */
1713         if (running_total != 0 || urb->transfer_buffer_length == 0)
1714                 num_trbs++;
1715         /* How many more 64KB chunks to transfer, how many more TRBs? */
1716         while (running_total < urb->transfer_buffer_length) {
1717                 num_trbs++;
1718                 running_total += TRB_MAX_BUFF_SIZE;
1719         }
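        /*
         * Example: a 0x18000-byte buffer at DMA address 0x0ffff000 takes three
         * TRBs: 0x1000 bytes up to the first 64KB boundary, then 0x10000
         * bytes, then the remaining 0x7000 bytes.
         */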
1720         /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
1721
1722         if (!in_interrupt())
1723                 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
1724                                 urb->ep->desc.bEndpointAddress,
1725                                 urb->transfer_buffer_length,
1726                                 urb->transfer_buffer_length,
1727                                 (unsigned long long)urb->transfer_dma,
1728                                 num_trbs);
1729
1730         ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
1731                         num_trbs, urb, &td, mem_flags);
1732         if (ret < 0)
1733                 return ret;
1734
1735         /*
1736          * Don't give the first TRB to the hardware (by toggling the cycle bit)
1737          * until we've finished creating all the other TRBs.  The ring's cycle
1738          * state may change as we enqueue the other TRBs, so save it too.
1739          */
1740         start_trb = &ep_ring->enqueue->generic;
1741         start_cycle = ep_ring->cycle_state;
1742
1743         running_total = 0;
1744         /* How much data is in the first TRB? */
1745         addr = (u64) urb->transfer_dma;
1746         trb_buff_len = TRB_MAX_BUFF_SIZE -
1747                 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1748         if (urb->transfer_buffer_length < trb_buff_len)
1749                 trb_buff_len = urb->transfer_buffer_length;
1750
1751         first_trb = true;
1752
1753         /* Queue the first TRB, even if it's zero-length */
1754         do {
1755                 field = 0;
1756
1757                 /* Don't change the cycle bit of the first TRB until later */
1758                 if (first_trb)
1759                         first_trb = false;
1760                 else
1761                         field |= ep_ring->cycle_state;
1762
1763                 /* Chain all the TRBs together; clear the chain bit in the last
1764                  * TRB to indicate it's the last TRB in the chain.
1765                  */
1766                 if (num_trbs > 1) {
1767                         field |= TRB_CHAIN;
1768                 } else {
1769                         /* FIXME - add check for ZERO_PACKET flag before this */
1770                         td->last_trb = ep_ring->enqueue;
1771                         field |= TRB_IOC;
1772                 }
1773                 length_field = TRB_LEN(trb_buff_len) |
1774                         TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1775                         TRB_INTR_TARGET(0);
1776                 queue_trb(xhci, ep_ring, false,
1777                                 lower_32_bits(addr),
1778                                 upper_32_bits(addr),
1779                                 length_field,
1780                                 /* We always want to know if the TRB was short,
1781                                  * or we won't get an event when it completes.
1782                                  * (Unless we use event data TRBs, which are a
1783                                  * waste of space and HC resources.)
1784                                  */
1785                                 field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
1786                 --num_trbs;
1787                 running_total += trb_buff_len;
1788
1789                 /* Calculate length for next transfer */
1790                 addr += trb_buff_len;
1791                 trb_buff_len = urb->transfer_buffer_length - running_total;
1792                 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
1793                         trb_buff_len = TRB_MAX_BUFF_SIZE;
1794         } while (running_total < urb->transfer_buffer_length);
1795
1796         check_trb_math(urb, num_trbs, running_total);
1797         giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
1798         return 0;
1799 }
1800
1801 /* Caller must have locked xhci->lock */
1802 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1803                 struct urb *urb, int slot_id, unsigned int ep_index)
1804 {
1805         struct xhci_ring *ep_ring;
1806         int num_trbs;
1807         int ret;
1808         struct usb_ctrlrequest *setup;
1809         struct xhci_generic_trb *start_trb;
1810         int start_cycle;
1811         u32 field, length_field;
1812         struct xhci_td *td;
1813
1814         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1815
1816         /*
1817          * Need to copy setup packet into setup TRB, so we can't use the setup
1818          * DMA address.
1819          */
1820         if (!urb->setup_packet)
1821                 return -EINVAL;
1822
1823         if (!in_interrupt())
1824                 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
1825                                 slot_id, ep_index);
1826         /* 1 TRB for setup, 1 for status */
1827         num_trbs = 2;
        /*
         * Don't need to check if we need additional event data and normal TRBs,
         * since the data stage of a control transfer is at most 65535 bytes
         * (wLength is a 16-bit field).
         * XXX: can we get a buffer that crosses 64KB boundaries?
         */
1833         if (urb->transfer_buffer_length > 0)
1834                 num_trbs++;
1835         ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
1836                         urb, &td, mem_flags);
1837         if (ret < 0)
1838                 return ret;
1839
1840         /*
1841          * Don't give the first TRB to the hardware (by toggling the cycle bit)
1842          * until we've finished creating all the other TRBs.  The ring's cycle
1843          * state may change as we enqueue the other TRBs, so save it too.
1844          */
1845         start_trb = &ep_ring->enqueue->generic;
1846         start_cycle = ep_ring->cycle_state;
1847
1848         /* Queue setup TRB - see section 6.4.1.2.1 */
1849         /* FIXME better way to translate setup_packet into two u32 fields? */
1850         setup = (struct usb_ctrlrequest *) urb->setup_packet;
1851         queue_trb(xhci, ep_ring, false,
                        /* FIXME: setup->wValue, wIndex, and wLength are __le16,
                         * so this needs byte-swapping on big-endian hosts.
                         */
1853                         setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
1854                         setup->wIndex | setup->wLength << 16,
1855                         TRB_LEN(8) | TRB_INTR_TARGET(0),
1856                         /* Immediate data in pointer */
1857                         TRB_IDT | TRB_TYPE(TRB_SETUP));
1858
1859         /* If there's data, queue data TRBs */
1860         field = 0;
1861         length_field = TRB_LEN(urb->transfer_buffer_length) |
1862                 TD_REMAINDER(urb->transfer_buffer_length) |
1863                 TRB_INTR_TARGET(0);
1864         if (urb->transfer_buffer_length > 0) {
1865                 if (setup->bRequestType & USB_DIR_IN)
1866                         field |= TRB_DIR_IN;
1867                 queue_trb(xhci, ep_ring, false,
1868                                 lower_32_bits(urb->transfer_dma),
1869                                 upper_32_bits(urb->transfer_dma),
1870                                 length_field,
1871                                 /* Event on short tx */
1872                                 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
1873         }
1874
1875         /* Save the DMA address of the last TRB in the TD */
1876         td->last_trb = ep_ring->enqueue;
1877
1878         /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
1879         /* If the device sent data, the status stage is an OUT transfer */
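        /* (e.g. GET_DESCRIPTOR has an IN data stage, so its status stage is OUT) */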
1880         if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
1881                 field = 0;
1882         else
1883                 field = TRB_DIR_IN;
1884         queue_trb(xhci, ep_ring, false,
1885                         0,
1886                         0,
1887                         TRB_INTR_TARGET(0),
1888                         /* Event on completion */
1889                         field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
1890
1891         giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
1892         return 0;
1893 }
1894
1895 /****           Command Ring Operations         ****/
1896
1897 /* Generic function for queueing a command TRB on the command ring.
1898  * Check to make sure there's room on the command ring for one command TRB.
1899  * Also check that there's room reserved for commands that must not fail.
1900  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
1901  * then only check for the number of reserved spots.
1902  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
1903  * because the command event handler may want to resubmit a failed command.
1904  */
1905 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
1906                 u32 field3, u32 field4, bool command_must_succeed)
1907 {
1908         int reserved_trbs = xhci->cmd_ring_reserved_trbs;
1909         if (!command_must_succeed)
1910                 reserved_trbs++;
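        /* An ordinary command must also leave the reserved TRBs free, so it
         * needs one more open slot than a must-succeed command does.
         */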
1911
1912         if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
1913                 if (!in_interrupt())
1914                         xhci_err(xhci, "ERR: No room for command on command ring\n");
1915                 if (command_must_succeed)
1916                         xhci_err(xhci, "ERR: Reserved TRB counting for "
1917                                         "unfailable commands failed.\n");
1918                 return -ENOMEM;
1919         }
1920         queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
1921                         field4 | xhci->cmd_ring->cycle_state);
1922         return 0;
1923 }
1924
1925 /* Queue a no-op command on the command ring */
1926 static int queue_cmd_noop(struct xhci_hcd *xhci)
1927 {
1928         return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
1929 }
1930
1931 /*
 * Place a no-op command on the command ring to test the command and
 * event rings.
1934  */
1935 void *xhci_setup_one_noop(struct xhci_hcd *xhci)
1936 {
1937         if (queue_cmd_noop(xhci) < 0)
1938                 return NULL;
1939         xhci->noops_submitted++;
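        /* Return the doorbell routine so the caller can ring it when ready. */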
1940         return xhci_ring_cmd_db;
1941 }
1942
1943 /* Queue a slot enable or disable request on the command ring */
1944 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
1945 {
1946         return queue_command(xhci, 0, 0, 0,
1947                         TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
1948 }
1949
1950 /* Queue an address device command TRB */
1951 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1952                 u32 slot_id)
1953 {
1954         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1955                         upper_32_bits(in_ctx_ptr), 0,
1956                         TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
1957                         false);
1958 }
1959
1960 /* Queue a configure endpoint command TRB */
1961 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1962                 u32 slot_id, bool command_must_succeed)
1963 {
1964         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1965                         upper_32_bits(in_ctx_ptr), 0,
1966                         TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
1967                         command_must_succeed);
1968 }
1969
1970 /* Queue an evaluate context command TRB */
1971 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1972                 u32 slot_id)
1973 {
1974         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1975                         upper_32_bits(in_ctx_ptr), 0,
1976                         TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
1977                         false);
1978 }
1979
1980 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1981                 unsigned int ep_index)
1982 {
1983         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1984         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1985         u32 type = TRB_TYPE(TRB_STOP_RING);
1986
1987         return queue_command(xhci, 0, 0, 0,
1988                         trb_slot_id | trb_ep_index | type, false);
1989 }
1990
1991 /* Set Transfer Ring Dequeue Pointer command.
1992  * This should not be used for endpoints that have streams enabled.
1993  */
1994 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1995                 unsigned int ep_index, struct xhci_segment *deq_seg,
1996                 union xhci_trb *deq_ptr, u32 cycle_state)
1997 {
1998         dma_addr_t addr;
1999         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
2000         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
2001         u32 type = TRB_TYPE(TRB_SET_DEQ);
2002
2003         addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
2004         if (addr == 0) {
2005                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
2006                 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
2007                                 deq_seg, deq_ptr);
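                /* XXX: returning 0 here looks like success to the caller. */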
2008                 return 0;
2009         }
2010         return queue_command(xhci, lower_32_bits(addr) | cycle_state,
2011                         upper_32_bits(addr), 0,
2012                         trb_slot_id | trb_ep_index | type, false);
2013 }
2014
2015 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
2016                 unsigned int ep_index)
2017 {
2018         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
2019         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
2020         u32 type = TRB_TYPE(TRB_RESET_EP);
2021
2022         return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
2023                         false);
2024 }