xhci: correct burst count field for isoc transfers on 1.0 xhci hosts
[pandora-kernel.git] / drivers/usb/host/xhci-mem.c
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #include <linux/usb.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include <linux/dmapool.h>
27
28 #include "xhci.h"
29
30 /*
31  * Allocates a generic ring segment from the ring pool, sets the dma address,
32  * initializes the segment to zero, and sets the private next pointer to NULL.
33  *
34  * Section 4.11.1.1:
35  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
36  */
37 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
38 {
39         struct xhci_segment *seg;
40         dma_addr_t      dma;
41
42         seg = kzalloc(sizeof *seg, flags);
43         if (!seg)
44                 return NULL;
45         xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
46
47         seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
48         if (!seg->trbs) {
49                 kfree(seg);
50                 return NULL;
51         }
52         xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
53                         seg->trbs, (unsigned long long)dma);
54
55         memset(seg->trbs, 0, SEGMENT_SIZE);
56         seg->dma = dma;
57         seg->next = NULL;
58
59         return seg;
60 }
61
62 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
63 {
64         if (seg->trbs) {
65                 xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
66                                 seg->trbs, (unsigned long long)seg->dma);
67                 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
68                 seg->trbs = NULL;
69         }
70         xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
71         kfree(seg);
72 }
73
74 /*
75  * Make the prev segment point to the next segment.
76  *
77  * Change the last TRB in the prev segment to be a Link TRB which points to the
78  * DMA address of the next segment.  The caller needs to set any Link TRB
79  * related flags, such as End TRB, Toggle Cycle, and no snoop.
80  */
81 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
82                 struct xhci_segment *next, bool link_trbs, bool isoc)
83 {
84         u32 val;
85
86         if (!prev || !next)
87                 return;
88         prev->next = next;
89         if (link_trbs) {
90                 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
91                         cpu_to_le64(next->dma);
92
93                 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
94                 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
95                 val &= ~TRB_TYPE_BITMASK;
96                 val |= TRB_TYPE(TRB_LINK);
97                 /* Always set the chain bit with 0.95 hardware */
98                 /* Set chain bit for isoc rings on AMD 0.96 host */
99                 if (xhci_link_trb_quirk(xhci) ||
100                                 (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
101                         val |= TRB_CHAIN;
102                 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
103         }
104         xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
105                         (unsigned long long)prev->dma,
106                         (unsigned long long)next->dma);
107 }
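
/*
 * Illustrative sketch of the result (not a quote from the xHCI spec): after
 * xhci_link_segments(xhci, a, b, true, false), the last TRB of segment 'a'
 * is a Link TRB carrying the DMA address of segment 'b', roughly:
 *
 *	a->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr == cpu_to_le64(b->dma)
 *	(le32_to_cpu(a->trbs[TRBS_PER_SEGMENT-1].link.control)
 *		& TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)
 *
 * The chain bit is only set for 0.95 hosts (xhci_link_trb_quirk()) or for
 * isoc rings on AMD 0.96 hosts; LINK_TOGGLE is set later by the caller on
 * the segment that closes the loop (see xhci_ring_alloc() below).
 */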
108
109 /* XXX: Do we need the hcd structure in all these functions? */
110 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
111 {
112         struct xhci_segment *seg;
113         struct xhci_segment *first_seg;
114
115         if (!ring)
116                 return;
117         if (ring->first_seg) {
118                 first_seg = ring->first_seg;
119                 seg = first_seg->next;
120                 xhci_dbg(xhci, "Freeing ring at %p\n", ring);
121                 while (seg != first_seg) {
122                         struct xhci_segment *next = seg->next;
123                         xhci_segment_free(xhci, seg);
124                         seg = next;
125                 }
126                 xhci_segment_free(xhci, first_seg);
127                 ring->first_seg = NULL;
128         }
129         kfree(ring);
130 }
131
132 static void xhci_initialize_ring_info(struct xhci_ring *ring)
133 {
134         /* The ring is empty, so the enqueue pointer == dequeue pointer */
135         ring->enqueue = ring->first_seg->trbs;
136         ring->enq_seg = ring->first_seg;
137         ring->dequeue = ring->enqueue;
138         ring->deq_seg = ring->first_seg;
139         /* The ring is initialized to 0. The producer must write 1 to the cycle
140          * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
141          * compare CCS to the cycle bit to check ownership, so CCS = 1.
142          */
143         ring->cycle_state = 1;
144         /* Not necessary for new rings, but needed for re-initialized rings */
145         ring->enq_updates = 0;
146         ring->deq_updates = 0;
147 }
148
149 /**
150  * Create a new ring with zero or more segments.
151  *
152  * Link each segment together into a ring.
153  * Set the end flag and the cycle toggle bit on the last segment.
154  * See section 4.9.1 and figures 15 and 16.
155  */
156 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
157                 unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
158 {
159         struct xhci_ring        *ring;
160         struct xhci_segment     *prev;
161
162         ring = kzalloc(sizeof *(ring), flags);
163         xhci_dbg(xhci, "Allocating ring at %p\n", ring);
164         if (!ring)
165                 return NULL;
166
167         INIT_LIST_HEAD(&ring->td_list);
168         if (num_segs == 0)
169                 return ring;
170
171         ring->first_seg = xhci_segment_alloc(xhci, flags);
172         if (!ring->first_seg)
173                 goto fail;
174         num_segs--;
175
176         prev = ring->first_seg;
177         while (num_segs > 0) {
178                 struct xhci_segment     *next;
179
180                 next = xhci_segment_alloc(xhci, flags);
181                 if (!next) {
182                         prev = ring->first_seg;
183                         while (prev) {
184                                 next = prev->next;
185                                 xhci_segment_free(xhci, prev);
186                                 prev = next;
187                         }
188                         goto fail;
189                 }
190                 xhci_link_segments(xhci, prev, next, link_trbs, isoc);
191
192                 prev = next;
193                 num_segs--;
194         }
195         xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
196
197         if (link_trbs) {
198                 /* See section 4.9.2.1 and 6.4.4.1 */
199                 prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
200                         cpu_to_le32(LINK_TOGGLE);
201                 xhci_dbg(xhci, "Wrote link toggle flag to"
202                                 " segment %p (virtual), 0x%llx (DMA)\n",
203                                 prev, (unsigned long long)prev->dma);
204         }
205         xhci_initialize_ring_info(ring);
206         return ring;
207
208 fail:
209         kfree(ring);
210         return NULL;
211 }
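
/*
 * For reference, the callers in this file use two shapes of ring: endpoint 0,
 * stream rings, and non-isoc endpoint rings are single-segment rings with
 * link TRBs,
 *
 *	xhci_ring_alloc(xhci, 1, true, false, flags);
 *
 * while isochronous endpoints get an 8-segment ring and pass isoc == true so
 * the AMD 0.96 chain-bit quirk above is applied,
 *
 *	xhci_ring_alloc(xhci, 8, true, true, mem_flags);
 *
 * (see xhci_alloc_virt_device(), xhci_alloc_stream_info() and
 * xhci_endpoint_init() below).
 */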
212
213 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
214                 struct xhci_virt_device *virt_dev,
215                 unsigned int ep_index)
216 {
217         int rings_cached;
218
219         rings_cached = virt_dev->num_rings_cached;
220         if (rings_cached < XHCI_MAX_RINGS_CACHED) {
221                 virt_dev->ring_cache[rings_cached] =
222                         virt_dev->eps[ep_index].ring;
223                 virt_dev->num_rings_cached++;
224                 xhci_dbg(xhci, "Cached old ring, "
225                                 "%d ring%s cached\n",
226                                 virt_dev->num_rings_cached,
227                                 (virt_dev->num_rings_cached > 1) ? "s" : "");
228         } else {
229                 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
230                 xhci_dbg(xhci, "Ring cache full (%d rings), "
231                                 "freeing ring\n",
232                                 virt_dev->num_rings_cached);
233         }
234         virt_dev->eps[ep_index].ring = NULL;
235 }
236
237 /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
238  * pointers to the beginning of the ring.
239  */
240 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
241                 struct xhci_ring *ring, bool isoc)
242 {
243         struct xhci_segment     *seg = ring->first_seg;
244         do {
245                 memset(seg->trbs, 0,
246                                 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
247                 /* All endpoint rings have link TRBs */
248                 xhci_link_segments(xhci, seg, seg->next, 1, isoc);
249                 seg = seg->next;
250         } while (seg != ring->first_seg);
251         xhci_initialize_ring_info(ring);
252         /* td list should be empty since all URBs have been cancelled,
253          * but just in case...
254          */
255         INIT_LIST_HEAD(&ring->td_list);
256 }
257
258 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
259
260 static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
261                                                     int type, gfp_t flags)
262 {
263         struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
264         if (!ctx)
265                 return NULL;
266
267         BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
268         ctx->type = type;
269         ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
270         if (type == XHCI_CTX_TYPE_INPUT)
271                 ctx->size += CTX_SIZE(xhci->hcc_params);
272
273         ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
274         if (!ctx->bytes) {
275                 kfree(ctx);
276                 return NULL;
277         }
278         memset(ctx->bytes, 0, ctx->size);
279         return ctx;
280 }
281
282 static void xhci_free_container_ctx(struct xhci_hcd *xhci,
283                              struct xhci_container_ctx *ctx)
284 {
285         if (!ctx)
286                 return;
287         dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
288         kfree(ctx);
289 }
290
291 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
292                                               struct xhci_container_ctx *ctx)
293 {
294         BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
295         return (struct xhci_input_control_ctx *)ctx->bytes;
296 }
297
298 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
299                                         struct xhci_container_ctx *ctx)
300 {
301         if (ctx->type == XHCI_CTX_TYPE_DEVICE)
302                 return (struct xhci_slot_ctx *)ctx->bytes;
303
304         return (struct xhci_slot_ctx *)
305                 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
306 }
307
308 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
309                                     struct xhci_container_ctx *ctx,
310                                     unsigned int ep_index)
311 {
312         /* increment ep index by offset of start of ep ctx array */
313         ep_index++;
314         if (ctx->type == XHCI_CTX_TYPE_INPUT)
315                 ep_index++;
316
317         return (struct xhci_ep_ctx *)
318                 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
319 }
320
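/*
 * Worked example of the context layout (a sketch assuming 32-byte contexts,
 * i.e. CTX_SIZE() == 32): a device context is one slot context followed by
 * 31 endpoint contexts, 32 * 32 = 1024 bytes, which is the ctx->size chosen
 * in xhci_alloc_container_ctx(); an input context prepends the input control
 * context, hence the extra CTX_SIZE().  So for an input context:
 *
 *	xhci_get_input_control_ctx()	-> ctx->bytes
 *	xhci_get_slot_ctx()		-> ctx->bytes + 32
 *	xhci_get_ep_ctx(..., 0)		-> ctx->bytes + 2 * 32	(EP0)
 *	xhci_get_ep_ctx(..., 1)		-> ctx->bytes + 3 * 32	(EP1 OUT)
 *
 * With 64-byte contexts (HCC_64BYTE_CONTEXT) the same layout applies with a
 * 64-byte stride and a 2048-byte device context.
 */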
321
322 /***************** Streams structures manipulation *************************/
323
324 static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
325                 unsigned int num_stream_ctxs,
326                 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
327 {
328         struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
329
330         if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
331                 dma_free_coherent(&pdev->dev,
332                                 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
333                                 stream_ctx, dma);
334         else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
335                 return dma_pool_free(xhci->small_streams_pool,
336                                 stream_ctx, dma);
337         else
338                 return dma_pool_free(xhci->medium_streams_pool,
339                                 stream_ctx, dma);
340 }
341
342 /*
343  * The stream context array for each endpoint with bulk streams enabled can
344  * vary in size, based on:
345  *  - how many streams the endpoint supports,
346  *  - the maximum primary stream array size the host controller supports,
347  *  - and how many streams the device driver asks for.
348  *
349  * The stream context array must be a power of 2, and can be as small as
350  * 64 bytes or as large as 1MB.
351  */
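/*
 * Rough sense of scale, assuming the 16-byte struct xhci_stream_ctx: the
 * 64-byte minimum is a 4-entry array, and the 1MB maximum is a 64K-entry
 * array.  The allocator below only picks a backing store by size: small
 * arrays come from small_streams_pool, medium ones from medium_streams_pool,
 * and anything larger gets a one-off dma_alloc_coherent() allocation.
 */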
352 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
353                 unsigned int num_stream_ctxs, dma_addr_t *dma,
354                 gfp_t mem_flags)
355 {
356         struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
357
358         if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
359                 return dma_alloc_coherent(&pdev->dev,
360                                 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
361                                 dma, mem_flags);
362         else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
363                 return dma_pool_alloc(xhci->small_streams_pool,
364                                 mem_flags, dma);
365         else
366                 return dma_pool_alloc(xhci->medium_streams_pool,
367                                 mem_flags, dma);
368 }
369
370 struct xhci_ring *xhci_dma_to_transfer_ring(
371                 struct xhci_virt_ep *ep,
372                 u64 address)
373 {
374         if (ep->ep_state & EP_HAS_STREAMS)
375                 return radix_tree_lookup(&ep->stream_info->trb_address_map,
376                                 address >> SEGMENT_SHIFT);
377         return ep->ring;
378 }
379
380 /* Only use this when you know stream_info is valid */
381 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
382 static struct xhci_ring *dma_to_stream_ring(
383                 struct xhci_stream_info *stream_info,
384                 u64 address)
385 {
386         return radix_tree_lookup(&stream_info->trb_address_map,
387                         address >> SEGMENT_SHIFT);
388 }
389 #endif  /* CONFIG_USB_XHCI_HCD_DEBUGGING */
390
391 struct xhci_ring *xhci_stream_id_to_ring(
392                 struct xhci_virt_device *dev,
393                 unsigned int ep_index,
394                 unsigned int stream_id)
395 {
396         struct xhci_virt_ep *ep = &dev->eps[ep_index];
397
398         if (stream_id == 0)
399                 return ep->ring;
400         if (!ep->stream_info)
401                 return NULL;
402
403         if (stream_id > ep->stream_info->num_streams)
404                 return NULL;
405         return ep->stream_info->stream_rings[stream_id];
406 }
407
408 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
409 static int xhci_test_radix_tree(struct xhci_hcd *xhci,
410                 unsigned int num_streams,
411                 struct xhci_stream_info *stream_info)
412 {
413         u32 cur_stream;
414         struct xhci_ring *cur_ring;
415         u64 addr;
416
417         for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
418                 struct xhci_ring *mapped_ring;
419                 int trb_size = sizeof(union xhci_trb);
420
421                 cur_ring = stream_info->stream_rings[cur_stream];
422                 for (addr = cur_ring->first_seg->dma;
423                                 addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
424                                 addr += trb_size) {
425                         mapped_ring = dma_to_stream_ring(stream_info, addr);
426                         if (cur_ring != mapped_ring) {
427                                 xhci_warn(xhci, "WARN: DMA address 0x%08llx "
428                                                 "didn't map to stream ID %u; "
429                                                 "mapped to ring %p\n",
430                                                 (unsigned long long) addr,
431                                                 cur_stream,
432                                                 mapped_ring);
433                                 return -EINVAL;
434                         }
435                 }
436                 /* One TRB after the end of the ring segment shouldn't return a
437                  * pointer to the current ring (although it may be a part of a
438                  * different ring).
439                  */
440                 mapped_ring = dma_to_stream_ring(stream_info, addr);
441                 if (mapped_ring != cur_ring) {
442                         /* One TRB before should also fail */
443                         addr = cur_ring->first_seg->dma - trb_size;
444                         mapped_ring = dma_to_stream_ring(stream_info, addr);
445                 }
446                 if (mapped_ring == cur_ring) {
447                         xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
448                                         "mapped to valid stream ID %u; "
449                                         "mapped ring = %p\n",
450                                         (unsigned long long) addr,
451                                         cur_stream,
452                                         mapped_ring);
453                         return -EINVAL;
454                 }
455         }
456         return 0;
457 }
458 #endif  /* CONFIG_USB_XHCI_HCD_DEBUGGING */
459
460 /*
461  * Change an endpoint's internal structure so it supports stream IDs.  The
462  * number of requested streams includes stream 0, which cannot be used by device
463  * drivers.
464  *
465  * The number of stream contexts in the stream context array may be bigger than
466  * the number of streams the driver wants to use.  This is because the number of
467  * stream context array entries must be a power of two.
468  *
469  * We need a radix tree for mapping physical addresses of TRBs to which stream
470  * ID they belong to.  We need to do this because the host controller won't tell
471  * us which stream ring the TRB came from.  We could store the stream ID in an
472  * event data TRB, but that doesn't help us for the cancellation case, since the
473  * endpoint may stop before it reaches that event data TRB.
474  *
475  * The radix tree maps the upper portion of the TRB DMA address to a ring
476  * segment that has the same upper portion of DMA addresses.  For example, say I
477  * have segments of size 1KB, that are always 64-byte aligned.  A segment may
478  * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
479  * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
480  * pass the radix tree a key to get the right stream ID:
481  *
482  *      0x10c90fff >> 10 = 0x43243
483  *      0x10c912c0 >> 10 = 0x43244
484  *      0x10c91400 >> 10 = 0x43245
485  *
486  * Obviously, only those TRBs with DMA addresses that are within the segment
487  * will make the radix tree return the stream ID for that ring.
488  *
489  * Caveats for the radix tree:
490  *
491  * The radix tree uses an unsigned long as a key.  On 32-bit systems, an
492  * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
493  * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
494  * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
495  * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
496  * extended systems (where the DMA address can be bigger than 32-bits),
497  * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
498  */
499 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
500                 unsigned int num_stream_ctxs,
501                 unsigned int num_streams, gfp_t mem_flags)
502 {
503         struct xhci_stream_info *stream_info;
504         u32 cur_stream;
505         struct xhci_ring *cur_ring;
506         unsigned long key;
507         u64 addr;
508         int ret;
509
510         xhci_dbg(xhci, "Allocating %u streams and %u "
511                         "stream context array entries.\n",
512                         num_streams, num_stream_ctxs);
513         if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
514                 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
515                 return NULL;
516         }
517         xhci->cmd_ring_reserved_trbs++;
518
519         stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
520         if (!stream_info)
521                 goto cleanup_trbs;
522
523         stream_info->num_streams = num_streams;
524         stream_info->num_stream_ctxs = num_stream_ctxs;
525
526         /* Initialize the array of virtual pointers to stream rings. */
527         stream_info->stream_rings = kzalloc(
528                         sizeof(struct xhci_ring *)*num_streams,
529                         mem_flags);
530         if (!stream_info->stream_rings)
531                 goto cleanup_info;
532
533         /* Initialize the array of DMA addresses for stream rings for the HW. */
534         stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
535                         num_stream_ctxs, &stream_info->ctx_array_dma,
536                         mem_flags);
537         if (!stream_info->stream_ctx_array)
538                 goto cleanup_ctx;
539         memset(stream_info->stream_ctx_array, 0,
540                         sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
541
542         /* Allocate everything needed to free the stream rings later */
543         stream_info->free_streams_command =
544                 xhci_alloc_command(xhci, true, true, mem_flags);
545         if (!stream_info->free_streams_command)
546                 goto cleanup_ctx;
547
548         INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
549
550         /* Allocate rings for all the streams that the driver will use,
551          * and add their segment DMA addresses to the radix tree.
552          * Stream 0 is reserved.
553          */
554         for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
555                 stream_info->stream_rings[cur_stream] =
556                         xhci_ring_alloc(xhci, 1, true, false, mem_flags);
557                 cur_ring = stream_info->stream_rings[cur_stream];
558                 if (!cur_ring)
559                         goto cleanup_rings;
560                 cur_ring->stream_id = cur_stream;
561                 /* Set deq ptr, cycle bit, and stream context type */
562                 addr = cur_ring->first_seg->dma |
563                         SCT_FOR_CTX(SCT_PRI_TR) |
564                         cur_ring->cycle_state;
565                 stream_info->stream_ctx_array[cur_stream].stream_ring =
566                         cpu_to_le64(addr);
567                 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
568                                 cur_stream, (unsigned long long) addr);
569
570                 key = (unsigned long)
571                         (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
572                 ret = radix_tree_insert(&stream_info->trb_address_map,
573                                 key, cur_ring);
574                 if (ret) {
575                         xhci_ring_free(xhci, cur_ring);
576                         stream_info->stream_rings[cur_stream] = NULL;
577                         goto cleanup_rings;
578                 }
579         }
580         /* Leave the other unused stream ring pointers in the stream context
581          * array initialized to zero.  This will cause the xHC to give us an
582          * error if the device asks for a stream ID we don't have setup (if it
583          * was any other way, the host controller would assume the ring is
584          * "empty" and wait forever for data to be queued to that stream ID).
585          */
586 #if XHCI_DEBUG
587         /* Do a little test on the radix tree to make sure it returns the
588          * correct values.
589          */
590         if (xhci_test_radix_tree(xhci, num_streams, stream_info))
591                 goto cleanup_rings;
592 #endif
593
594         return stream_info;
595
596 cleanup_rings:
597         for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
598                 cur_ring = stream_info->stream_rings[cur_stream];
599                 if (cur_ring) {
600                         addr = cur_ring->first_seg->dma;
601                         radix_tree_delete(&stream_info->trb_address_map,
602                                         addr >> SEGMENT_SHIFT);
603                         xhci_ring_free(xhci, cur_ring);
604                         stream_info->stream_rings[cur_stream] = NULL;
605                 }
606         }
607         xhci_free_command(xhci, stream_info->free_streams_command);
608 cleanup_ctx:
609         kfree(stream_info->stream_rings);
610 cleanup_info:
611         kfree(stream_info);
612 cleanup_trbs:
613         xhci->cmd_ring_reserved_trbs--;
614         return NULL;
615 }
616 /*
617  * Sets the MaxPStreams field and the Linear Stream Array field.
618  * Sets the dequeue pointer to the stream context array.
619  */
620 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
621                 struct xhci_ep_ctx *ep_ctx,
622                 struct xhci_stream_info *stream_info)
623 {
624         u32 max_primary_streams;
625         /* MaxPStreams is the number of stream context array entries, not the
626          * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
627          * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
628          */
629         max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
630         xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
631                         1 << (max_primary_streams + 1));
632         ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
633         ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
634                                        | EP_HAS_LSA);
635         ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
636 }
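
/*
 * Worked example for the MaxPStreams math above: with 256 stream context
 * array entries, fls(256) = 9, so max_primary_streams = 7 and the xHC
 * decodes that back as 2^(7 + 1) = 256 entries, which is also what the
 * debug message prints.
 */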
637
638 /*
639  * Sets the MaxPStreams field and the Linear Stream Array field to 0.
640  * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
641  * not at the beginning of the ring).
642  */
643 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
644                 struct xhci_ep_ctx *ep_ctx,
645                 struct xhci_virt_ep *ep)
646 {
647         dma_addr_t addr;
648         ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
649         addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
650         ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
651 }
652
653 /* Frees all stream contexts associated with the endpoint.
654  *
655  * Caller should fix the endpoint context streams fields.
656  */
657 void xhci_free_stream_info(struct xhci_hcd *xhci,
658                 struct xhci_stream_info *stream_info)
659 {
660         int cur_stream;
661         struct xhci_ring *cur_ring;
662         dma_addr_t addr;
663
664         if (!stream_info)
665                 return;
666
667         for (cur_stream = 1; cur_stream < stream_info->num_streams;
668                         cur_stream++) {
669                 cur_ring = stream_info->stream_rings[cur_stream];
670                 if (cur_ring) {
671                         addr = cur_ring->first_seg->dma;
672                         radix_tree_delete(&stream_info->trb_address_map,
673                                         addr >> SEGMENT_SHIFT);
674                         xhci_ring_free(xhci, cur_ring);
675                         stream_info->stream_rings[cur_stream] = NULL;
676                 }
677         }
678         xhci_free_command(xhci, stream_info->free_streams_command);
679         xhci->cmd_ring_reserved_trbs--;
680         if (stream_info->stream_ctx_array)
681                 xhci_free_stream_ctx(xhci,
682                                 stream_info->num_stream_ctxs,
683                                 stream_info->stream_ctx_array,
684                                 stream_info->ctx_array_dma);
685
686         if (stream_info)
687                 kfree(stream_info->stream_rings);
688         kfree(stream_info);
689 }
690
691
692 /***************** Device context manipulation *************************/
693
694 static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
695                 struct xhci_virt_ep *ep)
696 {
697         init_timer(&ep->stop_cmd_timer);
698         ep->stop_cmd_timer.data = (unsigned long) ep;
699         ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
700         ep->xhci = xhci;
701 }
702
703 static void xhci_free_tt_info(struct xhci_hcd *xhci,
704                 struct xhci_virt_device *virt_dev,
705                 int slot_id)
706 {
707         struct list_head *tt_list_head;
708         struct xhci_tt_bw_info *tt_info, *next;
709         bool slot_found = false;
710
711         /* If the device never made it past the Set Address stage,
712          * it may not have the real_port set correctly.
713          */
714         if (virt_dev->real_port == 0 ||
715                         virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
716                 xhci_dbg(xhci, "Bad real port.\n");
717                 return;
718         }
719
720         tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
721         list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
722                 /* Multi-TT hubs will have more than one entry */
723                 if (tt_info->slot_id == slot_id) {
724                         slot_found = true;
725                         list_del(&tt_info->tt_list);
726                         kfree(tt_info);
727                 } else if (slot_found) {
728                         break;
729                 }
730         }
731 }
732
733 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
734                 struct xhci_virt_device *virt_dev,
735                 struct usb_device *hdev,
736                 struct usb_tt *tt, gfp_t mem_flags)
737 {
738         struct xhci_tt_bw_info          *tt_info;
739         unsigned int                    num_ports;
740         int                             i, j;
741
742         if (!tt->multi)
743                 num_ports = 1;
744         else
745                 num_ports = hdev->maxchild;
746
747         for (i = 0; i < num_ports; i++, tt_info++) {
748                 struct xhci_interval_bw_table *bw_table;
749
750                 tt_info = kzalloc(sizeof(*tt_info), mem_flags);
751                 if (!tt_info)
752                         goto free_tts;
753                 INIT_LIST_HEAD(&tt_info->tt_list);
754                 list_add(&tt_info->tt_list,
755                                 &xhci->rh_bw[virt_dev->real_port - 1].tts);
756                 tt_info->slot_id = virt_dev->udev->slot_id;
757                 if (tt->multi)
758                         tt_info->ttport = i+1;
759                 bw_table = &tt_info->bw_table;
760                 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
761                         INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
762         }
763         return 0;
764
765 free_tts:
766         xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
767         return -ENOMEM;
768 }
769
770
771 /* All the xhci_tds in the ring's TD list should be freed at this point.
772  * Should be called with xhci->lock held if there is any chance the TT lists
773  * will be manipulated by the configure endpoint, allocate device, or update
774  * hub functions while this function is removing the TT entries from the list.
775  */
776 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
777 {
778         struct xhci_virt_device *dev;
779         int i;
780         int old_active_eps = 0;
781
782         /* Slot ID 0 is reserved */
783         if (slot_id == 0 || !xhci->devs[slot_id])
784                 return;
785
786         dev = xhci->devs[slot_id];
787         xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
788         if (!dev)
789                 return;
790
791         if (dev->tt_info)
792                 old_active_eps = dev->tt_info->active_eps;
793
794         for (i = 0; i < 31; ++i) {
795                 if (dev->eps[i].ring)
796                         xhci_ring_free(xhci, dev->eps[i].ring);
797                 if (dev->eps[i].stream_info)
798                         xhci_free_stream_info(xhci,
799                                         dev->eps[i].stream_info);
800                 /* Endpoints on the TT/root port lists should have been removed
801                  * when usb_disable_device() was called for the device.
802                  * We can't drop them anyway, because the udev might have gone
803                  * away by this point, and we can't tell what speed it was.
804                  */
805                 if (!list_empty(&dev->eps[i].bw_endpoint_list))
806                         xhci_warn(xhci, "Slot %u endpoint %u "
807                                         "not removed from BW list!\n",
808                                         slot_id, i);
809         }
810         /* If this is a hub, free the TT(s) from the TT list */
811         xhci_free_tt_info(xhci, dev, slot_id);
812         /* If necessary, update the number of active TTs on this root port */
813         xhci_update_tt_active_eps(xhci, dev, old_active_eps);
814
815         if (dev->ring_cache) {
816                 for (i = 0; i < dev->num_rings_cached; i++)
817                         xhci_ring_free(xhci, dev->ring_cache[i]);
818                 kfree(dev->ring_cache);
819         }
820
821         if (dev->in_ctx)
822                 xhci_free_container_ctx(xhci, dev->in_ctx);
823         if (dev->out_ctx)
824                 xhci_free_container_ctx(xhci, dev->out_ctx);
825
826         kfree(xhci->devs[slot_id]);
827         xhci->devs[slot_id] = NULL;
828 }
829
830 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
831                 struct usb_device *udev, gfp_t flags)
832 {
833         struct xhci_virt_device *dev;
834         int i;
835
836         /* Slot ID 0 is reserved */
837         if (slot_id == 0 || xhci->devs[slot_id]) {
838                 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
839                 return 0;
840         }
841
842         xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
843         if (!xhci->devs[slot_id])
844                 return 0;
845         dev = xhci->devs[slot_id];
846
847         /* Allocate the (output) device context that will be used in the HC. */
848         dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
849         if (!dev->out_ctx)
850                 goto fail;
851
852         xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
853                         (unsigned long long)dev->out_ctx->dma);
854
855         /* Allocate the (input) device context for address device command */
856         dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
857         if (!dev->in_ctx)
858                 goto fail;
859
860         xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
861                         (unsigned long long)dev->in_ctx->dma);
862
863         /* Initialize the cancellation list and watchdog timers for each ep */
864         for (i = 0; i < 31; i++) {
865                 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
866                 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
867                 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
868         }
869
870         /* Allocate endpoint 0 ring */
871         dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
872         if (!dev->eps[0].ring)
873                 goto fail;
874
875         /* Allocate pointers to the ring cache */
876         dev->ring_cache = kzalloc(
877                         sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
878                         flags);
879         if (!dev->ring_cache)
880                 goto fail;
881         dev->num_rings_cached = 0;
882
883         init_completion(&dev->cmd_completion);
884         INIT_LIST_HEAD(&dev->cmd_list);
885         dev->udev = udev;
886
887         /* Point to output device context in dcbaa. */
888         xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
889         xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
890                  slot_id,
891                  &xhci->dcbaa->dev_context_ptrs[slot_id],
892                  le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
893
894         return 1;
895 fail:
896         xhci_free_virt_device(xhci, slot_id);
897         return 0;
898 }
899
900 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
901                 struct usb_device *udev)
902 {
903         struct xhci_virt_device *virt_dev;
904         struct xhci_ep_ctx      *ep0_ctx;
905         struct xhci_ring        *ep_ring;
906
907         virt_dev = xhci->devs[udev->slot_id];
908         ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
909         ep_ring = virt_dev->eps[0].ring;
910         /*
911          * FIXME we don't keep track of the dequeue pointer very well after a
912          * Set TR dequeue pointer, so we're setting the dequeue pointer of the
913          * host to our enqueue pointer.  This should only be called after a
914          * configured device has reset, so all control transfers should have
915          * been completed or cancelled before the reset.
916          */
917         ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
918                                                         ep_ring->enqueue)
919                                    | ep_ring->cycle_state);
920 }
921
922 /*
923  * The xHCI roothub may have ports of differing speeds in any order in the port
924  * status registers.  xhci->port_array provides an array of the port speed for
925  * each offset into the port status registers.
926  *
927  * The xHCI hardware wants to know the roothub port number that the USB device
928  * is attached to (or the roothub port its ancestor hub is attached to).  All we
929  * know is the index of that port under either the USB 2.0 or the USB 3.0
930  * roothub, but that doesn't give us the real index into the HW port status
931  * registers.  Scan through the xHCI roothub port array, looking for the Nth
932  * entry of the correct port speed.  Return the port number of that entry.
933  */
934 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
935                 struct usb_device *udev)
936 {
937         struct usb_device *top_dev;
938         unsigned int num_similar_speed_ports;
939         unsigned int faked_port_num;
940         int i;
941
942         for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
943                         top_dev = top_dev->parent)
944                 /* Found device below root hub */;
945         faked_port_num = top_dev->portnum;
946         for (i = 0, num_similar_speed_ports = 0;
947                         i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
948                 u8 port_speed = xhci->port_array[i];
949
950                 /*
951                  * Skip ports that don't have known speeds, or have duplicate
952                  * Extended Capabilities port speed entries.
953                  */
954                 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
955                         continue;
956
957                 /*
958                  * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
959                  * 1.1 ports are under the USB 2.0 hub.  If the port speed
960                  * matches the device speed, it's a similar speed port.
961                  */
962                 if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
963                         num_similar_speed_ports++;
964                 if (num_similar_speed_ports == faked_port_num)
965                         /* Roothub ports are numbered from 1 to N */
966                         return i+1;
967         }
968         return 0;
969 }
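
/*
 * Hypothetical example of the scan above: if port_array[] held the major
 * revisions { 0x02, 0x02, 0x03, 0x03 } and a SuperSpeed device sat below the
 * second port of the USB 3.0 roothub (faked_port_num == 2), then the second
 * entry equal to 0x03 is at index 3, so the real (1-based) root hub port
 * number returned would be 4.
 */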
970
971 /* Setup an xHCI virtual device for a Set Address command */
972 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
973 {
974         struct xhci_virt_device *dev;
975         struct xhci_ep_ctx      *ep0_ctx;
976         struct xhci_slot_ctx    *slot_ctx;
977         u32                     port_num;
978         struct usb_device *top_dev;
979
980         dev = xhci->devs[udev->slot_id];
981         /* Slot ID 0 is reserved */
982         if (udev->slot_id == 0 || !dev) {
983                 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
984                                 udev->slot_id);
985                 return -EINVAL;
986         }
987         ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
988         slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
989
990         /* 3) Only the control endpoint is valid - one endpoint context */
991         slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
992         switch (udev->speed) {
993         case USB_SPEED_SUPER:
994                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
995                 break;
996         case USB_SPEED_HIGH:
997                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
998                 break;
999         case USB_SPEED_FULL:
1000                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1001                 break;
1002         case USB_SPEED_LOW:
1003                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1004                 break;
1005         case USB_SPEED_WIRELESS:
1006                 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1007                 return -EINVAL;
1008                 break;
1009         default:
1010                 /* Speed was set earlier, this shouldn't happen. */
1011                 BUG();
1012         }
1013         /* Find the root hub port this device is under */
1014         port_num = xhci_find_real_port_number(xhci, udev);
1015         if (!port_num)
1016                 return -EINVAL;
1017         slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1018         /* Set the port number in the virtual_device to the faked port number */
1019         for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1020                         top_dev = top_dev->parent)
1021                 /* Found device below root hub */;
1022         dev->fake_port = top_dev->portnum;
1023         dev->real_port = port_num;
1024         xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1025         xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1026
1027         /* Find the right bandwidth table that this device will be a part of.
1028          * If this is a full speed device attached directly to a root port (or a
1029          * descendant of one), it counts as a primary bandwidth domain, not a
1030          * secondary bandwidth domain under a TT.  An xhci_tt_info structure
1031          * will never be created for the HS root hub.
1032          */
1033         if (!udev->tt || !udev->tt->hub->parent) {
1034                 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1035         } else {
1036                 struct xhci_root_port_bw_info *rh_bw;
1037                 struct xhci_tt_bw_info *tt_bw;
1038
1039                 rh_bw = &xhci->rh_bw[port_num - 1];
1040                 /* Find the right TT. */
1041                 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1042                         if (tt_bw->slot_id != udev->tt->hub->slot_id)
1043                                 continue;
1044
1045                         if (!dev->udev->tt->multi ||
1046                                         (udev->tt->multi &&
1047                                          tt_bw->ttport == dev->udev->ttport)) {
1048                                 dev->bw_table = &tt_bw->bw_table;
1049                                 dev->tt_info = tt_bw;
1050                                 break;
1051                         }
1052                 }
1053                 if (!dev->tt_info)
1054                         xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1055         }
1056
1057         /* Is this a LS/FS device under an external HS hub? */
1058         if (udev->tt && udev->tt->hub->parent) {
1059                 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1060                                                 (udev->ttport << 8));
1061                 if (udev->tt->multi)
1062                         slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1063         }
1064         xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1065         xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1066
1067         /* Step 4 - ring already allocated */
1068         /* Step 5 */
1069         ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1070         /*
1071          * XXX: Not sure about wireless USB devices.
1072          */
1073         switch (udev->speed) {
1074         case USB_SPEED_SUPER:
1075                 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
1076                 break;
1077         case USB_SPEED_HIGH:
1078         /* USB core guesses at a 64-byte max packet first for FS devices */
1079         case USB_SPEED_FULL:
1080                 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
1081                 break;
1082         case USB_SPEED_LOW:
1083                 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
1084                 break;
1085         case USB_SPEED_WIRELESS:
1086                 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1087                 return -EINVAL;
1088                 break;
1089         default:
1090                 /* New speed? */
1091                 BUG();
1092         }
1093         /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1094         ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
1095
1096         ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1097                                    dev->eps[0].ring->cycle_state);
1098
1099         /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1100
1101         return 0;
1102 }
1103
1104 /*
1105  * Convert interval expressed as 2^(bInterval - 1) == interval into
1106  * straight exponent value 2^n == interval.
1107  *
1108  */
1109 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1110                 struct usb_host_endpoint *ep)
1111 {
1112         unsigned int interval;
1113
1114         interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1115         if (interval != ep->desc.bInterval - 1)
1116                 dev_warn(&udev->dev,
1117                          "ep %#x - rounding interval to %d %sframes\n",
1118                          ep->desc.bEndpointAddress,
1119                          1 << interval,
1120                          udev->speed == USB_SPEED_FULL ? "" : "micro");
1121
1122         if (udev->speed == USB_SPEED_FULL) {
1123                 /*
1124                  * Full speed isoc endpoints specify interval in frames,
1125                  * not microframes. We are using microframes everywhere,
1126                  * so adjust accordingly.
1127                  */
1128                 interval += 3;  /* 1 frame = 2^3 uframes */
1129         }
1130
1131         return interval;
1132 }
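
/*
 * Example of the conversion above: a full-speed isoc endpoint with
 * bInterval = 4 means a period of 2^(4 - 1) = 8 frames; the code computes
 * interval = 3 and then adds 3 (1 frame = 2^3 microframes), so the endpoint
 * context interval becomes 2^6 * 125us = 8ms, i.e. the same 8 frames.
 */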
1133
1134 /*
1135  * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1136  * microframes, rounded down to nearest power of 2.
1137  */
1138 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1139                 struct usb_host_endpoint *ep, unsigned int desc_interval,
1140                 unsigned int min_exponent, unsigned int max_exponent)
1141 {
1142         unsigned int interval;
1143
1144         interval = fls(desc_interval) - 1;
1145         interval = clamp_val(interval, min_exponent, max_exponent);
1146         if ((1 << interval) != desc_interval)
1147                 dev_warn(&udev->dev,
1148                          "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1149                          ep->desc.bEndpointAddress,
1150                          1 << interval,
1151                          desc_interval);
1152
1153         return interval;
1154 }
1155
1156 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1157                 struct usb_host_endpoint *ep)
1158 {
1159         if (ep->desc.bInterval == 0)
1160                 return 0;
1161         return xhci_microframes_to_exponent(udev, ep,
1162                         ep->desc.bInterval, 0, 15);
1163 }
1164
1165
1166 static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1167                 struct usb_host_endpoint *ep)
1168 {
1169         return xhci_microframes_to_exponent(udev, ep,
1170                         ep->desc.bInterval * 8, 3, 10);
1171 }
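
/*
 * Rounding examples for the helpers above: a high-speed bulk endpoint asking
 * for a NAK rate of bInterval = 10 microframes gets fls(10) - 1 = 3, i.e.
 * 2^3 = 8 microframes (with a warning about the rounding); a low-speed
 * interrupt endpoint with bInterval = 10 frames becomes 80 microframes, and
 * fls(80) - 1 = 6 gives 2^6 = 64 microframes (8ms), likewise rounded down.
 */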
1172
1173 /* Return the polling or NAK interval.
1174  *
1175  * The polling interval is expressed in "microframes".  If xHCI's Interval field
1176  * is set to N, it will service the endpoint every 2^(Interval)*125us.
1177  *
1178  * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1179  * is set to 0.
1180  */
1181 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1182                 struct usb_host_endpoint *ep)
1183 {
1184         unsigned int interval = 0;
1185
1186         switch (udev->speed) {
1187         case USB_SPEED_HIGH:
1188                 /* Max NAK rate */
1189                 if (usb_endpoint_xfer_control(&ep->desc) ||
1190                     usb_endpoint_xfer_bulk(&ep->desc)) {
1191                         interval = xhci_parse_microframe_interval(udev, ep);
1192                         break;
1193                 }
1194                 /* Fall through - SS and HS isoc/int have same decoding */
1195
1196         case USB_SPEED_SUPER:
1197                 if (usb_endpoint_xfer_int(&ep->desc) ||
1198                     usb_endpoint_xfer_isoc(&ep->desc)) {
1199                         interval = xhci_parse_exponent_interval(udev, ep);
1200                 }
1201                 break;
1202
1203         case USB_SPEED_FULL:
1204                 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1205                         interval = xhci_parse_exponent_interval(udev, ep);
1206                         break;
1207                 }
1208                 /*
1209                  * Fall through for interrupt endpoint interval decoding
1210                  * since it uses the same rules as low speed interrupt
1211                  * endpoints.
1212                  */
1213
1214         case USB_SPEED_LOW:
1215                 if (usb_endpoint_xfer_int(&ep->desc) ||
1216                     usb_endpoint_xfer_isoc(&ep->desc)) {
1217
1218                         interval = xhci_parse_frame_interval(udev, ep);
1219                 }
1220                 break;
1221
1222         default:
1223                 BUG();
1224         }
1225         return EP_INTERVAL(interval);
1226 }
1227
1228 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1229  * High speed endpoint descriptors can define "the number of additional
1230  * transaction opportunities per microframe", but that goes in the Max Burst
1231  * endpoint context field.
1232  */
1233 static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1234                 struct usb_host_endpoint *ep)
1235 {
1236         if (udev->speed != USB_SPEED_SUPER ||
1237                         !usb_endpoint_xfer_isoc(&ep->desc))
1238                 return 0;
1239         return ep->ss_ep_comp.bmAttributes;
1240 }
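
/*
 * For reference: in the SuperSpeed endpoint companion descriptor,
 * bmAttributes bits 1:0 hold the Mult value for isoc endpoints, and the xHC
 * performs up to (Mult + 1) bursts per service interval; the remaining bits
 * are reserved as zero, which is why returning the raw byte works here.
 */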
1241
1242 static u32 xhci_get_endpoint_type(struct usb_device *udev,
1243                 struct usb_host_endpoint *ep)
1244 {
1245         int in;
1246         u32 type;
1247
1248         in = usb_endpoint_dir_in(&ep->desc);
1249         if (usb_endpoint_xfer_control(&ep->desc)) {
1250                 type = EP_TYPE(CTRL_EP);
1251         } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
1252                 if (in)
1253                         type = EP_TYPE(BULK_IN_EP);
1254                 else
1255                         type = EP_TYPE(BULK_OUT_EP);
1256         } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
1257                 if (in)
1258                         type = EP_TYPE(ISOC_IN_EP);
1259                 else
1260                         type = EP_TYPE(ISOC_OUT_EP);
1261         } else if (usb_endpoint_xfer_int(&ep->desc)) {
1262                 if (in)
1263                         type = EP_TYPE(INT_IN_EP);
1264                 else
1265                         type = EP_TYPE(INT_OUT_EP);
1266         } else {
1267                 BUG();
1268         }
1269         return type;
1270 }
1271
1272 /* Return the maximum endpoint service interval time (ESIT) payload.
1273  * Basically, this is the maxpacket size, multiplied by the burst size
1274  * and mult size.
1275  */
1276 static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
1277                 struct usb_device *udev,
1278                 struct usb_host_endpoint *ep)
1279 {
1280         int max_burst;
1281         int max_packet;
1282
1283         /* Only applies for interrupt or isochronous endpoints */
1284         if (usb_endpoint_xfer_control(&ep->desc) ||
1285                         usb_endpoint_xfer_bulk(&ep->desc))
1286                 return 0;
1287
1288         if (udev->speed == USB_SPEED_SUPER)
1289                 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1290
1291         max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1292         max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
1293         /* A 0 in max burst means 1 transfer per ESIT */
1294         return max_packet * (max_burst + 1);
1295 }
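
/*
 * Worked example for the high-speed case above: a high-bandwidth isoc
 * endpoint with wMaxPacketSize = 0x1400 encodes max_packet = 1024 (bits
 * 10:0) and max_burst = 2 (bits 12:11, two additional transactions per
 * microframe), so the max ESIT payload is 1024 * (2 + 1) = 3072 bytes.
 */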
1296
1297 /* Set up an endpoint with one ring segment.  Do not allocate stream rings.
1298  * Drivers will have to call usb_alloc_streams() to do that.
1299  */
1300 int xhci_endpoint_init(struct xhci_hcd *xhci,
1301                 struct xhci_virt_device *virt_dev,
1302                 struct usb_device *udev,
1303                 struct usb_host_endpoint *ep,
1304                 gfp_t mem_flags)
1305 {
1306         unsigned int ep_index;
1307         struct xhci_ep_ctx *ep_ctx;
1308         struct xhci_ring *ep_ring;
1309         unsigned int max_packet;
1310         unsigned int max_burst;
1311         u32 max_esit_payload;
1312
1313         ep_index = xhci_get_endpoint_index(&ep->desc);
1314         ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1315
1316         /* Set up the endpoint ring */
1317         /*
1318          * An isochronous endpoint ring needs a bigger size because one isoc
1319          * URB carries multiple packets and will insert multiple TDs into the
1320          * ring.
1321          * This should be replaced with dynamic ring resizing in the future.
1322          */
1323         if (usb_endpoint_xfer_isoc(&ep->desc))
1324                 virt_dev->eps[ep_index].new_ring =
1325                         xhci_ring_alloc(xhci, 8, true, true, mem_flags);
1326         else
1327                 virt_dev->eps[ep_index].new_ring =
1328                         xhci_ring_alloc(xhci, 1, true, false, mem_flags);
1329         if (!virt_dev->eps[ep_index].new_ring) {
1330                 /* Attempt to use the ring cache */
1331                 if (virt_dev->num_rings_cached == 0)
1332                         return -ENOMEM;
1333                 virt_dev->num_rings_cached--;
1334                 virt_dev->eps[ep_index].new_ring =
1335                         virt_dev->ring_cache[virt_dev->num_rings_cached];
1336                 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1337                 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1338                         usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
1339         }
1340         virt_dev->eps[ep_index].skip = false;
1341         ep_ring = virt_dev->eps[ep_index].new_ring;
1342         ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
1343
1344         ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
1345                                       | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
1346
1347         /* FIXME dig Mult and streams info out of ep companion desc */
1348
1349         /* Allow 3 retries for everything but isoc;
1350          * CErr shall be set to 0 for Isoch endpoints.
1351          */
1352         if (!usb_endpoint_xfer_isoc(&ep->desc))
1353                 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
1354         else
1355                 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
1356
1357         ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
1358
1359         /* Set the max packet size and max burst */
1360         max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1361         max_burst = 0;
1362         switch (udev->speed) {
1363         case USB_SPEED_SUPER:
1364                 /* dig out max burst from ep companion desc */
1365                 max_burst = ep->ss_ep_comp.bMaxBurst;
1366                 break;
1367         case USB_SPEED_HIGH:
1368                 /* Some devices get this wrong */
1369                 if (usb_endpoint_xfer_bulk(&ep->desc))
1370                         max_packet = 512;
1371                 /* bits 12:11 specify the number of additional transaction
1372                  * opportunities per microframe (USB 2.0, section 9.6.6)
1373                  */
1374                 if (usb_endpoint_xfer_isoc(&ep->desc) ||
1375                                 usb_endpoint_xfer_int(&ep->desc)) {
1376                         max_burst = (usb_endpoint_maxp(&ep->desc)
1377                                      & 0x1800) >> 11;
1378                 }
1379                 break;
1380         case USB_SPEED_FULL:
1381         case USB_SPEED_LOW:
1382                 break;
1383         default:
1384                 BUG();
1385         }
1386         ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
1387                         MAX_BURST(max_burst));
1388         max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
1389         ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
1390
1391         /*
1392          * XXX no idea how to calculate the average TRB buffer length for bulk
1393          * endpoints, as the driver gives us no clue how big each scatter gather
1394          * list entry (or buffer) is going to be.
1395          *
1396          * For isochronous and interrupt endpoints, we set it to the max
1397          * available, until we have new API in the USB core to allow drivers to
1398          * declare how much bandwidth they actually need.
1399          *
1400          * Normally, it would be calculated by taking the total of the buffer
1401          * lengths in the TD and then dividing by the number of TRBs in a TD,
1402          * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
1403          * use Event Data TRBs, and we don't chain in a link TRB on short
1404          * transfers, we're basically dividing by 1.
1405          *
1406          * xHCI 1.0 specification indicates that the Average TRB Length should
1407          * be set to 8 for control endpoints.
1408          */
1409         if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
1410                 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
1411         else
1412                 ep_ctx->tx_info |=
1413                          cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
1414
1415         /* FIXME Debug endpoint context */
1416         return 0;
1417 }
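
/*
 * Summary of what the function above programs into the input endpoint
 * context: ep_info gets the Interval and Mult, ep_info2 gets CErr, the EP
 * Type, Max Packet Size and Max Burst, deq gets the new ring's first segment
 * DMA address OR'ed with the cycle state, and tx_info gets the Max ESIT
 * Payload and Average TRB Length.
 */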
1418
1419 void xhci_endpoint_zero(struct xhci_hcd *xhci,
1420                 struct xhci_virt_device *virt_dev,
1421                 struct usb_host_endpoint *ep)
1422 {
1423         unsigned int ep_index;
1424         struct xhci_ep_ctx *ep_ctx;
1425
1426         ep_index = xhci_get_endpoint_index(&ep->desc);
1427         ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1428
1429         ep_ctx->ep_info = 0;
1430         ep_ctx->ep_info2 = 0;
1431         ep_ctx->deq = 0;
1432         ep_ctx->tx_info = 0;
1433         /* Don't free the endpoint ring until the set interface or configuration
1434          * request succeeds.
1435          */
1436 }
1437
1438 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1439 {
1440         bw_info->ep_interval = 0;
1441         bw_info->mult = 0;
1442         bw_info->num_packets = 0;
1443         bw_info->max_packet_size = 0;
1444         bw_info->type = 0;
1445         bw_info->max_esit_payload = 0;
1446 }
1447
1448 void xhci_update_bw_info(struct xhci_hcd *xhci,
1449                 struct xhci_container_ctx *in_ctx,
1450                 struct xhci_input_control_ctx *ctrl_ctx,
1451                 struct xhci_virt_device *virt_dev)
1452 {
1453         struct xhci_bw_info *bw_info;
1454         struct xhci_ep_ctx *ep_ctx;
1455         unsigned int ep_type;
1456         int i;
1457
1458         for (i = 1; i < 31; ++i) {
1459                 bw_info = &virt_dev->eps[i].bw_info;
1460
1461                 /* We can't tell what endpoint type is being dropped, but
1462                  * unconditionally clearing the bandwidth info for non-periodic
1463                  * endpoints should be harmless because the info will never be
1464                  * set in the first place.
1465                  */
1466                 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1467                         /* Dropped endpoint */
1468                         xhci_clear_endpoint_bw_info(bw_info);
1469                         continue;
1470                 }
1471
1472                 if (EP_IS_ADDED(ctrl_ctx, i)) {
1473                         ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1474                         ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1475
1476                         /* Ignore non-periodic endpoints */
1477                         if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1478                                         ep_type != ISOC_IN_EP &&
1479                                         ep_type != INT_IN_EP)
1480                                 continue;
1481
1482                         /* Added or changed endpoint */
1483                         bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1484                                         le32_to_cpu(ep_ctx->ep_info));
1485                         /* Number of packets and mult are zero-based in the
1486                          * input context, but we want one-based for the
1487                          * interval table.
1488                          */
1489                         bw_info->mult = CTX_TO_EP_MULT(
1490                                         le32_to_cpu(ep_ctx->ep_info)) + 1;
1491                         bw_info->num_packets = CTX_TO_MAX_BURST(
1492                                         le32_to_cpu(ep_ctx->ep_info2)) + 1;
1493                         bw_info->max_packet_size = MAX_PACKET_DECODED(
1494                                         le32_to_cpu(ep_ctx->ep_info2));
1495                         bw_info->type = ep_type;
1496                         bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1497                                         le32_to_cpu(ep_ctx->tx_info));
1498                 }
1499         }
1500 }
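
/*
 * Example of the zero-based to one-based conversion above: an added
 * interrupt endpoint whose input context has Max Burst = 0 and Mult = 0 is
 * recorded here with num_packets = 1 and mult = 1, i.e. a single packet per
 * service opportunity.
 */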
1501
1502 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1503  * Useful when you want to change one particular aspect of the endpoint and then
1504  * issue a configure endpoint command.
1505  */
1506 void xhci_endpoint_copy(struct xhci_hcd *xhci,
1507                 struct xhci_container_ctx *in_ctx,
1508                 struct xhci_container_ctx *out_ctx,
1509                 unsigned int ep_index)
1510 {
1511         struct xhci_ep_ctx *out_ep_ctx;
1512         struct xhci_ep_ctx *in_ep_ctx;
1513
1514         out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1515         in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1516
1517         in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1518         in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1519         in_ep_ctx->deq = out_ep_ctx->deq;
1520         in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1521 }
1522
1523 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1524  * Useful when you want to change one particular aspect of the endpoint and then
1525  * issue a configure endpoint command.  Only the context entries field matters,
1526  * but we'll copy the whole thing anyway.
1527  */
1528 void xhci_slot_copy(struct xhci_hcd *xhci,
1529                 struct xhci_container_ctx *in_ctx,
1530                 struct xhci_container_ctx *out_ctx)
1531 {
1532         struct xhci_slot_ctx *in_slot_ctx;
1533         struct xhci_slot_ctx *out_slot_ctx;
1534
1535         in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1536         out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1537
1538         in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1539         in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1540         in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1541         in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1542 }
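
/*
 * Illustrative use of the two copy helpers above: copy the slot and endpoint
 * contexts from the output container into the input container, modify just
 * the field of interest in the input copy, set the corresponding add flag in
 * the input control context, and then issue a configure endpoint command.
 */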
1543
1544 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1545 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1546 {
1547         int i;
1548         struct device *dev = xhci_to_hcd(xhci)->self.controller;
1549         int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1550
1551         xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
1552
1553         if (!num_sp)
1554                 return 0;
1555
1556         xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1557         if (!xhci->scratchpad)
1558                 goto fail_sp;
1559
1560         xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1561                                      num_sp * sizeof(u64),
1562                                      &xhci->scratchpad->sp_dma, flags);
1563         if (!xhci->scratchpad->sp_array)
1564                 goto fail_sp2;
1565
1566         xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1567         if (!xhci->scratchpad->sp_buffers)
1568                 goto fail_sp3;
1569
1570         xhci->scratchpad->sp_dma_buffers =
1571                 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
1572
1573         if (!xhci->scratchpad->sp_dma_buffers)
1574                 goto fail_sp4;
1575
1576         xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1577         for (i = 0; i < num_sp; i++) {
1578                 dma_addr_t dma;
1579                 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1580                                 flags);
1581                 if (!buf)
1582                         goto fail_sp5;
1583
1584                 xhci->scratchpad->sp_array[i] = dma;
1585                 xhci->scratchpad->sp_buffers[i] = buf;
1586                 xhci->scratchpad->sp_dma_buffers[i] = dma;
1587         }
1588
1589         return 0;
1590
1591  fail_sp5:
1592         for (i = i - 1; i >= 0; i--) {
1593                 dma_free_coherent(dev, xhci->page_size,
1594                                     xhci->scratchpad->sp_buffers[i],
1595                                     xhci->scratchpad->sp_dma_buffers[i]);
1596         }
1597         kfree(xhci->scratchpad->sp_dma_buffers);
1598
1599  fail_sp4:
1600         kfree(xhci->scratchpad->sp_buffers);
1601
1602  fail_sp3:
1603         dma_free_coherent(dev, num_sp * sizeof(u64),
1604                             xhci->scratchpad->sp_array,
1605                             xhci->scratchpad->sp_dma);
1606
1607  fail_sp2:
1608         kfree(xhci->scratchpad);
1609         xhci->scratchpad = NULL;
1610
1611  fail_sp:
1612         return -ENOMEM;
1613 }
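
/*
 * Sizing example for the above (assuming a 4K controller page size): a host
 * reporting HCS_MAX_SCRATCHPAD = 4 gets a 4-entry (32-byte) sp_array, four
 * separate 4K scratchpad pages, and DCBAA entry 0 pointing at the sp_array.
 */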
1614
1615 static void scratchpad_free(struct xhci_hcd *xhci)
1616 {
1617         int num_sp;
1618         int i;
1619         struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
1620
1621         if (!xhci->scratchpad)
1622                 return;
1623
1624         num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1625
1626         for (i = 0; i < num_sp; i++) {
1627                 dma_free_coherent(&pdev->dev, xhci->page_size,
1628                                     xhci->scratchpad->sp_buffers[i],
1629                                     xhci->scratchpad->sp_dma_buffers[i]);
1630         }
1631         kfree(xhci->scratchpad->sp_dma_buffers);
1632         kfree(xhci->scratchpad->sp_buffers);
1633         dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
1634                             xhci->scratchpad->sp_array,
1635                             xhci->scratchpad->sp_dma);
1636         kfree(xhci->scratchpad);
1637         xhci->scratchpad = NULL;
1638 }
1639
1640 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1641                 bool allocate_in_ctx, bool allocate_completion,
1642                 gfp_t mem_flags)
1643 {
1644         struct xhci_command *command;
1645
1646         command = kzalloc(sizeof(*command), mem_flags);
1647         if (!command)
1648                 return NULL;
1649
1650         if (allocate_in_ctx) {
1651                 command->in_ctx =
1652                         xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1653                                         mem_flags);
1654                 if (!command->in_ctx) {
1655                         kfree(command);
1656                         return NULL;
1657                 }
1658         }
1659
1660         if (allocate_completion) {
1661                 command->completion =
1662                         kzalloc(sizeof(struct completion), mem_flags);
1663                 if (!command->completion) {
1664                         xhci_free_container_ctx(xhci, command->in_ctx);
1665                         kfree(command);
1666                         return NULL;
1667                 }
1668                 init_completion(command->completion);
1669         }
1670
1671         command->status = 0;
1672         INIT_LIST_HEAD(&command->cmd_list);
1673         return command;
1674 }
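
/*
 * Sketch of the usual call pattern (illustrative only, error handling
 * trimmed):
 *
 *	cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... queue the command and wait on cmd->completion ...
 *	xhci_free_command(xhci, cmd);
 *
 * Callers that do not need an input context pass allocate_in_ctx = false.
 */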
1675
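/*
 * Free the per-URB private data.  Only td[0] is kfree'd because the td
 * pointers are assumed to index into one contiguous allocation made at
 * enqueue time, with td[0] at its start.
 */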
1676 void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
1677 {
1678         if (urb_priv) {
1679                 kfree(urb_priv->td[0]);
1680                 kfree(urb_priv);
1681         }
1682 }
1683
1684 void xhci_free_command(struct xhci_hcd *xhci,
1685                 struct xhci_command *command)
1686 {
1687         xhci_free_container_ctx(xhci,
1688                         command->in_ctx);
1689         kfree(command->completion);
1690         kfree(command);
1691 }
1692
1693 void xhci_mem_cleanup(struct xhci_hcd *xhci)
1694 {
1695         struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
1696         struct dev_info *dev_info, *next;
1697         struct xhci_cd  *cur_cd, *next_cd;
1698         unsigned long   flags;
1699         int size;
1700         int i, j, num_ports;
1701
1702         /* Free the Event Ring Segment Table and the actual Event Ring */
1703         size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
1704         if (xhci->erst.entries)
1705                 dma_free_coherent(&pdev->dev, size,
1706                                 xhci->erst.entries, xhci->erst.erst_dma_addr);
1707         xhci->erst.entries = NULL;
1708         xhci_dbg(xhci, "Freed ERST\n");
1709         if (xhci->event_ring)
1710                 xhci_ring_free(xhci, xhci->event_ring);
1711         xhci->event_ring = NULL;
1712         xhci_dbg(xhci, "Freed event ring\n");
1713
1714         xhci->cmd_ring_reserved_trbs = 0;
1715         if (xhci->cmd_ring)
1716                 xhci_ring_free(xhci, xhci->cmd_ring);
1717         xhci->cmd_ring = NULL;
1718         xhci_dbg(xhci, "Freed command ring\n");
1719         list_for_each_entry_safe(cur_cd, next_cd,
1720                         &xhci->cancel_cmd_list, cancel_cmd_list) {
1721                 list_del(&cur_cd->cancel_cmd_list);
1722                 kfree(cur_cd);
1723         }
1724
1725         num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1726         for (i = 0; i < num_ports; i++) {
1727                 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1728                 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1729                         struct list_head *ep = &bwt->interval_bw[j].endpoints;
1730                         while (!list_empty(ep))
1731                                 list_del_init(ep->next);
1732                 }
1733         }
1734
1735         for (i = 1; i < MAX_HC_SLOTS; ++i)
1736                 xhci_free_virt_device(xhci, i);
1737
1738         if (xhci->segment_pool)
1739                 dma_pool_destroy(xhci->segment_pool);
1740         xhci->segment_pool = NULL;
1741         xhci_dbg(xhci, "Freed segment pool\n");
1742
1743         if (xhci->device_pool)
1744                 dma_pool_destroy(xhci->device_pool);
1745         xhci->device_pool = NULL;
1746         xhci_dbg(xhci, "Freed device context pool\n");
1747
1748         if (xhci->small_streams_pool)
1749                 dma_pool_destroy(xhci->small_streams_pool);
1750         xhci->small_streams_pool = NULL;
1751         xhci_dbg(xhci, "Freed small stream array pool\n");
1752
1753         if (xhci->medium_streams_pool)
1754                 dma_pool_destroy(xhci->medium_streams_pool);
1755         xhci->medium_streams_pool = NULL;
1756         xhci_dbg(xhci, "Freed medium stream array pool\n");
1757
1758         if (xhci->dcbaa)
1759                 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
1760                                 xhci->dcbaa, xhci->dcbaa->dma);
1761         xhci->dcbaa = NULL;
1762
1763         scratchpad_free(xhci);
1764
1765         spin_lock_irqsave(&xhci->lock, flags);
1766         list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
1767                 list_del(&dev_info->list);
1768                 kfree(dev_info);
1769         }
1770         spin_unlock_irqrestore(&xhci->lock, flags);
1771
1772         if (!xhci->rh_bw)
1773                 goto no_bw;
1774
1775         for (i = 0; i < num_ports; i++) {
1776                 struct xhci_tt_bw_info *tt, *n;
1777                 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1778                         list_del(&tt->tt_list);
1779                         kfree(tt);
1780                 }
1781         }
1782
1783 no_bw:
1784         xhci->num_usb2_ports = 0;
1785         xhci->num_usb3_ports = 0;
1786         xhci->num_active_eps = 0;
1787         kfree(xhci->usb2_ports);
1788         kfree(xhci->usb3_ports);
1789         kfree(xhci->port_array);
1790         kfree(xhci->rh_bw);
1791
1792         xhci->page_size = 0;
1793         xhci->page_shift = 0;
1794         xhci->bus_state[0].bus_suspended = 0;
1795         xhci->bus_state[1].bus_suspended = 0;
1796 }
1797
1798 static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1799                 struct xhci_segment *input_seg,
1800                 union xhci_trb *start_trb,
1801                 union xhci_trb *end_trb,
1802                 dma_addr_t input_dma,
1803                 struct xhci_segment *result_seg,
1804                 char *test_name, int test_number)
1805 {
1806         unsigned long long start_dma;
1807         unsigned long long end_dma;
1808         struct xhci_segment *seg;
1809
1810         start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1811         end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1812
1813         seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
1814         if (seg != result_seg) {
1815                 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1816                                 test_name, test_number);
1817                 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1818                                 "input DMA 0x%llx\n",
1819                                 input_seg,
1820                                 (unsigned long long) input_dma);
1821                 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1822                                 "ending TRB %p (0x%llx DMA)\n",
1823                                 start_trb, start_dma,
1824                                 end_trb, end_dma);
1825                 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1826                                 result_seg, seg);
1827                 return -1;
1828         }
1829         return 0;
1830 }
1831
1832 /* TRB math checks for trb_in_td(), using the command and event rings. */
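/*
 * Note on the vectors below: a TRB is 16 bytes, so &seg->trbs[n] has DMA
 * address seg->dma + n * 16; "one TRB before/after the ring" therefore means
 * 16 bytes outside the segment boundaries.
 */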
1833 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1834 {
1835         struct {
1836                 dma_addr_t              input_dma;
1837                 struct xhci_segment     *result_seg;
1838         } simple_test_vector [] = {
1839                 /* A zeroed DMA field should fail */
1840                 { 0, NULL },
1841                 /* One TRB before the ring start should fail */
1842                 { xhci->event_ring->first_seg->dma - 16, NULL },
1843                 /* One byte before the ring start should fail */
1844                 { xhci->event_ring->first_seg->dma - 1, NULL },
1845                 /* Starting TRB should succeed */
1846                 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1847                 /* Ending TRB should succeed */
1848                 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1849                         xhci->event_ring->first_seg },
1850                 /* One byte after the ring end should fail */
1851                 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1852                 /* One TRB after the ring end should fail */
1853                 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1854                 /* An address of all ones should fail */
1855                 { (dma_addr_t) (~0), NULL },
1856         };
1857         struct {
1858                 struct xhci_segment     *input_seg;
1859                 union xhci_trb          *start_trb;
1860                 union xhci_trb          *end_trb;
1861                 dma_addr_t              input_dma;
1862                 struct xhci_segment     *result_seg;
1863         } complex_test_vector [] = {
1864                 /* Test feeding a valid DMA address from a different ring */
1865                 {       .input_seg = xhci->event_ring->first_seg,
1866                         .start_trb = xhci->event_ring->first_seg->trbs,
1867                         .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1868                         .input_dma = xhci->cmd_ring->first_seg->dma,
1869                         .result_seg = NULL,
1870                 },
1871                 /* Test feeding a valid end TRB from a different ring */
1872                 {       .input_seg = xhci->event_ring->first_seg,
1873                         .start_trb = xhci->event_ring->first_seg->trbs,
1874                         .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1875                         .input_dma = xhci->cmd_ring->first_seg->dma,
1876                         .result_seg = NULL,
1877                 },
1878                 /* Test feeding a valid start and end TRB from a different ring */
1879                 {       .input_seg = xhci->event_ring->first_seg,
1880                         .start_trb = xhci->cmd_ring->first_seg->trbs,
1881                         .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1882                         .input_dma = xhci->cmd_ring->first_seg->dma,
1883                         .result_seg = NULL,
1884                 },
1885                 /* TRB in this ring, but after this TD */
1886                 {       .input_seg = xhci->event_ring->first_seg,
1887                         .start_trb = &xhci->event_ring->first_seg->trbs[0],
1888                         .end_trb = &xhci->event_ring->first_seg->trbs[3],
1889                         .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1890                         .result_seg = NULL,
1891                 },
1892                 /* TRB in this ring, but before this TD */
1893                 {       .input_seg = xhci->event_ring->first_seg,
1894                         .start_trb = &xhci->event_ring->first_seg->trbs[3],
1895                         .end_trb = &xhci->event_ring->first_seg->trbs[6],
1896                         .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1897                         .result_seg = NULL,
1898                 },
1899                 /* TRB in this ring, but after this wrapped TD */
1900                 {       .input_seg = xhci->event_ring->first_seg,
1901                         .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1902                         .end_trb = &xhci->event_ring->first_seg->trbs[1],
1903                         .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1904                         .result_seg = NULL,
1905                 },
1906                 /* TRB in this ring, but before this wrapped TD */
1907                 {       .input_seg = xhci->event_ring->first_seg,
1908                         .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1909                         .end_trb = &xhci->event_ring->first_seg->trbs[1],
1910                         .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
1911                         .result_seg = NULL,
1912                 },
1913                 /* TRB not in this ring, and we have a wrapped TD */
1914                 {       .input_seg = xhci->event_ring->first_seg,
1915                         .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1916                         .end_trb = &xhci->event_ring->first_seg->trbs[1],
1917                         .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
1918                         .result_seg = NULL,
1919                 },
1920         };
1921
1922         unsigned int num_tests;
1923         int i, ret;
1924
1925         num_tests = ARRAY_SIZE(simple_test_vector);
1926         for (i = 0; i < num_tests; i++) {
1927                 ret = xhci_test_trb_in_td(xhci,
1928                                 xhci->event_ring->first_seg,
1929                                 xhci->event_ring->first_seg->trbs,
1930                                 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1931                                 simple_test_vector[i].input_dma,
1932                                 simple_test_vector[i].result_seg,
1933                                 "Simple", i);
1934                 if (ret < 0)
1935                         return ret;
1936         }
1937
1938         num_tests = ARRAY_SIZE(complex_test_vector);
1939         for (i = 0; i < num_tests; i++) {
1940                 ret = xhci_test_trb_in_td(xhci,
1941                                 complex_test_vector[i].input_seg,
1942                                 complex_test_vector[i].start_trb,
1943                                 complex_test_vector[i].end_trb,
1944                                 complex_test_vector[i].input_dma,
1945                                 complex_test_vector[i].result_seg,
1946                                 "Complex", i);
1947                 if (ret < 0)
1948                         return ret;
1949         }
1950         xhci_dbg(xhci, "TRB math tests passed.\n");
1951         return 0;
1952 }
1953
1954 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
1955 {
1956         u64 temp;
1957         dma_addr_t deq;
1958
1959         deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
1960                         xhci->event_ring->dequeue);
1961         if (deq == 0 && !in_interrupt())
1962                 xhci_warn(xhci, "WARN: something wrong with SW event ring "
1963                                 "dequeue ptr.\n");
1964         /* Update HC event ring dequeue pointer */
1965         temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
1966         temp &= ERST_PTR_MASK;
1967         /* Don't clear the EHB bit (which is RW1C) because
1968          * there might be more events to service.
1969          */
1970         temp &= ~ERST_EHB;
1971         xhci_dbg(xhci, "// Write event ring dequeue pointer, "
1972                         "preserving EHB bit\n");
1973         xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
1974                         &xhci->ir_set->erst_dequeue);
1975 }
1976
1977 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
1978                 __le32 __iomem *addr, u8 major_revision)
1979 {
1980         u32 temp, port_offset, port_count;
1981         int i;
1982
1983         if (major_revision > 0x03) {
1984                 xhci_warn(xhci, "Ignoring unknown port speed, "
1985                                 "Ext Cap %p, revision = 0x%x\n",
1986                                 addr, major_revision);
1987                 /* Ignoring port protocol we can't understand. FIXME */
1988                 return;
1989         }
1990
1991         /* Port offset and count in the third dword, see section 7.2 */
1992         temp = xhci_readl(xhci, addr + 2);
1993         port_offset = XHCI_EXT_PORT_OFF(temp);
1994         port_count = XHCI_EXT_PORT_COUNT(temp);
1995         xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
1996                         "count = %u, revision = 0x%x\n",
1997                         addr, port_offset, port_count, major_revision);
1998         /* Port count includes the current port offset */
1999         if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2000                 /* WTF? "Valid values are '1' to MaxPorts" */
2001                 return;
2002
2003         /* Check the host's USB2 LPM capability */
2004         if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2005                         (temp & XHCI_L1C)) {
2006                 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
2007                 xhci->sw_lpm_support = 1;
2008         }
2009
2010         if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2011                 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
2012                 xhci->sw_lpm_support = 1;
2013                 if (temp & XHCI_HLC) {
2014                         xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
2015                         xhci->hw_lpm_support = 1;
2016                 }
2017         }
2018
2019         port_offset--;
2020         for (i = port_offset; i < (port_offset + port_count); i++) {
2021                 /* Duplicate entry.  Ignore the port if the revisions differ. */
2022                 if (xhci->port_array[i] != 0) {
2023                         xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2024                                         " port %u\n", addr, i);
2025                         xhci_warn(xhci, "Port was marked as USB %u, "
2026                                         "duplicated as USB %u\n",
2027                                         xhci->port_array[i], major_revision);
2028                         /* Only adjust the roothub port counts if we haven't
2029                          * found a similar duplicate.
2030                          */
2031                         if (xhci->port_array[i] != major_revision &&
2032                                 xhci->port_array[i] != DUPLICATE_ENTRY) {
2033                                 if (xhci->port_array[i] == 0x03)
2034                                         xhci->num_usb3_ports--;
2035                                 else
2036                                         xhci->num_usb2_ports--;
2037                                 xhci->port_array[i] = DUPLICATE_ENTRY;
2038                         }
2039                         /* FIXME: Should we disable the port? */
2040                         continue;
2041                 }
2042                 xhci->port_array[i] = major_revision;
2043                 if (major_revision == 0x03)
2044                         xhci->num_usb3_ports++;
2045                 else
2046                         xhci->num_usb2_ports++;
2047         }
2048         /* FIXME: Should we disable ports not in the Extended Capabilities? */
2049 }
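
/*
 * Worked example of the accounting above: a Supported Protocol capability
 * with major revision 0x03, port offset 5 and port count 2 marks
 * port_array[4] and port_array[5] as 0x03 and adds two USB 3.0 roothub
 * ports.  A later capability claiming either port with a different revision
 * would demote it to DUPLICATE_ENTRY and drop the count again.
 */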
2050
2051 /*
2052  * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2053  * specify what speeds each port is supposed to be.  We can't count on the port
2054  * speed bits in the PORTSC register being correct until a device is connected,
2055  * but we need to set up the two fake roothubs with the correct number of USB
2056  * 3.0 and USB 2.0 ports at host controller initialization time.
2057  */
2058 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2059 {
2060         __le32 __iomem *addr;
2061         u32 offset;
2062         unsigned int num_ports;
2063         int i, j, port_index;
2064
2065         addr = &xhci->cap_regs->hcc_params;
2066         offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
2067         if (offset == 0) {
2068                 xhci_err(xhci, "No Extended Capability registers, "
2069                                 "unable to set up roothub.\n");
2070                 return -ENODEV;
2071         }
2072
2073         num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2074         xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
2075         if (!xhci->port_array)
2076                 return -ENOMEM;
2077
2078         xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2079         if (!xhci->rh_bw)
2080                 return -ENOMEM;
2081         for (i = 0; i < num_ports; i++) {
2082                 struct xhci_interval_bw_table *bw_table;
2083
2084                 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2085                 bw_table = &xhci->rh_bw[i].bw_table;
2086                 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2087                         INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2088         }
2089
2090         /*
2091          * For whatever reason, the first capability offset is from the
2092          * capability register base, not from the HCCPARAMS register.
2093          * See section 5.3.6 for offset calculation.
2094          */
2095         addr = &xhci->cap_regs->hc_capbase + offset;
2096         while (1) {
2097                 u32 cap_id;
2098
2099                 cap_id = xhci_readl(xhci, addr);
2100                 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2101                         xhci_add_in_port(xhci, num_ports, addr,
2102                                         (u8) XHCI_EXT_PORT_MAJOR(cap_id));
2103                 offset = XHCI_EXT_CAPS_NEXT(cap_id);
2104                 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
2105                                 == num_ports)
2106                         break;
2107                 /*
2108                  * Once you're into the Extended Capabilities, the offset is
2109                  * always relative to the register holding the offset.
2110                  */
2111                 addr += offset;
2112         }
2113
2114         if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
2115                 xhci_warn(xhci, "No ports on the roothubs?\n");
2116                 return -ENODEV;
2117         }
2118         xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
2119                         xhci->num_usb2_ports, xhci->num_usb3_ports);
2120
2121         /* Place limits on the number of roothub ports so that the hub
2122          * descriptors aren't longer than the USB core will allocate.
2123          */
2124         if (xhci->num_usb3_ports > 15) {
2125                 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
2126                 xhci->num_usb3_ports = 15;
2127         }
2128         if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2129                 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
2130                                 USB_MAXCHILDREN);
2131                 xhci->num_usb2_ports = USB_MAXCHILDREN;
2132         }
2133
2134         /*
2135          * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
2136          * Not sure how the USB core will handle a hub with no ports...
2137          */
2138         if (xhci->num_usb2_ports) {
2139                 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
2140                                 xhci->num_usb2_ports, flags);
2141                 if (!xhci->usb2_ports)
2142                         return -ENOMEM;
2143
2144                 port_index = 0;
2145                 for (i = 0; i < num_ports; i++) {
2146                         if (xhci->port_array[i] == 0x03 ||
2147                                         xhci->port_array[i] == 0 ||
2148                                         xhci->port_array[i] == DUPLICATE_ENTRY)
2149                                 continue;
2150
2151                         xhci->usb2_ports[port_index] =
2152                                 &xhci->op_regs->port_status_base +
2153                                 NUM_PORT_REGS*i;
2154                         xhci_dbg(xhci, "USB 2.0 port at index %u, "
2155                                         "addr = %p\n", i,
2156                                         xhci->usb2_ports[port_index]);
2157                         port_index++;
2158                         if (port_index == xhci->num_usb2_ports)
2159                                 break;
2160                 }
2161         }
2162         if (xhci->num_usb3_ports) {
2163                 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
2164                                 xhci->num_usb3_ports, flags);
2165                 if (!xhci->usb3_ports)
2166                         return -ENOMEM;
2167
2168                 port_index = 0;
2169                 for (i = 0; i < num_ports; i++)
2170                         if (xhci->port_array[i] == 0x03) {
2171                                 xhci->usb3_ports[port_index] =
2172                                         &xhci->op_regs->port_status_base +
2173                                         NUM_PORT_REGS*i;
2174                                 xhci_dbg(xhci, "USB 3.0 port at index %u, "
2175                                                 "addr = %p\n", i,
2176                                                 xhci->usb3_ports[port_index]);
2177                                 port_index++;
2178                                 if (port_index == xhci->num_usb3_ports)
2179                                         break;
2180                         }
2181         }
2182         return 0;
2183 }
2184
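/*
 * Allocate and initialize the host controller's data structures: detect the
 * supported page size, program the max device slots, allocate the DCBAA, the
 * segment/context/stream DMA pools, the command ring (and point CRCR at it),
 * locate the doorbell array, set up the event ring with its ERST for
 * interrupter 0, and finally the scratchpad buffers and roothub port arrays.
 */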
2185 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2186 {
2187         dma_addr_t      dma;
2188         struct device   *dev = xhci_to_hcd(xhci)->self.controller;
2189         unsigned int    val, val2;
2190         u64             val_64;
2191         struct xhci_segment     *seg;
2192         u32 page_size;
2193         int i;
2194
2195         INIT_LIST_HEAD(&xhci->lpm_failed_devs);
2196         INIT_LIST_HEAD(&xhci->cancel_cmd_list);
2197
2198         page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
2199         xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
2200         for (i = 0; i < 16; i++) {
2201                 if ((0x1 & page_size) != 0)
2202                         break;
2203                 page_size = page_size >> 1;
2204         }
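        /*
         * A set bit n in PAGESIZE means the controller supports pages of
         * 2^(n + 12) bytes; the common value 0x1 has only bit 0 set, so the
         * loop above exits with i == 0, i.e. 4K pages.
         */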
2205         if (i < 16)
2206                 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
2207         else
2208                 xhci_warn(xhci, "WARN: no supported page size\n");
2209         /* Use 4K pages, since that's common and the minimum the HC supports */
2210         xhci->page_shift = 12;
2211         xhci->page_size = 1 << xhci->page_shift;
2212         xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
2213
2214         /*
2215          * Program the Number of Device Slots Enabled field in the CONFIG
2216          * register with the max value of slots the HC can handle.
2217          */
2218         val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
2219         xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
2220                         (unsigned int) val);
2221         val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
2222         val |= (val2 & ~HCS_SLOTS_MASK);
2223         xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
2224                         (unsigned int) val);
2225         xhci_writel(xhci, val, &xhci->op_regs->config_reg);
2226
2227         /*
2228          * xHCI section 5.4.6 - the Device Context Base Address Array must be
2229          * "physically contiguous and 64-byte (cache line) aligned".
2230          */
2231         xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2232                         GFP_KERNEL);
2233         if (!xhci->dcbaa)
2234                 goto fail;
2235         memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2236         xhci->dcbaa->dma = dma;
2237         xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
2238                         (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2239         xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2240
2241         /*
2242          * Initialize the ring segment pool.  The ring must be a contiguous
2243          * structure comprised of TRBs.  TRBs need only be 16-byte aligned,
2244          * but command ring segments must be 64-byte aligned, so we use the
2245          * stricter alignment for the whole pool.
2246          */
2247         xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2248                         SEGMENT_SIZE, 64, xhci->page_size);
2249
2250         /* See Table 46 and Note on Figure 55 */
2251         xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2252                         2112, 64, xhci->page_size);
2253         if (!xhci->segment_pool || !xhci->device_pool)
2254                 goto fail;
2255
2256         /* Linear stream context arrays don't have any boundary restrictions,
2257          * and only need to be 16-byte aligned.
2258          */
2259         xhci->small_streams_pool =
2260                 dma_pool_create("xHCI 256 byte stream ctx arrays",
2261                         dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2262         xhci->medium_streams_pool =
2263                 dma_pool_create("xHCI 1KB stream ctx arrays",
2264                         dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2265         /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2266          * will be allocated with dma_alloc_coherent()
2267          */
2268
2269         if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2270                 goto fail;
2271
2272         /* Set up the command ring to have one segment for now. */
2273         xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
2274         if (!xhci->cmd_ring)
2275                 goto fail;
2276         xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2277         xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2278                         (unsigned long long)xhci->cmd_ring->first_seg->dma);
2279
2280         /* Set the address in the Command Ring Control register */
2281         val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2282         val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2283                 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2284                 xhci->cmd_ring->cycle_state;
2285         xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (unsigned long long) val_64);
2286         xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2287         xhci_dbg_cmd_ptrs(xhci);
2288
2289         val = xhci_readl(xhci, &xhci->cap_regs->db_off);
2290         val &= DBOFF_MASK;
2291         xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
2292                         " from cap regs base addr\n", val);
2293         xhci->dba = (void __iomem *) xhci->cap_regs + val;
2294         xhci_dbg_regs(xhci);
2295         xhci_print_run_regs(xhci);
2296         /* Set ir_set to interrupt register set 0 */
2297         xhci->ir_set = &xhci->run_regs->ir_set[0];
2298
2299         /*
2300          * Event ring setup: Allocate a normal ring, but also setup
2301          * the event ring segment table (ERST).  Section 4.9.3.
2302          */
2303         xhci_dbg(xhci, "// Allocating event ring\n");
2304         xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
2305                                                 flags);
2306         if (!xhci->event_ring)
2307                 goto fail;
2308         if (xhci_check_trb_in_td_math(xhci, flags) < 0)
2309                 goto fail;
2310
2311         xhci->erst.entries = dma_alloc_coherent(dev,
2312                         sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
2313                         GFP_KERNEL);
2314         if (!xhci->erst.entries)
2315                 goto fail;
2316         xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
2317                         (unsigned long long)dma);
2318
2319         memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2320         xhci->erst.num_entries = ERST_NUM_SEGS;
2321         xhci->erst.erst_dma_addr = dma;
2322         xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
2323                         xhci->erst.num_entries,
2324                         xhci->erst.entries,
2325                         (unsigned long long)xhci->erst.erst_dma_addr);
2326
2327         /* set ring base address and size for each segment table entry */
2328         for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
2329                 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
2330                 entry->seg_addr = cpu_to_le64(seg->dma);
2331                 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
2332                 entry->rsvd = 0;
2333                 seg = seg->next;
2334         }
2335
2336         /* set ERST count with the number of entries in the segment table */
2337         val = xhci_readl(xhci, &xhci->ir_set->erst_size);
2338         val &= ERST_SIZE_MASK;
2339         val |= ERST_NUM_SEGS;
2340         xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
2341                         val);
2342         xhci_writel(xhci, val, &xhci->ir_set->erst_size);
2343
2344         xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
2345         /* set the segment table base address */
2346         xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
2347                         (unsigned long long)xhci->erst.erst_dma_addr);
2348         val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2349         val_64 &= ERST_PTR_MASK;
2350         val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2351         xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2352
2353         /* Set the event ring dequeue address */
2354         xhci_set_hc_event_deq(xhci);
2355         xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
2356         xhci_print_ir_set(xhci, 0);
2357
2358         /*
2359          * XXX: Might need to set the Interrupter Moderation Register to
2360          * something other than the default (~1ms minimum between interrupts).
2361          * See section 5.5.1.2.
2362          */
2363         init_completion(&xhci->addr_dev);
2364         for (i = 0; i < MAX_HC_SLOTS; ++i)
2365                 xhci->devs[i] = NULL;
2366         for (i = 0; i < USB_MAXCHILDREN; ++i) {
2367                 xhci->bus_state[0].resume_done[i] = 0;
2368                 xhci->bus_state[1].resume_done[i] = 0;
2369         }
2370
2371         if (scratchpad_alloc(xhci, flags))
2372                 goto fail;
2373         if (xhci_setup_port_arrays(xhci, flags))
2374                 goto fail;
2375
2376         return 0;
2377
2378 fail:
2379         xhci_warn(xhci, "Couldn't initialize memory\n");
2380         xhci_halt(xhci);
2381         xhci_reset(xhci);
2382         xhci_mem_cleanup(xhci);
2383         return -ENOMEM;
2384 }