/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs, bool isoc)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring)
		return;
	if (ring->first_seg) {
		first_seg = ring->first_seg;
		seg = first_seg->next;
		xhci_dbg(xhci, "Freeing ring at %p\n", ring);
		while (seg != first_seg) {
			struct xhci_segment *next = seg->next;
			xhci_segment_free(xhci, seg);
			seg = next;
		}
		xhci_segment_free(xhci, first_seg);
		ring->first_seg = NULL;
	}
	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}

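/*
 * Illustrative sketch, not part of the driver: how the cycle state set up
 * above is used to decide TRB ownership.  A TRB belongs to the consumer
 * when its cycle bit matches the ring's cycle_state; the helper name is
 * hypothetical.
 */
static inline bool xhci_example_trb_is_ours(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	/* TRB_CYCLE is bit 0 of the TRB control word (field[3]) */
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
			(ring->cycle_state & TRB_CYCLE);
}
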
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
{
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next) {
			prev = ring->first_seg;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			goto fail;
		}
		xhci_link_segments(xhci, prev, next, link_trbs, isoc);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

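/*
 * Usage sketch (illustrative): most callers below allocate single-segment
 * rings with link TRBs enabled, e.g. the default control endpoint ring:
 *
 *	ring = xhci_ring_alloc(xhci, 1, true, false, GFP_KERNEL);
 */
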
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring, bool isoc)
{
	struct xhci_segment *seg = ring->first_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
					      struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}

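/*
 * Worked example (illustrative, not driver code): with 64-byte contexts the
 * input control context occupies bytes 0..63 and the slot context bytes
 * 64..127, so endpoint context N of an input container starts at byte
 * (N + 2) * CTX_SIZE.  The helper name is hypothetical.
 */
static inline size_t xhci_example_input_ep_ctx_offset(struct xhci_hcd *xhci,
		unsigned int ep_index)
{
	/* e.g. ep_index 0 with 64-byte contexts: (0 + 2) * 64 == 128 */
	return (size_t)(ep_index + 2) * CTX_SIZE(xhci->hcc_params);
}
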
/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> SEGMENT_SHIFT);
	return ep->ring;
}

/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

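/*
 * Illustrative sketch: every TRB in a segment shares the same radix tree
 * key, because SEGMENT_SHIFT drops the within-segment offset bits.  The
 * helper name is hypothetical; the driver computes this inline above.
 */
static inline unsigned long xhci_example_trb_key(dma_addr_t trb_dma)
{
	/* e.g. with 1KB segments, 0x10c912c0 >> 10 == 0x43244 */
	return (unsigned long)(trb_dma >> SEGMENT_SHIFT);
}
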
struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

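/*
 * Usage sketch (illustrative): on a transfer event for a streams endpoint,
 * the ring can be recovered either from the event's stream ID or from the
 * TRB's DMA address, and the two lookups must agree:
 *
 *	ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
 *	ring == xhci_dma_to_transfer_ring(&dev->eps[ep_index], trb_dma);
 */
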
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream, mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream, mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */

struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
}

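/*
 * Worked example (illustrative): for num_stream_ctxs == 64, fls(64) == 7,
 * so max_primary_streams == 5 above, and the xHC decodes the field back as
 * 2^(5 + 1) == 64 stream context array entries.
 */
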
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;
	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint,
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}

/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info	*tt_info;
	unsigned int		num_ports;
	int			i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first.  Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
	/* we are now at a leaf device */
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	xhci->devs[slot_id] = dev;
	return 1;
fail:
	if (dev->eps[0].ring)
		xhci_ring_free(xhci, dev->eps[0].ring);
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Scan through the xHCI roothub port array, looking for the Nth
 * entry of the correct port speed.  Return the port number of that entry.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	unsigned int num_similar_speed_ports;
	unsigned int faked_port_num;
	int i;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	faked_port_num = top_dev->portnum;
	for (i = 0, num_similar_speed_ports = 0;
			i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
			num_similar_speed_ports++;
		if (num_similar_speed_ports == faked_port_num)
			/* Roothub ports are numbered from 1 to N */
			return i+1;
	}
	return 0;
}

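/*
 * Worked example (illustrative): on a host whose port_array reads
 * {0x03, 0x03, 0x02, 0x02, 0x02} (two USB 3.0 ports followed by three
 * USB 2.0 ports), a high speed device on the second USB 2.0 roothub port
 * (faked_port_num == 2) matches the second similar-speed entry at array
 * index 3, so the function returns HW port number 4.
 */
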
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx    *slot_ctx;
	u32			port_num;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(dev->udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

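/*
 * Worked example (illustrative): a full speed isoc endpoint with
 * bInterval == 4 yields interval == 3 (2^3 == 8 frames); adding 3 converts
 * frames to microframes, giving 2^6 * 125us == 8 ms.
 */
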
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

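/*
 * Worked example (illustrative): a low speed interrupt endpoint with
 * bInterval == 10 frames is 80 microframes; fls(80) - 1 == 6, so the
 * interval is rounded down to 2^6 == 64 microframes (8 ms instead of 10 ms)
 * and the warning above is printed.
 */
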
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}

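/*
 * Worked example (illustrative): a high speed isoc endpoint reporting
 * wMaxPacketSize == 0x1400 decodes to max_packet == 1024 (bits 10:0) and
 * max_burst == 2 (bits 12:11), i.e. three transactions per microframe, for
 * a max ESIT payload of 1024 * 3 == 3072 bytes.
 */
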
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	/*
	 * Isochronous endpoint ring needs bigger size because one isoc URB
	 * carries multiple packets and it will insert multiple tds to the
	 * ring.
	 * This should be replaced with dynamic ring resizing in the future.
	 */
	if (usb_endpoint_xfer_isoc(&ep->desc))
		virt_dev->eps[ep_index].new_ring =
			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
	else
		virt_dev->eps[ep_index].new_ring =
			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));

	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));

	/* Set the max packet size and max burst */
	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = 0;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		break;
	case USB_SPEED_HIGH:
		/* Some devices get this wrong */
		if (usb_endpoint_xfer_bulk(&ep->desc))
			max_packet = 512;
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
				     & 0x1800) >> 11;
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		break;
	default:
		BUG();
	}
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
	 * should be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

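/*
 * Worked example (illustrative): an added interrupt endpoint context with
 * Max Burst == 0 and Mult == 0 describes one packet per service interval;
 * since those context fields are zero-based, the table above records
 * num_packets == 1 and mult == 1.
 */
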
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_zalloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct dev_info	*dev_info, *next;
	struct xhci_cd  *cur_cd, *next_cd;
	unsigned long	flags;
	int size;
	int i, j, num_ports;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci->cmd_ring_reserved_trbs = 0;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");
	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
		list_del(&dev_info->list);
		kfree(dev_info);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);

	xhci->usb2_ports = NULL;
	xhci->usb3_ports = NULL;
	xhci->port_array = NULL;
	xhci->rh_bw = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}

1880 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1881 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1884 dma_addr_t input_dma;
1885 struct xhci_segment *result_seg;
1886 } simple_test_vector [] = {
1887 /* A zeroed DMA field should fail */
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma +
				(TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
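/*
 * For reference, a sketch of the math these tests exercise (illustrative,
 * not driver code): a TRB's DMA address within a segment is the segment
 * base plus the TRB index times sizeof(union xhci_trb) (16 bytes), which
 * is why the vectors above offset first_seg->dma in multiples of 16.
 *
 *	static dma_addr_t trb_dma_sketch(struct xhci_segment *seg,
 *			union xhci_trb *trb)
 *	{
 *		return seg->dma + (trb - seg->trbs) * sizeof(*trb);
 *	}
 *
 * trb_dma_sketch() is a made-up name; the driver's xhci_trb_virt_to_dma()
 * does this plus a range check that the TRB actually lies in the segment.
 */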
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
			"preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
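/*
 * A note on the RW1C handling above: writing 1 to a "write 1 to clear" bit
 * like EHB clears it, while writing 0 leaves it untouched.  Masking EHB to
 * zero before the write-back is therefore what preserves it, roughly:
 *
 *	u64 val = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 *	val &= ~(u64) ERST_EHB;		// write 0: hardware keeps EHB as-is
 *	xhci_write_64(xhci, val, &xhci->ir_set->erst_dequeue);
 *
 * (Illustration only; the real update must also carry the new dequeue
 * pointer, as the function above does.)
 */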
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x\n",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
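/*
 * For reference, the Supported Protocol Capability layout parsed above
 * (xHCI spec section 7.2); the macro names below are illustrative stand-ins
 * for the real ones in xhci-ext-caps.h:
 *
 *	dword 0: bits  7:0  = capability ID (0x02, Supported Protocol)
 *	         bits 15:8  = next capability pointer
 *	         bits 23:16 = minor revision (BCD)
 *	         bits 31:24 = major revision (BCD, 0x02 or 0x03)
 *	dword 2: bits  7:0  = compatible port offset (1-based)
 *	         bits 15:8  = compatible port count
 *
 *	#define EXT_PORT_OFF_SKETCH(x)		((x) & 0xff)
 *	#define EXT_PORT_COUNT_SKETCH(x)	(((x) >> 8) & 0xff)
 */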
/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that specify what speeds each port is supposed to be.  We can't count on
 * the port speed bits in the PORTSC register being correct until a device is
 * connected, but we need to set up the two fake roothubs with the correct
 * number of USB 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr;
	u32 offset;
	unsigned int num_ports;
	int i, j, port_index;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, "
				"unable to set up roothub.\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.
	 */
	addr = &xhci->cap_regs->hc_capbase + offset;
	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id));
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg(xhci, "USB 2.0 port at index %u, "
					"addr = %p\n", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg(xhci, "USB 3.0 port at index %u, "
						"addr = %p\n", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
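/*
 * A note on the pointer math above: each root hub port owns a block of
 * NUM_PORT_REGS (four) 32-bit operational registers (PORTSC, PORTPMSC,
 * PORTLI, and a fourth slot that is reserved in older spec revisions), so
 * port i's status register sits at port_status_base + NUM_PORT_REGS * i.
 * Sketch, with port being a hypothetical 0-based index:
 *
 *	__le32 __iomem *portsc = &xhci->op_regs->port_status_base +
 *			NUM_PORT_REGS * port;
 *	u32 portsc_val = xhci_readl(xhci, portsc);
 */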
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
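	/*
	 * For reference: the DCBAA is an array of 64-bit pointers indexed by
	 * slot ID, and entry 0 is reserved for the scratchpad buffer array
	 * when the HC requests scratchpad buffers (set up later by
	 * scratchpad_alloc()).
	 */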
	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;
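	/*
	 * Where the 2112 above comes from: a device context is 32 contexts
	 * of 64 bytes each (2048 bytes), and an input context adds one
	 * Input Control Context, so (32 + 1) * 64 = 2112 bytes covers the
	 * larger of the two.
	 */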
	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%016llx\n", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
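	/*
	 * The masking above works because the command ring is 64-byte
	 * aligned: the low six bits of CRCR are control/status flags (RCS,
	 * CS, CA, CRR), so the DMA address can be ORed with the ring's
	 * cycle state, which lands in the Ring Cycle State (RCS) bit.
	 */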
	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];
	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
			flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
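	/*
	 * For reference, an ERST entry (spec section 6.5) is a 64-byte
	 * aligned 64-bit segment base address plus a segment size counted
	 * in TRBs (valid values are 16 to 4096), which is why seg_size is
	 * TRBS_PER_SEGMENT rather than a byte count.
	 */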
	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, 0);
	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
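	/*
	 * For reference, the IMOD interval field counts in 250 ns units, so
	 * the default of 4000 works out to 1 ms.  A sketch of an override
	 * (illustration only; the value is arbitrary):
	 *
	 *	u32 imod = xhci_readl(xhci, &xhci->ir_set->irq_control);
	 *	imod &= ~ER_IRQ_INTERVAL_MASK;
	 *	imod |= (u32) 160;	// 160 * 250 ns = 40 us
	 *	xhci_writel(xhci, imod, &xhci->ir_set->irq_control);
	 */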
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}