return NULL;
}
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
{
if (ep->ep_state & EP_HAS_STREAMS)
return radix_tree_lookup(&ep->stream_info->trb_address_map,
- address >> SEGMENT_SHIFT);
+ address >> TRB_SEGMENT_SHIFT);
return ep->ring;
}
u64 address)
{
return radix_tree_lookup(&stream_info->trb_address_map,
- address >> SEGMENT_SHIFT);
+ address >> TRB_SEGMENT_SHIFT);
}
#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
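
A note for readers following the rename: a ring segment holds TRBS_PER_SEGMENT (64) TRBs of 16 bytes each, so TRB_SEGMENT_SIZE is 1024 bytes and TRB_SEGMENT_SHIFT is 10. Shifting any DMA address inside a segment right by TRB_SEGMENT_SHIFT therefore collapses it onto the same radix-tree key the driver stores the ring under. The standalone sketch below only illustrates that arithmetic; the addresses and the main() harness are made up and are not part of the driver or of this patch.

/*
 * Illustrative sketch, not part of the patch: two DMA addresses inside the
 * same TRB segment reduce to the same radix-tree key once shifted by
 * TRB_SEGMENT_SHIFT. The segment base address below is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT	64
#define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT * 16)	/* 1024 bytes */
#define TRB_SEGMENT_SHIFT	10			/* ilog2(1024) */

int main(void)
{
	uint64_t seg_dma = 0x12345400;		/* hypothetical, segment-aligned base */
	uint64_t trb_dma = seg_dma + 25 * 16;	/* a TRB partway into that segment */

	/* Both addresses collapse to the same lookup key. */
	printf("key(segment) = 0x%llx\nkey(TRB)     = 0x%llx\n",
	       (unsigned long long)(seg_dma >> TRB_SEGMENT_SHIFT),
	       (unsigned long long)(trb_dma >> TRB_SEGMENT_SHIFT));
	return 0;
}
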
cur_ring = stream_info->stream_rings[cur_stream];
for (addr = cur_ring->first_seg->dma;
- addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
+ addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
addr += trb_size) {
mapped_ring = dma_to_stream_ring(stream_info, addr);
if (cur_ring != mapped_ring) {
cur_stream, (unsigned long long) addr);
key = (unsigned long)
- (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
+ (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
ret = radix_tree_insert(&stream_info->trb_address_map,
key, cur_ring);
if (ret) {
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
- addr >> SEGMENT_SHIFT);
+ addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
- addr >> SEGMENT_SHIFT);
+ addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
* so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
- SEGMENT_SIZE, 64, xhci->page_size);
+ TRB_SEGMENT_SIZE, 64, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
-#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
-#define SEGMENT_SHIFT (ilog2(SEGMENT_SIZE))
+#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE))
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
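
For context on the last two defines, which appear here only as unchanged context: a single TRB's data buffer may not cross a 64KB boundary, so transfer code caps the first chunk of a buffer at the distance from its address to the next 64KB boundary. The sketch below is only meant to show that arithmetic; first_trb_len() and the example address are hypothetical names and values, not taken from the driver.

/*
 * Illustrative sketch, not part of the patch: how TRB_MAX_BUFF_SIZE caps the
 * amount of data one TRB may carry. first_trb_len() and the address below
 * are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SHIFT	16
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)	/* 64KB */

/* Bytes that fit in a TRB starting at 'addr' without crossing 64KB. */
static uint32_t first_trb_len(uint64_t addr, uint32_t total_len)
{
	uint32_t room = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));

	return total_len < room ? total_len : room;
}

int main(void)
{
	uint64_t addr = 0x2000f000;	/* hypothetical buffer, 4KB below a 64KB boundary */

	/* Prints "4096 of 8192": the remainder must go into following TRBs. */
	printf("%u of %u bytes fit in the first TRB\n",
	       first_trb_len(addr, 8192u), 8192u);
	return 0;
}
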