return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}
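+/* Is the ring's enqueue pointer currently resting on a link TRB? */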
+static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+{
+ struct xhci_link_trb *link = &ring->enqueue->link;
+ return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (!consumer) {
if (ring != xhci->event_ring) {
- /* If we're not dealing with 0.95 hardware,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_trb_quirk(xhci)) {
- next->link.control &= ~TRB_CHAIN;
- next->link.control |= chain;
+ if (chain) {
+ next->link.control |= TRB_CHAIN;
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= TRB_CYCLE;
+ } else {
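+ /* Leave enqueue pointing at the link TRB; prepare_ring()
+ * will carry it past the segment before the next TD.
+ */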
+ break;
}
- /* Give this link TRB to the hardware */
- wmb();
- if (next->link.control & TRB_CYCLE)
- next->link.control &= (u32) ~TRB_CYCLE;
- else
- next->link.control |= (u32) TRB_CYCLE;
}
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
struct xhci_segment *cur_seg;
unsigned int left_on_ring;
+ /* If we are currently pointing to a link TRB, advance the
+ * enqueue pointer before checking for space */
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
+
/* Check if ring is empty */
if (enq == ring->dequeue) {
/* Can't use link trbs */
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
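+ /* TRB_TYPE() shifts its argument into the type field, so
+ * the control word must be masked before comparing against
+ * TRB_TYPE(TRB_LINK).
+ */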
+ if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK) &&
(generic_trb->field[3] & LINK_TOGGLE))
*cycle_state = ~(*cycle_state) & 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
- return 0;
+ return NULL;
}
return cur_seg;
}
BUG();
trb = &state->new_deq_ptr->generic;
- if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
- struct xhci_td *cur_td = 0;
+ struct xhci_td *cur_td = NULL;
struct xhci_td *last_unlinked_td;
struct xhci_dequeue_state deq_state;
xhci_warn(xhci, "Reset device command completion "
"for disabled slot %u\n", slot_id);
break;
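+ /* NEC controllers answer the vendor-specific "get firmware"
+ * command with their firmware revision in the event's status
+ * field.
+ */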
+ case TRB_TYPE(TRB_NEC_GET_FW):
+ if (!(xhci->quirks & XHCI_NEC_HOST)) {
+ xhci->error_bitmask |= 1 << 6;
+ break;
+ }
+ xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
+ NEC_FW_MAJOR(event->status),
+ NEC_FW_MINOR(event->status));
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
inc_deq(xhci, xhci->cmd_ring, false);
}
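+/* A vendor-specific event arrived on the event ring; NEC command
+ * completion is the only one this driver understands.
+ */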
+static void handle_vendor_event(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+{
+ u32 trb_type;
+
+ trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
+ xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
+ if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
+ handle_cmd_completion(xhci, &event->event_cmd);
+}
+
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
do {
if (start_dma == 0)
- return 0;
+ return NULL;
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
suspect_dma <= end_trb_dma))
return cur_seg;
}
- return 0;
+ return NULL;
} else {
/* Might still be somewhere in this segment */
if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);
- return 0;
+ return NULL;
}
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
ep->stopped_td = NULL;
ep->stopped_trb = NULL;
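+ /* Forget which stream (if any) the endpoint halted on */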
+ ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
}
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
- struct xhci_td *td = 0;
+ struct xhci_td *td = NULL;
dma_addr_t event_dma;
struct xhci_segment *event_seg;
union xhci_trb *event_trb;
- struct urb *urb = 0;
+ struct urb *urb = NULL;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
- TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]);
}
update_ptrs = 0;
break;
default:
- xhci->error_bitmask |= 1 << 3;
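+ /* TRB types 48-63 are reserved for vendor-defined TRBs */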
+ if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
+ handle_vendor_event(xhci, event);
+ else
+ xhci->error_bitmask |= 1 << 3;
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
xhci_err(xhci, "ERROR no room on ep ring\n");
return -ENOMEM;
}
+
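+ /* The previous TD may have ended exactly on a segment boundary,
+ * leaving enqueue on a link TRB. Step past any link TRBs here so
+ * queue_trb() always writes into a normal TRB slot.
+ */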
+ if (enqueue_is_link_trb(ep_ring)) {
+ struct xhci_ring *ring = ep_ring;
+ union xhci_trb *next;
+
+ xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
+ next = ring->enqueue;
+
+ while (last_trb(xhci, ring, ring->enq_seg, next)) {
+
+ /* If we're not dealing with 0.95 hardware,
+ * clear the chain bit.
+ */
+ if (!xhci_link_trb_quirk(xhci))
+ next->link.control &= ~TRB_CHAIN;
+ else
+ next->link.control |= TRB_CHAIN;
+
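+ /* Order the chain bit update before the cycle bit flip that
+ * passes this link TRB to the hardware.
+ */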
+ wmb();
+ next->link.control ^= (u32) TRB_CYCLE;
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt()) {
+ xhci_dbg(xhci, "queue_trb: Toggle cycle "
+ "state for ring %p = %i\n",
+ ring, (unsigned int)ring->cycle_state);
+ }
+ }
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+ }
+
return 0;
}
xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
- for_each_sg(urb->sg->sg, sg, num_sgs, i) {
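+ /* urb->sg now points directly at a struct scatterlist */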
+ for_each_sg(urb->sg, sg, num_sgs, i) {
unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
* the amount of memory allocated for this scatter-gather list.
* 3. TRBs buffers can't cross 64KB boundaries.
*/
- sg = urb->sg->sg;
+ sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
trb_buff_len = TRB_MAX_BUFF_SIZE -
false);
}
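+
+/* Queue a vendor-specific command; the caller supplies all four TRB
+ * fields and queue_command() fills in the cycle bit.
+ */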
+int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+ u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ return queue_command(xhci, field1, field2, field3, field4, false);
+}
+
/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{