Merge 3.1-rc4 into usb-next
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 70cacbb..58a6e26 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,15 +113,13 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        if (ring == xhci->event_ring)
                return trb == &seg->trbs[TRBS_PER_SEGMENT];
        else
-               return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
-                       == TRB_TYPE(TRB_LINK);
+               return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
        struct xhci_link_trb *link = &ring->enqueue->link;
-       return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
-               TRB_TYPE(TRB_LINK));
+       return TRB_TYPE_LINK_LE32(link->control);
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
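The TRB_TYPE_LINK_LE32() / TRB_TYPE_NOOP_LE32() helpers used throughout this
diff replace the open-coded le32_to_cpu()-then-mask pattern. Comparing in
little-endian space means cpu_to_le32() of a constant folds at compile time,
so big-endian CPUs swap the constant once at build time instead of
byte-swapping the live TRB field on every check; the same reasoning drives
the LINK_TOGGLE and EP_STATE_HALTED comparisons below. A sketch of what the
companion xhci.h change presumably defines (an assumption, since that header
is not part of this diff):

#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))

Keeping both operands __le32 also quiets sparse's endianness checking, which
warns when __le32 values mix with plain u32 arithmetic.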
@@ -372,7 +370,7 @@ static struct xhci_segment *find_trb_seg(
        while (cur_seg->trbs > trb ||
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-               if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
+               if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
                        *cycle_state ^= 0x1;
                cur_seg = cur_seg->next;
                if (cur_seg == start_seg)
@@ -489,8 +487,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        }
 
        trb = &state->new_deq_ptr->generic;
-       if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
-           TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
+       if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+           (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -516,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                        (unsigned long long) addr);
 }
 
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
+ */
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-               struct xhci_td *cur_td)
+               struct xhci_td *cur_td, bool flip_cycle)
 {
        struct xhci_segment *cur_seg;
        union xhci_trb *cur_trb;
@@ -525,12 +527,17 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
                        true;
                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-               if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
-                   == TRB_TYPE(TRB_LINK)) {
+               if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
                        /* Unchain any chained Link TRBs, but
                         * leave the pointers intact.
                         */
                        cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+                       /* Flip the cycle bit (link TRBs can't be the first
+                        * or last TRB).
+                        */
+                       if (flip_cycle)
+                               cur_trb->generic.field[3] ^=
+                                       cpu_to_le32(TRB_CYCLE);
                        xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
                        xhci_dbg(xhci, "Address = %p (0x%llx dma); "
                                        "in seg %p (0x%llx dma)\n",
@@ -544,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                        cur_trb->generic.field[2] = 0;
                        /* Preserve only the cycle bit of this TRB */
                        cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+                       /* Flip the cycle bit except on the first or last TRB */
+                       if (flip_cycle && cur_trb != cur_td->first_trb &&
+                                       cur_trb != cur_td->last_trb)
+                               cur_trb->generic.field[3] ^=
+                                       cpu_to_le32(TRB_CYCLE);
                        cur_trb->generic.field[3] |= cpu_to_le32(
                                TRB_TYPE(TRB_TR_NOOP));
                        xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -722,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                                        cur_td->urb->stream_id,
                                        cur_td, &deq_state);
                else
-                       td_to_noop(xhci, ep_ring, cur_td);
+                       td_to_noop(xhci, ep_ring, cur_td, false);
 remove_finished_td:
                /*
                 * The event handler won't see a completion for this TD anymore,
                 * so remove it from the endpoint ring's TD list.  Keep it in
                 * the cancelled TD list for URB completion later.
                 */
-               list_del(&cur_td->td_list);
+               list_del_init(&cur_td->td_list);
        }
        last_unlinked_td = cur_td;
        xhci_stop_watchdog_timer_in_irq(xhci, ep);
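The list_del() to list_del_init() conversions in this and the following hunks
are what make the later !list_empty(&td->cancelled_td_list) tests (in the
watchdog and in td_cleanup) safe on a TD that was already removed: list_del()
poisons the entry's pointers, so list_empty() on the entry afterwards is
undefined, while list_del_init() re-points the entry at itself. A minimal
user-space sketch of the distinction, re-implementing just the <linux/list.h>
pieces involved:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;	/* unlink from the list... */
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);			/* ...then point at itself */
}

int main(void)
{
	struct list_head cancelled, td;

	INIT_LIST_HEAD(&cancelled);
	list_add(&td, &cancelled);
	list_del_init(&td);
	/* Well-defined, exactly like the checks in the hunks above/below: */
	printf("still queued? %s\n", list_empty(&td) ? "no" : "yes");
	return 0;
}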
@@ -757,7 +769,7 @@ remove_finished_td:
        do {
                cur_td = list_entry(ep->cancelled_td_list.next,
                                struct xhci_td, cancelled_td_list);
-               list_del(&cur_td->cancelled_td_list);
+               list_del_init(&cur_td->cancelled_td_list);
 
                /* Clean up the cancelled URB */
                /* Doesn't matter what we pass for status, since the core will
@@ -865,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
                                cur_td = list_first_entry(&ring->td_list,
                                                struct xhci_td,
                                                td_list);
-                               list_del(&cur_td->td_list);
+                               list_del_init(&cur_td->td_list);
                                if (!list_empty(&cur_td->cancelled_td_list))
-                                       list_del(&cur_td->cancelled_td_list);
+                                       list_del_init(&cur_td->cancelled_td_list);
                                xhci_giveback_urb_in_irq(xhci, cur_td,
                                                -ESHUTDOWN, "killed");
                        }
@@ -876,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
                                                &temp_ep->cancelled_td_list,
                                                struct xhci_td,
                                                cancelled_td_list);
-                               list_del(&cur_td->cancelled_td_list);
+                               list_del_init(&cur_td->cancelled_td_list);
                                xhci_giveback_urb_in_irq(xhci, cur_td,
                                                -ESHUTDOWN, "killed");
                        }
@@ -1000,7 +1012,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
         * but we don't care.
         */
        xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
-                (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
+                GET_COMP_CODE(le32_to_cpu(event->status)));
 
        /* HW with the reset endpoint quirk needs to have a configure endpoint
         * command complete before the endpoint can be used.  Queue that here
@@ -1458,7 +1470,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
                 * endpoint anyway.  Check if a babble halted the
                 * endpoint.
                 */
-               if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
+               if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+                   cpu_to_le32(EP_STATE_HALTED))
                        return 1;
 
        return 0;
@@ -1567,10 +1580,10 @@ td_cleanup:
                        else
                                *status = 0;
                }
-               list_del(&td->td_list);
+               list_del_init(&td->td_list);
                /* Was this TD slated to be cancelled but completed anyway? */
                if (!list_empty(&td->cancelled_td_list))
-                       list_del(&td->cancelled_td_list);
+                       list_del_init(&td->cancelled_td_list);
 
                urb_priv->td_cnt++;
                /* Giveback the urb when all the tds are completed */
@@ -1753,10 +1766,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                for (cur_trb = ep_ring->dequeue,
                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
                     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-                       if ((le32_to_cpu(cur_trb->generic.field[3]) &
-                        TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-                           (le32_to_cpu(cur_trb->generic.field[3]) &
-                        TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+                       if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+                           !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
                                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
                }
                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
@@ -1885,10 +1896,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
                                cur_trb != event_trb;
                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-                       if ((le32_to_cpu(cur_trb->generic.field[3]) &
-                        TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-                           (le32_to_cpu(cur_trb->generic.field[3]) &
-                        TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+                       if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+                           !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
                                td->urb->actual_length +=
                                        TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
                }
@@ -2047,8 +2056,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
                                  ep_index);
                        xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-                                (unsigned int) (le32_to_cpu(event->flags)
-                                                & TRB_TYPE_BITMASK)>>10);
+                                (le32_to_cpu(event->flags) &
+                                 TRB_TYPE_BITMASK)>>10);
                        xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
                        if (ep->skip) {
                                ep->skip = false;
@@ -2119,9 +2128,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 * corresponding TD has been cancelled. Just ignore
                 * the TD.
                 */
-               if ((le32_to_cpu(event_trb->generic.field[3])
-                            & TRB_TYPE_BITMASK)
-                                == TRB_TYPE(TRB_TR_NOOP)) {
+               if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
                        xhci_dbg(xhci,
                                 "event_trb is a no-op TRB. Skip it\n");
                        goto cleanup;
@@ -2452,7 +2459,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                                next->link.control |= cpu_to_le32(TRB_CHAIN);
 
                        wmb();
-                       next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
+                       next->link.control ^= cpu_to_le32(TRB_CYCLE);
 
                        /* Toggle the cycle bit after the last ring segment. */
                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -2508,11 +2515,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
        if (td_index == 0) {
                ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
-               if (unlikely(ret)) {
-                       xhci_urb_free_priv(xhci, urb_priv);
-                       urb->hcpriv = NULL;
+               if (unlikely(ret))
                        return ret;
-               }
        }
 
        td->urb = urb;
@@ -2680,11 +2684,15 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 {
        int packets_transferred;
 
+       /* One TRB with a zero-length data packet. */
+       if (running_total == 0 && trb_buff_len == 0)
+               return 0;
+
        /* All the TRB queueing functions don't count the current TRB in
         * running_total.
         */
        packets_transferred = (running_total + trb_buff_len) /
-               le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+               usb_endpoint_maxp(&urb->ep->desc);
 
        return xhci_td_remainder(total_packet_count - packets_transferred);
 }
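The new zero-length guard matters for transfers with a zero-length data
packet: with running_total and trb_buff_len both zero, packets_transferred
would be 0 and the function would report the entire total_packet_count as
still outstanding after the only TRB. A standalone restatement with
hypothetical values (the real xhci_td_remainder() additionally shifts and
clamps the result into the TD-size bitfield):

#include <stdio.h>

static unsigned int v1_0_td_remainder(unsigned int running_total,
		unsigned int trb_buff_len, unsigned int total_packet_count,
		unsigned int maxp)
{
	/* One TRB with a zero-length data packet: nothing follows it. */
	if (running_total == 0 && trb_buff_len == 0)
		return 0;

	/* The queueing functions don't count the current TRB in
	 * running_total, so add trb_buff_len before dividing. */
	return total_packet_count - (running_total + trb_buff_len) / maxp;
}

int main(void)
{
	/* 6-packet TD, maxp 512: after a first TRB carrying 1024 bytes,
	 * 4 packets remain. */
	printf("%u\n", v1_0_td_remainder(0, 1024, 6, 512));	/* 4 */
	/* Zero-length data packet: the guard yields 0 instead of 1. */
	printf("%u\n", v1_0_td_remainder(0, 0, 1, 512));	/* 0 */
	return 0;
}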
@@ -2714,7 +2722,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        num_trbs = count_sg_trbs_needed(xhci, urb);
        num_sgs = urb->num_sgs;
        total_packet_count = roundup(urb->transfer_buffer_length,
-                       le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+                       usb_endpoint_maxp(&urb->ep->desc));
 
        trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
@@ -2921,7 +2929,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        running_total = 0;
        total_packet_count = roundup(urb->transfer_buffer_length,
-                       le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+                       usb_endpoint_maxp(&urb->ep->desc));
        /* How much data is in the first TRB? */
        addr = (u64) urb->transfer_dma;
        trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -3121,20 +3129,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
                struct urb *urb, int i)
 {
        int num_trbs = 0;
-       u64 addr, td_len, running_total;
+       u64 addr, td_len;
 
        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
        td_len = urb->iso_frame_desc[i].length;
 
-       running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
-       running_total &= TRB_MAX_BUFF_SIZE - 1;
-       if (running_total != 0)
-               num_trbs++;
-
-       while (running_total < td_len) {
+       num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+                       TRB_MAX_BUFF_SIZE);
+       if (num_trbs == 0)
                num_trbs++;
-               running_total += TRB_MAX_BUFF_SIZE;
-       }
 
        return num_trbs;
 }
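The DIV_ROUND_UP() form is equivalent to the deleted count-up loop: a TD of
td_len bytes starting (addr & (TRB_MAX_BUFF_SIZE - 1)) bytes into a 64 KiB
region spans ceil((offset + td_len) / 64 KiB) such regions, one TRB per
region, and the num_trbs == 0 bump covers zero-length isoc packets that
still consume one TRB. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1U << 16)	/* 64 KiB, as in xhci.h */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int isoc_trbs_needed(uint64_t addr, uint64_t td_len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	return num_trbs ? num_trbs : 1;	/* zero-length TD still needs a TRB */
}

int main(void)
{
	/* 70000 bytes starting 1000 bytes into a 64 KiB region: the first
	 * TRB carries 64536 bytes, the second the remaining 5464. */
	assert(isoc_trbs_needed(1000, 70000) == 2);
	/* An aligned 128 KiB buffer exactly fills two TRBs. */
	assert(isoc_trbs_needed(0, 2 * TRB_MAX_BUFF_SIZE) == 2);
	/* Zero-length packet. */
	assert(isoc_trbs_needed(0, 0) == 1);
	puts("ok");
	return 0;
}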
@@ -3234,6 +3237,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;
 
+       urb_priv = urb->hcpriv;
        /* Queue the first TRB, even if it's zero-length */
        for (i = 0; i < num_tds; i++) {
                unsigned int total_packet_count;
@@ -3245,9 +3249,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                addr = start_addr + urb->iso_frame_desc[i].offset;
                td_len = urb->iso_frame_desc[i].length;
                td_remain_len = td_len;
-               /* FIXME: Ignoring zero-length packets, can those happen? */
                total_packet_count = roundup(td_len,
-                               le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+                               usb_endpoint_maxp(&urb->ep->desc));
+               /* A zero-length transfer still involves at least one packet. */
+               if (total_packet_count == 0)
+                       total_packet_count++;
                burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
                                total_packet_count);
                residue = xhci_get_last_burst_packet_count(xhci,
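The total_packet_count bump above exists because roundup(0, maxp) is 0: a
zero-length isoc frame would otherwise be accounted as zero packets, even
though a zero-length payload is still one transaction on the bus. In
miniature (the maxp value is hypothetical):

#include <assert.h>

#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))	/* as in kernel.h */

int main(void)
{
	unsigned int total_packet_count = roundup(0, 1024);

	assert(total_packet_count == 0);	/* hence the bump */
	if (total_packet_count == 0)
		total_packet_count++;
	assert(total_packet_count == 1);
	return 0;
}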
@@ -3257,12 +3263,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
-               if (ret < 0)
-                       return ret;
+               if (ret < 0) {
+                       if (i == 0)
+                               return ret;
+                       goto cleanup;
+               }
 
-               urb_priv = urb->hcpriv;
                td = urb_priv->td[i];
-
                for (j = 0; j < trbs_per_td; j++) {
                        u32 remainder = 0;
                        field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3352,6 +3359,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);
        return 0;
+cleanup:
+       /* Clean up a partially enqueued isoc transfer. */
+
+       for (i--; i >= 0; i--)
+               list_del_init(&urb_priv->td[i]->td_list);
+
+       /* Use the first TD as a temporary variable to turn the TDs we've queued
+        * into No-ops with a software-owned cycle bit. That way the hardware
+        * won't accidentally start executing bogus TDs when we partially
+        * overwrite them.  td->first_trb and td->start_seg are already set.
+        */
+       urb_priv->td[0]->last_trb = ep_ring->enqueue;
+       /* Every TRB except the first & last will have its cycle bit flipped. */
+       td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+       /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+       ep_ring->enqueue = urb_priv->td[0]->first_trb;
+       ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+       ep_ring->cycle_state = start_cycle;
+       usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+       return ret;
 }
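The rollback above leans on the cycle-bit ownership rule: the host controller
only consumes a TRB whose cycle bit matches its current consumer cycle state,
so flipping the bit on the partially written TRBs hands them back to software
before the enqueue pointer and cycle_state are rewound to the first TRB. A
small sketch of the rule (illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define TRB_CYCLE	(1U << 0)	/* bit 0 of field[3], as in xhci.h */

static int hw_owns_trb(uint32_t trb_field3, uint32_t cycle_state)
{
	return (trb_field3 & TRB_CYCLE) == (cycle_state & TRB_CYCLE);
}

int main(void)
{
	uint32_t field3 = TRB_CYCLE;	/* TRB queued while cycle_state = 1 */

	assert(hw_owns_trb(field3, 1));
	field3 ^= TRB_CYCLE;		/* what td_to_noop(..., true) does */
	assert(!hw_owns_trb(field3, 1));
	return 0;
}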
 
 /*