1 From: Ajay Kumar Gupta <ajay.gupta@ti.com>
2 To: linux-usb@vger.kernel.org
3 Cc: linux-omap@vger.kernel.org, felipe.balbi@nokia.com,
4 Subject: [PATCH] MUSB: Fix for kernel panic with multiple bulk transfer
5 Date: Wed, 1 Oct 2008 13:08:56 +0530
7 Fixes a kernel panic seen when multiple copies are performed among more than
8 two mass storage media and a transfer is aborted. musb_advance_schedule(),
9 musb_urb_dequeue(), musb_cleanup_urb() and musb_h_disable() functions have
10 been modified to correct urb handling associated with bulk and control
11 endpoints which are multiplexed on one hardware endpoint.
13 musb_advance_schedule() has been removed from musb_cleanup_urb() and added
14 to musb_urb_dequeue(). musb_h_disable() has been modified to take care of
15 multiple qh on same hw_ep scenario.
17 Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
18 CC: Romit Dasgupta <romit@ti.com>
20 Suggestions are welcome on moving the while loop that does kfree(qh) from
21 musb_advance_schedule() and musb_h_disable() into musb_giveback().
23 drivers/usb/musb/musb_host.c | 105 ++++++++++++++++++++++++++++++-----------
24 1 files changed, 77 insertions(+), 28 deletions(-)
26 diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
27 index 8b4be01..c2474de 100644
28 --- a/drivers/usb/musb/musb_host.c
29 +++ b/drivers/usb/musb/musb_host.c
30 @@ -427,8 +427,17 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
31 qh = musb_giveback(qh, urb, 0);
33 qh = musb_giveback(qh, urb, urb->status);
34 + while (qh && qh->is_ready && list_empty(&qh->hep->urb_list)) {
35 + struct list_head *head;
36 + head = qh->ring.prev;
37 + list_del(&qh->ring);
38 + qh->hep->hcpriv = NULL;
40 + qh = first_qh(head);
43 - if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
45 + if (qh && qh->is_ready) {
46 DBG(4, "... next ep%d %cX urb %p\n",
47 hw_ep->epnum, is_in ? 'R' : 'T',
49 @@ -1964,8 +1973,6 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
50 /* flush cpu writebuffer */
51 csr = musb_readw(epio, MUSB_TXCSR);
54 - musb_advance_schedule(ep->musb, urb, ep, is_in);
58 @@ -2026,13 +2033,24 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
59 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
60 if (ret < 0 || (sched && qh != first_qh(sched))) {
61 int ready = qh->is_ready;
63 + int type = urb->pipe;
66 __musb_giveback(musb, urb, 0);
67 - qh->is_ready = ready;
70 + if (list_empty(&qh->hep->urb_list) && list_empty(&qh->ring))
71 + list_del(&qh->ring);
73 + qh->is_ready = ready;
74 + if (usb_pipeisoc(type))
75 + musb->periodic[qh->hw_ep->epnum] = NULL;
77 ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
79 + musb_advance_schedule(qh->hw_ep->musb, urb, qh->hw_ep,
80 + urb->pipe & USB_DIR_IN);
84 spin_unlock_irqrestore(&musb->lock, flags);
86 @@ -2046,14 +2064,17 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
88 struct musb *musb = hcd_to_musb(hcd);
89 u8 is_in = epnum & USB_DIR_IN;
90 - struct musb_qh *qh = hep->hcpriv;
91 + struct musb_qh *qh, *qh_for_curr_urb;
92 struct urb *urb, *tmp;
93 struct list_head *sched;
99 spin_lock_irqsave(&musb->lock, flags);
102 + spin_unlock_irqrestore(&musb->lock, flags);
107 case USB_ENDPOINT_XFER_CONTROL:
108 @@ -2065,6 +2086,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
110 sched = &musb->out_bulk;
112 + case USB_ENDPOINT_XFER_ISOC:
113 + case USB_ENDPOINT_XFER_INT:
114 + for (i = 0; i < musb->nr_endpoints; i++) {
115 + if (musb->periodic[i] == qh)
120 /* REVISIT when we get a schedule tree, periodic transfers
121 * won't always be at the head of a singleton queue...
122 @@ -2073,26 +2101,47 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
126 - /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
128 /* kick first urb off the hardware, if needed */
130 - if (!sched || qh == first_qh(sched)) {
132 + qh_for_curr_urb = qh;
135 - /* make software (then hardware) stop ASAP */
136 - if (!urb->unlinked)
137 - urb->status = -ESHUTDOWN;
140 - musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
144 - /* then just nuke all the others */
145 - list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
146 - musb_giveback(qh, urb, -ESHUTDOWN);
149 + /* make software (then hardware) stop ASAP */
150 + if (!urb->unlinked)
151 + urb->status = -ESHUTDOWN;
152 + /* cleanup first urb of first qh; */
153 + if (qh == first_qh(sched)) {
154 + musb_cleanup_urb(urb, qh,
155 + urb->pipe & USB_DIR_IN);
157 + qh = musb_giveback(qh, urb, -ESHUTDOWN);
158 + if (qh == qh_for_curr_urb) {
159 + list_for_each_entry_safe_from(urb, tmp,
160 + &hep->urb_list, urb_list) {
161 + qh = musb_giveback(qh, tmp, -ESHUTDOWN);
162 + if (qh != qh_for_curr_urb)
167 + /* pick the next candidate and go */
168 + if (qh && qh->is_ready) {
169 + while (qh && qh->is_ready &&
170 + list_empty(&qh->hep->urb_list)) {
171 + struct list_head *head;
172 + head = qh->ring.prev;
173 + list_del(&qh->ring);
174 + qh->hep->hcpriv = NULL;
176 + qh = first_qh(head);
178 + if (qh && qh->is_ready) {
179 + epnum = qh->hep->desc.bEndpointAddress;
180 + is_in = epnum & USB_DIR_IN;
181 + musb_start_urb(musb, is_in, qh);
185 spin_unlock_irqrestore(&musb->lock, flags);
192 To unsubscribe from this list: send the line "unsubscribe linux-omap" in
193 the body of a message to majordomo@vger.kernel.org
194 More majordomo info at http://vger.kernel.org/majordomo-info.html