[pandora-kernel.git] drivers/usb/wusbcore/wa-xfer.c
1 /*
2  * WUSB Wire Adapter
2  * Data transfer and URB enqueuing
4  *
5  * Copyright (C) 2005-2006 Intel Corporation
6  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  *
23  * How transfers work: get a buffer, break it up into segments (the
24  * segment size is a multiple of the maxpacket size). For each segment,
25  * issue a segment request (struct wa_xfer_*), then send the data
26  * buffer if outbound, or nothing if inbound (all over the DTO endpoint).
27  *
28  * For each submitted segment request, a notification will come over
29  * the NEP endpoint and a transfer result (struct xfer_result) will
30  * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31  * data coming (inbound transfer), schedule a read and handle it.
32  *
33  * Sounds simple; it is a pain to implement.
34  *
35  *
36  * ENTRY POINTS
37  *
38  *   FIXME
39  *
40  * LIFE CYCLE / STATE DIAGRAM
41  *
42  *   FIXME
43  *
44  * THIS CODE IS DISGUSTING
45  *
46  *   Warned you are; it's my second try and I'm still not happy with it.
47  *
48  * NOTES:
49  *
50  *   - No iso
51  *
52  *   - Supports DMA xfers, control, bulk and maybe interrupt
53  *
54  *   - Does not recycle unused rpipes
55  *
56  *     An rpipe is assigned to an endpoint the first time it is used,
57  *     and then it's there, assigned, until the endpoint is disabled
58  *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59  *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60  *     (should be a mutex).
61  *
62  *     Two ways it could be done:
63  *
64  *     (a) set up a timer every time an rpipe's use count drops to 1
65  *         (which means unused) or when a transfer ends. Reset the
66  *         timer when an xfer is queued. If the timer expires, release
67  *         the rpipe [see rpipe_ep_disable()].
68  *
69  *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
70  *         and none are found, go over the list, check each rpipe's
71  *         endpoint and activity record, and take it if it has been
72  *         idle (no last-xfer-done-ts in the last x seconds)
73  *
74  *     However, because we have a limited set of
75  *     resources (max-segments-at-the-same-time per xfer,
76  *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
77  *     we are going to have to rebuild all this around a scheduler
78  *     that keeps a list of transactions to do and schedules them
79  *     based on the availability of the different required components
80  *     (blocks, rpipes, segment slots, etc). Painful.
81  */
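/*
 * A compressed sketch of the flow described above (a reading aid only,
 * not normative; the endpoint and struct names are the ones used in
 * this file):
 *
 *   OUT segment:  DTO <- wa_xfer_* segment request
 *                 DTO <- segment data buffer
 *                 DTI -> xfer_result (status + length)         seg done
 *
 *   IN segment:   DTO <- wa_xfer_* segment request
 *                 DTI -> xfer_result (data available)
 *                 DTI -> segment data read into the URB buffer seg done
 */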
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/slab.h>
85 #include <linux/hash.h>
86 #include <linux/ratelimit.h>
87
88 #include "wa-hc.h"
89 #include "wusbhc.h"
90
91 enum {
92         WA_SEGS_MAX = 255,
93 };
94
95 enum wa_seg_status {
96         WA_SEG_NOTREADY,
97         WA_SEG_READY,
98         WA_SEG_DELAYED,
99         WA_SEG_SUBMITTED,
100         WA_SEG_PENDING,
101         WA_SEG_DTI_PENDING,
102         WA_SEG_DONE,
103         WA_SEG_ERROR,
104         WA_SEG_ABORTED,
105 };
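/*
 * Typical life cycle of a segment, as driven by the code below (a
 * reading aid inferred from this file, not a normative diagram; ERROR
 * and ABORTED can be entered from almost any of these states):
 *
 *   NOTREADY -> READY -> SUBMITTED -> PENDING -> [DTI_PENDING] -> DONE
 *                  \-> DELAYED -> SUBMITTED  (via wa_xfer_delayed_run())
 *
 * DTI_PENDING only happens for inbound segments, while the data is
 * being read over the DTI endpoint (see wa_xfer_result_chew()).
 */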
106
107 static void wa_xfer_delayed_run(struct wa_rpipe *);
108
109 /*
110  * Life cycle governed by 'struct urb' (the refcount of the struct is
111  * that of the 'struct urb' and usb_free_urb() would free the whole
112  * struct).
113  */
114 struct wa_seg {
115         struct urb urb;
116         struct urb *dto_urb;            /* for data output */
117         struct list_head list_node;     /* for rpipe->req_list */
118         struct wa_xfer *xfer;           /* our xfer */
119         u8 index;                       /* which segment we are */
120         enum wa_seg_status status;
121         ssize_t result;                 /* bytes xfered or error */
122         struct wa_xfer_hdr xfer_hdr;
123         u8 xfer_extra[];                /* extra space for xfer_hdr_ctl */
124 };
125
126 static void wa_seg_init(struct wa_seg *seg)
127 {
128         /* usb_init_urb() repeats a lot of work we don't need, so just init the kref */
129         kref_init(&seg->urb.kref);
130 }
131
132 /*
133  * Protected by xfer->lock
134  *
135  */
136 struct wa_xfer {
137         struct kref refcnt;
138         struct list_head list_node;
139         spinlock_t lock;
140         u32 id;
141
142         struct wahc *wa;                /* Wire adapter we are plugged to */
143         struct usb_host_endpoint *ep;
144         struct urb *urb;                /* URB we are transferring for */
145         struct wa_seg **seg;            /* transfer segments */
146         u8 segs, segs_submitted, segs_done;
147         unsigned is_inbound:1;
148         unsigned is_dma:1;
149         size_t seg_size;
150         int result;
151
152         gfp_t gfp;                      /* allocation mask */
153
154         struct wusb_dev *wusb_dev;      /* for activity timestamps */
155 };
156
157 static inline void wa_xfer_init(struct wa_xfer *xfer)
158 {
159         kref_init(&xfer->refcnt);
160         INIT_LIST_HEAD(&xfer->list_node);
161         spin_lock_init(&xfer->lock);
162 }
163
164 /*
165  * Destroy a transfer structure
166  *
167  * Note that the xfer->seg[index] thingies follow the URB life cycle,
168  * so we need to put them, not free them.
169  */
170 static void wa_xfer_destroy(struct kref *_xfer)
171 {
172         struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
173         if (xfer->seg) {
174                 unsigned cnt;
175                 for (cnt = 0; cnt < xfer->segs; cnt++) {
176                         if (xfer->is_inbound)
177                                 usb_put_urb(xfer->seg[cnt]->dto_urb);
178                         usb_put_urb(&xfer->seg[cnt]->urb);
179                 }
180         }
181         kfree(xfer);
182 }
183
184 static void wa_xfer_get(struct wa_xfer *xfer)
185 {
186         kref_get(&xfer->refcnt);
187 }
188
189 static void wa_xfer_put(struct wa_xfer *xfer)
190 {
191         kref_put(&xfer->refcnt, wa_xfer_destroy);
192 }
193
194 /*
195  * xfer is referenced
196  *
197  * xfer->lock has to be unlocked
198  *
199  * We take xfer->lock for setting the result; this is a barrier
200  * against drivers/usb/core/hcd.c:unlink1() being called after we call
201  * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
202  * reference to the transfer.
203  */
204 static void wa_xfer_giveback(struct wa_xfer *xfer)
205 {
206         unsigned long flags;
207
208         spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
209         list_del_init(&xfer->list_node);
210         spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
211         /* FIXME: segmentation broken -- kills DWA */
212         wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
213         wa_put(xfer->wa);
214         wa_xfer_put(xfer);
215 }
216
217 /*
218  * xfer is referenced
219  *
220  * xfer->lock has to be unlocked
221  */
222 static void wa_xfer_completion(struct wa_xfer *xfer)
223 {
224         if (xfer->wusb_dev)
225                 wusb_dev_put(xfer->wusb_dev);
226         rpipe_put(xfer->ep->hcpriv);
227         wa_xfer_giveback(xfer);
228 }
229
230 /*
231  * If transfer is done, wrap it up and return true
232  *
233  * xfer->lock has to be locked
234  */
235 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
236 {
237         struct device *dev = &xfer->wa->usb_iface->dev;
238         unsigned result, cnt;
239         struct wa_seg *seg;
240         struct urb *urb = xfer->urb;
241         unsigned found_short = 0;
242
243         result = xfer->segs_done == xfer->segs_submitted;
244         if (result == 0)
245                 goto out;
246         urb->actual_length = 0;
247         for (cnt = 0; cnt < xfer->segs; cnt++) {
248                 seg = xfer->seg[cnt];
249                 switch (seg->status) {
250                 case WA_SEG_DONE:
251                         if (found_short && seg->result > 0) {
252                                 dev_dbg(dev, "xfer %p#%u: bad short segments (%zd)\n",
253                                         xfer, cnt, seg->result);
254                                 urb->status = -EINVAL;
255                                 goto out;
256                         }
257                         urb->actual_length += seg->result;
258                         if (seg->result < xfer->seg_size
259                             && cnt != xfer->segs-1)
260                                 found_short = 1;
261                         dev_dbg(dev, "xfer %p#%u: DONE short %d "
262                                 "result %zd urb->actual_length %d\n",
263                                 xfer, seg->index, found_short, seg->result,
264                                 urb->actual_length);
265                         break;
266                 case WA_SEG_ERROR:
267                         xfer->result = seg->result;
268                         dev_dbg(dev, "xfer %p#%u: ERROR result %zd\n",
269                                 xfer, seg->index, seg->result);
270                         goto out;
271                 case WA_SEG_ABORTED:
272                         dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
273                                 xfer, seg->index, urb->status);
274                         xfer->result = urb->status;
275                         goto out;
276                 default:
277                         dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
278                                  xfer, cnt, seg->status);
279                         xfer->result = -EINVAL;
280                         goto out;
281                 }
282         }
283         xfer->result = 0;
284 out:
285         return result;
286 }
287
288 /*
289  * Initialize a transfer's ID
290  *
291  * We need to use a sequential number; if we use the pointer or the
292  * hash of the pointer, it can repeat over sequential transfers and
293  * then it will confuse the HWA... which makes one wonder why they
294  * put a 32-bit handle in there in the first place.
295  */
296 static void wa_xfer_id_init(struct wa_xfer *xfer)
297 {
298         xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
299 }
300
301 /*
302  * Return the ID associated with a transfer
303  *
304  * (generated by wa_xfer_id_init() above)
305  */
306 static u32 wa_xfer_id(struct wa_xfer *xfer)
307 {
308         return xfer->id;
309 }
310
311 /*
312  * Search for a transfer by ID on the wire adapter's transfer list
313  *
314  * The ID is the sequential number set by wa_xfer_id_init(); it is
315  * not a pointer or a hash of one (see the comment there).
316  *
317  * @returns the transfer, referenced, or NULL if not found.
318  */
319 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
320 {
321         unsigned long flags;
322         struct wa_xfer *xfer_itr;
323         spin_lock_irqsave(&wa->xfer_list_lock, flags);
324         list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
325                 if (id == xfer_itr->id) {
326                         wa_xfer_get(xfer_itr);
327                         goto out;
328                 }
329         }
330         xfer_itr = NULL;
331 out:
332         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
333         return xfer_itr;
334 }
335
336 struct wa_xfer_abort_buffer {
337         struct urb urb;
338         struct wa_xfer_abort cmd;
339 };
340
341 static void __wa_xfer_abort_cb(struct urb *urb)
342 {
343         struct wa_xfer_abort_buffer *b = urb->context;
344         usb_put_urb(&b->urb);
345 }
346
347 /*
348  * Aborts an ongoing transaction
349  *
350  * Assumes the transfer is referenced and locked and in a submitted
351  * state (mainly that there is an endpoint/rpipe assigned).
352  *
353  * The callback (see above) does nothing but freeing up the data by
354  * putting the URB. Because the URB is allocated at the head of the
355  * struct, the whole space we allocated is kfreed.
356  *
357  * We'll get an 'aborted transaction' xfer result on DTI, which we'll
358  * politely ignore because at this point the transaction has been
359  * marked as aborted already.
360  */
361 static void __wa_xfer_abort(struct wa_xfer *xfer)
362 {
363         int result;
364         struct device *dev = &xfer->wa->usb_iface->dev;
365         struct wa_xfer_abort_buffer *b;
366         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
367
368         b = kmalloc(sizeof(*b), GFP_ATOMIC);
369         if (b == NULL)
370                 goto error_kmalloc;
371         b->cmd.bLength =  sizeof(b->cmd);
372         b->cmd.bRequestType = WA_XFER_ABORT;
373         b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
374         b->cmd.dwTransferID = wa_xfer_id(xfer);
375
376         usb_init_urb(&b->urb);
377         usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
378                 usb_sndbulkpipe(xfer->wa->usb_dev,
379                                 xfer->wa->dto_epd->bEndpointAddress),
380                 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
381         result = usb_submit_urb(&b->urb, GFP_ATOMIC);
382         if (result < 0)
383                 goto error_submit;
384         return;                         /* callback frees! */
385
386
387 error_submit:
388         if (printk_ratelimit())
389                 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
390                         xfer, result);
391         kfree(b);
392 error_kmalloc:
393         return;
394
395 }
396
397 /*
398  * Compute the segment request sizes for this transfer
399  * @returns < 0 on error, transfer segment request size if ok
400  */
401 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
402                                      enum wa_xfer_type *pxfer_type)
403 {
404         ssize_t result;
405         struct device *dev = &xfer->wa->usb_iface->dev;
406         size_t maxpktsize;
407         struct urb *urb = xfer->urb;
408         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
409
410         switch (rpipe->descr.bmAttribute & 0x3) {
411         case USB_ENDPOINT_XFER_CONTROL:
412                 *pxfer_type = WA_XFER_TYPE_CTL;
413                 result = sizeof(struct wa_xfer_ctl);
414                 break;
415         case USB_ENDPOINT_XFER_INT:
416         case USB_ENDPOINT_XFER_BULK:
417                 *pxfer_type = WA_XFER_TYPE_BI;
418                 result = sizeof(struct wa_xfer_bi);
419                 break;
420         case USB_ENDPOINT_XFER_ISOC:
421                 dev_err(dev, "FIXME: ISOC not implemented\n");
422                 result = -ENOSYS;
423                 goto error;
424         default:
425                 /* never happens */
426                 BUG();
427                 result = -EINVAL;       /* shut gcc up */
428         }
429         xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
430         xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
431         xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
432                 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
433         /* Compute the segment size and make sure it is a multiple of
434          * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
435          * a check (FIXME) */
436         maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
437         if (xfer->seg_size < maxpktsize) {
438                 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
439                         "%zu\n", xfer->seg_size, maxpktsize);
440                 result = -EINVAL;
441                 goto error;
442         }
443         xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
444         xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
445                 / xfer->seg_size;
446         if (xfer->segs >= WA_SEGS_MAX) {
447                 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
448                         (int)(urb->transfer_buffer_length / xfer->seg_size),
449                         WA_SEGS_MAX);
450                 result = -EINVAL;
451                 goto error;
452         }
453         if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
454                 xfer->segs = 1;
455 error:
456         return result;
457 }
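/*
 * Worked example of the sizing above, with made-up descriptor values:
 * wBlocks = 8 and bRPipeBlockSize = 10 give seg_size = 8 * 2^9 = 4096
 * bytes (note '* 1 << n' applies the shift last, so the expression
 * really computes wBlocks * 2^(bRPipeBlockSize - 1)).  With
 * wMaxPacketSize = 512 the rounding keeps seg_size at 4096, and a
 * 10000 byte URB then needs (10000 + 4095) / 4096 = 3 segments.
 */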
458
459 /* Fill in the common request header and xfer-type specific data. */
460 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
461                                  struct wa_xfer_hdr *xfer_hdr0,
462                                  enum wa_xfer_type xfer_type,
463                                  size_t xfer_hdr_size)
464 {
465         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
466
467         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
468         xfer_hdr0->bLength = xfer_hdr_size;
469         xfer_hdr0->bRequestType = xfer_type;
470         xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
471         xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
472         xfer_hdr0->bTransferSegment = 0;
473         switch (xfer_type) {
474         case WA_XFER_TYPE_CTL: {
475                 struct wa_xfer_ctl *xfer_ctl =
476                         container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
477                 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
478                 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
479                        sizeof(xfer_ctl->baSetupData));
480                 break;
481         }
482         case WA_XFER_TYPE_BI:
483                 break;
484         case WA_XFER_TYPE_ISO:
485                 printk(KERN_ERR "FIXME: ISOC not implemented\n");
486         default:
487                 BUG();
488         }
489 }
490
491 /*
492  * Callback for the OUT data phase of the segment request
493  *
494  * Check wa_seg_cb(); most comments also apply here because this
495  * function does almost the same thing and they work closely
496  * together.
497  *
498  * If the seg request has failed but this DTO phase has succeeded,
499  * wa_seg_cb() has already failed the segment and moved the
500  * status to WA_SEG_ERROR, so this will go through 'case 0' and
501  * effectively do nothing.
502  */
503 static void wa_seg_dto_cb(struct urb *urb)
504 {
505         struct wa_seg *seg = urb->context;
506         struct wa_xfer *xfer = seg->xfer;
507         struct wahc *wa;
508         struct device *dev;
509         struct wa_rpipe *rpipe;
510         unsigned long flags;
511         unsigned rpipe_ready = 0;
512         u8 done = 0;
513
514         switch (urb->status) {
515         case 0:
516                 spin_lock_irqsave(&xfer->lock, flags);
517                 wa = xfer->wa;
518                 dev = &wa->usb_iface->dev;
519                 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
520                         xfer, seg->index, urb->actual_length);
521                 if (seg->status < WA_SEG_PENDING)
522                         seg->status = WA_SEG_PENDING;
523                 seg->result = urb->actual_length;
524                 spin_unlock_irqrestore(&xfer->lock, flags);
525                 break;
526         case -ECONNRESET:       /* URB unlinked; no need to do anything */
527         case -ENOENT:           /* as it was done by whoever unlinked us */
528                 break;
529         default:                /* Other errors ... */
530                 spin_lock_irqsave(&xfer->lock, flags);
531                 wa = xfer->wa;
532                 dev = &wa->usb_iface->dev;
533                 rpipe = xfer->ep->hcpriv;
534                 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
535                         xfer, seg->index, urb->status);
536                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
537                             EDC_ERROR_TIMEFRAME)) {
538                         dev_err(dev, "DTO: URB max acceptable errors "
539                                 "exceeded, resetting device\n");
540                         wa_reset_all(wa);
541                 }
542                 if (seg->status != WA_SEG_ERROR) {
543                         seg->status = WA_SEG_ERROR;
544                         seg->result = urb->status;
545                         xfer->segs_done++;
546                         __wa_xfer_abort(xfer);
547                         rpipe_ready = rpipe_avail_inc(rpipe);
548                         done = __wa_xfer_is_done(xfer);
549                 }
550                 spin_unlock_irqrestore(&xfer->lock, flags);
551                 if (done)
552                         wa_xfer_completion(xfer);
553                 if (rpipe_ready)
554                         wa_xfer_delayed_run(rpipe);
555         }
556 }
557
558 /*
559  * Callback for the segment request
560  *
561  * If successful, transition state (unless already transitioned or
562  * outbound transfer); otherwise, take a note of the error, mark this
563  * segment done and try completion.
564  *
565  * Note we don't access until we are sure that the transfer hasn't
566  * been cancelled (ECONNRESET, ENOENT), which could mean that
567  * seg->xfer could be already gone.
568  *
569  * We have to check before setting the status to WA_SEG_PENDING
570  * because sometimes the xfer result callback arrives before this
571  * callback (geeeeeeze), so it might happen that we are already in
572  * another state. Also, we don't set it if the transfer is outbound,
573  * as in that case wa_seg_dto_cb() will do it when the OUT data phase
574  * finishes.
575  */
576 static void wa_seg_cb(struct urb *urb)
577 {
578         struct wa_seg *seg = urb->context;
579         struct wa_xfer *xfer = seg->xfer;
580         struct wahc *wa;
581         struct device *dev;
582         struct wa_rpipe *rpipe;
583         unsigned long flags;
584         unsigned rpipe_ready;
585         u8 done = 0;
586
587         switch (urb->status) {
588         case 0:
589                 spin_lock_irqsave(&xfer->lock, flags);
590                 wa = xfer->wa;
591                 dev = &wa->usb_iface->dev;
592                 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
593                 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
594                         seg->status = WA_SEG_PENDING;
595                 spin_unlock_irqrestore(&xfer->lock, flags);
596                 break;
597         case -ECONNRESET:       /* URB unlinked; no need to do anything */
598         case -ENOENT:           /* as it was done by whoever unlinked us */
599                 break;
600         default:                /* Other errors ... */
601                 spin_lock_irqsave(&xfer->lock, flags);
602                 wa = xfer->wa;
603                 dev = &wa->usb_iface->dev;
604                 rpipe = xfer->ep->hcpriv;
605                 if (printk_ratelimit())
606                         dev_err(dev, "xfer %p#%u: request error %d\n",
607                                 xfer, seg->index, urb->status);
608                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
609                             EDC_ERROR_TIMEFRAME)) {
610                         dev_err(dev, "DTO: URB max acceptable errors "
611                                 "exceeded, resetting device\n");
612                         wa_reset_all(wa);
613                 }
614                 usb_unlink_urb(seg->dto_urb);
615                 seg->status = WA_SEG_ERROR;
616                 seg->result = urb->status;
617                 xfer->segs_done++;
618                 __wa_xfer_abort(xfer);
619                 rpipe_ready = rpipe_avail_inc(rpipe);
620                 done = __wa_xfer_is_done(xfer);
621                 spin_unlock_irqrestore(&xfer->lock, flags);
622                 if (done)
623                         wa_xfer_completion(xfer);
624                 if (rpipe_ready)
625                         wa_xfer_delayed_run(rpipe);
626         }
627 }
628
629 /*
630  * Allocate the segs array and initialize each of them
631  *
632  * The segments are freed by wa_xfer_destroy() when the xfer use count
633  * drops to zero; however, because each segment is given the same life
634  * cycle as the USB URB it contains, it is actually freed by
635  * usb_put_urb() on the contained USB URB (twisted, eh?).
636  */
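/*
 * Per-segment allocation layout, derived from the alloc_size math
 * below: each wa_seg is over-allocated so that its trailing xfer_hdr
 * plus xfer_extra[] region spans xfer_hdr_size bytes, i.e. a complete
 * wa_xfer_ctl or wa_xfer_bi header for this transfer type.
 */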
637 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
638 {
639         int result, cnt;
640         size_t alloc_size = sizeof(*xfer->seg[0])
641                 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
642         struct usb_device *usb_dev = xfer->wa->usb_dev;
643         const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
644         struct wa_seg *seg;
645         size_t buf_itr, buf_size, buf_itr_size;
646
647         result = -ENOMEM;
648         xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
649         if (xfer->seg == NULL)
650                 goto error_segs_kzalloc;
651         buf_itr = 0;
652         buf_size = xfer->urb->transfer_buffer_length;
653         for (cnt = 0; cnt < xfer->segs; cnt++) {
654                 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
655                 if (seg == NULL)
656                         goto error_seg_kzalloc;
657                 wa_seg_init(seg);
658                 seg->xfer = xfer;
659                 seg->index = cnt;
660                 usb_fill_bulk_urb(&seg->urb, usb_dev,
661                                   usb_sndbulkpipe(usb_dev,
662                                                   dto_epd->bEndpointAddress),
663                                   &seg->xfer_hdr, xfer_hdr_size,
664                                   wa_seg_cb, seg);
665                 buf_itr_size = buf_size > xfer->seg_size ?
666                         xfer->seg_size : buf_size;
667                 if (xfer->is_inbound == 0 && buf_size > 0) {
668                         seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
669                         if (seg->dto_urb == NULL)
670                                 goto error_dto_alloc;
671                         usb_fill_bulk_urb(
672                                 seg->dto_urb, usb_dev,
673                                 usb_sndbulkpipe(usb_dev,
674                                                 dto_epd->bEndpointAddress),
675                                 NULL, 0, wa_seg_dto_cb, seg);
676                         if (xfer->is_dma) {
677                                 seg->dto_urb->transfer_dma =
678                                         xfer->urb->transfer_dma + buf_itr;
679                                 seg->dto_urb->transfer_flags |=
680                                         URB_NO_TRANSFER_DMA_MAP;
681                         } else
682                                 seg->dto_urb->transfer_buffer =
683                                         xfer->urb->transfer_buffer + buf_itr;
684                         seg->dto_urb->transfer_buffer_length = buf_itr_size;
685                 }
686                 seg->status = WA_SEG_READY;
687                 buf_itr += buf_itr_size;
688                 buf_size -= buf_itr_size;
689         }
690         return 0;
691
692 error_dto_alloc:
693         kfree(xfer->seg[cnt]);
694         /* fall through: this segment's dto_urb was never allocated */
695 error_seg_kzalloc:
696         /* cnt is left at the index that failed; free the earlier ones */
697         while (cnt-- > 0) {
698                 if (xfer->is_inbound == 0)
699                         usb_free_urb(xfer->seg[cnt]->dto_urb);
700                 kfree(xfer->seg[cnt]);
701         }
702 error_segs_kzalloc:
703         return result;
704 }
705
706 /*
707  * Allocates all the stuff needed to submit a transfer
708  *
709  * Breaks the whole data buffer into a list of segments; each one has a
710  * structure allocated to it and linked in xfer->seg[index]
711  *
712  * FIXME: merge setup_segs() and the last part of this function, no
713  *        need to do two for loops when we could run everything in a
714  *        single one
715  */
716 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
717 {
718         int result;
719         struct device *dev = &xfer->wa->usb_iface->dev;
720         enum wa_xfer_type xfer_type = 0; /* shut up GCC */
721         size_t xfer_hdr_size, cnt, transfer_size;
722         struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
723
724         result = __wa_xfer_setup_sizes(xfer, &xfer_type);
725         if (result < 0)
726                 goto error_setup_sizes;
727         xfer_hdr_size = result;
728         result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
729         if (result < 0) {
730                 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
731                         xfer, xfer->segs, result);
732                 goto error_setup_segs;
733         }
734         /* Fill the first header */
735         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
736         wa_xfer_id_init(xfer);
737         __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
738
739         /* Fill the remaining headers */
740         xfer_hdr = xfer_hdr0;
741         transfer_size = urb->transfer_buffer_length;
742         xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
743                 cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
744         transfer_size -=  xfer->seg_size;
745         for (cnt = 1; cnt < xfer->segs; cnt++) {
746                 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
747                 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
748                 xfer_hdr->bTransferSegment = cnt;
749                 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
750                         cpu_to_le32(xfer->seg_size)
751                         : cpu_to_le32(transfer_size);
752                 xfer->seg[cnt]->status = WA_SEG_READY;
753                 transfer_size -=  xfer->seg_size;
754         }
755         xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
756         result = 0;
757 error_setup_segs:
758 error_setup_sizes:
759         return result;
760 }
761
762 /*
763  * Submit a transfer segment: its request URB and, if outbound, its
764  * DTO data URB.
765  * rpipe->seg_lock is held!
766  */
767 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
768                            struct wa_seg *seg)
769 {
770         int result;
771         result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
772         if (result < 0) {
773                 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
774                        xfer, seg->index, result);
775                 goto error_seg_submit;
776         }
777         if (seg->dto_urb) {
778                 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
779                 if (result < 0) {
780                         printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
781                                xfer, seg->index, result);
782                         goto error_dto_submit;
783                 }
784         }
785         seg->status = WA_SEG_SUBMITTED;
786         rpipe_avail_dec(rpipe);
787         return 0;
788
789 error_dto_submit:
790         usb_unlink_urb(&seg->urb);
791 error_seg_submit:
792         seg->status = WA_SEG_ERROR;
793         seg->result = result;
794         return result;
795 }
796
797 /*
798  * Execute more queued request segments until the maximum concurrent allowed is reached
799  *
800  * The ugly unlock/lock sequence on the error path is needed as the
801  * xfer->lock normally nests the seg_lock and not vice versa.
802  *
803  */
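/*
 * Lock ordering sketch for the loop below (inferred from the comment
 * above and the code; xfer->lock is the outer lock elsewhere):
 *
 *   normal path:  take rpipe->seg_lock, submit queued segments
 *   error path:   drop seg_lock, take xfer->lock, abort the xfer,
 *                 drop xfer->lock, re-take seg_lock, keep draining
 */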
804 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
805 {
806         int result;
807         struct device *dev = &rpipe->wa->usb_iface->dev;
808         struct wa_seg *seg;
809         struct wa_xfer *xfer;
810         unsigned long flags;
811
812         spin_lock_irqsave(&rpipe->seg_lock, flags);
813         while (atomic_read(&rpipe->segs_available) > 0
814               && !list_empty(&rpipe->seg_list)) {
815                 seg = list_entry(rpipe->seg_list.next, struct wa_seg,
816                                  list_node);
817                 list_del(&seg->list_node);
818                 xfer = seg->xfer;
819                 result = __wa_seg_submit(rpipe, xfer, seg);
820                 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
821                         xfer, seg->index, atomic_read(&rpipe->segs_available), result);
822                 if (unlikely(result < 0)) {
823                         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
824                         spin_lock_irqsave(&xfer->lock, flags);
825                         __wa_xfer_abort(xfer);
826                         xfer->segs_done++;
827                         spin_unlock_irqrestore(&xfer->lock, flags);
828                         spin_lock_irqsave(&rpipe->seg_lock, flags);
829                 }
830         }
831         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
832 }
833
834 /*
835  * Submit a transfer's segments (delaying those over the rpipe's limit)
836  * xfer->lock is taken
837  *
838  * On failure submitting we just stop submitting and return error;
839  * wa_urb_enqueue_b() will execute the completion path
840  */
841 static int __wa_xfer_submit(struct wa_xfer *xfer)
842 {
843         int result;
844         struct wahc *wa = xfer->wa;
845         struct device *dev = &wa->usb_iface->dev;
846         unsigned cnt;
847         struct wa_seg *seg;
848         unsigned long flags;
849         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
850         size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
851         u8 available;
852         u8 empty;
853
854         spin_lock_irqsave(&wa->xfer_list_lock, flags);
855         list_add_tail(&xfer->list_node, &wa->xfer_list);
856         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
857
858         BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
859         result = 0;
860         spin_lock_irqsave(&rpipe->seg_lock, flags);
861         for (cnt = 0; cnt < xfer->segs; cnt++) {
862                 available = atomic_read(&rpipe->segs_available);
863                 empty = list_empty(&rpipe->seg_list);
864                 seg = xfer->seg[cnt];
865                 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
866                         xfer, cnt, available, empty,
867                         available == 0 || !empty ? "delayed" : "submitted");
868                 if (available == 0 || !empty) {
869                         dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
870                         seg->status = WA_SEG_DELAYED;
871                         list_add_tail(&seg->list_node, &rpipe->seg_list);
872                 } else {
873                         result = __wa_seg_submit(rpipe, xfer, seg);
874                         if (result < 0) {
875                                 __wa_xfer_abort(xfer);
876                                 goto error_seg_submit;
877                         }
878                 }
879                 xfer->segs_submitted++;
880         }
881 error_seg_submit:
882         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
883         return result;
884 }
885
886 /*
887  * Second part of a URB/transfer enqueue operation
888  *
889  * Assumes this comes from wa_urb_enqueue() [maybe through
890  * wa_urb_enqueue_run()]. At this point:
891  *
892  * xfer->wa     filled and refcounted
893  * xfer->ep     filled with rpipe refcounted if
894  *              delayed == 0
895  * xfer->urb    filled and refcounted (this is the case when called
896  *              from wa_urb_enqueue() as we come from usb_submit_urb()
897  *              and when called by wa_urb_enqueue_run(), as we took an
898  *              extra ref dropped by _run() after we return).
899  * xfer->gfp    filled
900  *
901  * If we fail at __wa_xfer_submit(), then we just check if we are done
902  * and if so, we run the completion procedure. However, if we are not
903  * yet done, we do nothing and wait for the completion handlers from
904  * the submitted URBs or from the xfer-result path to kick in. If xfer
905  * result never kicks in, the xfer will timeout from the USB code and
906  * dequeue() will be called.
907  */
908 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
909 {
910         int result;
911         unsigned long flags;
912         struct urb *urb = xfer->urb;
913         struct wahc *wa = xfer->wa;
914         struct wusbhc *wusbhc = wa->wusb;
915         struct wusb_dev *wusb_dev;
916         unsigned done;
917
918         result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
919         if (result < 0)
920                 goto error_rpipe_get;
921         result = -ENODEV;
922         /* FIXME: segmentation broken -- kills DWA */
923         mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
924         if (urb->dev == NULL) {
925                 mutex_unlock(&wusbhc->mutex);
926                 goto error_dev_gone;
927         }
928         wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
929         if (wusb_dev == NULL) {
930                 mutex_unlock(&wusbhc->mutex);
931                 goto error_dev_gone;
932         }
933         mutex_unlock(&wusbhc->mutex);
934
935         spin_lock_irqsave(&xfer->lock, flags);
936         xfer->wusb_dev = wusb_dev;
937         result = urb->status;
938         if (urb->status != -EINPROGRESS)
939                 goto error_dequeued;
940
941         result = __wa_xfer_setup(xfer, urb);
942         if (result < 0)
943                 goto error_xfer_setup;
944         result = __wa_xfer_submit(xfer);
945         if (result < 0)
946                 goto error_xfer_submit;
947         spin_unlock_irqrestore(&xfer->lock, flags);
948         return;
949
950         /* This is basically wa_xfer_completion() broken up. wa_xfer_giveback()
951          * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
952          * up / undo setup().
953          */
954 error_xfer_setup:
955 error_dequeued:
956         spin_unlock_irqrestore(&xfer->lock, flags);
957         /* FIXME: segmentation broken, kills DWA */
958         if (wusb_dev)
959                 wusb_dev_put(wusb_dev);
960 error_dev_gone:
961         rpipe_put(xfer->ep->hcpriv);
962 error_rpipe_get:
963         xfer->result = result;
964         wa_xfer_giveback(xfer);
965         return;
966
967 error_xfer_submit:
968         done = __wa_xfer_is_done(xfer);
969         xfer->result = result;
970         spin_unlock_irqrestore(&xfer->lock, flags);
971         if (done)
972                 wa_xfer_completion(xfer);
973 }
974
975 /*
976  * Execute the delayed transfers in the Wire Adapter @wa
977  *
978  * We need to be careful here, as dequeue() could be called in the
979  * middle.  That's why we do the whole thing under the
980  * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
981  * and then checks the list -- as we would be acquiring in inverse
982  * order, we just drop the lock once we have the xfer and reacquire it
983  * later.
984  */
985 void wa_urb_enqueue_run(struct work_struct *ws)
986 {
987         struct wahc *wa = container_of(ws, struct wahc, xfer_work);
988         struct wa_xfer *xfer, *next;
989         struct urb *urb;
990
991         spin_lock_irq(&wa->xfer_list_lock);
992         list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
993                                  list_node) {
994                 list_del_init(&xfer->list_node);
995                 spin_unlock_irq(&wa->xfer_list_lock);
996
997                 urb = xfer->urb;
998                 wa_urb_enqueue_b(xfer);
999                 usb_put_urb(urb);       /* taken when queuing */
1000
1001                 spin_lock_irq(&wa->xfer_list_lock);
1002         }
1003         spin_unlock_irq(&wa->xfer_list_lock);
1004 }
1005 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1006
1007 /*
1008  * Submit a transfer to the Wire Adapter in a delayed way
1009  *
1010  * The process of enqueuing involves possible sleeps [see
1011  * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1012  * in an atomic section, we defer the enqueue_b() call; else we call it directly.
1013  *
1014  * @urb: We own a reference to it, taken by the Linux USB HCD stack, that
1015  *       will be given up by calling usb_hcd_giveback_urb() or by
1016  *       returning error from this function -> ergo we don't have to
1017  *       refcount it.
1018  */
1019 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1020                    struct urb *urb, gfp_t gfp)
1021 {
1022         int result;
1023         struct device *dev = &wa->usb_iface->dev;
1024         struct wa_xfer *xfer;
1025         unsigned long my_flags;
1026         unsigned cant_sleep = irqs_disabled() | in_atomic();
1027
1028         if (urb->transfer_buffer == NULL
1029             && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1030             && urb->transfer_buffer_length != 0) {
1031                 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1032                 dump_stack();
1033         }
1034
1035         result = -ENOMEM;
1036         xfer = kzalloc(sizeof(*xfer), gfp);
1037         if (xfer == NULL)
1038                 goto error_kmalloc;
1039
1040         result = -ENOENT;
1041         if (urb->status != -EINPROGRESS)        /* cancelled */
1042                 goto error_dequeued;            /* before starting? */
1043         wa_xfer_init(xfer);
1044         xfer->wa = wa_get(wa);
1045         xfer->urb = urb;
1046         xfer->gfp = gfp;
1047         xfer->ep = ep;
1048         urb->hcpriv = xfer;
1049
1050         dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1051                 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1052                 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1053                 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1054                 cant_sleep ? "deferred" : "inline");
1055
1056         if (cant_sleep) {
1057                 usb_get_urb(urb);
1058                 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1059                 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1060                 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1061                 queue_work(wusbd, &wa->xfer_work);
1062         } else {
1063                 wa_urb_enqueue_b(xfer);
1064         }
1065         return 0;
1066
1067 error_dequeued:
1068         kfree(xfer);
1069 error_kmalloc:
1070         return result;
1071 }
1072 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
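/*
 * Usage sketch for wa_urb_enqueue() (a hypothetical caller; the real
 * entry points live in the HWA/DWA host controller glue):
 *
 *   static int my_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 *                             gfp_t gfp)
 *   {
 *           struct wahc *wa = my_hcd_to_wahc(hcd);  (hypothetical helper)
 *
 *           return wa_urb_enqueue(wa, urb->ep, urb, gfp);
 *   }
 *
 * Returning an error here gives the URB reference back to the caller
 * (see the @urb note above); on 0, completion arrives later via
 * wa_xfer_giveback()/wusbhc_giveback_urb().
 */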
1073
1074 /*
1075  * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1076  * handler] is called.
1077  *
1078  * Until a transfer goes successfully through wa_urb_enqueue(), it
1079  * needs to be dequeued with the completion run by us; when stuck in the
1080  * delayed list or before __wa_xfer_setup() is called, we do the completion.
1081  *
1082  *  not setup  If there is no hcpriv yet, that means that enqueue()
1083  *             still had no time to set the xfer up. Because
1084  *             urb->status should be other than -EINPROGRESS,
1085  *             enqueue() will catch that and bail out.
1086  *
1087  * If the transfer has gone through setup, we just need to clean it
1088  * up. If it has gone through submit(), we have to abort it [with an
1089  * asynch request] and then make sure we cancel each segment.
1090  *
1091  */
1092 int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1093 {
1094         unsigned long flags, flags2;
1095         struct wa_xfer *xfer;
1096         struct wa_seg *seg;
1097         struct wa_rpipe *rpipe;
1098         unsigned cnt;
1099         unsigned rpipe_ready = 0;
1100
1101         xfer = urb->hcpriv;
1102         if (xfer == NULL) {
1103                 /* Nothing setup yet: enqueue() will see urb->status !=
1104                  * -EINPROGRESS (set by the hcd layer) and bail out with
1105                  * an error; no need to do completion
1106                  */
1107                 BUG_ON(urb->status == -EINPROGRESS);
1108                 goto out;
1109         }
1110         spin_lock_irqsave(&xfer->lock, flags);
1111         rpipe = xfer->ep->hcpriv;
1112         /* Check the delayed list -> if there, release and complete */
1113         spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1114         if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1115                 goto dequeue_delayed;
1116         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1117         if (xfer->seg == NULL)          /* still hasn't reached */
1118                 goto out_unlock;        /* setup(), enqueue_b() completes */
1119         /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1120         __wa_xfer_abort(xfer);
1121         for (cnt = 0; cnt < xfer->segs; cnt++) {
1122                 seg = xfer->seg[cnt];
1123                 switch (seg->status) {
1124                 case WA_SEG_NOTREADY:
1125                 case WA_SEG_READY:
1126                         printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1127                                xfer, cnt, seg->status);
1128                         WARN_ON(1);
1129                         break;
1130                 case WA_SEG_DELAYED:
1131                         seg->status = WA_SEG_ABORTED;
1132                         spin_lock_irqsave(&rpipe->seg_lock, flags2);
1133                         list_del(&seg->list_node);
1134                         xfer->segs_done++;
1135                         rpipe_ready = rpipe_avail_inc(rpipe);
1136                         spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1137                         break;
1138                 case WA_SEG_SUBMITTED:
1139                         seg->status = WA_SEG_ABORTED;
1140                         usb_unlink_urb(&seg->urb);
1141                         if (xfer->is_inbound == 0)
1142                                 usb_unlink_urb(seg->dto_urb);
1143                         xfer->segs_done++;
1144                         rpipe_ready = rpipe_avail_inc(rpipe);
1145                         break;
1146                 case WA_SEG_PENDING:
1147                         seg->status = WA_SEG_ABORTED;
1148                         xfer->segs_done++;
1149                         rpipe_ready = rpipe_avail_inc(rpipe);
1150                         break;
1151                 case WA_SEG_DTI_PENDING:
1152                         usb_unlink_urb(wa->dti_urb);
1153                         seg->status = WA_SEG_ABORTED;
1154                         xfer->segs_done++;
1155                         rpipe_ready = rpipe_avail_inc(rpipe);
1156                         break;
1157                 case WA_SEG_DONE:
1158                 case WA_SEG_ERROR:
1159                 case WA_SEG_ABORTED:
1160                         break;
1161                 }
1162         }
1163         xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
1164         __wa_xfer_is_done(xfer);
1165         spin_unlock_irqrestore(&xfer->lock, flags);
1166         wa_xfer_completion(xfer);
1167         if (rpipe_ready)
1168                 wa_xfer_delayed_run(rpipe);
1169         return 0;
1170
1171 out_unlock:
1172         spin_unlock_irqrestore(&xfer->lock, flags);
1173 out:
1174         return 0;
1175
1176 dequeue_delayed:
1177         list_del_init(&xfer->list_node);
1178         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1179         xfer->result = urb->status;
1180         spin_unlock_irqrestore(&xfer->lock, flags);
1181         wa_xfer_giveback(xfer);
1182         usb_put_urb(urb);               /* we got a ref in enqueue() */
1183         return 0;
1184 }
1185 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1186
1187 /*
1188  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1189  * codes
1190  *
1191  * Positive errno values are internal inconsistencies and should be
1192  * flagged louder. Negative are to be passed up to the user in the
1193  * normal way.
1194  *
1195  * @status: USB WA status code -- high two bits are stripped.
1196  */
1197 static int wa_xfer_status_to_errno(u8 status)
1198 {
1199         int errno;
1200         u8 real_status = status;
1201         static int xlat[] = {
1202                 [WA_XFER_STATUS_SUCCESS] =              0,
1203                 [WA_XFER_STATUS_HALTED] =               -EPIPE,
1204                 [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
1205                 [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
1206                 [WA_XFER_RESERVED] =                    EINVAL,
1207                 [WA_XFER_STATUS_NOT_FOUND] =            0,
1208                 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1209                 [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
1210                 [WA_XFER_STATUS_ABORTED] =              -EINTR,
1211                 [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
1212                 [WA_XFER_INVALID_FORMAT] =              EINVAL,
1213                 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
1214                 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
1215         };
1216         status &= 0x3f;
1217
1218         if (status == 0)
1219                 return 0;
1220         if (status >= ARRAY_SIZE(xlat)) {
1221                 printk_ratelimited(KERN_ERR "%s(): BUG? "
1222                                "Unknown WA transfer status 0x%02x\n",
1223                                __func__, real_status);
1224                 return -EINVAL;
1225         }
1226         errno = xlat[status];
1227         if (unlikely(errno > 0)) {
1228                 printk_ratelimited(KERN_ERR "%s(): BUG? "
1229                                "Inconsistent WA status: 0x%02x\n",
1230                                __func__, real_status);
1231                 errno = -errno;
1232         }
1233         return errno;
1234 }
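/*
 * For reference, how bTransferStatus is consumed (see also
 * wa_xfer_result_chew() below): bit 7 (0x80) flags a hard error,
 * bit 6 (0x40) flags a warning that is currently ignored and passed
 * through as success, and the low six bits (status & 0x3f) carry the
 * code translated by the table above.
 */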
1235
1236 /*
1237  * Process an xfer result completion message
1238  *
1239  * inbound transfers: need to schedule a DTI read
1240  *
1241  * FIXME: this function needs to be broken up into parts
1242  */
1243 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1244 {
1245         int result;
1246         struct device *dev = &wa->usb_iface->dev;
1247         unsigned long flags;
1248         u8 seg_idx;
1249         struct wa_seg *seg;
1250         struct wa_rpipe *rpipe;
1251         struct wa_xfer_result *xfer_result = wa->xfer_result;
1252         u8 done = 0;
1253         u8 usb_status;
1254         unsigned rpipe_ready = 0;
1255
1256         spin_lock_irqsave(&xfer->lock, flags);
1257         seg_idx = xfer_result->bTransferSegment & 0x7f;
1258         if (unlikely(seg_idx >= xfer->segs))
1259                 goto error_bad_seg;
1260         seg = xfer->seg[seg_idx];
1261         rpipe = xfer->ep->hcpriv;
1262         usb_status = xfer_result->bTransferStatus;
1263         dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
1264                 xfer, seg_idx, usb_status, seg->status);
1265         if (seg->status == WA_SEG_ABORTED
1266             || seg->status == WA_SEG_ERROR)     /* already handled */
1267                 goto segment_aborted;
1268         if (seg->status == WA_SEG_SUBMITTED)    /* oops, result got here */
1269                 seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
1270         if (seg->status != WA_SEG_PENDING) {
1271                 if (printk_ratelimit())
1272                         dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1273                                 xfer, seg_idx, seg->status);
1274                 seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
1275         }
1276         if (usb_status & 0x80) {
1277                 seg->result = wa_xfer_status_to_errno(usb_status);
1278                 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1279                         xfer, seg->index, usb_status);
1280                 goto error_complete;
1281         }
1282         /* FIXME: we ignore warnings, tally them for stats */
1283         if (usb_status & 0x40)          /* Warning?... */
1284                 usb_status = 0;         /* ... pass */
1285         if (xfer->is_inbound) { /* IN data phase: read to buffer */
1286                 seg->status = WA_SEG_DTI_PENDING;
1287                 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1288                 if (xfer->is_dma) {
1289                         wa->buf_in_urb->transfer_dma =
1290                                 xfer->urb->transfer_dma
1291                                 + seg_idx * xfer->seg_size;
1292                         wa->buf_in_urb->transfer_flags
1293                                 |= URB_NO_TRANSFER_DMA_MAP;
1294                 } else {
1295                         wa->buf_in_urb->transfer_buffer =
1296                                 xfer->urb->transfer_buffer
1297                                 + seg_idx * xfer->seg_size;
1298                         wa->buf_in_urb->transfer_flags
1299                                 &= ~URB_NO_TRANSFER_DMA_MAP;
1300                 }
1301                 wa->buf_in_urb->transfer_buffer_length =
1302                         le32_to_cpu(xfer_result->dwTransferLength);
1303                 wa->buf_in_urb->context = seg;
1304                 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1305                 if (result < 0)
1306                         goto error_submit_buf_in;
1307         } else {
1308                 /* OUT data phase, complete it -- */
1309                 seg->status = WA_SEG_DONE;
1310                 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1311                 xfer->segs_done++;
1312                 rpipe_ready = rpipe_avail_inc(rpipe);
1313                 done = __wa_xfer_is_done(xfer);
1314         }
1315         spin_unlock_irqrestore(&xfer->lock, flags);
1316         if (done)
1317                 wa_xfer_completion(xfer);
1318         if (rpipe_ready)
1319                 wa_xfer_delayed_run(rpipe);
1320         return;
1321
1322 error_submit_buf_in:
1323         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1324                 dev_err(dev, "DTI: URB max acceptable errors "
1325                         "exceeded, resetting device\n");
1326                 wa_reset_all(wa);
1327         }
1328         if (printk_ratelimit())
1329                 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1330                         xfer, seg_idx, result);
1331         seg->result = result;
1332 error_complete:
1333         seg->status = WA_SEG_ERROR;
1334         xfer->segs_done++;
1335         rpipe_ready = rpipe_avail_inc(rpipe);
1336         __wa_xfer_abort(xfer);
1337         done = __wa_xfer_is_done(xfer);
1338         spin_unlock_irqrestore(&xfer->lock, flags);
1339         if (done)
1340                 wa_xfer_completion(xfer);
1341         if (rpipe_ready)
1342                 wa_xfer_delayed_run(rpipe);
1343         return;
1344
1345 error_bad_seg:
1346         spin_unlock_irqrestore(&xfer->lock, flags);
1347         wa_urb_dequeue(wa, xfer->urb);
1348         if (printk_ratelimit())
1349                 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1350         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1351                 dev_err(dev, "DTI: URB max acceptable errors "
1352                         "exceeded, resetting device\n");
1353                 wa_reset_all(wa);
1354         }
1355         return;
1356
1357 segment_aborted:
1358         /* nothing to do, as the aborter did the completion */
1359         spin_unlock_irqrestore(&xfer->lock, flags);
1360 }
1361
1362 /*
1363  * Callback for the IN data phase
1364  *
1365  * If successful, transition state; otherwise, take a note of the
1366  * error, mark this segment done and try completion.
1367  *
1368  * Note we don't access until we are sure that the transfer hasn't
1369  * been cancelled (ECONNRESET, ENOENT), which could mean that
1370  * seg->xfer could be already gone.
1371  */
static void wa_buf_in_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned rpipe_ready;
        unsigned long flags;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
                        xfer, seg->index, (size_t)urb->actual_length);
                seg->status = WA_SEG_DONE;
                seg->result = urb->actual_length;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the one who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: data in error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                __wa_xfer_abort(xfer);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode; the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result, we do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move back to the RXR state. If there was no
 * IN segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too
 * many errors) in the URBs.
 */
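/*
 * A minimal sketch of the same machine as an explicit enum (the driver
 * keeps this state implicitly, in whether wa->dti_urb is posted and in
 * which callback runs, so the names below are illustrative only):
 *
 *	enum dti_state { DTI_OFF, DTI_RXR, DTI_RBI };
 *
 *	OFF -> RXR	first notification, DTI-URB submitted
 *	RXR -> RBI	xfer_result for an IN segment, BUF-IN-URB submitted
 *	RBI -> RXR	buf-in done, DTI-URB resubmitted
 *	RXR -> RXR	OUT segment or bad buffer, DTI-URB resubmitted
 *	any -> OFF	ENOENT/ESHUTDOWN or too many errors
 */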
static void wa_xfer_result_cb(struct urb *urb)
{
        int result;
        struct wahc *wa = urb->context;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer_result *xfer_result;
        u32 xfer_id;
        struct wa_xfer *xfer;
        u8 usb_status;

        BUG_ON(wa->dti_urb != urb);
        switch (wa->dti_urb->status) {
        case 0:
                /* We have a xfer result buffer; check it */
                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
                        urb->actual_length, urb->transfer_buffer);
                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--bad size "
                                "(%d bytes vs %zu needed)\n",
                                urb->actual_length, sizeof(*xfer_result));
                        break;
                }
                xfer_result = wa->xfer_result;
                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header length %u\n",
                                xfer_result->hdr.bLength);
                        break;
                }
                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header type 0x%02x\n",
                                xfer_result->hdr.bNotifyType);
                        break;
                }
                usb_status = xfer_result->bTransferStatus & 0x3f;
                if (usb_status == WA_XFER_STATUS_ABORTED
                    || usb_status == WA_XFER_STATUS_NOT_FOUND)
                        /* taken care of already */
                        break;
                /* dwTransferID is little-endian on the wire */
                xfer_id = le32_to_cpu(xfer_result->dwTransferID);
                xfer = wa_xfer_get_by_id(wa, xfer_id);
                if (xfer == NULL) {
                        /* FIXME: transaction might have been cancelled */
                        dev_err(dev, "DTI Error: xfer result--"
                                "unknown xfer 0x%08x (status 0x%02x)\n",
                                xfer_id, usb_status);
                        break;
                }
                wa_xfer_result_chew(wa, xfer);
                wa_xfer_put(xfer);
                break;
        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
        case -ESHUTDOWN:        /* going away! */
                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
                goto out;
        default:
                /* Unknown error */
                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTI: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                        goto out;
                }
                if (printk_ratelimit())
                        dev_err(dev, "DTI: URB error %d\n", urb->status);
                break;
        }
        /* Resubmit the DTI URB */
        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                wa_reset_all(wa);
        }
out:
        return;
}

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always keep a URB reading on the DTI
 * endpoint; we don't actually set it up and submit it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we initialize the DTI URB for reading transfer result
 * notifications, and also the buffer-in URB for reading buffers; then
 * we submit the DTI URB.
 *
 * @wa shall be referenced
 */
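/*
 * Caller-side sketch (the exact dispatch in notif.c is assumed here,
 * not quoted) of how a notification buffer reaches this handler:
 *
 *	struct wa_notif_hdr *notif_hdr = buf;
 *
 *	switch (notif_hdr->bNotifyType) {
 *	case WA_NOTIF_TRANSFER:
 *		wa_handle_notif_xfer(wa, notif_hdr);
 *		break;
 *	...
 *	}
 */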
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_notif_xfer *notif_xfer;
        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
                /* FIXME: hardcoded limitation, adapt */
                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
                goto error;
        }
        if (wa->dti_urb != NULL)        /* DTI URB already started */
                goto out;

        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->dti_urb == NULL) {
                dev_err(dev, "Can't allocate DTI URB\n");
                goto error_dti_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->dti_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                wa->xfer_result, wa->xfer_result_size,
                wa_xfer_result_cb, wa);

        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->buf_in_urb == NULL) {
                dev_err(dev, "Can't allocate BUF-IN URB\n");
                goto error_buf_in_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->buf_in_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                NULL, 0, wa_buf_in_cb, wa);
        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                goto error_dti_urb_submit;
        }
out:
        return;

error_dti_urb_submit:
        usb_put_urb(wa->buf_in_urb);
        wa->buf_in_urb = NULL;  /* don't leave a stale pointer behind */
error_buf_in_urb_alloc:
        usb_put_urb(wa->dti_urb);
        wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
        wa_reset_all(wa);
}