#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>	/* usb_hcd_submit_urb(), usb_kill_urb_queue; the core-local "hcd.h" on older trees */

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
        struct urb *urb = to_urb(kref);

        if (urb->transfer_flags & URB_FREE_BUFFER)
                kfree(urb->transfer_buffer);

        kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
        if (urb) {
                memset(urb, 0, sizeof(*urb));
                kref_init(&urb->kref);
                INIT_LIST_HEAD(&urb->anchor_list);
        }
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *      valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
        struct urb *urb;

        urb = kmalloc(sizeof(struct urb) +
                iso_packets * sizeof(struct usb_iso_packet_descriptor),
                mem_flags);
        if (!urb) {
                printk(KERN_ERR "alloc_urb: kmalloc failed\n");
                return NULL;
        }
        usb_init_urb(urb);
        return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
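
/*
 * Example: a minimal sketch of the usual allocate/fill/submit/free life
 * cycle for a bulk OUT urb.  The "example_*" names, the endpoint number
 * and the error handling policy are illustrative assumptions, not part of
 * this file; the block is not compiled.
 */
#if 0
static void example_write_complete(struct urb *urb)
{
        /* the driver owns the urb again; it may free or resubmit it */
        kfree(urb->transfer_buffer);
        usb_free_urb(urb);              /* drops the driver's reference */
}

static int example_write(struct usb_device *udev, const void *data, int len)
{
        struct urb *urb;
        void *buf;
        int retval;

        urb = usb_alloc_urb(0, GFP_KERNEL);     /* 0 iso packets: bulk urb */
        if (!urb)
                return -ENOMEM;
        buf = kmemdup(data, len, GFP_KERNEL);
        if (!buf) {
                usb_free_urb(urb);
                return -ENOMEM;
        }
        /* hypothetical bulk OUT endpoint 2 */
        usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
                          buf, len, example_write_complete, NULL);
        retval = usb_submit_urb(urb, GFP_KERNEL);
        if (retval) {
                kfree(buf);
                usb_free_urb(urb);
        }
        return retval;
}
#endif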

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
        if (urb)
                kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
        if (urb)
                kref_get(&urb->kref);
        return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        usb_get_urb(urb);
        list_add_tail(&urb->anchor_list, &anchor->urb_list);
        urb->anchor = anchor;

        if (unlikely(anchor->poisoned)) {
                atomic_inc(&urb->reject);
        }

        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
        unsigned long flags;
        struct usb_anchor *anchor;

        if (!urb)
                return;

        anchor = urb->anchor;
        if (!anchor)
                return;

        spin_lock_irqsave(&anchor->lock, flags);
        if (unlikely(anchor != urb->anchor)) {
                /* we've lost the race to another thread */
                spin_unlock_irqrestore(&anchor->lock, flags);
                return;
        }
        urb->anchor = NULL;
        list_del(&urb->anchor_list);
        spin_unlock_irqrestore(&anchor->lock, flags);
        usb_put_urb(urb);
        if (list_empty(&anchor->urb_list))
                wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
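
/*
 * Example: a sketch of the intended anchoring pattern, so a driver can
 * cancel everything it has in flight without tracking individual urbs.
 * "struct example_io" and its functions are hypothetical driver-side
 * names; the block is not compiled.
 */
#if 0
struct example_io {
        struct usb_anchor submitted;    /* set up with init_usb_anchor() */
};

static int example_submit(struct example_io *io, struct urb *urb)
{
        int retval;

        usb_anchor_urb(urb, &io->submitted);
        retval = usb_submit_urb(urb, GFP_KERNEL);
        if (retval)
                usb_unanchor_urb(urb);  /* submission failed; take it off again */
        return retval;
}

static void example_stop(struct example_io *io)
{
        /* cancels every anchored urb and waits for the completion handlers */
        usb_kill_anchored_urbs(&io->submitted);
}
#endif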

/*-------------------------------------------------------------------*/
/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *      of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.  Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
        int xfertype, max;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;

        if (!urb || urb->hcpriv || !urb->complete)
                return -EINVAL;
        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return -ENODEV;

        /* For now, get the endpoint from the pipe.  Eventually drivers
         * will be required to set urb->ep directly and we will eliminate
         * urb->pipe.
         */
        ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
                        [usb_pipeendpoint(urb->pipe)];
        if (!ep)
                return -ENOENT;

        urb->ep = ep;
        urb->status = -EINPROGRESS;
        urb->actual_length = 0;

        /* Lots of sanity checks, so HCDs can rely on clean data
         * and don't need to duplicate tests
         */
        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                                (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return -ENOEXEC;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                                !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* Cache the direction for later use */
        urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
                        (is_out ? URB_DIR_OUT : URB_DIR_IN);

        if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
                        dev->state < USB_STATE_CONFIGURED)
                return -ENODEV;

        max = le16_to_cpu(ep->desc.wMaxPacketSize);
        if (max <= 0) {
                dev_dbg(&dev->dev,
                        "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
                        usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
                        __func__, max);
                return -EMSGSIZE;
        }

        /* periodic transfers limit size per frame/uframe,
         * but drivers only control those sizes for ISO.
         * while we're checking, initialize return status.
         */
        if (xfertype == USB_ENDPOINT_XFER_ISOC) {
                int n, len;

                /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */
                /* "high bandwidth" mode, 1-3 packets/uframe? */
                if (dev->speed == USB_SPEED_HIGH) {
                        int mult = 1 + ((max >> 11) & 0x03);
                        max &= 0x07ff;
                        max *= mult;
                }

                if (urb->number_of_packets <= 0)
                        return -EINVAL;
                for (n = 0; n < urb->number_of_packets; n++) {
                        len = urb->iso_frame_desc[n].length;
                        if (len < 0 || len > max)
                                return -EMSGSIZE;
                        urb->iso_frame_desc[n].status = -EXDEV;
                        urb->iso_frame_desc[n].actual_length = 0;
                }
        }

        /* the I/O buffer must be mapped/unmapped, except when length=0 */
        if (urb->transfer_buffer_length > INT_MAX)
                return -EMSGSIZE;

        /* stuff that drivers shouldn't do, but which shouldn't
         * cause problems in HCDs if they get it wrong.
         */
        {
        unsigned int orig_flags = urb->transfer_flags;
        unsigned int allowed;

        /* enforce simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
                        URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        case USB_ENDPOINT_XFER_CONTROL:
                allowed |= URB_NO_FSBR;         /* only affects UHCI */
                /* FALLTHROUGH */
        default:                                /* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        urb->transfer_flags &= allowed;

        /* fail if submitter gave bogus flags */
        if (urb->transfer_flags != orig_flags) {
                dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n",
                        orig_flags, urb->transfer_flags);
                return -EINVAL;
        }
        }

        /*
         * Force periodic transfer intervals to be legal values that are
         * a power of two (so HCDs don't need to).
         *
         * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
         * supports different values... this uses EHCI/UHCI defaults (and
         * EHCI can use smaller non-default values).
         */
        switch (xfertype) {
        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
                /* too small? */
                switch (dev->speed) {
                case USB_SPEED_VARIABLE:
                        if (urb->interval < 6)
                                return -EINVAL;
                        break;
                default:
                        if (urb->interval <= 0)
                                return -EINVAL;
                        break;
                }
                /* too big? */
                switch (dev->speed) {
                case USB_SPEED_SUPER:           /* units are 125us */
                        /* Handle up to 2^(16-1) microframes */
                        if (urb->interval > (1 << 15))
                                return -EINVAL;
                        max = 1 << 15;
                        break;
                case USB_SPEED_VARIABLE:
                        if (urb->interval > 16)
                                return -EINVAL;
                        break;
                case USB_SPEED_HIGH:            /* units are microframes */
                        /* NOTE usb handles 2^15 */
                        if (urb->interval > (1024 * 8))
                                urb->interval = 1024 * 8;
                        max = 1024 * 8;
                        break;
                case USB_SPEED_FULL:            /* units are frames/msec */
                case USB_SPEED_LOW:
                        if (xfertype == USB_ENDPOINT_XFER_INT) {
                                if (urb->interval > 255)
                                        return -EINVAL;
                                /* NOTE ohci only handles up to 32 */
                                max = 128;
                        } else {
                                if (urb->interval > 1024)
                                        urb->interval = 1024;
                                /* NOTE usb and ohci handle up to 2^15 */
                                max = 1024;
                        }
                        break;
                default:
                        return -EINVAL;
                }
                if (dev->speed != USB_SPEED_VARIABLE) {
                        /* Round down to a power of 2, no more than max */
                        urb->interval = min(max, 1 << ilog2(urb->interval));
                }
        }

        return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
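
/*
 * Example: a sketch of an interrupt-in urb that is resubmitted from its
 * completion handler.  Because completion handlers run in interrupt
 * context, the resubmission must use GFP_ATOMIC, per the memory-flag
 * rules documented above.  The function name and error policy are
 * illustrative assumptions; the block is not compiled.
 */
#if 0
static void example_int_complete(struct urb *urb)
{
        int retval;

        switch (urb->status) {
        case 0:                 /* success; data is in urb->transfer_buffer */
                break;
        case -ECONNRESET:       /* unlinked via usb_unlink_urb() */
        case -ENOENT:           /* killed via usb_kill_urb() */
        case -ESHUTDOWN:        /* host controller or device is gone */
                return;         /* do not resubmit */
        default:                /* transient error; try again */
                break;
        }

        /* completion handlers run in interrupt context: GFP_ATOMIC only */
        retval = usb_submit_urb(urb, GFP_ATOMIC);
        if (retval)
                dev_err(&urb->dev->dev, "resubmit failed: %d\n", retval);
}
#endif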

/*-------------------------------------------------------------------*/
/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is always asynchronous.  Success is indicated by
 * returning -EINPROGRESS, at which time the URB will probably not yet
 * have been given back to the device driver.  When it is eventually
 * called, the completion function will see @urb->status == -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
        if (!urb)
                return -EINVAL;
        if (!urb->dev)
                return -ENODEV;
        if (!urb->ep)
                return -EIDRM;
        return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
        might_sleep();
        if (!(urb && urb->dev && urb->ep))
                return;
        atomic_inc(&urb->reject);

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

        atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
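
/*
 * Example: a sketch of stopping I/O from a disconnect() callback with
 * usb_kill_urb().  Once the call returns, the completion handler has
 * finished and the urb may be freed.  "struct example_dev" and its
 * members are hypothetical; the block is not compiled.
 */
#if 0
struct example_dev {
        struct urb *int_urb;
};

static void example_disconnect(struct usb_interface *intf)
{
        struct example_dev *dev = usb_get_intfdata(intf);

        usb_set_intfdata(intf, NULL);
        usb_kill_urb(dev->int_urb);     /* waits for the completion handler */
        usb_free_urb(dev->int_urb);
        kfree(dev);
}
#endif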

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
        might_sleep();
        if (!(urb && urb->dev && urb->ep))
                return;
        atomic_inc(&urb->reject);

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be killed, starting
 * from the back of the queue.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        spin_lock_irq(&anchor->lock);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                /* we must make sure the URB isn't freed before we kill it */
                usb_get_urb(victim);
                spin_unlock_irq(&anchor->lock);
                /* this will unanchor the URB */
                usb_kill_urb(victim);
                usb_put_urb(victim);
                spin_lock_irq(&anchor->lock);
        }
        spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned, starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        spin_lock_irq(&anchor->lock);
        anchor->poisoned = 1;
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                /* we must make sure the URB isn't freed before we kill it */
                usb_get_urb(victim);
                spin_unlock_irq(&anchor->lock);
                /* this will unanchor the URB */
                usb_poison_urb(victim);
                usb_put_urb(victim);
                spin_lock_irq(&anchor->lock);
        }
        spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
        unsigned long flags;
        struct urb *lazarus;

        spin_lock_irqsave(&anchor->lock, flags);
        list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
                usb_unpoison_urb(lazarus);
        }
        anchor->poisoned = 0;
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
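
/*
 * Example: a sketch of a pre_reset/post_reset pair that quiesces traffic
 * by poisoning an anchor and re-enables it afterwards.  "struct
 * example_dev", its anchor, and the reset callbacks are hypothetical
 * driver-side names; the block is not compiled.
 */
#if 0
struct example_dev {
        struct usb_anchor submitted;    /* set up with init_usb_anchor() */
};

static int example_pre_reset(struct usb_interface *intf)
{
        struct example_dev *dev = usb_get_intfdata(intf);

        /* stop all traffic; urbs anchored after this point are rejected */
        usb_poison_anchored_urbs(&dev->submitted);
        return 0;
}

static int example_post_reset(struct usb_interface *intf)
{
        struct example_dev *dev = usb_get_intfdata(intf);

        /* the anchor and freshly submitted urbs work normally again */
        usb_unpoison_anchored_urbs(&dev->submitted);
        return 0;
}
#endif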

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked, starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
                spin_unlock_irqrestore(&anchor->lock, flags);
                /* this will unanchor the URB */
                usb_unlink_urb(victim);
                usb_put_urb(victim);
                spin_lock_irqsave(&anchor->lock, flags);
        }
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all of an anchor's
 * URBs have finished.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
                                  unsigned int timeout)
{
        return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
                                  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
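
/*
 * Example: a sketch of asynchronous cancellation with a bounded wait,
 * falling back to a synchronous kill if urbs are still outstanding.
 * The function name and the 1000 ms timeout are illustrative assumptions;
 * the block is not compiled.
 */
#if 0
static void example_cancel_all(struct usb_anchor *anchor)
{
        usb_unlink_anchored_urbs(anchor);       /* just triggers the unlinking */
        if (!usb_wait_anchor_empty_timeout(anchor, 1000))
                usb_kill_anchored_urbs(anchor); /* force out whatever is left */
}
#endif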

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This takes the oldest urb from an anchor,
 * unanchors it, and returns it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        if (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.next, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
                spin_unlock_irqrestore(&anchor->lock, flags);
                usb_unanchor_urb(victim);
        } else {
                spin_unlock_irqrestore(&anchor->lock, flags);
                victim = NULL;
        }

        return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all of an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
                spin_unlock_irqrestore(&anchor->lock, flags);
                /* this may free the URB */
                usb_unanchor_urb(victim);
                usb_put_urb(victim);
                spin_lock_irqsave(&anchor->lock, flags);
        }
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Returns 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
        return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);