/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>

#include <sound/core.h>
#include <sound/pcm.h>
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
        return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
        return ((rate << 10) + 62) / 125;
}
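/*
 * Worked example (illustrative numbers only): for a 48000 Hz stream,
 * get_usb_full_speed_rate(48000) = ((48000 << 13) + 62) / 125
 *                                = 3145728 = 48 << 16,
 * i.e. 48.0 samples per 1 ms frame in Q16.16, and
 * get_usb_high_speed_rate(48000) = ((48000 << 10) + 62) / 125
 *                                = 393216 = 6 << 16,
 * i.e. 6.0 samples per 125 us microframe.  The "+ 62" term rounds the
 * division by 125 to the nearest value.
 */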
static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
{
        struct snd_usb_audio *chip = subs->stream->chip;

        if (!force && subs->stream->chip->shutdown) /* to be sure... */

        async = !can_sleep && chip->async_unlink;

        if (!async && in_interrupt())

        for (i = 0; i < subs->nurbs; i++) {
                if (test_bit(i, &subs->active_mask)) {
                        if (!test_and_set_bit(i, &subs->unlink_mask)) {
                                struct urb *u = subs->dataurb[i].urb;

        for (i = 0; i < SYNC_URBS; i++) {
                if (test_bit(i+16, &subs->active_mask)) {
                        if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
                                struct urb *u = subs->syncurb[i].urb;

static void release_urb_ctx(struct snd_urb_ctx *u)
{
        usb_free_coherent(u->subs->dev, u->buffer_size,
                          u->urb->transfer_buffer,
                          u->urb->transfer_dma);
        usb_free_urb(u->urb);
/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_substream *subs)
{
        unsigned long end_time = jiffies + msecs_to_jiffies(1000);

                for (i = 0; i < subs->nurbs; i++) {
                        if (test_bit(i, &subs->active_mask))

                if (subs->syncpipe) {
                        for (i = 0; i < SYNC_URBS; i++) {
                                if (test_bit(i + 16, &subs->active_mask))

                schedule_timeout_uninterruptible(1);
        } while (time_before(jiffies, end_time));

        snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
/*
 * release a substream
 */
void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
{
        /* stop urbs (to be sure) */
        if (!subs->stream->chip->shutdown) {
                deactivate_urbs(subs, force, 1);
                wait_clear_urbs(subs);

        for (i = 0; i < MAX_URBS; i++)
                release_urb_ctx(&subs->dataurb[i]);
        for (i = 0; i < SYNC_URBS; i++)
                release_urb_ctx(&subs->syncurb[i]);
        usb_free_coherent(subs->dev, SYNC_URBS * 4,
                          subs->syncbuf, subs->sync_dma);
        subs->syncbuf = NULL;
/*
 * complete callback from data urb
 */
static void snd_complete_urb(struct urb *urb)
{
        struct snd_urb_ctx *ctx = urb->context;
        struct snd_usb_substream *subs = ctx->subs;
        struct snd_pcm_substream *substream = ctx->subs->pcm_substream;

        if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
            !subs->running || /* can be stopped during retire callback */
            (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
            (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
                clear_bit(ctx->index, &subs->active_mask);

                snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
                snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
/*
 * complete callback from sync urb
 */
static void snd_complete_sync_urb(struct urb *urb)
{
        struct snd_urb_ctx *ctx = urb->context;
        struct snd_usb_substream *subs = ctx->subs;
        struct snd_pcm_substream *substream = ctx->subs->pcm_substream;

        if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
            !subs->running || /* can be stopped during retire callback */
            (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
            (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
                clear_bit(ctx->index + 16, &subs->active_mask);

                snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
                snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
/*
 * initialize a substream for playback/capture
 */
int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
                                unsigned int period_bytes,
                                unsigned int rate,
                                unsigned int frame_bits)
{
        unsigned int maxsize, i;
        int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
        unsigned int urb_packs, total_packs, packs_per_ms;
        struct snd_usb_audio *chip = subs->stream->chip;

        /* calculate the frequency in 16.16 format */
        if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
                subs->freqn = get_usb_full_speed_rate(rate);
        else
                subs->freqn = get_usb_high_speed_rate(rate);
        subs->freqm = subs->freqn;
        subs->freqshift = INT_MIN;

        /* calculate max. frequency */
        if (subs->maxpacksize) {
                /* whatever fits into a max. size packet */
                maxsize = subs->maxpacksize;
                subs->freqmax = (maxsize / (frame_bits >> 3))
                                << (16 - subs->datainterval);
        } else {
                /* no max. packet size: just take 25% higher than nominal */
                subs->freqmax = subs->freqn + (subs->freqn >> 2);
                maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
                                >> (16 - subs->datainterval);
        }

        subs->curpacksize = subs->maxpacksize;

        subs->curpacksize = maxsize;
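/*
 * Rough example of the sizing above, assuming a 48 kHz, 16-bit stereo
 * stream at high speed with datainterval == 0 and no endpoint-advertised
 * max packet size: freqn = 6 << 16, freqmax = freqn + freqn / 4 = 0x78000
 * (7.5 frames per packet), and
 * maxsize = ((0x78000 + 0xffff) * 4) >> 16 = 33 bytes, i.e. a little more
 * than 8 frames of room in each packet.
 */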
        if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
                packs_per_ms = 8 >> subs->datainterval;

        urb_packs = max(chip->nrpacks, 1);
        urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);

                urb_packs *= packs_per_ms;

                urb_packs = min(urb_packs, 1U << subs->syncinterval);

        /* decide how many packets to be used */

                unsigned int minsize, maxpacks;

                /* determine how small a packet can be */
                minsize = (subs->freqn >> (16 - subs->datainterval))
                        * (frame_bits >> 3);
                /* with sync from device, assume it can be 12% lower */
                        minsize -= minsize >> 3;
                minsize = max(minsize, 1u);
                total_packs = (period_bytes + minsize - 1) / minsize;
                /* we need at least two URBs for queueing */
                if (total_packs < 2) {

                /* and we don't want too long a queue either */
                maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
                total_packs = min(total_packs, maxpacks);

                while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
                        urb_packs >>= 1;
                total_packs = MAX_URBS * urb_packs;

        subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
        if (subs->nurbs > MAX_URBS) {

                subs->nurbs = MAX_URBS;
                total_packs = MAX_URBS * urb_packs;
        } else if (subs->nurbs < 2) {
                /* too little - we need at least two packets
                 * to ensure contiguous playback/capture
                 */
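/*
 * Illustrative sizing example (made-up numbers): with total_packs = 50 and
 * urb_packs = 8, subs->nurbs = (50 + 8 - 1) / 8 = 7, and the allocation
 * loop below spreads the packets as evenly as possible over the URBs
 * (six URBs carrying 7 packets and one carrying 8 in this case).
 */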
        /* allocate and initialize data urbs */
        for (i = 0; i < subs->nurbs; i++) {
                struct snd_urb_ctx *u = &subs->dataurb[i];

                u->packets = (i + 1) * total_packs / subs->nurbs
                        - i * total_packs / subs->nurbs;
                u->buffer_size = maxsize * u->packets;
                if (subs->fmt_type == UAC_FORMAT_TYPE_II)
                        u->packets++; /* for transfer delimiter */
                u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);

                u->urb->transfer_buffer =
                        usb_alloc_coherent(subs->dev, u->buffer_size,
                                           GFP_KERNEL, &u->urb->transfer_dma);
                if (!u->urb->transfer_buffer)

                u->urb->pipe = subs->datapipe;
                u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
                u->urb->interval = 1 << subs->datainterval;

                u->urb->complete = snd_complete_urb;
        if (subs->syncpipe) {
                /* allocate and initialize sync urbs */
                subs->syncbuf = usb_alloc_coherent(subs->dev, SYNC_URBS * 4,
                                                   GFP_KERNEL, &subs->sync_dma);

                for (i = 0; i < SYNC_URBS; i++) {
                        struct snd_urb_ctx *u = &subs->syncurb[i];

                        u->urb = usb_alloc_urb(1, GFP_KERNEL);

                        u->urb->transfer_buffer = subs->syncbuf + i * 4;
                        u->urb->transfer_dma = subs->sync_dma + i * 4;
                        u->urb->transfer_buffer_length = 4;
                        u->urb->pipe = subs->syncpipe;
                        u->urb->transfer_flags = URB_ISO_ASAP |
                                                 URB_NO_TRANSFER_DMA_MAP;
                        u->urb->number_of_packets = 1;
                        u->urb->interval = 1 << subs->syncinterval;

                        u->urb->complete = snd_complete_sync_urb;

        snd_usb_release_substream_urbs(subs, 0);
/*
 * prepare urb for full speed capture sync pipe
 *
 * fill the length and offset of each urb descriptor.
 * the fixed 10.14 frequency is passed through the pipe.
 */
static int prepare_capture_sync_urb(struct snd_usb_substream *subs,
                                    struct snd_pcm_runtime *runtime,
                                    struct urb *urb)
{
        unsigned char *cp = urb->transfer_buffer;
        struct snd_urb_ctx *ctx = urb->context;
        urb->dev = ctx->subs->dev; /* we need to set this each time */
        urb->iso_frame_desc[0].length = 3;
        urb->iso_frame_desc[0].offset = 0;
        cp[0] = subs->freqn >> 2;
        cp[1] = subs->freqn >> 10;
        cp[2] = subs->freqn >> 18;
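/*
 * Example of the 10.14 packing above, assuming a 48 kHz stream:
 * subs->freqn = 48 << 16 = 0x300000, so freqn >> 2 = 0x0c0000 and the
 * three bytes sent back to the device are 0x00, 0x00, 0x0c
 * (little-endian 10.14 for 48.0 samples per 1 ms frame).
 */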
/*
 * prepare urb for high speed capture sync pipe
 *
 * fill the length and offset of each urb descriptor.
 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
 */
static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs,
                                       struct snd_pcm_runtime *runtime,
                                       struct urb *urb)
{
        unsigned char *cp = urb->transfer_buffer;
        struct snd_urb_ctx *ctx = urb->context;
        urb->dev = ctx->subs->dev; /* we need to set this each time */
        urb->iso_frame_desc[0].length = 4;
        urb->iso_frame_desc[0].offset = 0;
        cp[0] = subs->freqn;
        cp[1] = subs->freqn >> 8;
        cp[2] = subs->freqn >> 16;
        cp[3] = subs->freqn >> 24;
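/*
 * Same example at high speed, assuming a 48 kHz stream:
 * subs->freqn = 6 << 16 = 0x060000, sent as the four bytes
 * 0x00, 0x00, 0x06, 0x00, i.e. 6.0 samples per microframe in plain 16.16.
 */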
/*
 * process after capture sync complete
 */
static int retire_capture_sync_urb(struct snd_usb_substream *subs,
                                   struct snd_pcm_runtime *runtime,
/*
 * prepare urb for capture data pipe
 *
 * fill the offset and length of each descriptor.
 *
 * we use a temporary buffer to receive the captured data.
 * since the amount of data delivered per packet is only known once the
 * transfer completes, we cannot map the urb onto the pcm buffer directly;
 * the data is copied to the pcm buffer later, in the complete callback.
 */
static int prepare_capture_urb(struct snd_usb_substream *subs,
                               struct snd_pcm_runtime *runtime,
                               struct urb *urb)
{
        struct snd_urb_ctx *ctx = urb->context;
        urb->dev = ctx->subs->dev; /* we need to set this each time */
        for (i = 0; i < ctx->packets; i++) {
                urb->iso_frame_desc[i].offset = offs;
                urb->iso_frame_desc[i].length = subs->curpacksize;
                offs += subs->curpacksize;
        }
        urb->transfer_buffer_length = offs;
        urb->number_of_packets = ctx->packets;
/*
 * process after capture complete
 *
 * copy the data from each descriptor to the pcm buffer, and
 * update the current position.
 */
static int retire_capture_urb(struct snd_usb_substream *subs,
                              struct snd_pcm_runtime *runtime,
                              struct urb *urb)
{
        unsigned int stride, frames, bytes, oldptr;
        int period_elapsed = 0;

        stride = runtime->frame_bits >> 3;

        for (i = 0; i < urb->number_of_packets; i++) {
                cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
                if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
                        snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);

                bytes = urb->iso_frame_desc[i].actual_length;
                frames = bytes / stride;
                if (!subs->txfr_quirk)
                        bytes = frames * stride;
                if (bytes % (runtime->sample_bits >> 3) != 0) {
#ifdef CONFIG_SND_DEBUG_VERBOSE
                        int oldbytes = bytes;
#endif
                        bytes = frames * stride;
                        snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
                                    oldbytes, bytes);

                /* update the current pointer */
                spin_lock_irqsave(&subs->lock, flags);
                oldptr = subs->hwptr_done;
                subs->hwptr_done += bytes;
                if (subs->hwptr_done >= runtime->buffer_size * stride)
                        subs->hwptr_done -= runtime->buffer_size * stride;
                frames = (bytes + (oldptr % stride)) / stride;
                subs->transfer_done += frames;
                if (subs->transfer_done >= runtime->period_size) {
                        subs->transfer_done -= runtime->period_size;

                spin_unlock_irqrestore(&subs->lock, flags);
                /* copy a data chunk */
                if (oldptr + bytes > runtime->buffer_size * stride) {
                        unsigned int bytes1 =
                                runtime->buffer_size * stride - oldptr;
                        memcpy(runtime->dma_area + oldptr, cp, bytes1);
                        memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
                } else {
                        memcpy(runtime->dma_area + oldptr, cp, bytes);
                }

        snd_pcm_period_elapsed(subs->pcm_substream);
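/*
 * Example of the wrap-around copy above (illustrative numbers): with a
 * 32768-byte ring buffer (runtime->buffer_size * stride), oldptr = 32000
 * and bytes = 1024, bytes1 = 32768 - 32000 = 768 bytes are copied to the
 * end of the buffer and the remaining 256 bytes to its start.
 */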
/*
 * Process after capture complete when paused. Nothing to do.
 */
static int retire_paused_capture_urb(struct snd_usb_substream *subs,
                                     struct snd_pcm_runtime *runtime,

/*
 * prepare urb for playback sync pipe
 *
 * set up the offset and length to receive the current frequency.
 */
static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
                                     struct snd_pcm_runtime *runtime,
                                     struct urb *urb)
{
        struct snd_urb_ctx *ctx = urb->context;
        urb->dev = ctx->subs->dev; /* we need to set this each time */
        urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
        urb->iso_frame_desc[0].offset = 0;
/*
 * process after playback sync complete
 *
 * Full speed devices report feedback values in 10.14 format as samples per
 * frame, high speed devices in 16.16 format as samples per microframe.
 * Because the Audio Class 1 spec was written before USB 2.0, many high speed
 * devices use a wrong interpretation, some others use an entirely different
 * format. Therefore, we cannot predict what format any particular device uses
 * and must detect it automatically.
 */
static int retire_playback_sync_urb(struct snd_usb_substream *subs,
                                    struct snd_pcm_runtime *runtime,
                                    struct urb *urb)
{
        if (urb->iso_frame_desc[0].status != 0 ||
            urb->iso_frame_desc[0].actual_length < 3)

        f = le32_to_cpup(urb->transfer_buffer);
        if (urb->iso_frame_desc[0].actual_length == 3)

        if (unlikely(subs->freqshift == INT_MIN)) {
                /*
                 * The first time we see a feedback value, determine its format
                 * by shifting it left or right until it matches the nominal
                 * frequency value. This assumes that the feedback does not
                 * differ from the nominal value more than +50% or -25%.
                 */

                while (f < subs->freqn - subs->freqn / 4) {

                while (f > subs->freqn + subs->freqn / 2) {

                subs->freqshift = shift;
        } else if (subs->freqshift >= 0)
                f <<= subs->freqshift;
        else
                f >>= -subs->freqshift;

        if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
                /*
                 * If the frequency looks valid, set it.
                 * This value is referred to in prepare_playback_urb().
                 */
                spin_lock_irqsave(&subs->lock, flags);

                spin_unlock_irqrestore(&subs->lock, flags);

                /*
                 * Out of range; maybe the shift value is wrong.
                 * Reset it so that we autodetect again the next time.
                 */
                subs->freqshift = INT_MIN;
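/*
 * Illustrative example of the shift autodetection above, assuming a 48 kHz
 * full speed stream: freqn = 48 << 16 = 3145728.  A device reporting 48.0
 * in 10.14 delivers f = 0x0c0000 = 786432; the first while loop has to
 * double it twice (786432 -> 1572864 -> 3145728) before it leaves the
 * "below freqn - freqn / 4" range, so freqshift ends up as 2 and later
 * 10.14 feedback values are scaled up to 16.16 the same way.
 */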
/* determine the number of frames in the next packet */
static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
{
                return subs->maxframesize;

        subs->phase = (subs->phase & 0xffff)
                + (subs->freqm << subs->datainterval);
        return min(subs->phase >> 16, subs->maxframesize);
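/*
 * Example of the phase accumulator above, assuming 44.1 kHz at full speed
 * with datainterval == 0: freqm starts out as roughly 44.1 << 16 = 0x2c199a,
 * so nine packets of 44 frames are followed by one of 45, averaging
 * 44.1 frames per 1 ms frame.
 */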
/*
 * Prepare urb for streaming before playback starts or when paused.
 *
 * We don't have any data, so we send silence.
 */
static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
                                       struct snd_pcm_runtime *runtime,
                                       struct urb *urb)
{
        unsigned int i, offs, counts;
        struct snd_urb_ctx *ctx = urb->context;
        int stride = runtime->frame_bits >> 3;

        urb->dev = ctx->subs->dev;
        for (i = 0; i < ctx->packets; ++i) {
                counts = snd_usb_audio_next_packet_size(subs);
                urb->iso_frame_desc[i].offset = offs * stride;
                urb->iso_frame_desc[i].length = counts * stride;

        urb->number_of_packets = ctx->packets;
        urb->transfer_buffer_length = offs * stride;
        memset(urb->transfer_buffer,
               runtime->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
/*
 * prepare urb for playback data pipe
 *
 * Since a URB can handle only a single linear buffer, we must use double
 * buffering when the data to be transferred overflows the buffer boundary.
 * To avoid inconsistencies when updating hwptr_done, we use double buffering
 */
static int prepare_playback_urb(struct snd_usb_substream *subs,
                                struct snd_pcm_runtime *runtime,
                                struct urb *urb)
{
        unsigned int counts, frames, bytes;
        int period_elapsed = 0;
        struct snd_urb_ctx *ctx = urb->context;

        stride = runtime->frame_bits >> 3;
        urb->dev = ctx->subs->dev; /* we need to set this each time */
        urb->number_of_packets = 0;
        spin_lock_irqsave(&subs->lock, flags);
        for (i = 0; i < ctx->packets; i++) {
                counts = snd_usb_audio_next_packet_size(subs);
                /* set up descriptor */
                urb->iso_frame_desc[i].offset = frames * stride;
                urb->iso_frame_desc[i].length = counts * stride;

                urb->number_of_packets++;
                subs->transfer_done += counts;
                if (subs->transfer_done >= runtime->period_size) {
                        subs->transfer_done -= runtime->period_size;

                        if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
                                if (subs->transfer_done > 0) {
                                        /* FIXME: fill-max mode is not
                                         * supported yet */
                                        frames -= subs->transfer_done;
                                        counts -= subs->transfer_done;
                                        urb->iso_frame_desc[i].length =
                                                counts * stride;
                                        subs->transfer_done = 0;

                                if (i < ctx->packets) {
                                        /* add a transfer delimiter */
                                        urb->iso_frame_desc[i].offset =
                                                frames * stride;
                                        urb->iso_frame_desc[i].length = 0;
                                        urb->number_of_packets++;

                if (period_elapsed) /* finish at the period boundary */
                        break;

        bytes = frames * stride;
        if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
                /* err, the transferred area goes over buffer boundary. */
                unsigned int bytes1 =
                        runtime->buffer_size * stride - subs->hwptr_done;
                memcpy(urb->transfer_buffer,
                       runtime->dma_area + subs->hwptr_done, bytes1);
                memcpy(urb->transfer_buffer + bytes1,
                       runtime->dma_area, bytes - bytes1);
        } else {
                memcpy(urb->transfer_buffer,
                       runtime->dma_area + subs->hwptr_done, bytes);
        }
        subs->hwptr_done += bytes;
        if (subs->hwptr_done >= runtime->buffer_size * stride)
                subs->hwptr_done -= runtime->buffer_size * stride;

        /* update delay with exact number of samples queued */
        runtime->delay = subs->last_delay;
        runtime->delay += frames;
        subs->last_delay = runtime->delay;

        /* realign last_frame_number */
        subs->last_frame_number = usb_get_current_frame_number(subs->dev);
        subs->last_frame_number &= 0xFF; /* keep 8 LSBs */

        spin_unlock_irqrestore(&subs->lock, flags);
        urb->transfer_buffer_length = bytes;

        if (period_elapsed)
                snd_pcm_period_elapsed(subs->pcm_substream);
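/*
 * Note on the delay accounting above: every frame queued here is added to
 * subs->last_delay, and retire_playback_urb() below subtracts the frames of
 * each completed URB again, so runtime->delay roughly tracks the number of
 * frames queued to the host controller but not yet transferred.
 */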
/*
 * process after playback data complete
 * - decrease the delay count again
 */
static int retire_playback_urb(struct snd_usb_substream *subs,
                               struct snd_pcm_runtime *runtime,
                               struct urb *urb)
{
        int stride = runtime->frame_bits >> 3;
        int processed = urb->transfer_buffer_length / stride;

        spin_lock_irqsave(&subs->lock, flags);

        est_delay = snd_usb_pcm_delay(subs, runtime->rate);
        /* update delay with exact number of samples played */
        if (processed > subs->last_delay)
                subs->last_delay = 0;
        else
                subs->last_delay -= processed;
        runtime->delay = subs->last_delay;

        /*
         * Report when delay estimate is off by more than 2ms.
         * The error should be lower than 2ms since the estimate relies
         * on two reads of a counter updated every ms.
         */
        if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
                snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
                           est_delay, subs->last_delay);

        spin_unlock_irqrestore(&subs->lock, flags);
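/*
 * Example of the 2 ms check above: at 48 kHz, runtime->rate * 2 / 1000 is
 * 96 frames, so the message is only printed when the estimate from
 * snd_usb_pcm_delay() and the bookkept subs->last_delay disagree by more
 * than 96 frames.
 */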
static const char *usb_error_string(int err)
{
                return "endpoint not enabled";
                return "endpoint stalled";
                return "not enough bandwidth";
                return "device disabled";
                return "device suspended";
                return "internal error";
                return "unknown error";
/*
 * set up and start data/sync urbs
 */
static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
{
        if (subs->stream->chip->shutdown)

        for (i = 0; i < subs->nurbs; i++) {
                if (snd_BUG_ON(!subs->dataurb[i].urb))

                if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
                        snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);

        if (subs->syncpipe) {
                for (i = 0; i < SYNC_URBS; i++) {
                        if (snd_BUG_ON(!subs->syncurb[i].urb))

                        if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
                                snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);

        subs->active_mask = 0;
        subs->unlink_mask = 0;

        for (i = 0; i < subs->nurbs; i++) {
                err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);

                        snd_printk(KERN_ERR "cannot submit datapipe "
                                   "for urb %d, error %d: %s\n",
                                   i, err, usb_error_string(err));

                set_bit(i, &subs->active_mask);

        if (subs->syncpipe) {
                for (i = 0; i < SYNC_URBS; i++) {
                        err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);

                                snd_printk(KERN_ERR "cannot submit syncpipe "
                                           "for urb %d, error %d: %s\n",
                                           i, err, usb_error_string(err));

                        set_bit(i + 16, &subs->active_mask);
        // snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
        deactivate_urbs(subs, 0, 0);
static struct snd_urb_ops audio_urb_ops[2] = {
        {
                .prepare = prepare_nodata_playback_urb,
                .retire = retire_playback_urb,
                .prepare_sync = prepare_playback_sync_urb,
                .retire_sync = retire_playback_sync_urb,
        },
        {
                .prepare = prepare_capture_urb,
                .retire = retire_capture_urb,
                .prepare_sync = prepare_capture_sync_urb,
                .retire_sync = retire_capture_sync_urb,
        },
};
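/*
 * Index 0 holds the playback callbacks and index 1 the capture callbacks,
 * matching SNDRV_PCM_STREAM_PLAYBACK and SNDRV_PCM_STREAM_CAPTURE, so
 * snd_usb_init_substream() below simply copies audio_urb_ops[stream] into
 * subs->ops and then patches individual entries (the high speed sync
 * callback right after the copy, the prepare/retire callbacks later in the
 * trigger handlers).
 */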
/*
 * initialize the substream instance.
 */
void snd_usb_init_substream(struct snd_usb_stream *as,
                            int stream, struct audioformat *fp)
{
        struct snd_usb_substream *subs = &as->substream[stream];

        INIT_LIST_HEAD(&subs->fmt_list);
        spin_lock_init(&subs->lock);

        subs->direction = stream;
        subs->dev = as->chip->dev;
        subs->txfr_quirk = as->chip->txfr_quirk;
        subs->ops = audio_urb_ops[stream];
        subs->speed = snd_usb_get_speed(subs->dev);
        if (subs->speed >= USB_SPEED_HIGH)
                subs->ops.prepare_sync = prepare_capture_sync_urb_hs;

        snd_usb_set_pcm_ops(as->pcm, stream);

        list_add_tail(&fp->list, &subs->fmt_list);
        subs->formats |= fp->formats;
        subs->endpoint = fp->endpoint;
        subs->fmt_type = fp->fmt_type;
int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct snd_usb_substream *subs = substream->runtime->private_data;

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                subs->ops.prepare = prepare_playback_urb;

        case SNDRV_PCM_TRIGGER_STOP:
                return deactivate_urbs(subs, 0, 0);
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                subs->ops.prepare = prepare_nodata_playback_urb;

int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct snd_usb_substream *subs = substream->runtime->private_data;

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                subs->ops.retire = retire_capture_urb;
                return start_urbs(subs, substream->runtime);
        case SNDRV_PCM_TRIGGER_STOP:
                return deactivate_urbs(subs, 0, 0);
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                subs->ops.retire = retire_paused_capture_urb;

        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                subs->ops.retire = retire_capture_urb;
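/*
 * Note on the trigger model above: playback URBs are already running by the
 * time START arrives (they are submitted from snd_usb_substream_prepare()
 * below), so the playback trigger only swaps the prepare callback between
 * real data and silence; the capture trigger submits the URBs on START and
 * swaps the retire callback on pause, so captured packets are simply
 * discarded while paused.
 */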
int snd_usb_substream_prepare(struct snd_usb_substream *subs,
                              struct snd_pcm_runtime *runtime)
{
        /* clear urbs (to be sure) */
        deactivate_urbs(subs, 0, 1);
        wait_clear_urbs(subs);

        /* for playback, submit the URBs now; otherwise, the first hwptr_done
         * updates for all URBs would happen at the same time when starting */
        if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
                subs->ops.prepare = prepare_nodata_playback_urb;
                return start_urbs(subs, runtime);