2 * generic helper functions for handling video4linux capture buffers
4 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
6 * Highly based on video-buf written originally by:
7 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
8 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
9 * (c) 2006 Ted Walther and John Sokol
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/interrupt.h>
24 #include <media/videobuf-core.h>
/*
 * Buffer sanity "magic": videobuf_alloc() stamps each buffer with
 * MAGIC_BUFFER; MAGIC_CHECK() compares a stored magic against the
 * expected value (NOTE(review): the macro's printk/closing lines are
 * missing from this excerpt).
 */
26 #define MAGIC_BUFFER 0x20070728
27 #define MAGIC_CHECK(is, should) \
29 if (unlikely((is) != (should))) { \
31 "magic mismatch: %x (expected %x)\n", \
/* Runtime debug knob (0644: root-writable via sysfs) plus module metadata. */
38 module_param(debug, int, 0644);
40 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
41 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
42 MODULE_LICENSE("GPL");
/* Debug printk with "vbuf: " prefix; level gating line not visible in this excerpt. */
44 #define dprintk(level, fmt, arg...) \
47 printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
50 /* --------------------------------------------------------------------- */
/* Dispatch an optional memory-type-specific operation; returns 0 when the op is absent. */
52 #define CALL(q, f, arg...) \
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
/*
 * videobuf_alloc() - allocate one buffer through the queue's
 * memory-type-specific int_ops->alloc() and initialize its wait queue
 * and magic.  q->msize must be at least sizeof(struct videobuf_buffer).
 * NOTE(review): braces/returns are missing from this excerpt.
 */
55 struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q)
57 struct videobuf_buffer *vb;
/* The allocator must provide room for the common header. */
59 BUG_ON(q->msize < sizeof(*vb));
61 if (!q->int_ops || !q->int_ops->alloc) {
62 printk(KERN_ERR "No specific ops defined!\n");
66 vb = q->int_ops->alloc(q->msize);
/* Waiters block on vb->done until the buffer leaves QUEUED/ACTIVE. */
68 init_waitqueue_head(&vb->done);
69 vb->magic = MAGIC_BUFFER;
74 EXPORT_SYMBOL_GPL(videobuf_alloc);
/* Done-condition: buffer is no longer owned by the hardware/driver. */
76 #define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
77 vb->state != VIDEOBUF_QUEUED)
/*
 * videobuf_waiton() - wait until @vb is neither ACTIVE nor QUEUED.
 * @intr selects interruptible vs. uninterruptible sleep; the
 * non_blocking short-circuit path is not visible in this excerpt.
 */
78 int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
80 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
/* Interruptible wait returns -ERESTARTSYS on signal. */
90 return wait_event_interruptible(vb->done, WAITON_CONDITION);
92 wait_event(vb->done, WAITON_CONDITION);
96 EXPORT_SYMBOL_GPL(videobuf_waiton);
/*
 * videobuf_iolock() - delegate buffer locking/pinning to the
 * memory-type backend via int_ops->iolock (0 if the op is absent).
 */
98 int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
99 struct v4l2_framebuffer *fbuf)
/* Both the buffer and the queue ops are magic-checked before dispatch. */
101 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
102 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
104 return CALL(q, iolock, q, vb, fbuf);
106 EXPORT_SYMBOL_GPL(videobuf_iolock);
/*
 * videobuf_queue_to_vaddr() - kernel virtual address of @buf's data,
 * via the backend's optional vaddr op.  The fallback return for a
 * missing op is not visible in this excerpt.
 */
108 void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
109 struct videobuf_buffer *buf)
111 if (q->int_ops->vaddr)
112 return q->int_ops->vaddr(buf);
115 EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
117 /* --------------------------------------------------------------------- */
/*
 * videobuf_queue_core_init() - zero and initialize a queue, wiring in
 * the driver ops and the memory-type backend (int_ops).  Several
 * parameter/assignment lines are missing from this excerpt.
 */
120 void videobuf_queue_core_init(struct videobuf_queue *q,
121 const struct videobuf_queue_ops *ops,
124 enum v4l2_buf_type type,
125 enum v4l2_field field,
128 struct videobuf_qtype_ops *int_ops)
/* Start from a clean slate; all fields default to zero/NULL. */
131 memset(q, 0, sizeof(*q));
132 q->irqlock = irqlock;
139 q->int_ops = int_ops;
141 /* All buffer operations are mandatory */
142 BUG_ON(!q->ops->buf_setup);
143 BUG_ON(!q->ops->buf_prepare);
144 BUG_ON(!q->ops->buf_queue);
145 BUG_ON(!q->ops->buf_release);
147 /* Lock is mandatory for queue_cancel to work */
150 /* Having implementations for abstract methods are mandatory */
/* vb_lock serializes all queue operations; q->wait wakes poll/dqbuf sleepers. */
153 mutex_init(&q->vb_lock);
154 init_waitqueue_head(&q->wait);
155 INIT_LIST_HEAD(&q->stream);
157 EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
159 /* Locking: Only usage in bttv unsafe find way to remove */
/*
 * videobuf_queue_is_busy() - nonzero if the queue has activity that
 * forbids reconfiguration: streaming on, a pending read, or any buffer
 * mapped/queued/active.  Return statements are not visible in this
 * excerpt.
 */
160 int videobuf_queue_is_busy(struct videobuf_queue *q)
164 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
167 dprintk(1, "busy: streaming active\n");
171 dprintk(1, "busy: pending read #1\n");
175 dprintk(1, "busy: pending read #2\n");
/* Scan every slot for a reason to report "busy". */
178 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
179 if (NULL == q->bufs[i])
181 if (q->bufs[i]->map) {
182 dprintk(1, "busy: buffer #%d mapped\n", i);
185 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
186 dprintk(1, "busy: buffer #%d queued\n", i);
189 if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
/* NOTE(review): "avtive" is a typo for "active" in this debug message. */
190 dprintk(1, "busy: buffer #%d avtive\n", i);
196 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
198 /* Locking: Caller holds q->vb_lock */
/*
 * videobuf_queue_cancel() - abort all I/O on the queue: wake sleepers,
 * pull QUEUED buffers off the driver list (marking them ERROR), then
 * release every buffer and reset the stream list.
 */
199 void videobuf_queue_cancel(struct videobuf_queue *q)
201 unsigned long flags = 0;
/* Wake anyone blocked in poll/dqbuf before tearing things down. */
206 wake_up_interruptible_sync(&q->wait);
208 /* remove queued buffers from list */
/* irqlock protects the driver's queue list against its IRQ handler. */
209 spin_lock_irqsave(q->irqlock, flags);
210 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
211 if (NULL == q->bufs[i])
213 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
214 list_del(&q->bufs[i]->queue);
/* ERROR (not IDLE) so waiters learn the capture was aborted. */
215 q->bufs[i]->state = VIDEOBUF_ERROR;
216 wake_up_all(&q->bufs[i]->done);
219 spin_unlock_irqrestore(q->irqlock, flags);
221 /* free all buffers + clear queue */
222 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
223 if (NULL == q->bufs[i])
225 q->ops->buf_release(q, q->bufs[i]);
227 INIT_LIST_HEAD(&q->stream);
229 EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
231 /* --------------------------------------------------------------------- */
233 /* Locking: Caller holds q->vb_lock */
/*
 * videobuf_next_field() - field to capture next.  For ALTERNATE mode
 * this toggles TOP/BOTTOM based on q->last; otherwise q->field is
 * returned as-is (final return not visible in this excerpt).
 */
234 enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
236 enum v4l2_field field = q->field;
/* FIELD_ANY must have been resolved by the caller/driver already. */
238 BUG_ON(V4L2_FIELD_ANY == field);
240 if (V4L2_FIELD_ALTERNATE == field) {
241 if (V4L2_FIELD_TOP == q->last) {
242 field = V4L2_FIELD_BOTTOM;
243 q->last = V4L2_FIELD_BOTTOM;
245 field = V4L2_FIELD_TOP;
246 q->last = V4L2_FIELD_TOP;
251 EXPORT_SYMBOL_GPL(videobuf_next_field);
253 /* Locking: Caller holds q->vb_lock */
/*
 * videobuf_status() - fill a struct v4l2_buffer for userspace from the
 * internal buffer state (used by QUERYBUF and DQBUF paths).  The
 * switch statements and break lines are partially missing from this
 * excerpt.
 */
254 static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
255 struct videobuf_buffer *vb, enum v4l2_buf_type type)
257 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
258 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
263 b->memory = vb->memory;
/* The m union member depends on the memory type. */
265 case V4L2_MEMORY_MMAP:
266 b->m.offset = vb->boff;
267 b->length = vb->bsize;
269 case V4L2_MEMORY_USERPTR:
270 b->m.userptr = vb->baddr;
271 b->length = vb->bsize;
273 case V4L2_MEMORY_OVERLAY:
274 b->m.offset = vb->boff;
280 b->flags |= V4L2_BUF_FLAG_MAPPED;
/* PREPARED/QUEUED/ACTIVE all report as QUEUED to userspace. */
283 case VIDEOBUF_PREPARED:
284 case VIDEOBUF_QUEUED:
285 case VIDEOBUF_ACTIVE:
286 b->flags |= V4L2_BUF_FLAG_QUEUED;
290 b->flags |= V4L2_BUF_FLAG_DONE;
292 case VIDEOBUF_NEEDS_INIT:
298 if (vb->input != UNSET) {
299 b->flags |= V4L2_BUF_FLAG_INPUT;
300 b->input = vb->input;
303 b->field = vb->field;
304 b->timestamp = vb->ts;
305 b->bytesused = vb->size;
/* field_count counts fields; sequence is in frames, hence >> 1. */
306 b->sequence = vb->field_count >> 1;
309 /* Locking: Caller holds q->vb_lock */
/*
 * __videobuf_mmap_free() - release all buffers unless any is still
 * mmapped (first pass checks ->map; the busy-return line is not
 * visible in this excerpt).
 */
310 static int __videobuf_mmap_free(struct videobuf_queue *q)
317 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
/* Pass 1: refuse to free while any buffer is mapped into userspace. */
319 for (i = 0; i < VIDEO_MAX_FRAME; i++)
320 if (q->bufs[i] && q->bufs[i]->map)
/* Pass 2: release every allocated buffer. */
323 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
324 if (NULL == q->bufs[i])
326 q->ops->buf_release(q, q->bufs[i]);
/* Locked wrapper around __videobuf_mmap_free(). */
334 int videobuf_mmap_free(struct videobuf_queue *q)
337 mutex_lock(&q->vb_lock);
338 ret = __videobuf_mmap_free(q);
339 mutex_unlock(&q->vb_lock);
342 EXPORT_SYMBOL_GPL(videobuf_mmap_free);
344 /* Locking: Caller holds q->vb_lock */
/*
 * __videobuf_mmap_setup() - free any existing buffers, then allocate
 * and initialize @bcount buffers of @bsize bytes for @memory type.
 * For MMAP each buffer gets a page-aligned pseudo-offset used as the
 * mmap cookie.  Error paths are not visible in this excerpt.
 */
345 int __videobuf_mmap_setup(struct videobuf_queue *q,
346 unsigned int bcount, unsigned int bsize,
347 enum v4l2_memory memory)
352 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
/* Old buffers must go first; fails if any is still mapped. */
354 err = __videobuf_mmap_free(q);
358 /* Allocate and initialize buffers */
359 for (i = 0; i < bcount; i++) {
360 q->bufs[i] = videobuf_alloc(q);
362 if (NULL == q->bufs[i])
366 q->bufs[i]->input = UNSET;
367 q->bufs[i]->memory = memory;
368 q->bufs[i]->bsize = bsize;
370 case V4L2_MEMORY_MMAP:
/* boff is the synthetic mmap offset userspace passes to mmap(). */
371 q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
373 case V4L2_MEMORY_USERPTR:
374 case V4L2_MEMORY_OVERLAY:
383 dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
387 EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
/* Locked wrapper around __videobuf_mmap_setup(). */
389 int videobuf_mmap_setup(struct videobuf_queue *q,
390 unsigned int bcount, unsigned int bsize,
391 enum v4l2_memory memory)
394 mutex_lock(&q->vb_lock);
395 ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
396 mutex_unlock(&q->vb_lock);
399 EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
/*
 * videobuf_reqbufs() - VIDIOC_REQBUFS backend: validate the request,
 * ask the driver for count/size via buf_setup, and allocate the
 * buffers.  Several error-return/goto lines are not visible in this
 * excerpt.
 */
401 int videobuf_reqbufs(struct videobuf_queue *q,
402 struct v4l2_requestbuffers *req)
404 unsigned int size, count;
407 if (req->count < 1) {
408 dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
/* Only the three classic V4L2 memory models are supported. */
412 if (req->memory != V4L2_MEMORY_MMAP &&
413 req->memory != V4L2_MEMORY_USERPTR &&
414 req->memory != V4L2_MEMORY_OVERLAY) {
415 dprintk(1, "reqbufs: memory type invalid\n");
419 mutex_lock(&q->vb_lock);
420 if (req->type != q->type) {
421 dprintk(1, "reqbufs: queue type invalid\n");
427 dprintk(1, "reqbufs: streaming already exists\n");
/* A non-empty stream list means buffers are already in flight. */
431 if (!list_empty(&q->stream)) {
432 dprintk(1, "reqbufs: stream running\n");
/* Clamp to the framework-wide maximum before consulting the driver. */
438 if (count > VIDEO_MAX_FRAME)
439 count = VIDEO_MAX_FRAME;
/* Driver may adjust both count and per-buffer size. */
441 q->ops->buf_setup(q, &count, &size);
442 dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
444 (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
446 retval = __videobuf_mmap_setup(q, count, size, req->memory);
448 dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
456 mutex_unlock(&q->vb_lock);
459 EXPORT_SYMBOL_GPL(videobuf_reqbufs);
/*
 * videobuf_querybuf() - VIDIOC_QUERYBUF backend: validate type/index,
 * then report the buffer's status via videobuf_status().
 */
461 int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
465 mutex_lock(&q->vb_lock);
466 if (unlikely(b->type != q->type)) {
467 dprintk(1, "querybuf: Wrong type.\n");
470 if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
471 dprintk(1, "querybuf: index out of range.\n");
474 if (unlikely(NULL == q->bufs[b->index])) {
475 dprintk(1, "querybuf: buffer is null.\n");
479 videobuf_status(q, b, q->bufs[b->index], q->type);
483 mutex_unlock(&q->vb_lock);
486 EXPORT_SYMBOL_GPL(videobuf_querybuf);
/*
 * videobuf_qbuf() - VIDIOC_QBUF backend: validate the request, set up
 * the buffer per memory type, prepare it, add it to the stream list
 * and hand it to the driver.  Many error-goto lines are not visible
 * in this excerpt.
 */
488 int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
490 struct videobuf_buffer *buf;
491 enum v4l2_field field;
492 unsigned long flags = 0;
495 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
/* mmap_sem taken so buf_prepare may pin user pages safely.
 * NOTE(review): "¤t" below is a mangled "&current" (encoding
 * corruption of the HTML entity for the '&' sequence) — restore
 * "down_read(&current->mm->mmap_sem);" when fixing the file. */
497 if (b->memory == V4L2_MEMORY_MMAP)
498 down_read(¤t->mm->mmap_sem);
500 mutex_lock(&q->vb_lock);
/* read() I/O and streaming I/O are mutually exclusive. */
503 dprintk(1, "qbuf: Reading running...\n");
507 if (b->type != q->type) {
508 dprintk(1, "qbuf: Wrong type.\n");
511 if (b->index >= VIDEO_MAX_FRAME) {
512 dprintk(1, "qbuf: index out of range.\n");
515 buf = q->bufs[b->index];
517 dprintk(1, "qbuf: buffer is null.\n");
520 MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
521 if (buf->memory != b->memory) {
522 dprintk(1, "qbuf: memory type is wrong.\n");
/* Re-queuing a buffer that is still in flight is an error. */
525 if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
526 dprintk(1, "qbuf: buffer is already queued or active.\n");
530 if (b->flags & V4L2_BUF_FLAG_INPUT) {
531 if (b->input >= q->inputs) {
532 dprintk(1, "qbuf: wrong input.\n");
535 buf->input = b->input;
/* Per-memory-type setup; baddr is the mmap'ed address for MMAP. */
541 case V4L2_MEMORY_MMAP:
542 if (0 == buf->baddr) {
543 dprintk(1, "qbuf: mmap requested "
544 "but buffer addr is zero!\n");
/* Output queues: userspace supplies size/field/timestamp. */
547 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
548 || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
549 || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
550 buf->size = b->bytesused;
551 buf->field = b->field;
552 buf->ts = b->timestamp;
555 case V4L2_MEMORY_USERPTR:
556 if (b->length < buf->bsize) {
557 dprintk(1, "qbuf: buffer length is not enough\n");
/* A changed userptr invalidates any prior mapping: release first. */
560 if (VIDEOBUF_NEEDS_INIT != buf->state &&
561 buf->baddr != b->m.userptr)
562 q->ops->buf_release(q, buf);
563 buf->baddr = b->m.userptr;
565 case V4L2_MEMORY_OVERLAY:
566 buf->boff = b->m.offset;
569 dprintk(1, "qbuf: wrong memory type\n");
573 dprintk(1, "qbuf: requesting next field\n");
574 field = videobuf_next_field(q);
575 retval = q->ops->buf_prepare(q, buf, field);
577 dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
/* Queue to both the stream list and the driver (under irqlock). */
581 list_add_tail(&buf->stream, &q->stream);
583 spin_lock_irqsave(q->irqlock, flags);
584 q->ops->buf_queue(q, buf);
585 spin_unlock_irqrestore(q->irqlock, flags);
587 dprintk(1, "qbuf: succeeded\n");
589 wake_up_interruptible_sync(&q->wait);
592 mutex_unlock(&q->vb_lock);
/* NOTE(review): same "&current" mangling as above. */
594 if (b->memory == V4L2_MEMORY_MMAP)
595 up_read(¤t->mm->mmap_sem);
599 EXPORT_SYMBOL_GPL(videobuf_qbuf);
601 /* Locking: Caller holds q->vb_lock */
/*
 * stream_next_buffer_check_queue() - ensure a dequeueable buffer
 * exists; in blocking mode, drop vb_lock and sleep on q->wait until
 * the stream list is non-empty or streaming stops.  The retry/goto
 * structure is partially missing from this excerpt.
 */
602 static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
608 dprintk(1, "next_buffer: Not streaming\n");
613 if (list_empty(&q->stream)) {
616 dprintk(2, "next_buffer: no buffers to dequeue\n");
619 dprintk(2, "next_buffer: waiting on buffer\n");
621 /* Drop lock to avoid deadlock with qbuf */
622 mutex_unlock(&q->vb_lock);
624 /* Checking list_empty and streaming is safe without
625 * locks because we goto checks to validate while
626 * holding locks before proceeding */
627 retval = wait_event_interruptible(q->wait,
628 !list_empty(&q->stream) || !q->streaming);
629 mutex_lock(&q->vb_lock);
644 /* Locking: Caller holds q->vb_lock */
/*
 * stream_next_buffer() - fetch the head of the stream list once the
 * queue check passes, then wait (interruptibly) for it to complete.
 */
645 static int stream_next_buffer(struct videobuf_queue *q,
646 struct videobuf_buffer **vb, int nonblocking)
649 struct videobuf_buffer *buf = NULL;
651 retval = stream_next_buffer_check_queue(q, nonblocking);
/* Head of q->stream is the oldest queued buffer. */
655 buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
656 retval = videobuf_waiton(buf, nonblocking, 1);
/*
 * videobuf_dqbuf() - VIDIOC_DQBUF backend: wait for the next finished
 * buffer, sync it, move it to IDLE, unlink it from the stream list and
 * report its status to userspace.  Case labels/breaks are partially
 * missing from this excerpt.
 */
665 int videobuf_dqbuf(struct videobuf_queue *q,
666 struct v4l2_buffer *b, int nonblocking)
668 struct videobuf_buffer *buf = NULL;
671 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
673 mutex_lock(&q->vb_lock);
675 retval = stream_next_buffer(q, &buf, nonblocking);
677 dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
/* Both ERROR and DONE buffers get synced and returned to IDLE. */
681 switch (buf->state) {
683 dprintk(1, "dqbuf: state is error\n");
685 CALL(q, sync, q, buf);
686 buf->state = VIDEOBUF_IDLE;
689 dprintk(1, "dqbuf: state is done\n");
690 CALL(q, sync, q, buf);
691 buf->state = VIDEOBUF_IDLE;
694 dprintk(1, "dqbuf: state invalid\n");
698 list_del(&buf->stream);
/* Clear the user struct before filling it — avoids stale fields. */
699 memset(b, 0, sizeof(*b));
700 videobuf_status(q, b, buf, q->type);
702 mutex_unlock(&q->vb_lock);
705 EXPORT_SYMBOL_GPL(videobuf_dqbuf);
/*
 * videobuf_streamon() - VIDIOC_STREAMON backend: hand every already
 * PREPARED buffer on the stream list to the driver, then wake waiters.
 * The streaming-flag setup lines are not visible in this excerpt.
 */
707 int videobuf_streamon(struct videobuf_queue *q)
709 struct videobuf_buffer *buf;
710 unsigned long flags = 0;
713 mutex_lock(&q->vb_lock);
/* Queue prepared buffers under irqlock so the IRQ path sees a consistent list. */
721 spin_lock_irqsave(q->irqlock, flags);
722 list_for_each_entry(buf, &q->stream, stream)
723 if (buf->state == VIDEOBUF_PREPARED)
724 q->ops->buf_queue(q, buf);
725 spin_unlock_irqrestore(q->irqlock, flags);
727 wake_up_interruptible_sync(&q->wait);
729 mutex_unlock(&q->vb_lock);
732 EXPORT_SYMBOL_GPL(videobuf_streamon);
734 /* Locking: Caller holds q->vb_lock */
/* Stop streaming: cancel all in-flight I/O (streaming-flag handling not visible here). */
735 static int __videobuf_streamoff(struct videobuf_queue *q)
740 videobuf_queue_cancel(q);
/* Locked wrapper around __videobuf_streamoff() — VIDIOC_STREAMOFF backend. */
745 int videobuf_streamoff(struct videobuf_queue *q)
749 mutex_lock(&q->vb_lock);
750 retval = __videobuf_streamoff(q);
751 mutex_unlock(&q->vb_lock);
755 EXPORT_SYMBOL_GPL(videobuf_streamoff);
757 /* Locking: Caller holds q->vb_lock */
/*
 * videobuf_read_zerocopy() - read() fast path: capture one frame
 * directly into the caller's userspace buffer by treating it as a
 * USERPTR buffer.  Error labels/returns are not visible in this
 * excerpt.
 */
758 static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
760 size_t count, loff_t *ppos)
762 enum v4l2_field field;
763 unsigned long flags = 0;
766 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
769 q->read_buf = videobuf_alloc(q);
770 if (NULL == q->read_buf)
/* Point the buffer straight at userspace memory — no bounce copy. */
773 q->read_buf->memory = V4L2_MEMORY_USERPTR;
774 q->read_buf->baddr = (unsigned long)data;
775 q->read_buf->bsize = count;
777 field = videobuf_next_field(q);
778 retval = q->ops->buf_prepare(q, q->read_buf, field);
782 /* start capture & wait */
783 spin_lock_irqsave(q->irqlock, flags);
784 q->ops->buf_queue(q, q->read_buf);
785 spin_unlock_irqrestore(q->irqlock, flags);
/* Blocking, uninterruptible wait for frame completion. */
786 retval = videobuf_waiton(q->read_buf, 0, 0);
788 CALL(q, sync, q, q->read_buf);
789 if (VIDEOBUF_ERROR == q->read_buf->state)
/* On success the byte count captured is the return value. */
792 retval = q->read_buf->size;
797 q->ops->buf_release(q, q->read_buf);
/*
 * __videobuf_copy_to_user() - copy up to @count bytes from the
 * buffer's kernel mapping (starting at q->read_off) to userspace.
 * Return statements are not visible in this excerpt.
 */
803 static int __videobuf_copy_to_user(struct videobuf_queue *q,
804 struct videobuf_buffer *buf,
805 char __user *data, size_t count,
/* vaddr comes from the backend's vaddr op (0/NULL if absent). */
808 void *vaddr = CALL(q, vaddr, buf);
810 /* copy to userspace */
/* Clamp to the unread remainder of the buffer. */
811 if (count > buf->size - q->read_off)
812 count = buf->size - q->read_off;
814 if (copy_to_user(data, vaddr + q->read_off, count))
/*
 * __videobuf_copy_stream() - copy a buffer to userspace for the
 * streaming read() path, optionally patching the VBI frame counter
 * into the data first (vbihack).
 */
820 static int __videobuf_copy_stream(struct videobuf_queue *q,
821 struct videobuf_buffer *buf,
822 char __user *data, size_t count, size_t pos,
823 int vbihack, int nonblocking)
825 unsigned int *fc = CALL(q, vaddr, buf);
828 /* dirty, undocumented hack -- pass the frame counter
829 * within the last four bytes of each vbi data block.
830 * We need that one to maintain backward compatibility
831 * to all vbi decoding software out there ... */
/* Last 32-bit word of the block gets the frame number (fields/2). */
832 fc += (buf->size >> 2) - 1;
833 *fc = buf->field_count >> 1;
834 dprintk(1, "vbihack: %d\n", *fc);
837 /* copy stuff using the common method */
838 count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
/* -EFAULT at pos 0 means nothing was delivered at all. */
840 if ((count == -EFAULT) && (pos == 0))
/*
 * videobuf_read_one() - single-buffer read() implementation: try the
 * zerocopy path first, otherwise capture into a kernel-side read_buf
 * and copy out, releasing the buffer once fully consumed.  Several
 * error/goto lines are missing from this excerpt.
 */
846 ssize_t videobuf_read_one(struct videobuf_queue *q,
847 char __user *data, size_t count, loff_t *ppos,
850 enum v4l2_field field;
851 unsigned long flags = 0;
852 unsigned size = 0, nbufs = 1;
855 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
857 mutex_lock(&q->vb_lock);
/* Ask the driver for the natural frame size (nbufs forced to 1). */
859 q->ops->buf_setup(q, &nbufs, &size);
861 if (NULL == q->read_buf &&
/* Zerocopy succeeds or fails hard (-EIO); anything else falls through. */
864 retval = videobuf_read_zerocopy(q, data, count, ppos);
865 if (retval >= 0 || retval == -EIO)
868 /* fallback to kernel bounce buffer on failures */
871 if (NULL == q->read_buf) {
872 /* need to capture a new frame */
874 q->read_buf = videobuf_alloc(q);
876 dprintk(1, "video alloc=0x%p\n", q->read_buf);
877 if (NULL == q->read_buf)
879 q->read_buf->memory = V4L2_MEMORY_USERPTR;
880 q->read_buf->bsize = count; /* preferred size */
881 field = videobuf_next_field(q);
882 retval = q->ops->buf_prepare(q, q->read_buf, field);
/* Hand the capture buffer to the driver. */
890 spin_lock_irqsave(q->irqlock, flags);
891 q->ops->buf_queue(q, q->read_buf);
892 spin_unlock_irqrestore(q->irqlock, flags);
897 /* wait until capture is done */
898 retval = videobuf_waiton(q->read_buf, nonblocking, 1);
902 CALL(q, sync, q, q->read_buf);
904 if (VIDEOBUF_ERROR == q->read_buf->state) {
905 /* catch I/O errors */
906 q->ops->buf_release(q, q->read_buf);
913 /* Copy to userspace */
914 retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
/* Track partial reads; keep read_buf across calls until drained. */
918 q->read_off += retval;
919 if (q->read_off == q->read_buf->size) {
920 /* all data copied, cleanup */
921 q->ops->buf_release(q, q->read_buf);
927 mutex_unlock(&q->vb_lock);
930 EXPORT_SYMBOL_GPL(videobuf_read_one);
932 /* Locking: Caller holds q->vb_lock */
/*
 * __videobuf_read_start() - set up internal USERPTR buffers for the
 * streaming read() path, prepare them and queue them all to the
 * driver.  Error handling lines are not visible in this excerpt.
 */
933 static int __videobuf_read_start(struct videobuf_queue *q)
935 enum v4l2_field field;
936 unsigned long flags = 0;
937 unsigned int count = 0, size = 0;
/* Driver decides how many buffers of what size it wants. */
940 q->ops->buf_setup(q, &count, &size);
943 if (count > VIDEO_MAX_FRAME)
944 count = VIDEO_MAX_FRAME;
945 size = PAGE_ALIGN(size);
947 err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
953 for (i = 0; i < count; i++) {
954 field = videobuf_next_field(q);
955 err = q->ops->buf_prepare(q, q->bufs[i], field);
958 list_add_tail(&q->bufs[i]->stream, &q->stream);
/* Queue everything to the driver in one locked pass. */
960 spin_lock_irqsave(q->irqlock, flags);
961 for (i = 0; i < count; i++)
962 q->ops->buf_queue(q, q->bufs[i]);
963 spin_unlock_irqrestore(q->irqlock, flags);
/*
 * __videobuf_read_stop() - tear down the streaming read() path:
 * cancel I/O, free the internal buffers and reset the stream list.
 * The per-buffer cleanup inside the loop is not visible in this
 * excerpt.
 */
968 static void __videobuf_read_stop(struct videobuf_queue *q)
972 videobuf_queue_cancel(q);
973 __videobuf_mmap_free(q);
974 INIT_LIST_HEAD(&q->stream);
975 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
976 if (NULL == q->bufs[i])
/* Locked wrapper around __videobuf_read_start(). */
984 int videobuf_read_start(struct videobuf_queue *q)
988 mutex_lock(&q->vb_lock);
989 rc = __videobuf_read_start(q);
990 mutex_unlock(&q->vb_lock);
994 EXPORT_SYMBOL_GPL(videobuf_read_start);
/* Locked wrapper around __videobuf_read_stop(). */
996 void videobuf_read_stop(struct videobuf_queue *q)
998 mutex_lock(&q->vb_lock);
999 __videobuf_read_stop(q);
1000 mutex_unlock(&q->vb_lock);
1002 EXPORT_SYMBOL_GPL(videobuf_read_stop);
/* Stop whichever I/O mode is active: streaming and/or read() path. */
1004 void videobuf_stop(struct videobuf_queue *q)
1006 mutex_lock(&q->vb_lock);
1009 __videobuf_streamoff(q);
1012 __videobuf_read_stop(q);
1014 mutex_unlock(&q->vb_lock);
1016 EXPORT_SYMBOL_GPL(videobuf_stop);
/*
 * videobuf_read_stream() - multi-buffer read() implementation: pull
 * completed buffers off the stream list, copy them out (with optional
 * vbihack), and requeue each buffer once fully consumed.  The loop
 * structure and several branches are missing from this excerpt.
 */
1018 ssize_t videobuf_read_stream(struct videobuf_queue *q,
1019 char __user *data, size_t count, loff_t *ppos,
1020 int vbihack, int nonblocking)
1023 unsigned long flags = 0;
1025 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1027 dprintk(2, "%s\n", __func__);
1028 mutex_lock(&q->vb_lock);
/* Lazily start the internal read pipeline on first read(). */
1033 retval = __videobuf_read_start(q);
1040 /* get / wait for data */
1041 if (NULL == q->read_buf) {
1042 q->read_buf = list_entry(q->stream.next,
1043 struct videobuf_buffer,
1045 list_del(&q->read_buf->stream);
1048 rc = videobuf_waiton(q->read_buf, nonblocking, 1);
1055 if (q->read_buf->state == VIDEOBUF_DONE) {
1056 rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
1057 retval, vbihack, nonblocking);
/* Error path: mark the whole buffer consumed so it gets requeued. */
1067 q->read_off = q->read_buf->size;
1072 /* requeue buffer when done with copying */
1073 if (q->read_off == q->read_buf->size) {
1074 list_add_tail(&q->read_buf->stream,
1076 spin_lock_irqsave(q->irqlock, flags);
1077 q->ops->buf_queue(q, q->read_buf);
1078 spin_unlock_irqrestore(q->irqlock, flags);
1086 mutex_unlock(&q->vb_lock);
1089 EXPORT_SYMBOL_GPL(videobuf_read_stream);
/*
 * videobuf_poll_stream() - poll() support: find the buffer to wait on
 * (stream head when streaming, else the read() pipeline's read_buf),
 * register on its done waitqueue and report readiness.  Some branch
 * lines are missing from this excerpt.
 */
1091 unsigned int videobuf_poll_stream(struct file *file,
1092 struct videobuf_queue *q,
1095 struct videobuf_buffer *buf = NULL;
1096 unsigned int rc = 0;
1098 mutex_lock(&q->vb_lock);
/* Streaming mode: poll on the oldest queued buffer. */
1100 if (!list_empty(&q->stream))
1101 buf = list_entry(q->stream.next,
1102 struct videobuf_buffer, stream);
/* read() mode: lazily start the pipeline, then claim the head buffer. */
1105 __videobuf_read_start(q);
1108 } else if (NULL == q->read_buf) {
1109 q->read_buf = list_entry(q->stream.next,
1110 struct videobuf_buffer,
1112 list_del(&q->read_buf->stream);
1121 poll_wait(file, &buf->done, wait);
/* Both DONE and ERROR count as readable — dqbuf will report which. */
1122 if (buf->state == VIDEOBUF_DONE ||
1123 buf->state == VIDEOBUF_ERROR)
1124 rc = POLLIN|POLLRDNORM;
1126 mutex_unlock(&q->vb_lock);
1129 EXPORT_SYMBOL_GPL(videobuf_poll_stream);
/*
 * videobuf_mmap_mapper() - mmap() support: locate the buffer whose
 * boff cookie matches vma->vm_pgoff and delegate the actual mapping
 * to the backend's mmap_mapper op.
 */
1131 int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
1136 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
/* The API requires writable shared mappings. */
1138 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
1139 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1143 mutex_lock(&q->vb_lock);
1144 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1145 struct videobuf_buffer *buf = q->bufs[i];
/* Match the synthetic offset assigned in __videobuf_mmap_setup(). */
1147 if (buf && buf->memory == V4L2_MEMORY_MMAP &&
1148 buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
1149 rc = CALL(q, mmap_mapper, q, buf, vma);
1153 mutex_unlock(&q->vb_lock);
1157 EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
1159 #ifdef CONFIG_VIDEO_V4L1_COMPAT
/*
 * videobuf_cgmbuf() - V4L1 VIDIOCGMBUF compatibility: allocate MMAP
 * buffers via videobuf_reqbufs() and report per-buffer offsets plus
 * the total page-aligned size in the legacy struct video_mbuf.
 */
1160 int videobuf_cgmbuf(struct videobuf_queue *q,
1161 struct video_mbuf *mbuf, int count)
1163 struct v4l2_requestbuffers req;
1166 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1168 memset(&req, 0, sizeof(req));
1171 req.memory = V4L2_MEMORY_MMAP;
1172 rc = videobuf_reqbufs(q, &req);
/* reqbufs may have adjusted the count; report what was granted. */
1176 mbuf->frames = req.count;
1178 for (i = 0; i < mbuf->frames; i++) {
1179 mbuf->offsets[i] = q->bufs[i]->boff;
1180 mbuf->size += PAGE_ALIGN(q->bufs[i]->bsize);
1185 EXPORT_SYMBOL_GPL(videobuf_cgmbuf);