return ret;
}
+/*
+ * VMA operations.
+ *
+ * vma_use_count tracks the number of active userspace mappings of a buffer,
+ * so the driver can tell whether the buffer memory is still in use.
+ */
+static void uvc_vm_open(struct vm_area_struct *vma)
+{
+ struct uvc_buffer *buffer = vma->vm_private_data;
+ buffer->vma_use_count++;
+}
+
+static void uvc_vm_close(struct vm_area_struct *vma)
+{
+ struct uvc_buffer *buffer = vma->vm_private_data;
+ buffer->vma_use_count--;
+}
+
+static const struct vm_operations_struct uvc_vm_ops = {
+ .open = uvc_vm_open,
+ .close = uvc_vm_close,
+};
+
+/*
+ * Memory-map a video buffer.
+ *
+ * This function implements memory mapping of video buffers and is intended
+ * to be used by the device mmap handler.
+ */
+int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
+{
+ struct uvc_buffer *uninitialized_var(buffer);
+ struct page *page;
+ unsigned long addr, start, size;
+ unsigned int i;
+ int ret = 0;
+
+ start = vma->vm_start;
+ size = vma->vm_end - vma->vm_start;
+
+ mutex_lock(&queue->mutex);
+
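+ /*
+ * Find the buffer whose offset (as reported to applications by
+ * VIDIOC_QUERYBUF) matches the requested page offset.
+ */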
+ for (i = 0; i < queue->count; ++i) {
+ buffer = &queue->buffer[i];
+ if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
+ break;
+ }
+
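+ /*
+ * Reject the request if no buffer matches, or if the mapping does
+ * not cover the whole buffer.
+ */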
+ if (i == queue->count || size != queue->buf_size) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * VM_IO marks the area as being a memory-mapped region for I/O to a
+ * device. It also prevents the region from being core dumped.
+ */
+ vma->vm_flags |= VM_IO;
+
+ addr = (unsigned long)queue->mem + buffer->buf.m.offset;
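+ /*
+ * The buffer memory is vmalloc'ed and thus not necessarily physically
+ * contiguous, so insert it into the VMA one page at a time.
+ */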
+ while (size > 0) {
+ page = vmalloc_to_page((void *)addr);
+ ret = vm_insert_page(vma, start, page);
+ if (ret < 0)
+ goto done;
+
+ start += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ vma->vm_ops = &uvc_vm_ops;
+ vma->vm_private_data = buffer;
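+ /*
+ * The kernel doesn't call ->open() for the initial mapping, so take
+ * the first use count reference explicitly.
+ */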
+ uvc_vm_open(vma);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
/*
* Poll the video queue.
*
return -EINVAL;
}
-/*
- * VMA operations.
- */
-static void uvc_vm_open(struct vm_area_struct *vma)
-{
- struct uvc_buffer *buffer = vma->vm_private_data;
- buffer->vma_use_count++;
-}
-
-static void uvc_vm_close(struct vm_area_struct *vma)
-{
- struct uvc_buffer *buffer = vma->vm_private_data;
- buffer->vma_use_count--;
-}
-
-static const struct vm_operations_struct uvc_vm_ops = {
- .open = uvc_vm_open,
- .close = uvc_vm_close,
-};
-
static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
- struct uvc_video_queue *queue = &stream->queue;
- struct uvc_buffer *uninitialized_var(buffer);
- struct page *page;
- unsigned long addr, start, size;
- unsigned int i;
- int ret = 0;
uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n");
- start = vma->vm_start;
- size = vma->vm_end - vma->vm_start;
-
- mutex_lock(&queue->mutex);
-
- for (i = 0; i < queue->count; ++i) {
- buffer = &queue->buffer[i];
- if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
- break;
- }
-
- if (i == queue->count || size != queue->buf_size) {
- ret = -EINVAL;
- goto done;
- }
-
- /*
- * VM_IO marks the area as being an mmaped region for I/O to a
- * device. It also prevents the region from being core dumped.
- */
- vma->vm_flags |= VM_IO;
-
- addr = (unsigned long)queue->mem + buffer->buf.m.offset;
- while (size > 0) {
- page = vmalloc_to_page((void *)addr);
- if ((ret = vm_insert_page(vma, start, page)) < 0)
- goto done;
-
- start += PAGE_SIZE;
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- vma->vm_ops = &uvc_vm_ops;
- vma->vm_private_data = buffer;
- uvc_vm_open(vma);
-
-done:
- mutex_unlock(&queue->mutex);
- return ret;
+ return uvc_queue_mmap(&stream->queue, vma);
}
static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait)
extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
+extern int uvc_queue_mmap(struct uvc_video_queue *queue,
+ struct vm_area_struct *vma);
extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
extern int uvc_queue_allocated(struct uvc_video_queue *queue);