/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf-dma-contig.h>
26 struct videobuf_dma_contig_memory {
29 dma_addr_t dma_handle;
34 #define MAGIC_DC_MEM 0x0733ac61
35 #define MAGIC_CHECK(is, should) \
36 if (unlikely((is) != (should))) { \
37 pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
41 static int __videobuf_dc_alloc(struct device *dev,
42 struct videobuf_dma_contig_memory *mem,
43 unsigned long size, gfp_t flags)
47 mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
51 mem->dma_handle = dma_map_single(dev, mem->vaddr,
54 err = dma_mapping_error(dev, mem->dma_handle);
56 dev_err(dev, "dma_map_single failed\n");
58 free_pages_exact(mem->vaddr, mem->size);
64 mem->vaddr = dma_alloc_coherent(dev, mem->size,
65 &mem->dma_handle, flags);
68 dev_err(dev, "memory alloc size %ld failed\n", mem->size);
72 dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
77 static void __videobuf_dc_free(struct device *dev,
78 struct videobuf_dma_contig_memory *mem)
83 dma_unmap_single(dev, mem->dma_handle, mem->size,
85 free_pages_exact(mem->vaddr, mem->size);
87 dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
92 static void videobuf_vm_open(struct vm_area_struct *vma)
94 struct videobuf_mapping *map = vma->vm_private_data;
96 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
97 map, map->count, vma->vm_start, vma->vm_end);
102 static void videobuf_vm_close(struct vm_area_struct *vma)
104 struct videobuf_mapping *map = vma->vm_private_data;
105 struct videobuf_queue *q = map->q;
108 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
109 map, map->count, vma->vm_start, vma->vm_end);
112 if (0 == map->count) {
113 struct videobuf_dma_contig_memory *mem;
115 dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
116 videobuf_queue_lock(q);
118 /* We need first to cancel streams, before unmapping */
120 videobuf_queue_cancel(q);
122 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
123 if (NULL == q->bufs[i])
126 if (q->bufs[i]->map != map)
129 mem = q->bufs[i]->priv;
131 /* This callback is called only if kernel has
132 allocated memory and this memory is mmapped.
133 In this case, memory should be freed,
134 in order to do memory unmap.
137 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
139 /* vfree is not atomic - can't be
140 called with IRQ's disabled
142 dev_dbg(q->dev, "buf[%d] freeing %p\n",
145 __videobuf_dc_free(q->dev, mem);
149 q->bufs[i]->map = NULL;
150 q->bufs[i]->baddr = 0;
155 videobuf_queue_unlock(q);
159 static const struct vm_operations_struct videobuf_vm_ops = {
160 .open = videobuf_vm_open,
161 .close = videobuf_vm_close,
165 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
166 * @mem: per-buffer private videobuf-dma-contig data
168 * This function resets the user space pointer
170 static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
177 * videobuf_dma_contig_user_get() - setup user space memory pointer
178 * @mem: per-buffer private videobuf-dma-contig data
179 * @vb: video buffer to map
181 * This function validates and sets up a pointer to user space memory.
182 * Only physically contiguous pfn-mapped memory is accepted.
184 * Returns 0 if successful.
186 static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
187 struct videobuf_buffer *vb)
189 struct mm_struct *mm = current->mm;
190 struct vm_area_struct *vma;
191 unsigned long prev_pfn, this_pfn;
192 unsigned long pages_done, user_address;
196 offset = vb->baddr & ~PAGE_MASK;
197 mem->size = PAGE_ALIGN(vb->size + offset);
200 down_read(&mm->mmap_sem);
202 vma = find_vma(mm, vb->baddr);
206 if ((vb->baddr + mem->size) > vma->vm_end)
210 prev_pfn = 0; /* kill warning */
211 user_address = vb->baddr;
213 while (pages_done < (mem->size >> PAGE_SHIFT)) {
214 ret = follow_pfn(vma, user_address, &this_pfn);
219 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
220 else if (this_pfn != (prev_pfn + 1))
227 user_address += PAGE_SIZE;
232 up_read(¤t->mm->mmap_sem);
237 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
239 struct videobuf_dma_contig_memory *mem;
240 struct videobuf_buffer *vb;
242 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
244 vb->priv = ((char *)vb) + size;
246 mem->magic = MAGIC_DC_MEM;
247 mem->cached = cached;
253 static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
255 return __videobuf_alloc_vb(size, false);
258 static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
260 return __videobuf_alloc_vb(size, true);
263 static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
265 struct videobuf_dma_contig_memory *mem = buf->priv;
268 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
273 static int __videobuf_iolock(struct videobuf_queue *q,
274 struct videobuf_buffer *vb,
275 struct v4l2_framebuffer *fbuf)
277 struct videobuf_dma_contig_memory *mem = vb->priv;
280 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
282 switch (vb->memory) {
283 case V4L2_MEMORY_MMAP:
284 dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
286 /* All handling should be done by __videobuf_mmap_mapper() */
288 dev_err(q->dev, "memory is not alloced/mmapped.\n");
292 case V4L2_MEMORY_USERPTR:
293 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
295 /* handle pointer from user space */
297 return videobuf_dma_contig_user_get(mem, vb);
299 /* allocate memory for the read() method */
300 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
304 case V4L2_MEMORY_OVERLAY:
306 dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
313 static int __videobuf_sync(struct videobuf_queue *q,
314 struct videobuf_buffer *buf)
316 struct videobuf_dma_contig_memory *mem = buf->priv;
318 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
320 dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
326 static int __videobuf_mmap_mapper(struct videobuf_queue *q,
327 struct videobuf_buffer *buf,
328 struct vm_area_struct *vma)
330 struct videobuf_dma_contig_memory *mem;
331 struct videobuf_mapping *map;
334 unsigned long pos, start = vma->vm_start;
337 dev_dbg(q->dev, "%s\n", __func__);
339 /* create mapping + update buffer list */
340 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
347 buf->baddr = vma->vm_start;
351 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
353 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
354 GFP_KERNEL | __GFP_COMP))
357 /* Try to remap memory */
359 size = vma->vm_end - vma->vm_start;
360 size = (size < mem->size) ? size : mem->size;
363 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
365 pos = (unsigned long)mem->vaddr;
368 page = virt_to_page((void *)pos);
370 dev_err(q->dev, "mmap: virt_to_page failed\n");
371 __videobuf_dc_free(q->dev, mem);
374 retval = vm_insert_page(vma, start, page);
376 dev_err(q->dev, "mmap: insert failed with error %d\n",
378 __videobuf_dc_free(q->dev, mem);
384 if (size > PAGE_SIZE)
390 vma->vm_ops = &videobuf_vm_ops;
391 vma->vm_flags |= VM_DONTEXPAND;
392 vma->vm_private_data = map;
394 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
395 map, q, vma->vm_start, vma->vm_end,
396 (long int)buf->bsize, vma->vm_pgoff, buf->i);
398 videobuf_vm_open(vma);
407 static struct videobuf_qtype_ops qops = {
408 .magic = MAGIC_QTYPE_OPS,
409 .alloc_vb = __videobuf_alloc_uncached,
410 .iolock = __videobuf_iolock,
411 .mmap_mapper = __videobuf_mmap_mapper,
412 .vaddr = __videobuf_to_vaddr,
415 static struct videobuf_qtype_ops qops_cached = {
416 .magic = MAGIC_QTYPE_OPS,
417 .alloc_vb = __videobuf_alloc_cached,
418 .iolock = __videobuf_iolock,
419 .sync = __videobuf_sync,
420 .mmap_mapper = __videobuf_mmap_mapper,
421 .vaddr = __videobuf_to_vaddr,
424 void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
425 const struct videobuf_queue_ops *ops,
428 enum v4l2_buf_type type,
429 enum v4l2_field field,
432 struct mutex *ext_lock)
434 videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
435 priv, &qops, ext_lock);
437 EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
439 void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
440 const struct videobuf_queue_ops *ops,
443 enum v4l2_buf_type type,
444 enum v4l2_field field,
446 void *priv, struct mutex *ext_lock)
448 videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
449 priv, &qops_cached, ext_lock);
451 EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
453 dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
455 struct videobuf_dma_contig_memory *mem = buf->priv;
458 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
460 return mem->dma_handle;
462 EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
464 void videobuf_dma_contig_free(struct videobuf_queue *q,
465 struct videobuf_buffer *buf)
467 struct videobuf_dma_contig_memory *mem = buf->priv;
469 /* mmapped memory can't be freed here, otherwise mmapped region
470 would be released, while still needed. In this case, the memory
471 release should happen inside videobuf_vm_close().
472 So, it should free memory only if the memory were allocated for
475 if (buf->memory != V4L2_MEMORY_USERPTR)
481 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
483 /* handle user space pointer case */
485 videobuf_dma_contig_user_put(mem);
491 __videobuf_dc_free(q->dev, mem);
495 EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
497 MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
498 MODULE_AUTHOR("Magnus Damm");
499 MODULE_LICENSE("GPL");