/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter-gather capability
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
        u32 magic;
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)                                             \
        if (unlikely((is) != (should))) {                                   \
                pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
                BUG();                                                      \
        }

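/*
 * videobuf_vm_open() - VMA open handler
 *
 * Called whenever a mapping of the buffer is duplicated (mmap(), fork(),
 * etc.); it only bumps the reference count of the videobuf_mapping.
 */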
static void videobuf_vm_open(struct vm_area_struct *vma)
{
        struct videobuf_mapping *map = vma->vm_private_data;

        dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);

        map->count++;
}

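/*
 * videobuf_vm_close() - VMA close handler
 *
 * Drops the mapping reference count.  When the last mapping goes away,
 * streaming is cancelled and the coherent memory of every buffer that
 * belongs to this mapping is released.
 */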
static void videobuf_vm_close(struct vm_area_struct *vma)
{
        struct videobuf_mapping *map = vma->vm_private_data;
        struct videobuf_queue *q = map->q;
        int i;

        dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);

        map->count--;
        if (0 == map->count) {
                struct videobuf_dma_contig_memory *mem;

                dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
                videobuf_queue_lock(q);

                /* Streaming must be cancelled before unmapping */
                if (q->streaming)
                        videobuf_queue_cancel(q);

                for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                        if (NULL == q->bufs[i])
                                continue;

                        if (q->bufs[i]->map != map)
                                continue;

                        mem = q->bufs[i]->priv;
                        if (mem) {
                                /* This callback is called only if the kernel
                                   has allocated the memory and it has been
                                   mmapped.  In that case the memory must be
                                   freed here in order to undo the mapping.
                                 */

                                MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

                                /* dma_free_coherent() must not be called
                                   with IRQs disabled
                                 */
                                dev_dbg(q->dev, "buf[%d] freeing %p\n",
                                        i, mem->vaddr);

                                dma_free_coherent(q->dev, mem->size,
                                                  mem->vaddr, mem->dma_handle);
                                mem->vaddr = NULL;
                        }

                        q->bufs[i]->map   = NULL;
                        q->bufs[i]->baddr = 0;
                }

                kfree(map);

                videobuf_queue_unlock(q);
        }
}

static const struct vm_operations_struct videobuf_vm_ops = {
        .open     = videobuf_vm_open,
        .close    = videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
        mem->dma_handle = 0;
        mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
                                        struct videobuf_buffer *vb)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long prev_pfn, this_pfn;
        unsigned long pages_done, user_address;
        unsigned int offset;
        int ret;

        offset = vb->baddr & ~PAGE_MASK;
        mem->size = PAGE_ALIGN(vb->size + offset);
        ret = -EINVAL;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, vb->baddr);
        if (!vma)
                goto out_up;

        if ((vb->baddr + mem->size) > vma->vm_end)
                goto out_up;

        pages_done = 0;
        prev_pfn = 0; /* kill warning */
        user_address = vb->baddr;

        while (pages_done < (mem->size >> PAGE_SHIFT)) {
                ret = follow_pfn(vma, user_address, &this_pfn);
                if (ret)
                        break;

                if (pages_done == 0)
                        mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
                else if (this_pfn != (prev_pfn + 1))
                        ret = -EFAULT;

                if (ret)
                        break;

                prev_pfn = this_pfn;
                user_address += PAGE_SIZE;
                pages_done++;
        }

 out_up:
        up_read(&mm->mmap_sem);

        return ret;
}

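/*
 * __videobuf_alloc_vb() - allocate a videobuf_buffer together with its
 * videobuf_dma_contig_memory private data in a single allocation; the
 * private area lives right behind the buffer structure.
 */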
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
        struct videobuf_dma_contig_memory *mem;
        struct videobuf_buffer *vb;

        vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
        if (vb) {
                mem = vb->priv = ((char *)vb) + size;
                mem->magic = MAGIC_DC_MEM;
        }

        return vb;
}

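/*
 * __videobuf_to_vaddr() - return the kernel virtual address of the
 * buffer's coherent memory (NULL until it has been allocated).
 */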
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        return mem->vaddr;
}

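/*
 * __videobuf_iolock() - prepare a buffer for I/O
 *
 * MMAP buffers must already have been set up by __videobuf_mmap_mapper().
 * USERPTR buffers either take the user space pointer (when baddr is set)
 * or get a coherent allocation for the read() method.  Overlay and any
 * other memory types are rejected.
 */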
static int __videobuf_iolock(struct videobuf_queue *q,
                             struct videobuf_buffer *vb,
                             struct v4l2_framebuffer *fbuf)
{
        struct videobuf_dma_contig_memory *mem = vb->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        switch (vb->memory) {
        case V4L2_MEMORY_MMAP:
                dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

                /* All handling should be done by __videobuf_mmap_mapper() */
                if (!mem->vaddr) {
                        dev_err(q->dev, "memory is not allocated/mmapped.\n");
                        return -EINVAL;
                }
                break;
        case V4L2_MEMORY_USERPTR:
                dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

                /* handle pointer from user space */
                if (vb->baddr)
                        return videobuf_dma_contig_user_get(mem, vb);

                /* allocate memory for the read() method */
                mem->size = PAGE_ALIGN(vb->size);
                mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                                &mem->dma_handle, GFP_KERNEL);
                if (!mem->vaddr) {
                        dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
                                mem->size);
                        return -ENOMEM;
                }

                dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
                        mem->vaddr, mem->size);
                break;
        case V4L2_MEMORY_OVERLAY:
        default:
                dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
                        __func__);
                return -EINVAL;
        }

        return 0;
}

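/*
 * __videobuf_mmap_mapper() - back an MMAP buffer with coherent memory
 *
 * Allocates the coherent buffer, remaps it into the calling process'
 * address space and installs videobuf_vm_ops so the memory is released
 * when the last mapping is closed.
 */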
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
                                  struct videobuf_buffer *buf,
                                  struct vm_area_struct *vma)
{
        struct videobuf_dma_contig_memory *mem;
        struct videobuf_mapping *map;
        int retval;
        unsigned long size;

        dev_dbg(q->dev, "%s\n", __func__);

        /* create mapping + update buffer list */
        map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        buf->map = map;
        map->q = q;

        buf->baddr = vma->vm_start;

        mem = buf->priv;
        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        mem->size = PAGE_ALIGN(buf->bsize);
        mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                        &mem->dma_handle, GFP_KERNEL);
        if (!mem->vaddr) {
                dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
                        mem->size);
                goto error;
        }
        dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
                mem->vaddr, mem->size);

        /* Try to remap memory */

        size = vma->vm_end - vma->vm_start;
        size = (size < mem->size) ? size : mem->size;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        retval = remap_pfn_range(vma, vma->vm_start,
                                 mem->dma_handle >> PAGE_SHIFT,
                                 size, vma->vm_page_prot);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d\n", retval);
                dma_free_coherent(q->dev, mem->size,
                                  mem->vaddr, mem->dma_handle);
                goto error;
        }

        vma->vm_ops          = &videobuf_vm_ops;
        vma->vm_flags       |= VM_DONTEXPAND;
        vma->vm_private_data = map;

        dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
                map, q, vma->vm_start, vma->vm_end,
                (long int)buf->bsize,
                vma->vm_pgoff, buf->i);

        videobuf_vm_open(vma);

        return 0;

error:
        kfree(map);
        return -ENOMEM;
}

static struct videobuf_qtype_ops qops = {
        .magic        = MAGIC_QTYPE_OPS,

        .alloc_vb     = __videobuf_alloc_vb,
        .iolock       = __videobuf_iolock,
        .mmap_mapper  = __videobuf_mmap_mapper,
        .vaddr        = __videobuf_to_vaddr,
};

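/*
 * videobuf_queue_dma_contig_init() - set up a queue backed by this helper
 *
 * A rough usage sketch (driver names below such as my_dev, my_buf and
 * my_video_qops are purely illustrative, not part of this API):
 *
 *      videobuf_queue_dma_contig_init(&my_dev->vb_q, &my_video_qops,
 *                                     my_dev->dev, &my_dev->irqlock,
 *                                     V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *                                     V4L2_FIELD_NONE,
 *                                     sizeof(struct my_buf), my_dev, NULL);
 *
 * Later, typically in buf_queue(), the bus address of a buffer is obtained
 * with videobuf_to_dma_contig() and programmed into the capture hardware.
 */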
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
                                    const struct videobuf_queue_ops *ops,
                                    struct device *dev,
                                    spinlock_t *irqlock,
                                    enum v4l2_buf_type type,
                                    enum v4l2_field field,
                                    unsigned int msize,
                                    void *priv,
                                    struct mutex *ext_lock)
{
        videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
                                 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

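/*
 * videobuf_to_dma_contig() - return the DMA (bus) address of a buffer so
 * the driver can program it into its DMA engine.
 */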
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

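/*
 * videobuf_dma_contig_free() - release the memory of a USERPTR buffer,
 * either by dropping the user space pointer or by freeing the coherent
 * allocation made for the read() method.
 */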
void videobuf_dma_contig_free(struct videobuf_queue *q,
                              struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        /* Memory that was mmapped cannot be freed here: the mapping would
           still be in use and its release must happen in
           videobuf_vm_close() instead.  Only memory that was allocated
           for the read() operation may be freed here.
         */
        if (buf->memory != V4L2_MEMORY_USERPTR)
                return;

        if (!mem)
                return;

        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        /* handle user space pointer case */
        if (buf->baddr) {
                videobuf_dma_contig_user_put(mem);
                return;
        }

        /* read() method */
        if (mem->vaddr) {
                dma_free_coherent(q->dev, mem->size,
                                  mem->vaddr, mem->dma_handle);
                mem->vaddr = NULL;
        }
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");