/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

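/*
 * Per-buffer bookkeeping: an array of individually allocated (or pinned
 * userspace) pages, the scatterlist descriptor handed out via the cookie
 * op, the offset of the data within the first page for non-page-aligned
 * USERPTR buffers, and refcounting state shared with the common mmap
 * handler.
 */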
struct vb2_dma_sg_buf {
        void                            *vaddr;
        struct page                     **pages;
        int                             write;
        int                             offset;
        struct vb2_dma_sg_desc          sg_desc;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
};

static void vb2_dma_sg_put(void *buf_priv);

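/*
 * MMAP allocation: the buffer is built from individually allocated pages,
 * so no physically contiguous region is needed. Each page is zeroed and
 * entered into the scatterlist; the initial reference taken here is
 * dropped through vb2_dma_sg_put().
 */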
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dma_sg_buf *buf;
        int i;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = 0;
        buf->offset = 0;
        buf->sg_desc.size = size;
        buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
                                      sizeof(*buf->sg_desc.sglist));
        if (!buf->sg_desc.sglist)
                goto fail_sglist_alloc;
        sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

        buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        for (i = 0; i < buf->sg_desc.num_pages; ++i) {
                buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
                if (NULL == buf->pages[i])
                        goto fail_pages_alloc;
                sg_set_page(&buf->sg_desc.sglist[i],
                            buf->pages[i], PAGE_SIZE, 0);
        }

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
                __func__, buf->sg_desc.num_pages);
        return buf;

fail_pages_alloc:
        while (--i >= 0)
                __free_page(buf->pages[i]);
        kfree(buf->pages);

fail_pages_array_alloc:
        vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
        kfree(buf);
        return NULL;
}

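/*
 * Drop one reference; when the last user (including any mmap()ed mapping
 * tracked by the common vm_area handler) goes away, tear down the kernel
 * mapping, the scatterlist and the pages themselves.
 */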
static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->sg_desc.num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
                        buf->sg_desc.num_pages);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
                vfree(buf->sg_desc.sglist);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                kfree(buf);
        }
}

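/*
 * USERPTR path: pin the pages backing the user buffer with
 * get_user_pages() and build a scatterlist over them. The buffer need
 * not start on a page boundary; the offset into the first page is
 * recorded and the first scatterlist entry is shortened accordingly.
 */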
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size, int write)
{
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user, i;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->sg_desc.size = size;

        first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->sg_desc.num_pages = last - first + 1;

        buf->sg_desc.sglist = vzalloc(
                buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
        if (!buf->sg_desc.sglist)
                goto userptr_fail_sglist_alloc;

        sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

        buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_pages_array_alloc;

        down_read(&current->mm->mmap_sem);
        num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
                                             buf->sg_desc.num_pages,
                                             write,
                                             1, /* force */
                                             buf->pages,
                                             NULL);
        up_read(&current->mm->mmap_sem);
        if (num_pages_from_user != buf->sg_desc.num_pages)
                goto userptr_fail_get_user_pages;

        sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
                    PAGE_SIZE - buf->offset, buf->offset);
        size -= PAGE_SIZE - buf->offset;
        for (i = 1; i < buf->sg_desc.num_pages; ++i) {
                sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
                            min_t(size_t, PAGE_SIZE, size), 0);
                size -= min_t(size_t, PAGE_SIZE, size);
        }
        return buf;

userptr_fail_get_user_pages:
        printk(KERN_DEBUG "get_user_pages requested/got: %d/%d\n",
               buf->sg_desc.num_pages, num_pages_from_user);
        while (--num_pages_from_user >= 0)
                put_page(buf->pages[num_pages_from_user]);
        kfree(buf->pages);

userptr_fail_pages_array_alloc:
        vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
        kfree(buf);
        return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->sg_desc.num_pages;

        printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->sg_desc.num_pages);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
        while (--i >= 0) {
                if (buf->write)
                        set_page_dirty_lock(buf->pages[i]);
                put_page(buf->pages[i]);
        }
        vfree(buf->sg_desc.sglist);
        kfree(buf->pages);
        kfree(buf);
}

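/*
 * Map the page array into a contiguous kernel virtual range on first use.
 * The mapping is created lazily here and torn down when the buffer is
 * released. For USERPTR buffers the recorded first-page offset is added
 * so the returned pointer refers to the start of the actual data.
 */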
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
                                        buf->sg_desc.num_pages,
                                        -1,
                                        PAGE_KERNEL);

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr + buf->offset;
}

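/* Report the current reference count so vb2 can tell if the buffer is busy. */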
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

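/*
 * Map the buffer into a userspace VMA page by page with vm_insert_page()
 * and hook up the common vm_area operations so that the mapping holds a
 * reference on the buffer for as long as it exists.
 */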
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory failed, error: %d\n",
                               ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

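/*
 * The cookie exported to drivers is the vb2_dma_sg_desc, i.e. the
 * scatterlist describing the buffer. A driver would typically DMA-map it
 * with something like:
 *
 *      dma_map_sg(dev, desc->sglist, desc->num_pages, DMA_FROM_DEVICE);
 */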
static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return &buf->sg_desc;
}

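/*
 * Memory-op vtable exported to drivers. A driver selects this allocator
 * when setting up its vb2_queue, typically with something like:
 *
 *      q->mem_ops = &vb2_dma_sg_memops;
 */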
const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");