52d42cdeeb9b41cec6d28bcff4c852877f9d2b46
[pandora-kernel.git] / drivers / gpu / drm / exynos / exynos_drm_buf.c
1 /* exynos_drm_buf.c
2  *
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  * Author: Inki Dae <inki.dae@samsung.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23  * OTHER DEALINGS IN THE SOFTWARE.
24  */
25
26 #include "drmP.h"
27 #include "drm.h"
28 #include "exynos_drm.h"
29
30 #include "exynos_drm_drv.h"
31 #include "exynos_drm_gem.h"
32 #include "exynos_drm_buf.h"
33
34 static int lowlevel_buffer_allocate(struct drm_device *dev,
35                 unsigned int flags, struct exynos_drm_gem_buf *buf)
36 {
37         dma_addr_t start_addr, end_addr;
38         unsigned int npages, page_size, i = 0;
39         struct scatterlist *sgl;
40         int ret = 0;
41
42         DRM_DEBUG_KMS("%s\n", __FILE__);
43
44         if (IS_NONCONTIG_BUFFER(flags)) {
45                 DRM_DEBUG_KMS("not support allocation type.\n");
46                 return -EINVAL;
47         }
48
49         if (buf->dma_addr) {
50                 DRM_DEBUG_KMS("already allocated.\n");
51                 return 0;
52         }
53
54         if (buf->size >= SZ_1M) {
55                 npages = buf->size >> SECTION_SHIFT;
56                 page_size = SECTION_SIZE;
57         } else if (buf->size >= SZ_64K) {
58                 npages = buf->size >> 16;
59                 page_size = SZ_64K;
60         } else {
61                 npages = buf->size >> PAGE_SHIFT;
62                 page_size = PAGE_SIZE;
63         }
64
65         buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
66         if (!buf->sgt) {
67                 DRM_ERROR("failed to allocate sg table.\n");
68                 return -ENOMEM;
69         }
70
71         ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
72         if (ret < 0) {
73                 DRM_ERROR("failed to initialize sg table.\n");
74                 kfree(buf->sgt);
75                 buf->sgt = NULL;
76                 return -ENOMEM;
77         }
78
79                 buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
80                                 &buf->dma_addr, GFP_KERNEL);
81                 if (!buf->kvaddr) {
82                         DRM_ERROR("failed to allocate buffer.\n");
83                         ret = -ENOMEM;
84                         goto err1;
85                 }
86
87                 start_addr = buf->dma_addr;
88                 end_addr = buf->dma_addr + buf->size;
89
90                 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
91                 if (!buf->pages) {
92                         DRM_ERROR("failed to allocate pages.\n");
93                         ret = -ENOMEM;
94                         goto err2;
95                 }
96
97         start_addr = buf->dma_addr;
98         end_addr = buf->dma_addr + buf->size;
99
100         buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
101         if (!buf->pages) {
102                 DRM_ERROR("failed to allocate pages.\n");
103                 ret = -ENOMEM;
104                 goto err2;
105         }
106
107         sgl = buf->sgt->sgl;
108
109         while (i < npages) {
110                 buf->pages[i] = phys_to_page(start_addr);
111                 sg_set_page(sgl, buf->pages[i], page_size, 0);
112                 sg_dma_address(sgl) = start_addr;
113                 start_addr += page_size;
114                 if (end_addr - start_addr < page_size)
115                         break;
116                 sgl = sg_next(sgl);
117                 i++;
118         }
119
120         buf->pages[i] = phys_to_page(start_addr);
121
122         DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
123                         (unsigned long)buf->kvaddr,
124                         (unsigned long)buf->dma_addr,
125                         buf->size);
126
127         return ret;
128 err2:
129         dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
130                         (dma_addr_t)buf->dma_addr);
131         buf->dma_addr = (dma_addr_t)NULL;
132 err1:
133         sg_free_table(buf->sgt);
134         kfree(buf->sgt);
135         buf->sgt = NULL;
136
137         return ret;
138 }
139
140 static void lowlevel_buffer_deallocate(struct drm_device *dev,
141                 unsigned int flags, struct exynos_drm_gem_buf *buf)
142 {
143         DRM_DEBUG_KMS("%s.\n", __FILE__);
144
145         /*
146          * release only physically continuous memory and
147          * non-continuous memory would be released by exynos
148          * gem framework.
149          */
150         if (IS_NONCONTIG_BUFFER(flags)) {
151                 DRM_DEBUG_KMS("not support allocation type.\n");
152                 return;
153         }
154
155         if (!buf->dma_addr) {
156                 DRM_DEBUG_KMS("dma_addr is invalid.\n");
157                 return;
158         }
159
160         DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
161                         (unsigned long)buf->kvaddr,
162                         (unsigned long)buf->dma_addr,
163                         buf->size);
164
165         sg_free_table(buf->sgt);
166
167         kfree(buf->sgt);
168         buf->sgt = NULL;
169
170         kfree(buf->pages);
171         buf->pages = NULL;
172
173         dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
174                                 (dma_addr_t)buf->dma_addr);
175         buf->dma_addr = (dma_addr_t)NULL;
176 }
177
178 struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
179                                                 unsigned int size)
180 {
181         struct exynos_drm_gem_buf *buffer;
182
183         DRM_DEBUG_KMS("%s.\n", __FILE__);
184         DRM_DEBUG_KMS("desired size = 0x%x\n", size);
185
186         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
187         if (!buffer) {
188                 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
189                 return NULL;
190         }
191
192         buffer->size = size;
193         return buffer;
194 }
195
/*
 * Destroy a buffer object created by exynos_drm_init_buf().
 * Safe to call with a NULL buffer.
 *
 * @dev:    DRM device (unused here, kept for API symmetry).
 * @buffer: buffer object to free; any backing memory must already have
 *          been released via exynos_drm_free_buf().
 */
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	/*
	 * No "buffer = NULL" after kfree: buffer is a by-value parameter,
	 * so assigning to it had no effect on the caller.
	 */
	kfree(buffer);
}
209
210 int exynos_drm_alloc_buf(struct drm_device *dev,
211                 struct exynos_drm_gem_buf *buf, unsigned int flags)
212 {
213
214         /*
215          * allocate memory region and set the memory information
216          * to vaddr and dma_addr of a buffer object.
217          */
218         if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
219                 return -ENOMEM;
220
221         return 0;
222 }
223
/*
 * Release the backing memory of a buffer object (the object itself is
 * freed separately by exynos_drm_fini_buf()). Safe to call with NULL.
 *
 * @dev:    DRM device that owns the DMA allocation.
 * @flags:  allocation-type flags forwarded to the low-level code.
 * @buffer: buffer object whose memory is released.
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	/* Guard here: lowlevel_buffer_deallocate dereferences buffer. */
	if (!buffer)
		return;

	lowlevel_buffer_deallocate(dev, flags, buffer);
}