Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[pandora-kernel.git] / drivers / gpu / drm / exynos / exynos_drm_dmabuf.c
1 /* exynos_drm_dmabuf.c
2  *
3  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4  * Author: Inki Dae <inki.dae@samsung.com>
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  */
11
12 #include <drm/drmP.h>
13 #include <drm/exynos_drm.h>
14 #include "exynos_drm_drv.h"
15 #include "exynos_drm_gem.h"
16
17 #include <linux/dma-buf.h>
18
/*
 * Per-attachment state kept in dma_buf_attachment->priv.
 *
 * @sgt: scatter/gather table cloned from the exporting GEM buffer and
 *       DMA-mapped for the importing device (filled in by map_dma_buf).
 * @dir: direction @sgt is currently mapped in; DMA_NONE until the first
 *       successful map_dma_buf call, used as the "not mapped yet" sentinel.
 */
struct exynos_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};
23
24 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
25                                         struct device *dev,
26                                         struct dma_buf_attachment *attach)
27 {
28         struct exynos_drm_dmabuf_attachment *exynos_attach;
29
30         exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
31         if (!exynos_attach)
32                 return -ENOMEM;
33
34         exynos_attach->dir = DMA_NONE;
35         attach->priv = exynos_attach;
36
37         return 0;
38 }
39
40 static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
41                                         struct dma_buf_attachment *attach)
42 {
43         struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
44         struct sg_table *sgt;
45
46         if (!exynos_attach)
47                 return;
48
49         sgt = &exynos_attach->sgt;
50
51         if (exynos_attach->dir != DMA_NONE)
52                 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
53                                 exynos_attach->dir);
54
55         sg_free_table(sgt);
56         kfree(exynos_attach);
57         attach->priv = NULL;
58 }
59
60 static struct sg_table *
61                 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
62                                         enum dma_data_direction dir)
63 {
64         struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
65         struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
66         struct drm_device *dev = gem_obj->base.dev;
67         struct exynos_drm_gem_buf *buf;
68         struct scatterlist *rd, *wr;
69         struct sg_table *sgt = NULL;
70         unsigned int i;
71         int nents, ret;
72
73         DRM_DEBUG_PRIME("%s\n", __FILE__);
74
75         if (WARN_ON(dir == DMA_NONE))
76                 return ERR_PTR(-EINVAL);
77
78         /* just return current sgt if already requested. */
79         if (exynos_attach->dir == dir)
80                 return &exynos_attach->sgt;
81
82         /* reattaching is not allowed. */
83         if (WARN_ON(exynos_attach->dir != DMA_NONE))
84                 return ERR_PTR(-EBUSY);
85
86         buf = gem_obj->buffer;
87         if (!buf) {
88                 DRM_ERROR("buffer is null.\n");
89                 return ERR_PTR(-ENOMEM);
90         }
91
92         sgt = &exynos_attach->sgt;
93
94         ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
95         if (ret) {
96                 DRM_ERROR("failed to alloc sgt.\n");
97                 return ERR_PTR(-ENOMEM);
98         }
99
100         mutex_lock(&dev->struct_mutex);
101
102         rd = buf->sgt->sgl;
103         wr = sgt->sgl;
104         for (i = 0; i < sgt->orig_nents; ++i) {
105                 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
106                 rd = sg_next(rd);
107                 wr = sg_next(wr);
108         }
109
110         nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
111         if (!nents) {
112                 DRM_ERROR("failed to map sgl with iommu.\n");
113                 sgt = ERR_PTR(-EIO);
114                 goto err_unlock;
115         }
116
117         exynos_attach->dir = dir;
118         attach->priv = exynos_attach;
119
120         DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
121
122 err_unlock:
123         mutex_unlock(&dev->struct_mutex);
124         return sgt;
125 }
126
/*
 * Intentionally empty: the mapping created by exynos_gem_map_dma_buf()
 * is cached per-attachment and torn down in exynos_gem_detach_dma_buf(),
 * so there is nothing to undo on a per-unmap basis.
 */
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
        /* Nothing to do. */
}
133
134 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
135 {
136         struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
137
138         DRM_DEBUG_PRIME("%s\n", __FILE__);
139
140         /*
141          * exynos_dmabuf_release() call means that file object's
142          * f_count is 0 and it calls drm_gem_object_handle_unreference()
143          * to drop the references that these values had been increased
144          * at drm_prime_handle_to_fd()
145          */
146         if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
147                 exynos_gem_obj->base.export_dma_buf = NULL;
148
149                 /*
150                  * drop this gem object refcount to release allocated buffer
151                  * and resources.
152                  */
153                 drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
154         }
155 }
156
/*
 * Atomic kernel mapping of a single page — not implemented yet.
 * Returning NULL tells importers no CPU mapping is available.
 */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
        /* TODO */

        return NULL;
}
164
/* Counterpart of kmap_atomic above — nothing to undo while it is a stub. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
        /* TODO */
}
171
/*
 * Non-atomic kernel mapping of a single page — not implemented yet.
 * Returning NULL tells importers no CPU mapping is available.
 */
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
        /* TODO */

        return NULL;
}
179
/* Counterpart of kmap above — nothing to undo while it is a stub. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
        /* TODO */
}
185
/*
 * Userspace mmap of the dma-buf is not supported by this driver;
 * -ENOTTY signals the operation is unavailable.
 */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
        struct vm_area_struct *vma)
{
        return -ENOTTY;
}
191
/*
 * dma-buf exporter callbacks for Exynos GEM objects.  The kmap/mmap
 * entries are stubs (see above); attach/map carry the real logic.
 */
static struct dma_buf_ops exynos_dmabuf_ops = {
        .attach                 = exynos_gem_attach_dma_buf,
        .detach                 = exynos_gem_detach_dma_buf,
        .map_dma_buf            = exynos_gem_map_dma_buf,
        .unmap_dma_buf          = exynos_gem_unmap_dma_buf,
        .kmap                   = exynos_gem_dmabuf_kmap,
        .kmap_atomic            = exynos_gem_dmabuf_kmap_atomic,
        .kunmap                 = exynos_gem_dmabuf_kunmap,
        .kunmap_atomic          = exynos_gem_dmabuf_kunmap_atomic,
        .mmap                   = exynos_gem_dmabuf_mmap,
        .release                = exynos_dmabuf_release,
};
204
205 struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
206                                 struct drm_gem_object *obj, int flags)
207 {
208         struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
209
210         return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
211                                 exynos_gem_obj->base.size, flags);
212 }
213
214 struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
215                                 struct dma_buf *dma_buf)
216 {
217         struct dma_buf_attachment *attach;
218         struct sg_table *sgt;
219         struct scatterlist *sgl;
220         struct exynos_drm_gem_obj *exynos_gem_obj;
221         struct exynos_drm_gem_buf *buffer;
222         int ret;
223
224         DRM_DEBUG_PRIME("%s\n", __FILE__);
225
226         /* is this one of own objects? */
227         if (dma_buf->ops == &exynos_dmabuf_ops) {
228                 struct drm_gem_object *obj;
229
230                 exynos_gem_obj = dma_buf->priv;
231                 obj = &exynos_gem_obj->base;
232
233                 /* is it from our device? */
234                 if (obj->dev == drm_dev) {
235                         /*
236                          * Importing dmabuf exported from out own gem increases
237                          * refcount on gem itself instead of f_count of dmabuf.
238                          */
239                         drm_gem_object_reference(obj);
240                         dma_buf_put(dma_buf);
241                         return obj;
242                 }
243         }
244
245         attach = dma_buf_attach(dma_buf, drm_dev->dev);
246         if (IS_ERR(attach))
247                 return ERR_PTR(-EINVAL);
248
249
250         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
251         if (IS_ERR_OR_NULL(sgt)) {
252                 ret = PTR_ERR(sgt);
253                 goto err_buf_detach;
254         }
255
256         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
257         if (!buffer) {
258                 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
259                 ret = -ENOMEM;
260                 goto err_unmap_attach;
261         }
262
263         exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
264         if (!exynos_gem_obj) {
265                 ret = -ENOMEM;
266                 goto err_free_buffer;
267         }
268
269         sgl = sgt->sgl;
270
271         buffer->size = dma_buf->size;
272         buffer->dma_addr = sg_dma_address(sgl);
273
274         if (sgt->nents == 1) {
275                 /* always physically continuous memory if sgt->nents is 1. */
276                 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
277         } else {
278                 /*
279                  * this case could be CONTIG or NONCONTIG type but for now
280                  * sets NONCONTIG.
281                  * TODO. we have to find a way that exporter can notify
282                  * the type of its own buffer to importer.
283                  */
284                 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
285         }
286
287         exynos_gem_obj->buffer = buffer;
288         buffer->sgt = sgt;
289         exynos_gem_obj->base.import_attach = attach;
290
291         DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
292                                                                 buffer->size);
293
294         return &exynos_gem_obj->base;
295
296 err_free_buffer:
297         kfree(buffer);
298         buffer = NULL;
299 err_unmap_attach:
300         dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
301 err_buf_detach:
302         dma_buf_detach(dma_buf, attach);
303         return ERR_PTR(ret);
304 }
305
/* Module metadata exposed via modinfo. */
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");