/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

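/*
 * Translate an error code from the fault handling path into the
 * VM_FAULT_* value expected by the core mm.
 */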
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

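/*
 * Apply the buffer's EXYNOS_BO_* cache attribute to the page protection
 * bits of the vma that is about to map it.
 */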
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                        struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        /* TODO */

        return roundup(size, PAGE_SIZE);
}

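/*
 * Walk the buffer's scatter-gather table to find the page backing
 * page_offset and insert it into the user's vma at f_vaddr.
 */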
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        unsigned long pfn;
        int i;

        if (!buf->sgt)
                return -EINTR;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }

        sgl = buf->sgt->sgl;
        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
                if (page_offset < (sgl->length >> PAGE_SHIFT))
                        break;
                page_offset -= (sgl->length >> PAGE_SHIFT);
        }

        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table where the obj is registered;
         * the handle holds that id and is what user space sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * do not release the memory region here if it came from an exporter:
         * the exporter releases it once the dmabuf's refcount drops to 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

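/*
 * Look up a GEM handle and return the size of its backing buffer,
 * or 0 if the lookup fails.
 */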
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
                return NULL;

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0)
                goto err_gem_fini;

        return exynos_gem_obj;

err_gem_fini:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

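/*
 * Look up a GEM handle and return the DMA address of its buffer. The
 * reference taken by drm_gem_object_lookup() is intentionally kept here
 * and dropped later in exynos_drm_gem_put_dma_addr().
 */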
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

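/*
 * vm_mmap() only hands us the struct file, so while the mmap ioctl has
 * filp->private_data temporarily pointing at a GEM object we have to walk
 * the device's filelist to recover the drm_file that owns this filp.
 */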
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
                                                        struct file *filp)
{
        struct drm_file *file_priv;

        /* find current process's drm_file from filelist. */
        list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
                if (file_priv->filp == filp)
                        return file_priv;

        WARN_ON(1);

        return ERR_PTR(-EFAULT);
}

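/*
 * Temporary filp->f_op->mmap handler installed by exynos_drm_gem_mmap_ioctl().
 * It restores filp->f_op and filp->private_data to the DRM defaults, then
 * maps the buffer into the vma with dma_mmap_attrs().
 */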
static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
        struct drm_file *file_priv;
        unsigned long vm_size;
        int ret;

        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;

        /* restore it to driver's fops. */
        filp->f_op = fops_get(drm_dev->driver->fops);

        file_priv = exynos_drm_find_drm_file(drm_dev, filp);
        if (IS_ERR(file_priv))
                return PTR_ERR(file_priv);

        /* restore it to drm_file. */
        filp->private_data = file_priv;

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * the buffer describes physically contiguous memory allocated by
         * user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        /*
         * take a reference to this mapping of the object; it is dropped by
         * the corresponding vm_close call.
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(drm_dev, vma);

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        /*
         * We have to use the gem object and its own fops for this specific
         * mmaper, but vm_mmap() can deliver only filp. So we have to change
         * filp->f_op and filp->private_data temporarily, then restore them
         * again. It is therefore important to hold the lock until the
         * settings are restored, to prevent others from misusing
         * filp->f_op or filp->private_data.
         */
        mutex_lock(&dev->struct_mutex);

        /*
         * Set the specific mmaper's fops; it will be restored to
         * dev->driver->fops by exynos_drm_gem_mmap_buffer().
         * This is used to call the specific mapper temporarily.
         */
        file_priv->filp->f_op = &exynos_drm_gem_fops;

        /*
         * Set the gem object to private_data so that the specific mmaper
         * can get the gem object; it will be restored to the drm_file by
         * exynos_drm_gem_mmap_buffer().
         */
        file_priv->filp->private_data = obj;

        addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference(obj);

        if (IS_ERR_VALUE(addr)) {
                /* check that filp->f_op and filp->private_data are restored */
                if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
                        file_priv->filp->f_op = fops_get(dev->driver->fops);
                        file_priv->filp->private_data = file_priv;
                }
                mutex_unlock(&dev->struct_mutex);
                return (int)addr;
        }

        mutex_unlock(&dev->struct_mutex);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

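/*
 * Take a private copy of a vma for userptr handling: call vm_ops->open and
 * grab a reference on the backing file so the copy stays valid, then clear
 * the mm/list pointers. The copy is released by exynos_gem_put_vma().
 */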
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

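/*
 * Pin npages of user memory starting at 'start' into the pages array.
 * For I/O (VM_PFNMAP) mappings each pfn is resolved with follow_pfn();
 * otherwise the pages are pinned with get_user_pages().
 */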
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                                unsigned int npages,
                                                struct page **pages,
                                                struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region mmaped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
                                        unsigned int npages,
                                        struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

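/*
 * Map an imported sg_table for DMA under dev->struct_mutex. The matching
 * exynos_gem_unmap_sgt_from_dma() below undoes the mapping.
 */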
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                /* dma_map_sg() failed; returning 0 here would look like success. */
                return -EIO;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for the framebuffer.
         * - this callback is invoked by user space through the
         *      DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
                                                EXYNOS_BO_WC, args->size);
        /*
         * If physically contiguous memory allocation fails and if IOMMU is
         * supported then try to get buffer from non physically contiguous
         * memory area.
         */
        if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
                dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
                exynos_gem_obj = exynos_drm_gem_create(dev,
                                        EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
                                        args->size);
        }

        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get the fake mmap offset of the memory allocated for a drm
         * framebuffer.
         * - this callback is invoked by user space through the
         *      DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

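/*
 * Page fault handler for mappings set up by exynos_drm_gem_mmap(): map the
 * faulting page of the buffer into the vma and convert any error into the
 * VM_FAULT_* code expected by the core mm.
 */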
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map a buffer with user.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

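/*
 * .mmap entry in the driver's file_operations: let drm_gem_mmap() set up
 * the vma, then switch it from VM_PFNMAP to VM_MIXEDMAP and apply the
 * buffer's cache attribute so exynos_drm_gem_fault() can populate it.
 */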
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}