/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

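/* Worked example (editorial note, assuming 4 KiB pages, PAGE_SHIFT == 12):
 * on 64-bit, START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages, i.e. fake
 * offsets begin 4 GiB into the file -- past any pgoff a 32-bit file offset
 * could produce -- and SIZE = 0xFFFFF * 16 pages gives roughly 64 GiB of
 * offset space for the manager to hand out.
 */
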
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
        struct drm_gem_mm *mm;

        spin_lock_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
        if (!mm) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->mm_private = mm;

        if (drm_ht_create(&mm->offset_hash, 12)) {
                kfree(mm);
                return -ENOMEM;
        }

        if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
                        DRM_FILE_PAGE_OFFSET_SIZE)) {
                drm_ht_remove(&mm->offset_hash);
                kfree(mm);
                return -ENOMEM;
        }

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        struct drm_gem_mm *mm = dev->mm_private;

        drm_mm_takedown(&mm->offset_manager);
        drm_ht_remove(&mm->offset_hash);
        kfree(mm);
        dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(obj->filp))
                return PTR_ERR(obj->filp);

        kref_init(&obj->refcount);
        atomic_set(&obj->handle_count, 0);
        obj->size = size;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

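/* A minimal usage sketch (editorial addition, not part of the original file):
 * how a driver might embed a GEM object in its own buffer structure and give
 * it shmfs backing. "struct foo_bo" and foo_bo_create() are hypothetical
 * names used only for illustration.
 */
struct foo_bo {
        struct drm_gem_object base;
        /* driver-private state would follow */
};

static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
{
        struct foo_bo *bo;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        /* The size must be page-aligned; drm_gem_object_init() BUG()s on
         * unaligned sizes, so round up first. */
        ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
        if (ret) {
                kfree(bo);
                return ERR_PTR(ret);
        }

        return bo;
}
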
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        atomic_set(&obj->handle_count, 0);
        obj->size = size;

        return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
        struct drm_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
                goto free;

        if (drm_gem_object_init(dev, obj, size) != 0)
                goto free;

        if (dev->driver->gem_init_object != NULL &&
            dev->driver->gem_init_object(obj) != 0) {
                goto fput;
        }
        return obj;

fput:
        /* Object_init mangles the global counters - readjust them. */
        fput(obj->filp);
free:
        kfree(obj);
        return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code. It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later. Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        if (obj->import_attach)
                drm_prime_remove_imported_buf_handle(&filp->prime,
                                obj->import_attach->dmabuf);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        /*
         * Get the user-visible handle using idr.
         */
again:
        /* ensure there is space available to allocate a handle */
        if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
                return -ENOMEM;

        /* do the allocation under our spinlock */
        spin_lock(&file_priv->table_lock);
        ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
        spin_unlock(&file_priv->table_lock);
        if (ret == -EAGAIN)
                goto again;

        if (ret != 0)
                return ret;

        drm_gem_object_handle_reference(obj);

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

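/* Sketch (editorial addition, hypothetical): a driver "create" ioctl built on
 * the foo_bo_create() sketch above, showing the usual reference discipline.
 * The handle takes its own reference, so the allocation reference is dropped
 * before returning; "struct foo_create_args" is an invented ioctl payload.
 */
struct foo_create_args {
        u64 size;
        u32 handle;
};

static int foo_bo_create_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct foo_create_args *args = data;
        struct foo_bo *bo;
        u32 handle;
        int ret;

        bo = foo_bo_create(dev, args->size);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
        /* Drop the allocation reference; the handle now owns one. */
        drm_gem_object_unreference_unlocked(&bo->base);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
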
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list = &obj->map_list;

        drm_ht_remove_item(&mm->offset_hash, &list->hash);
        drm_mm_put_block(list->file_offset_node);
        kfree(list->map);
        list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                        obj->size / PAGE_SIZE, 0, 0);

        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOSPC;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                        obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->hash.key = list->file_offset_node->start;
        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
        if (ret) {
                DRM_ERROR("failed to add to map hash\n");
                goto out_free_mm;
        }

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        kfree(list->map);
        list->map = NULL;

        return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

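/* Sketch (editorial addition, hypothetical): how a driver's "map" ioctl might
 * lazily create the fake offset and report it to userspace. The helper name
 * is invented; the offset reported is the hash key (a page index) scaled to
 * bytes, which is what userspace must pass to mmap(2) on the DRM fd.
 */
static int foo_bo_mmap_offset(struct drm_gem_object *obj, u64 *offset_out)
{
        int ret = 0;

        if (!obj->map_list.map)
                ret = drm_gem_create_mmap_offset(obj);
        if (ret == 0)
                *offset_out = (u64)obj->map_list.hash.key << PAGE_SHIFT;

        return ret;
}
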
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

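/* Sketch (editorial addition, hypothetical): the lookup pattern drivers use
 * in ioctl handlers. A successful lookup returns with a reference held, so
 * the caller must drop it once done with the object.
 */
static int foo_bo_query_size(struct drm_device *dev,
                             struct drm_file *file_priv,
                             u32 handle, u64 *size_out)
{
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (obj == NULL)
                return -ENOENT;

        *size_out = obj->size;
        drm_gem_object_unreference_unlocked(obj);
        return 0;
}
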
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

again:
        if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
                ret = -ENOMEM;
                goto err;
        }

        spin_lock(&dev->object_name_lock);
        if (!obj->name) {
                ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
                                        &obj->name);
                args->name = (uint64_t) obj->name;
                spin_unlock(&dev->object_name_lock);

                if (ret == -EAGAIN)
                        goto again;

                if (ret != 0)
                        goto err;

                /* Allocate a reference for the name table. */
                drm_gem_object_reference(obj);
        } else {
                args->name = (uint64_t) obj->name;
                spin_unlock(&dev->object_name_lock);
                ret = 0;
        }

err:
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        spin_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj)
                drm_gem_object_reference(obj);
        spin_unlock(&dev->object_name_lock);
        if (!obj)
                return -ENOENT;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

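/* Userspace view (editorial sketch): flink and open pair up to share a buffer
 * between two processes holding their own DRM fds. The variable names are
 * invented; the ioctls and structs are the real UAPI.
 *
 *     struct drm_gem_flink flink = { .handle = handle };
 *     ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *     // hand flink.name to process B out of band
 *
 *     struct drm_gem_open open_args = { .name = name_from_a };
 *     ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_args);
 *     // open_args.handle and open_args.size are now valid in process B
 */
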
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->import_attach)
                drm_prime_remove_imported_buf_handle(&file_priv->prime,
                                obj->import_attach->dmabuf);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);

        idr_remove_all(&file_private->object_idr);
        idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
                fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj = (struct drm_gem_object *) kref;
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
        BUG();
}

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        spin_lock(&dev->object_name_lock);
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
                spin_unlock(&dev->object_name_lock);
                /*
                 * The object name held a reference to this object, drop
                 * that now.
                 *
                 * This cannot be the last reference, since the handle holds one too.
                 */
                kref_put(&obj->refcount, drm_gem_object_ref_bug);
        } else
                spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_local_map *map = NULL;
        struct drm_gem_object *obj;
        struct drm_hash_item *hash;
        int ret = 0;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);

        if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map ||
            ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
                ret = -EPERM;
                goto out_unlock;
        }

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start) {
                ret = -EINVAL;
                goto out_unlock;
        }

        obj = map->handle;
        if (!obj->dev->driver->gem_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(vma);

out_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
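
/* Userspace view (editorial sketch): once a driver ioctl has reported the
 * fake offset (see drm_gem_create_mmap_offset() above), the client maps the
 * object through the DRM fd; the variable names here are invented.
 *
 *     void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      drm_fd, (off_t)fake_offset);
 *
 * drm_gem_mmap() matches the offset to the object and installs the driver's
 * fault handler, so pages are bound and populated on first access.
 */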