/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						     bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}
79 i915_gem_wait_for_error(struct drm_device *dev)
81 struct drm_i915_private *dev_priv = dev->dev_private;
82 struct completion *x = &dev_priv->error_completion;
86 if (!atomic_read(&dev_priv->mm.wedged))
89 ret = wait_for_completion_interruptible(x);
93 if (atomic_read(&dev_priv->mm.wedged)) {
94 /* GPU is hung, bump the completion count to account for
95 * the token we just consumed so that we never hit zero and
96 * end up waiting upon a subsequent completion event that
99 spin_lock_irqsave(&x->wait.lock, flags);
101 spin_unlock_irqrestore(&x->wait.lock, flags);
106 int i915_mutex_lock_interruptible(struct drm_device *dev)
110 ret = i915_gem_wait_for_error(dev);
114 ret = mutex_lock_interruptible(&dev->struct_mutex);
118 WARN_ON(i915_verify_lists(dev));
123 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
125 return obj->gtt_space && !obj->active && obj->pin_count == 0;
128 void i915_gem_do_init(struct drm_device *dev,
130 unsigned long mappable_end,
133 drm_i915_private_t *dev_priv = dev->dev_private;
135 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
137 dev_priv->mm.gtt_start = start;
138 dev_priv->mm.gtt_mappable_end = mappable_end;
139 dev_priv->mm.gtt_end = end;
140 dev_priv->mm.gtt_total = end - start;
141 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
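	/*
	 * gtt_total covers the whole range GEM may allocate from, while
	 * mappable_gtt_total is the subset that is also CPU-visible
	 * through the aperture.
	 */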
143 /* Take over this portion of the GTT */
144 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
148 i915_gem_init_ioctl(struct drm_device *dev, void *data,
149 struct drm_file *file)
151 struct drm_i915_gem_init *args = data;
153 if (args->gtt_start >= args->gtt_end ||
154 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
157 mutex_lock(&dev->struct_mutex);
158 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
159 mutex_unlock(&dev->struct_mutex);
165 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
166 struct drm_file *file)
168 struct drm_i915_private *dev_priv = dev->dev_private;
169 struct drm_i915_gem_get_aperture *args = data;
170 struct drm_i915_gem_object *obj;
173 if (!(dev->driver->driver_features & DRIVER_GEM))
177 mutex_lock(&dev->struct_mutex);
178 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
179 pinned += obj->gtt_space->size;
180 mutex_unlock(&dev->struct_mutex);
	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;
189 i915_gem_create(struct drm_file *file,
190 struct drm_device *dev,
194 struct drm_i915_gem_object *obj;
198 size = roundup(size, PAGE_SIZE);
200 /* Allocate the new object */
201 obj = i915_gem_alloc_object(dev, size);
205 ret = drm_gem_handle_create(file, &obj->base, &handle);
207 drm_gem_object_release(&obj->base);
208 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
213 /* drop reference from allocate - handle holds it now */
214 drm_gem_object_unreference(&obj->base);
215 trace_i915_gem_object_create(obj);
222 i915_gem_dumb_create(struct drm_file *file,
223 struct drm_device *dev,
224 struct drm_mode_create_dumb *args)
226 /* have to work out size/pitch and return them */
227 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
228 args->size = args->pitch * args->height;
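	/*
	 * Example: a 640x480 dumb buffer at 32 bpp gets a pitch of
	 * ALIGN(640 * 4, 64) = 2560 bytes and a size of 2560 * 480 =
	 * 1228800 bytes, i.e. 300 pages.
	 */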
229 return i915_gem_create(file, dev,
230 args->size, &args->handle);
233 int i915_gem_dumb_destroy(struct drm_file *file,
234 struct drm_device *dev,
237 return drm_gem_handle_delete(file, handle);
241 * Creates a new mm object and returns a handle to it.
244 i915_gem_create_ioctl(struct drm_device *dev, void *data,
245 struct drm_file *file)
247 struct drm_i915_gem_create *args = data;
248 return i915_gem_create(file, dev,
249 args->size, &args->handle);
252 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
254 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
256 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
257 obj->tiling_mode != I915_TILING_NONE;
261 slow_shmem_copy(struct page *dst_page,
263 struct page *src_page,
267 char *dst_vaddr, *src_vaddr;
269 dst_vaddr = kmap(dst_page);
270 src_vaddr = kmap(src_page);
272 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
279 slow_shmem_bit17_copy(struct page *gpu_page,
281 struct page *cpu_page,
286 char *gpu_vaddr, *cpu_vaddr;
288 /* Use the unswizzled path if this page isn't affected. */
289 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
291 return slow_shmem_copy(cpu_page, cpu_offset,
292 gpu_page, gpu_offset, length);
294 return slow_shmem_copy(gpu_page, gpu_offset,
295 cpu_page, cpu_offset, length);
298 gpu_vaddr = kmap(gpu_page);
299 cpu_vaddr = kmap(cpu_page);
301 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
302 * XORing with the other bits (A9 for Y, A9 and A10 for X)
305 int cacheline_end = ALIGN(gpu_offset + 1, 64);
306 int this_length = min(cacheline_end - gpu_offset, length);
307 int swizzled_gpu_offset = gpu_offset ^ 64;
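		/*
		 * On pages whose physical address has bit 17 set, the
		 * hardware stores each 64-byte cacheline at its offset
		 * XORed with 64, so copy via the swizzled offset one
		 * cacheline at a time.
		 */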
310 memcpy(cpu_vaddr + cpu_offset,
311 gpu_vaddr + swizzled_gpu_offset,
314 memcpy(gpu_vaddr + swizzled_gpu_offset,
315 cpu_vaddr + cpu_offset,
318 cpu_offset += this_length;
319 gpu_offset += this_length;
320 length -= this_length;
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
333 i915_gem_shmem_pread_fast(struct drm_device *dev,
334 struct drm_i915_gem_object *obj,
335 struct drm_i915_gem_pread *args,
336 struct drm_file *file)
338 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
341 char __user *user_data;
342 int page_offset, page_length;
344 user_data = (char __user *) (uintptr_t) args->data_ptr;
347 offset = args->offset;
354 /* Operation in this page
356 * page_offset = offset within page
357 * page_length = bytes to copy for this page
359 page_offset = offset & (PAGE_SIZE-1);
360 page_length = remain;
361 if ((page_offset + remain) > PAGE_SIZE)
362 page_length = PAGE_SIZE - page_offset;
364 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
365 GFP_HIGHUSER | __GFP_RECLAIMABLE);
367 return PTR_ERR(page);
369 vaddr = kmap_atomic(page);
370 ret = __copy_to_user_inatomic(user_data,
373 kunmap_atomic(vaddr);
375 mark_page_accessed(page);
376 page_cache_release(page);
380 remain -= page_length;
381 user_data += page_length;
382 offset += page_length;
/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages so we can copy out of the object's backing pages while
 * holding struct_mutex without taking page faults.
 */
395 i915_gem_shmem_pread_slow(struct drm_device *dev,
396 struct drm_i915_gem_object *obj,
397 struct drm_i915_gem_pread *args,
398 struct drm_file *file)
400 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
401 struct mm_struct *mm = current->mm;
402 struct page **user_pages;
404 loff_t offset, pinned_pages, i;
405 loff_t first_data_page, last_data_page, num_pages;
406 int shmem_page_offset;
407 int data_page_index, data_page_offset;
410 uint64_t data_ptr = args->data_ptr;
411 int do_bit17_swizzling;
415 /* Pin the user pages containing the data. We can't fault while
416 * holding the struct mutex, yet we want to hold it while
417 * dereferencing the user data.
419 first_data_page = data_ptr / PAGE_SIZE;
420 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
421 num_pages = last_data_page - first_data_page + 1;
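	/*
	 * Example: a 100-byte read whose user pointer sits 8 bytes before
	 * a page boundary touches two user pages, so num_pages = 2.
	 */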
423 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
424 if (user_pages == NULL)
427 mutex_unlock(&dev->struct_mutex);
428 down_read(&mm->mmap_sem);
429 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
430 num_pages, 1, 0, user_pages, NULL);
431 up_read(&mm->mmap_sem);
432 mutex_lock(&dev->struct_mutex);
433 if (pinned_pages < num_pages) {
438 ret = i915_gem_object_set_cpu_read_domain_range(obj,
444 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
446 offset = args->offset;
451 /* Operation in this page
453 * shmem_page_offset = offset within page in shmem file
454 * data_page_index = page number in get_user_pages return
455 * data_page_offset = offset with data_page_index page.
456 * page_length = bytes to copy for this page
458 shmem_page_offset = offset & ~PAGE_MASK;
459 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
460 data_page_offset = data_ptr & ~PAGE_MASK;
462 page_length = remain;
463 if ((shmem_page_offset + page_length) > PAGE_SIZE)
464 page_length = PAGE_SIZE - shmem_page_offset;
465 if ((data_page_offset + page_length) > PAGE_SIZE)
466 page_length = PAGE_SIZE - data_page_offset;
468 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
469 GFP_HIGHUSER | __GFP_RECLAIMABLE);
471 return PTR_ERR(page);
473 if (do_bit17_swizzling) {
474 slow_shmem_bit17_copy(page,
476 user_pages[data_page_index],
481 slow_shmem_copy(user_pages[data_page_index],
488 mark_page_accessed(page);
489 page_cache_release(page);
491 remain -= page_length;
492 data_ptr += page_length;
493 offset += page_length;
497 for (i = 0; i < pinned_pages; i++) {
498 SetPageDirty(user_pages[i]);
499 mark_page_accessed(user_pages[i]);
500 page_cache_release(user_pages[i]);
502 drm_free_large(user_pages);
508 * Reads data from the object referenced by handle.
510 * On error, the contents of *data are undefined.
513 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
514 struct drm_file *file)
516 struct drm_i915_gem_pread *args = data;
517 struct drm_i915_gem_object *obj;
523 if (!access_ok(VERIFY_WRITE,
524 (char __user *)(uintptr_t)args->data_ptr,
528 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
533 ret = i915_mutex_lock_interruptible(dev);
537 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
538 if (&obj->base == NULL) {
543 /* Bounds check source. */
544 if (args->offset > obj->base.size ||
545 args->size > obj->base.size - args->offset) {
550 trace_i915_gem_object_pread(obj, args->offset, args->size);
552 ret = i915_gem_object_set_cpu_read_domain_range(obj,
559 if (!i915_gem_object_needs_bit17_swizzle(obj))
560 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
562 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
565 drm_gem_object_unreference(&obj->base);
567 mutex_unlock(&dev->struct_mutex);
571 /* This is the fast write path which cannot handle
572 * page faults in the source data
576 fast_user_write(struct io_mapping *mapping,
577 loff_t page_base, int page_offset,
578 char __user *user_data,
582 unsigned long unwritten;
584 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
585 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587 io_mapping_unmap_atomic(vaddr_atomic);
591 /* Here's the write path which can sleep for
596 slow_kernel_write(struct io_mapping *mapping,
597 loff_t gtt_base, int gtt_offset,
598 struct page *user_page, int user_offset,
601 char __iomem *dst_vaddr;
604 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
605 src_vaddr = kmap(user_page);
607 memcpy_toio(dst_vaddr + gtt_offset,
608 src_vaddr + user_offset,
612 io_mapping_unmap(dst_vaddr);
616 * This is the fast pwrite path, where we copy the data directly from the
617 * user into the GTT, uncached.
620 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
621 struct drm_i915_gem_object *obj,
622 struct drm_i915_gem_pwrite *args,
623 struct drm_file *file)
625 drm_i915_private_t *dev_priv = dev->dev_private;
627 loff_t offset, page_base;
628 char __user *user_data;
629 int page_offset, page_length;
631 user_data = (char __user *) (uintptr_t) args->data_ptr;
634 offset = obj->gtt_offset + args->offset;
637 /* Operation in this page
639 * page_base = page offset within aperture
640 * page_offset = offset within page
641 * page_length = bytes to copy for this page
643 page_base = (offset & ~(PAGE_SIZE-1));
644 page_offset = offset & (PAGE_SIZE-1);
645 page_length = remain;
646 if ((page_offset + remain) > PAGE_SIZE)
647 page_length = PAGE_SIZE - page_offset;
649 /* If we get a fault while copying data, then (presumably) our
650 * source page isn't available. Return the error and we'll
651 * retry in the slow path.
653 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
654 page_offset, user_data, page_length))
658 remain -= page_length;
659 user_data += page_length;
660 offset += page_length;
667 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
668 * the memory and maps it using kmap_atomic for copying.
670 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
671 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
674 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
675 struct drm_i915_gem_object *obj,
676 struct drm_i915_gem_pwrite *args,
677 struct drm_file *file)
679 drm_i915_private_t *dev_priv = dev->dev_private;
681 loff_t gtt_page_base, offset;
682 loff_t first_data_page, last_data_page, num_pages;
683 loff_t pinned_pages, i;
684 struct page **user_pages;
685 struct mm_struct *mm = current->mm;
686 int gtt_page_offset, data_page_offset, data_page_index, page_length;
688 uint64_t data_ptr = args->data_ptr;
692 /* Pin the user pages containing the data. We can't fault while
693 * holding the struct mutex, and all of the pwrite implementations
694 * want to hold it while dereferencing the user data.
696 first_data_page = data_ptr / PAGE_SIZE;
697 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
698 num_pages = last_data_page - first_data_page + 1;
700 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
701 if (user_pages == NULL)
704 mutex_unlock(&dev->struct_mutex);
705 down_read(&mm->mmap_sem);
706 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
707 num_pages, 0, 0, user_pages, NULL);
708 up_read(&mm->mmap_sem);
709 mutex_lock(&dev->struct_mutex);
710 if (pinned_pages < num_pages) {
712 goto out_unpin_pages;
715 ret = i915_gem_object_set_to_gtt_domain(obj, true);
717 goto out_unpin_pages;
719 ret = i915_gem_object_put_fence(obj);
721 goto out_unpin_pages;
723 offset = obj->gtt_offset + args->offset;
726 /* Operation in this page
728 * gtt_page_base = page offset within aperture
729 * gtt_page_offset = offset within page in aperture
730 * data_page_index = page number in get_user_pages return
731 * data_page_offset = offset with data_page_index page.
732 * page_length = bytes to copy for this page
734 gtt_page_base = offset & PAGE_MASK;
735 gtt_page_offset = offset & ~PAGE_MASK;
736 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
737 data_page_offset = data_ptr & ~PAGE_MASK;
739 page_length = remain;
740 if ((gtt_page_offset + page_length) > PAGE_SIZE)
741 page_length = PAGE_SIZE - gtt_page_offset;
742 if ((data_page_offset + page_length) > PAGE_SIZE)
743 page_length = PAGE_SIZE - data_page_offset;
745 slow_kernel_write(dev_priv->mm.gtt_mapping,
746 gtt_page_base, gtt_page_offset,
747 user_pages[data_page_index],
751 remain -= page_length;
752 offset += page_length;
753 data_ptr += page_length;
757 for (i = 0; i < pinned_pages; i++)
758 page_cache_release(user_pages[i]);
759 drm_free_large(user_pages);
765 * This is the fast shmem pwrite path, which attempts to directly
766 * copy_from_user into the kmapped pages backing the object.
769 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
770 struct drm_i915_gem_object *obj,
771 struct drm_i915_gem_pwrite *args,
772 struct drm_file *file)
774 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
777 char __user *user_data;
778 int page_offset, page_length;
780 user_data = (char __user *) (uintptr_t) args->data_ptr;
783 offset = args->offset;
791 /* Operation in this page
793 * page_offset = offset within page
794 * page_length = bytes to copy for this page
796 page_offset = offset & (PAGE_SIZE-1);
797 page_length = remain;
798 if ((page_offset + remain) > PAGE_SIZE)
799 page_length = PAGE_SIZE - page_offset;
801 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
802 GFP_HIGHUSER | __GFP_RECLAIMABLE);
804 return PTR_ERR(page);
806 vaddr = kmap_atomic(page, KM_USER0);
807 ret = __copy_from_user_inatomic(vaddr + page_offset,
810 kunmap_atomic(vaddr, KM_USER0);
812 set_page_dirty(page);
813 mark_page_accessed(page);
814 page_cache_release(page);
816 /* If we get a fault while copying data, then (presumably) our
817 * source page isn't available. Return the error and we'll
818 * retry in the slow path.
823 remain -= page_length;
824 user_data += page_length;
825 offset += page_length;
832 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
833 * the memory and maps it using kmap_atomic for copying.
835 * This avoids taking mmap_sem for faulting on the user's address while the
836 * struct_mutex is held.
839 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
840 struct drm_i915_gem_object *obj,
841 struct drm_i915_gem_pwrite *args,
842 struct drm_file *file)
844 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
845 struct mm_struct *mm = current->mm;
846 struct page **user_pages;
848 loff_t offset, pinned_pages, i;
849 loff_t first_data_page, last_data_page, num_pages;
850 int shmem_page_offset;
851 int data_page_index, data_page_offset;
854 uint64_t data_ptr = args->data_ptr;
855 int do_bit17_swizzling;
859 /* Pin the user pages containing the data. We can't fault while
860 * holding the struct mutex, and all of the pwrite implementations
861 * want to hold it while dereferencing the user data.
863 first_data_page = data_ptr / PAGE_SIZE;
864 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
865 num_pages = last_data_page - first_data_page + 1;
867 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
868 if (user_pages == NULL)
871 mutex_unlock(&dev->struct_mutex);
872 down_read(&mm->mmap_sem);
873 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
874 num_pages, 0, 0, user_pages, NULL);
875 up_read(&mm->mmap_sem);
876 mutex_lock(&dev->struct_mutex);
877 if (pinned_pages < num_pages) {
882 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
886 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
888 offset = args->offset;
894 /* Operation in this page
896 * shmem_page_offset = offset within page in shmem file
897 * data_page_index = page number in get_user_pages return
898 * data_page_offset = offset with data_page_index page.
899 * page_length = bytes to copy for this page
901 shmem_page_offset = offset & ~PAGE_MASK;
902 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
903 data_page_offset = data_ptr & ~PAGE_MASK;
905 page_length = remain;
906 if ((shmem_page_offset + page_length) > PAGE_SIZE)
907 page_length = PAGE_SIZE - shmem_page_offset;
908 if ((data_page_offset + page_length) > PAGE_SIZE)
909 page_length = PAGE_SIZE - data_page_offset;
911 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
912 GFP_HIGHUSER | __GFP_RECLAIMABLE);
918 if (do_bit17_swizzling) {
919 slow_shmem_bit17_copy(page,
921 user_pages[data_page_index],
926 slow_shmem_copy(page,
928 user_pages[data_page_index],
933 set_page_dirty(page);
934 mark_page_accessed(page);
935 page_cache_release(page);
937 remain -= page_length;
938 data_ptr += page_length;
939 offset += page_length;
943 for (i = 0; i < pinned_pages; i++)
944 page_cache_release(user_pages[i]);
945 drm_free_large(user_pages);
951 * Writes data to the object referenced by handle.
953 * On error, the contents of the buffer that were to be modified are undefined.
956 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
957 struct drm_file *file)
959 struct drm_i915_gem_pwrite *args = data;
960 struct drm_i915_gem_object *obj;
966 if (!access_ok(VERIFY_READ,
967 (char __user *)(uintptr_t)args->data_ptr,
971 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
976 ret = i915_mutex_lock_interruptible(dev);
980 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
981 if (&obj->base == NULL) {
986 /* Bounds check destination. */
987 if (args->offset > obj->base.size ||
988 args->size > obj->base.size - args->offset) {
993 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
995 /* We can only do the GTT pwrite on untiled buffers, as otherwise
996 * it would end up going through the fenced access, and we'll get
997 * different detiling behavior between reading and writing.
998 * pread/pwrite currently are reading and writing from the CPU
999 * perspective, requiring manual detiling by the client.
1002 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1003 else if (obj->gtt_space &&
1004 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
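		/*
		 * The object is resident in the GTT and not dirty in the
		 * CPU domain, so write through the aperture: pin it, move
		 * it to the GTT domain, drop any fence, and try the fast
		 * path, falling back to the pinned slow path if the user
		 * pages fault.
		 */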
1005 ret = i915_gem_object_pin(obj, 0, true);
1009 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1013 ret = i915_gem_object_put_fence(obj);
1017 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1019 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1022 i915_gem_object_unpin(obj);
1024 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1029 if (!i915_gem_object_needs_bit17_swizzle(obj))
1030 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1032 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1036 drm_gem_object_unreference(&obj->base);
1038 mutex_unlock(&dev->struct_mutex);
1043 * Called when user space prepares to use an object with the CPU, either
1044 * through the mmap ioctl's mapping or a GTT mapping.
1047 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1048 struct drm_file *file)
1050 struct drm_i915_gem_set_domain *args = data;
1051 struct drm_i915_gem_object *obj;
1052 uint32_t read_domains = args->read_domains;
1053 uint32_t write_domain = args->write_domain;
1056 if (!(dev->driver->driver_features & DRIVER_GEM))
1059 /* Only handle setting domains to types used by the CPU. */
1060 if (write_domain & I915_GEM_GPU_DOMAINS)
1063 if (read_domains & I915_GEM_GPU_DOMAINS)
1066 /* Having something in the write domain implies it's in the read
1067 * domain, and only that read domain. Enforce that in the request.
1069 if (write_domain != 0 && read_domains != write_domain)
1072 ret = i915_mutex_lock_interruptible(dev);
1076 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1077 if (&obj->base == NULL) {
1082 if (read_domains & I915_GEM_DOMAIN_GTT) {
1083 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1085 /* Silently promote "you're not bound, there was nothing to do"
1086 * to success, since the client was just asking us to
1087 * make sure everything was done.
1092 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1095 drm_gem_object_unreference(&obj->base);
1097 mutex_unlock(&dev->struct_mutex);
1102 * Called when user space has done writes to this buffer
1105 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1106 struct drm_file *file)
1108 struct drm_i915_gem_sw_finish *args = data;
1109 struct drm_i915_gem_object *obj;
1112 if (!(dev->driver->driver_features & DRIVER_GEM))
1115 ret = i915_mutex_lock_interruptible(dev);
1119 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1120 if (&obj->base == NULL) {
1125 /* Pinned buffers may be scanout, so flush the cache */
1127 i915_gem_object_flush_cpu_write_domain(obj);
1129 drm_gem_object_unreference(&obj->base);
1131 mutex_unlock(&dev->struct_mutex);
1136 * Maps the contents of an object, returning the address it is mapped
1139 * While the mapping holds a reference on the contents of the object, it doesn't
1140 * imply a ref on the object itself.
1143 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1144 struct drm_file *file)
1146 struct drm_i915_private *dev_priv = dev->dev_private;
1147 struct drm_i915_gem_mmap *args = data;
1148 struct drm_gem_object *obj;
1151 if (!(dev->driver->driver_features & DRIVER_GEM))
1154 obj = drm_gem_object_lookup(dev, file, args->handle);
1158 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1159 drm_gem_object_unreference_unlocked(obj);
	down_write(&current->mm->mmap_sem);
1164 addr = do_mmap(obj->filp, 0, args->size,
1165 PROT_READ | PROT_WRITE, MAP_SHARED,
	up_write(&current->mm->mmap_sem);
1168 drm_gem_object_unreference_unlocked(obj);
1169 if (IS_ERR((void *)addr))
1172 args->addr_ptr = (uint64_t) addr;
1178 * i915_gem_fault - fault a page into the GTT
1179 * vma: VMA in question
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1183 * from userspace. The fault handler takes care of binding the object to
1184 * the GTT (if needed), allocating and programming a fence register (again,
1185 * only if needed based on whether the old reg is still valid or the object
1186 * is tiled) and inserting a new PTE into the faulting process.
1188 * Note that the faulting process may involve evicting existing objects
1189 * from the GTT and/or fence registers to make room. So performance may
1190 * suffer if the GTT working set is large or there are few fence registers
1193 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1195 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1196 struct drm_device *dev = obj->base.dev;
1197 drm_i915_private_t *dev_priv = dev->dev_private;
1198 pgoff_t page_offset;
1201 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1203 /* We don't use vmf->pgoff since that has the fake offset */
1204 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1207 ret = i915_mutex_lock_interruptible(dev);
1211 trace_i915_gem_object_fault(obj, page_offset, true, write);
1213 /* Now bind it into the GTT if needed */
1214 if (!obj->map_and_fenceable) {
1215 ret = i915_gem_object_unbind(obj);
1219 if (!obj->gtt_space) {
1220 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1225 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1229 if (obj->tiling_mode == I915_TILING_NONE)
1230 ret = i915_gem_object_put_fence(obj);
1232 ret = i915_gem_object_get_fence(obj, NULL);
1236 if (i915_gem_object_is_inactive(obj))
1237 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1239 obj->fault_mappable = true;
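	/*
	 * The PFN passed to vm_insert_pfn below is the aperture's physical
	 * base plus the object's offset within the GTT, plus the page that
	 * faulted.
	 */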
1241 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1244 /* Finally, remap it using the new GTT offset */
1245 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1247 mutex_unlock(&dev->struct_mutex);
1252 /* Give the error handler a chance to run and move the
1253 * objects off the GPU active list. Next time we service the
1254 * fault, we should be able to transition the page into the
1255 * GTT without touching the GPU (and so avoid further
	 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1257 * with coherency, just lost writes.
1263 return VM_FAULT_NOPAGE;
1265 return VM_FAULT_OOM;
1267 return VM_FAULT_SIGBUS;
1272 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1273 * @obj: obj in question
1275 * GEM memory mapping works by handing back to userspace a fake mmap offset
1276 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1277 * up the object based on the offset and sets up the various memory mapping
1280 * This routine allocates and attaches a fake offset for @obj.
1283 i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1285 struct drm_device *dev = obj->base.dev;
1286 struct drm_gem_mm *mm = dev->mm_private;
1287 struct drm_map_list *list;
1288 struct drm_local_map *map;
1291 /* Set the object up for mmap'ing */
1292 list = &obj->base.map_list;
1293 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1298 map->type = _DRM_GEM;
1299 map->size = obj->base.size;
1302 /* Get a DRM GEM mmap offset allocated... */
1303 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1304 obj->base.size / PAGE_SIZE,
1306 if (!list->file_offset_node) {
1307 DRM_ERROR("failed to allocate offset for bo %d\n",
1313 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1314 obj->base.size / PAGE_SIZE,
1316 if (!list->file_offset_node) {
1321 list->hash.key = list->file_offset_node->start;
1322 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1324 DRM_ERROR("failed to add to map hash\n");
1331 drm_mm_put_block(list->file_offset_node);
1340 * i915_gem_release_mmap - remove physical page mappings
1341 * @obj: obj in question
1343 * Preserve the reservation of the mmapping with the DRM core code, but
1344 * relinquish ownership of the pages back to the system.
1346 * It is vital that we remove the page mapping if we have mapped a tiled
1347 * object through the GTT and then lose the fence register due to
1348 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
1350 * mapping will then trigger a page fault on the next user access, allowing
1351 * fixup by i915_gem_fault().
1354 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1356 if (!obj->fault_mappable)
1359 if (obj->base.dev->dev_mapping)
1360 unmap_mapping_range(obj->base.dev->dev_mapping,
1361 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1364 obj->fault_mappable = false;
1368 i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1370 struct drm_device *dev = obj->base.dev;
1371 struct drm_gem_mm *mm = dev->mm_private;
1372 struct drm_map_list *list = &obj->base.map_list;
1374 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1375 drm_mm_put_block(list->file_offset_node);
1381 i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1383 struct drm_device *dev = obj->base.dev;
1386 if (INTEL_INFO(dev)->gen >= 4 ||
1387 obj->tiling_mode == I915_TILING_NONE)
1388 return obj->base.size;
1390 /* Previous chips need a power-of-two fence region when tiling */
1391 if (INTEL_INFO(dev)->gen == 3)
1396 while (size < obj->base.size)
1403 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1404 * @obj: object to check
1406 * Return the required GTT alignment for an object, taking into account
1407 * potential fence register mapping.
1410 i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1412 struct drm_device *dev = obj->base.dev;
1415 * Minimum alignment is 4k (GTT page size), but might be greater
1416 * if a fence register is needed for the object.
1418 if (INTEL_INFO(dev)->gen >= 4 ||
1419 obj->tiling_mode == I915_TILING_NONE)
1423 * Previous chips need to be aligned to the size of the smallest
1424 * fence register that can contain the object.
1426 return i915_gem_get_gtt_size(obj);
1430 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1432 * @obj: object to check
1434 * Return the required GTT alignment for an object, only taking into account
1435 * unfenced tiled surface requirements.
1438 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1440 struct drm_device *dev = obj->base.dev;
1444 * Minimum alignment is 4k (GTT page size) for sane hw.
1446 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1447 obj->tiling_mode == I915_TILING_NONE)
1451 * Older chips need unfenced tiled buffers to be aligned to the left
1452 * edge of an even tile row (where tile rows are counted as if the bo is
1453 * placed in a fenced gtt region).
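	 * For example, an X-tiled buffer (8-row tiles) with a 4096-byte
	 * stride must be aligned to 2 * 8 * 4096 = 64KiB.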
1456 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1461 return tile_height * obj->stride * 2;
1465 i915_gem_mmap_gtt(struct drm_file *file,
1466 struct drm_device *dev,
1470 struct drm_i915_private *dev_priv = dev->dev_private;
1471 struct drm_i915_gem_object *obj;
1474 if (!(dev->driver->driver_features & DRIVER_GEM))
1477 ret = i915_mutex_lock_interruptible(dev);
1481 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1482 if (&obj->base == NULL) {
1487 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1492 if (obj->madv != I915_MADV_WILLNEED) {
1493 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1498 if (!obj->base.map_list.map) {
1499 ret = i915_gem_create_mmap_offset(obj);
1504 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1507 drm_gem_object_unreference(&obj->base);
1509 mutex_unlock(&dev->struct_mutex);
1514 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1516 * @data: GTT mapping ioctl data
1517 * @file: GEM object info
1519 * Simply returns the fake offset to userspace so it can mmap it.
1520 * The mmap call will end up in drm_gem_mmap(), which will set things
1521 * up so we can get faults in the handler above.
1523 * The fault handler will take care of binding the object into the GTT
1524 * (since it may have been evicted to make room for something), allocating
1525 * a fence register, and mapping the appropriate aperture address into
1529 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1530 struct drm_file *file)
1532 struct drm_i915_gem_mmap_gtt *args = data;
1534 if (!(dev->driver->driver_features & DRIVER_GEM))
1537 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1542 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1546 struct address_space *mapping;
1547 struct inode *inode;
1550 /* Get the list of pages out of our struct file. They'll be pinned
1551 * at this point until we release them.
1553 page_count = obj->base.size / PAGE_SIZE;
1554 BUG_ON(obj->pages != NULL);
1555 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1556 if (obj->pages == NULL)
1559 inode = obj->base.filp->f_path.dentry->d_inode;
1560 mapping = inode->i_mapping;
1561 for (i = 0; i < page_count; i++) {
1562 page = read_cache_page_gfp(mapping, i,
1570 obj->pages[i] = page;
1573 if (obj->tiling_mode != I915_TILING_NONE)
1574 i915_gem_object_do_bit_17_swizzle(obj);
1580 page_cache_release(obj->pages[i]);
1582 drm_free_large(obj->pages);
1584 return PTR_ERR(page);
1588 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1590 int page_count = obj->base.size / PAGE_SIZE;
1593 BUG_ON(obj->madv == __I915_MADV_PURGED);
1595 if (obj->tiling_mode != I915_TILING_NONE)
1596 i915_gem_object_save_bit_17_swizzle(obj);
1598 if (obj->madv == I915_MADV_DONTNEED)
1601 for (i = 0; i < page_count; i++) {
1603 set_page_dirty(obj->pages[i]);
1605 if (obj->madv == I915_MADV_WILLNEED)
1606 mark_page_accessed(obj->pages[i]);
1608 page_cache_release(obj->pages[i]);
1612 drm_free_large(obj->pages);
1617 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1618 struct intel_ring_buffer *ring,
1621 struct drm_device *dev = obj->base.dev;
1622 struct drm_i915_private *dev_priv = dev->dev_private;
1624 BUG_ON(ring == NULL);
1627 /* Add a reference if we're newly entering the active list. */
1629 drm_gem_object_reference(&obj->base);
1633 /* Move from whatever list we were on to the tail of execution. */
1634 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1635 list_move_tail(&obj->ring_list, &ring->active_list);
1637 obj->last_rendering_seqno = seqno;
1638 if (obj->fenced_gpu_access) {
1639 struct drm_i915_fence_reg *reg;
1641 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1643 obj->last_fenced_seqno = seqno;
1644 obj->last_fenced_ring = ring;
1646 reg = &dev_priv->fence_regs[obj->fence_reg];
1647 list_move_tail(®->lru_list, &dev_priv->mm.fence_list);
1652 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1654 list_del_init(&obj->ring_list);
1655 obj->last_rendering_seqno = 0;
1659 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1661 struct drm_device *dev = obj->base.dev;
1662 drm_i915_private_t *dev_priv = dev->dev_private;
1664 BUG_ON(!obj->active);
1665 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1667 i915_gem_object_move_off_active(obj);
1671 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1673 struct drm_device *dev = obj->base.dev;
1674 struct drm_i915_private *dev_priv = dev->dev_private;
1676 if (obj->pin_count != 0)
1677 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1679 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1681 BUG_ON(!list_empty(&obj->gpu_write_list));
1682 BUG_ON(!obj->active);
1685 i915_gem_object_move_off_active(obj);
1686 obj->fenced_gpu_access = false;
1689 obj->pending_gpu_write = false;
1690 drm_gem_object_unreference(&obj->base);
1692 WARN_ON(i915_verify_lists(dev));
1695 /* Immediately discard the backing storage */
1697 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1699 struct inode *inode;
1701 /* Our goal here is to return as much of the memory as
1702 * is possible back to the system as we are called from OOM.
1703 * To do this we must instruct the shmfs to drop all of its
1704 * backing pages, *now*. Here we mirror the actions taken
	 * by shmem_delete_inode() to release the backing store.
1707 inode = obj->base.filp->f_path.dentry->d_inode;
1708 truncate_inode_pages(inode->i_mapping, 0);
1709 if (inode->i_op->truncate_range)
1710 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1712 obj->madv = __I915_MADV_PURGED;
1716 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1718 return obj->madv == I915_MADV_DONTNEED;
1722 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1723 uint32_t flush_domains)
1725 struct drm_i915_gem_object *obj, *next;
1727 list_for_each_entry_safe(obj, next,
1728 &ring->gpu_write_list,
1730 if (obj->base.write_domain & flush_domains) {
1731 uint32_t old_write_domain = obj->base.write_domain;
1733 obj->base.write_domain = 0;
1734 list_del_init(&obj->gpu_write_list);
1735 i915_gem_object_move_to_active(obj, ring,
1736 i915_gem_next_request_seqno(ring));
1738 trace_i915_gem_object_change_domain(obj,
1739 obj->base.read_domains,
1746 i915_add_request(struct intel_ring_buffer *ring,
1747 struct drm_file *file,
1748 struct drm_i915_gem_request *request)
1750 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1755 BUG_ON(request == NULL);
1757 ret = ring->add_request(ring, &seqno);
1761 trace_i915_gem_request_add(ring, seqno);
1763 request->seqno = seqno;
1764 request->ring = ring;
1765 request->emitted_jiffies = jiffies;
1766 was_empty = list_empty(&ring->request_list);
1767 list_add_tail(&request->list, &ring->request_list);
1770 struct drm_i915_file_private *file_priv = file->driver_priv;
1772 spin_lock(&file_priv->mm.lock);
1773 request->file_priv = file_priv;
1774 list_add_tail(&request->client_list,
1775 &file_priv->mm.request_list);
1776 spin_unlock(&file_priv->mm.lock);
1779 ring->outstanding_lazy_request = false;
1781 if (!dev_priv->mm.suspended) {
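		/*
		 * While not suspended, keep the hangcheck timer armed and
		 * schedule another retire pass in about a second.
		 */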
1782 mod_timer(&dev_priv->hangcheck_timer,
1783 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1785 queue_delayed_work(dev_priv->wq,
1786 &dev_priv->mm.retire_work, HZ);
1792 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1794 struct drm_i915_file_private *file_priv = request->file_priv;
1799 spin_lock(&file_priv->mm.lock);
1800 if (request->file_priv) {
1801 list_del(&request->client_list);
1802 request->file_priv = NULL;
1804 spin_unlock(&file_priv->mm.lock);
1807 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1808 struct intel_ring_buffer *ring)
1810 while (!list_empty(&ring->request_list)) {
1811 struct drm_i915_gem_request *request;
1813 request = list_first_entry(&ring->request_list,
1814 struct drm_i915_gem_request,
1817 list_del(&request->list);
1818 i915_gem_request_remove_from_client(request);
1822 while (!list_empty(&ring->active_list)) {
1823 struct drm_i915_gem_object *obj;
1825 obj = list_first_entry(&ring->active_list,
1826 struct drm_i915_gem_object,
1829 obj->base.write_domain = 0;
1830 list_del_init(&obj->gpu_write_list);
1831 i915_gem_object_move_to_inactive(obj);
1835 static void i915_gem_reset_fences(struct drm_device *dev)
1837 struct drm_i915_private *dev_priv = dev->dev_private;
1840 for (i = 0; i < 16; i++) {
1841 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1842 struct drm_i915_gem_object *obj = reg->obj;
1847 if (obj->tiling_mode)
1848 i915_gem_release_mmap(obj);
1850 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1851 reg->obj->fenced_gpu_access = false;
1852 reg->obj->last_fenced_seqno = 0;
1853 reg->obj->last_fenced_ring = NULL;
1854 i915_gem_clear_fence_reg(dev, reg);
1858 void i915_gem_reset(struct drm_device *dev)
1860 struct drm_i915_private *dev_priv = dev->dev_private;
1861 struct drm_i915_gem_object *obj;
1864 for (i = 0; i < I915_NUM_RINGS; i++)
1865 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1867 /* Remove anything from the flushing lists. The GPU cache is likely
1868 * to be lost on reset along with the data, so simply move the
1869 * lost bo to the inactive list.
1871 while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj = list_first_entry(&dev_priv->mm.flushing_list,
1873 struct drm_i915_gem_object,
1876 obj->base.write_domain = 0;
1877 list_del_init(&obj->gpu_write_list);
1878 i915_gem_object_move_to_inactive(obj);
1881 /* Move everything out of the GPU domains to ensure we do any
1882 * necessary invalidation upon reuse.
1884 list_for_each_entry(obj,
1885 &dev_priv->mm.inactive_list,
1888 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1891 /* The fence registers are invalidated so clear them out */
1892 i915_gem_reset_fences(dev);
1896 * This function clears the request list as sequence numbers are passed.
1899 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1904 if (list_empty(&ring->request_list))
1907 WARN_ON(i915_verify_lists(ring->dev));
1909 seqno = ring->get_seqno(ring);
1911 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1912 if (seqno >= ring->sync_seqno[i])
1913 ring->sync_seqno[i] = 0;
1915 while (!list_empty(&ring->request_list)) {
1916 struct drm_i915_gem_request *request;
1918 request = list_first_entry(&ring->request_list,
1919 struct drm_i915_gem_request,
1922 if (!i915_seqno_passed(seqno, request->seqno))
1925 trace_i915_gem_request_retire(ring, request->seqno);
1927 list_del(&request->list);
1928 i915_gem_request_remove_from_client(request);
1932 /* Move any buffers on the active list that are no longer referenced
1933 * by the ringbuffer to the flushing/inactive lists as appropriate.
1935 while (!list_empty(&ring->active_list)) {
1936 struct drm_i915_gem_object *obj;
		obj = list_first_entry(&ring->active_list,
1939 struct drm_i915_gem_object,
1942 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1945 if (obj->base.write_domain != 0)
1946 i915_gem_object_move_to_flushing(obj);
1948 i915_gem_object_move_to_inactive(obj);
1951 if (unlikely(ring->trace_irq_seqno &&
1952 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1953 ring->irq_put(ring);
1954 ring->trace_irq_seqno = 0;
1957 WARN_ON(i915_verify_lists(ring->dev));
1961 i915_gem_retire_requests(struct drm_device *dev)
1963 drm_i915_private_t *dev_priv = dev->dev_private;
1966 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1967 struct drm_i915_gem_object *obj, *next;
1969 /* We must be careful that during unbind() we do not
1970 * accidentally infinitely recurse into retire requests.
1972 * retire -> free -> unbind -> wait -> retire_ring
1974 list_for_each_entry_safe(obj, next,
1975 &dev_priv->mm.deferred_free_list,
1977 i915_gem_free_object_tail(obj);
1980 for (i = 0; i < I915_NUM_RINGS; i++)
1981 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1985 i915_gem_retire_work_handler(struct work_struct *work)
1987 drm_i915_private_t *dev_priv;
1988 struct drm_device *dev;
1992 dev_priv = container_of(work, drm_i915_private_t,
1993 mm.retire_work.work);
1994 dev = dev_priv->dev;
1996 /* Come back later if the device is busy... */
1997 if (!mutex_trylock(&dev->struct_mutex)) {
1998 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2002 i915_gem_retire_requests(dev);
2004 /* Send a periodic flush down the ring so we don't hold onto GEM
2005 * objects indefinitely.
2008 for (i = 0; i < I915_NUM_RINGS; i++) {
2009 struct intel_ring_buffer *ring = &dev_priv->ring[i];
2011 if (!list_empty(&ring->gpu_write_list)) {
2012 struct drm_i915_gem_request *request;
2015 ret = i915_gem_flush_ring(ring,
2016 0, I915_GEM_GPU_DOMAINS);
2017 request = kzalloc(sizeof(*request), GFP_KERNEL);
2018 if (ret || request == NULL ||
2019 i915_add_request(ring, NULL, request))
2023 idle &= list_empty(&ring->request_list);
2026 if (!dev_priv->mm.suspended && !idle)
2027 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2029 mutex_unlock(&dev->struct_mutex);
2033 * Waits for a sequence number to be signaled, and cleans up the
2034 * request and object lists appropriately for that event.
2037 i915_wait_request(struct intel_ring_buffer *ring,
2040 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2046 if (atomic_read(&dev_priv->mm.wedged)) {
2047 struct completion *x = &dev_priv->error_completion;
2048 bool recovery_complete;
2049 unsigned long flags;
2051 /* Give the error handler a chance to run. */
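		/* Peek at the completion count without consuming a token:
		 * if the reset handler has already signalled completion we
		 * report -EIO, otherwise -EAGAIN so the caller can retry
		 * once the reset finishes.
		 */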
2052 spin_lock_irqsave(&x->wait.lock, flags);
2053 recovery_complete = x->done > 0;
2054 spin_unlock_irqrestore(&x->wait.lock, flags);
2056 return recovery_complete ? -EIO : -EAGAIN;
2059 if (seqno == ring->outstanding_lazy_request) {
2060 struct drm_i915_gem_request *request;
2062 request = kzalloc(sizeof(*request), GFP_KERNEL);
2063 if (request == NULL)
2066 ret = i915_add_request(ring, NULL, request);
2072 seqno = request->seqno;
2075 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2076 if (HAS_PCH_SPLIT(ring->dev))
2077 ier = I915_READ(DEIER) | I915_READ(GTIER);
2079 ier = I915_READ(IER);
2081 DRM_ERROR("something (likely vbetool) disabled "
2082 "interrupts, re-enabling\n");
2083 i915_driver_irq_preinstall(ring->dev);
2084 i915_driver_irq_postinstall(ring->dev);
2087 trace_i915_gem_request_wait_begin(ring, seqno);
2089 ring->waiting_seqno = seqno;
2090 if (ring->irq_get(ring)) {
2091 if (dev_priv->mm.interruptible)
2092 ret = wait_event_interruptible(ring->irq_queue,
2093 i915_seqno_passed(ring->get_seqno(ring), seqno)
2094 || atomic_read(&dev_priv->mm.wedged));
2096 wait_event(ring->irq_queue,
2097 i915_seqno_passed(ring->get_seqno(ring), seqno)
2098 || atomic_read(&dev_priv->mm.wedged));
2100 ring->irq_put(ring);
2101 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2103 atomic_read(&dev_priv->mm.wedged), 3000))
2105 ring->waiting_seqno = 0;
2107 trace_i915_gem_request_wait_end(ring, seqno);
2109 if (atomic_read(&dev_priv->mm.wedged))
2112 if (ret && ret != -ERESTARTSYS)
2113 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2114 __func__, ret, seqno, ring->get_seqno(ring),
2115 dev_priv->next_seqno);
2117 /* Directly dispatch request retiring. While we have the work queue
2118 * to handle this, the waiter on a request often wants an associated
2119 * buffer to have made it to the inactive list, and we would need
2120 * a separate wait queue to handle that.
2123 i915_gem_retire_requests_ring(ring);
2129 * Ensures that all rendering to the object has completed and the object is
2130 * safe to unbind from the GTT or access from the CPU.
2133 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2137 /* This function only exists to support waiting for existing rendering,
2138 * not for emitting required flushes.
2140 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2142 /* If there is rendering queued on the buffer being evicted, wait for
2146 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2155 * Unbinds an object from the GTT aperture.
2158 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2162 if (obj->gtt_space == NULL)
2165 if (obj->pin_count != 0) {
2166 DRM_ERROR("Attempting to unbind pinned buffer\n");
2170 /* blow away mappings if mapped through GTT */
2171 i915_gem_release_mmap(obj);
2173 /* Move the object to the CPU domain to ensure that
2174 * any possible CPU writes while it's not in the GTT
2175 * are flushed when we go to remap it. This will
2176 * also ensure that all pending GPU writes are finished
2179 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2180 if (ret == -ERESTARTSYS)
2182 /* Continue on if we fail due to EIO, the GPU is hung so we
2183 * should be safe and we need to cleanup or else we might
2184 * cause memory corruption through use-after-free.
2187 i915_gem_clflush_object(obj);
2188 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2191 /* release the fence reg _after_ flushing */
2192 ret = i915_gem_object_put_fence(obj);
2193 if (ret == -ERESTARTSYS)
2196 trace_i915_gem_object_unbind(obj);
2198 i915_gem_gtt_unbind_object(obj);
2199 i915_gem_object_put_pages_gtt(obj);
2201 list_del_init(&obj->gtt_list);
2202 list_del_init(&obj->mm_list);
2203 /* Avoid an unnecessary call to unbind on rebind. */
2204 obj->map_and_fenceable = true;
2206 drm_mm_put_block(obj->gtt_space);
2207 obj->gtt_space = NULL;
2208 obj->gtt_offset = 0;
2210 if (i915_gem_object_is_purgeable(obj))
2211 i915_gem_object_truncate(obj);
2217 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2218 uint32_t invalidate_domains,
2219 uint32_t flush_domains)
2223 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2226 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2228 ret = ring->flush(ring, invalidate_domains, flush_domains);
2232 if (flush_domains & I915_GEM_GPU_DOMAINS)
2233 i915_gem_process_flushing_list(ring, flush_domains);
2238 static int i915_ring_idle(struct intel_ring_buffer *ring)
2242 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2245 if (!list_empty(&ring->gpu_write_list)) {
2246 ret = i915_gem_flush_ring(ring,
2247 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2252 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2256 i915_gpu_idle(struct drm_device *dev)
2258 drm_i915_private_t *dev_priv = dev->dev_private;
2262 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2263 list_empty(&dev_priv->mm.active_list));
2267 /* Flush everything onto the inactive list. */
2268 for (i = 0; i < I915_NUM_RINGS; i++) {
2269 ret = i915_ring_idle(&dev_priv->ring[i]);
2277 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2278 struct intel_ring_buffer *pipelined)
2280 struct drm_device *dev = obj->base.dev;
2281 drm_i915_private_t *dev_priv = dev->dev_private;
2282 u32 size = obj->gtt_space->size;
2283 int regnum = obj->fence_reg;
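	/*
	 * The 64-bit fence value packs the region end address in the upper
	 * dword and the start address below it, together with the pitch in
	 * 128-byte units, the Y-tiling flag and the valid bit.
	 */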
2286 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2288 val |= obj->gtt_offset & 0xfffff000;
2289 val |= (uint64_t)((obj->stride / 128) - 1) <<
2290 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2292 if (obj->tiling_mode == I915_TILING_Y)
2293 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2294 val |= I965_FENCE_REG_VALID;
2297 int ret = intel_ring_begin(pipelined, 6);
2301 intel_ring_emit(pipelined, MI_NOOP);
2302 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2303 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2304 intel_ring_emit(pipelined, (u32)val);
2305 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2306 intel_ring_emit(pipelined, (u32)(val >> 32));
2307 intel_ring_advance(pipelined);
2309 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2314 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2315 struct intel_ring_buffer *pipelined)
2317 struct drm_device *dev = obj->base.dev;
2318 drm_i915_private_t *dev_priv = dev->dev_private;
2319 u32 size = obj->gtt_space->size;
2320 int regnum = obj->fence_reg;
2323 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2325 val |= obj->gtt_offset & 0xfffff000;
2326 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2327 if (obj->tiling_mode == I915_TILING_Y)
2328 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2329 val |= I965_FENCE_REG_VALID;
2332 int ret = intel_ring_begin(pipelined, 6);
2336 intel_ring_emit(pipelined, MI_NOOP);
2337 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2338 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2339 intel_ring_emit(pipelined, (u32)val);
2340 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2341 intel_ring_emit(pipelined, (u32)(val >> 32));
2342 intel_ring_advance(pipelined);
2344 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2349 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2350 struct intel_ring_buffer *pipelined)
2352 struct drm_device *dev = obj->base.dev;
2353 drm_i915_private_t *dev_priv = dev->dev_private;
2354 u32 size = obj->gtt_space->size;
2355 u32 fence_reg, val, pitch_val;
2358 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2359 (size & -size) != size ||
2360 (obj->gtt_offset & (size - 1)),
2361 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2362 obj->gtt_offset, obj->map_and_fenceable, size))
2365 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2370 /* Note: pitch better be a power of two tile widths */
2371 pitch_val = obj->stride / tile_width;
2372 pitch_val = ffs(pitch_val) - 1;
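	/*
	 * As noted above the pitch must be a power-of-two number of tile
	 * widths, so ffs() - 1 yields the log2 value the fence register
	 * expects.
	 */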
2374 val = obj->gtt_offset;
2375 if (obj->tiling_mode == I915_TILING_Y)
2376 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2377 val |= I915_FENCE_SIZE_BITS(size);
2378 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2379 val |= I830_FENCE_REG_VALID;
2381 fence_reg = obj->fence_reg;
2383 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2385 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2388 int ret = intel_ring_begin(pipelined, 4);
2392 intel_ring_emit(pipelined, MI_NOOP);
2393 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2394 intel_ring_emit(pipelined, fence_reg);
2395 intel_ring_emit(pipelined, val);
2396 intel_ring_advance(pipelined);
2398 I915_WRITE(fence_reg, val);
2403 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2404 struct intel_ring_buffer *pipelined)
2406 struct drm_device *dev = obj->base.dev;
2407 drm_i915_private_t *dev_priv = dev->dev_private;
2408 u32 size = obj->gtt_space->size;
2409 int regnum = obj->fence_reg;
2413 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2414 (size & -size) != size ||
2415 (obj->gtt_offset & (size - 1)),
2416 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2417 obj->gtt_offset, size))
2420 pitch_val = obj->stride / 128;
2421 pitch_val = ffs(pitch_val) - 1;
2423 val = obj->gtt_offset;
2424 if (obj->tiling_mode == I915_TILING_Y)
2425 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2426 val |= I830_FENCE_SIZE_BITS(size);
2427 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2428 val |= I830_FENCE_REG_VALID;
2431 int ret = intel_ring_begin(pipelined, 4);
2435 intel_ring_emit(pipelined, MI_NOOP);
2436 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2437 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2438 intel_ring_emit(pipelined, val);
2439 intel_ring_advance(pipelined);
2441 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2446 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2448 return i915_seqno_passed(ring->get_seqno(ring), seqno);
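/*
 * Flush outstanding fenced GPU access to @obj and, unless that access came
 * from @pipelined itself, wait for it to retire so that the fence register
 * can be safely rewritten or torn down.
 */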
2452 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2453 struct intel_ring_buffer *pipelined)
2457 if (obj->fenced_gpu_access) {
2458 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2459 ret = i915_gem_flush_ring(obj->last_fenced_ring,
2460 0, obj->base.write_domain);
2465 obj->fenced_gpu_access = false;
2468 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2469 if (!ring_passed_seqno(obj->last_fenced_ring,
2470 obj->last_fenced_seqno)) {
2471 ret = i915_wait_request(obj->last_fenced_ring,
2472 obj->last_fenced_seqno);
2477 obj->last_fenced_seqno = 0;
2478 obj->last_fenced_ring = NULL;
2481 /* Ensure that all CPU reads are completed before installing a fence
2482 * and all writes before removing the fence.
2484 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
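/*
 * Release the fence register backing @obj: invalidate any GTT mmaps that
 * depended on it, flush and wait for fenced GPU access, then hand the
 * register back to the pool.
 */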
2491 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2495 if (obj->tiling_mode)
2496 i915_gem_release_mmap(obj);
2498 ret = i915_gem_object_flush_fence(obj, NULL);
2502 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2503 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2504 i915_gem_clear_fence_reg(obj->base.dev,
2505 &dev_priv->fence_regs[obj->fence_reg]);
2507 obj->fence_reg = I915_FENCE_REG_NONE;
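/*
 * Pick a fence register for (re)use: prefer a completely free register,
 * otherwise steal the least-recently-used one whose object is not pinned.
 */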
2513 static struct drm_i915_fence_reg *
2514 i915_find_fence_reg(struct drm_device *dev,
2515 struct intel_ring_buffer *pipelined)
2517 struct drm_i915_private *dev_priv = dev->dev_private;
2518 struct drm_i915_fence_reg *reg, *first, *avail;
2521 /* First try to find a free reg */
2523 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2524 reg = &dev_priv->fence_regs[i];
2528 if (!reg->obj->pin_count)
2535 /* None available, try to steal one or wait for a user to finish */
2536 avail = first = NULL;
2537 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2538 if (reg->obj->pin_count)
2545 !reg->obj->last_fenced_ring ||
2546 reg->obj->last_fenced_ring == pipelined) {
2559 * i915_gem_object_get_fence - set up a fence reg for an object
2560 * @obj: object to map through a fence reg
2561 * @pipelined: ring on which to queue the change, or NULL for CPU access
2564 * When mapping objects through the GTT, userspace wants to be able to write
2565 * to them without having to worry about swizzling if the object is tiled.
2567 * This function walks the fence regs looking for a free one for @obj,
2568 * stealing one if it can't find any.
2570 * It then sets up the reg based on the object's properties: address, pitch
2571 * and tiling format.
2574 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2575 struct intel_ring_buffer *pipelined)
2577 struct drm_device *dev = obj->base.dev;
2578 struct drm_i915_private *dev_priv = dev->dev_private;
2579 struct drm_i915_fence_reg *reg;
2582 /* XXX disable pipelining. There are bugs. Shocking. */
2585 /* Just update our place in the LRU if our fence is getting reused. */
2586 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2587 reg = &dev_priv->fence_regs[obj->fence_reg];
2588 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2590 if (obj->tiling_changed) {
2591 ret = i915_gem_object_flush_fence(obj, pipelined);
2595 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2600 i915_gem_next_request_seqno(pipelined);
2601 obj->last_fenced_seqno = reg->setup_seqno;
2602 obj->last_fenced_ring = pipelined;
2609 if (reg->setup_seqno) {
2610 if (!ring_passed_seqno(obj->last_fenced_ring,
2611 reg->setup_seqno)) {
2612 ret = i915_wait_request(obj->last_fenced_ring,
2618 reg->setup_seqno = 0;
2620 } else if (obj->last_fenced_ring &&
2621 obj->last_fenced_ring != pipelined) {
2622 ret = i915_gem_object_flush_fence(obj, pipelined);
2630 reg = i915_find_fence_reg(dev, pipelined);
2634 ret = i915_gem_object_flush_fence(obj, pipelined);
2639 struct drm_i915_gem_object *old = reg->obj;
2641 drm_gem_object_reference(&old->base);
2643 if (old->tiling_mode)
2644 i915_gem_release_mmap(old);
2646 ret = i915_gem_object_flush_fence(old, pipelined);
2648 drm_gem_object_unreference(&old->base);
2652 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2655 old->fence_reg = I915_FENCE_REG_NONE;
2656 old->last_fenced_ring = pipelined;
2657 old->last_fenced_seqno =
2658 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2660 drm_gem_object_unreference(&old->base);
2661 } else if (obj->last_fenced_seqno == 0)
2665 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2666 obj->fence_reg = reg - dev_priv->fence_regs;
2667 obj->last_fenced_ring = pipelined;
2670 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2671 obj->last_fenced_seqno = reg->setup_seqno;
2674 obj->tiling_changed = false;
2675 switch (INTEL_INFO(dev)->gen) {
2677 ret = sandybridge_write_fence_reg(obj, pipelined);
2681 ret = i965_write_fence_reg(obj, pipelined);
2684 ret = i915_write_fence_reg(obj, pipelined);
2687 ret = i830_write_fence_reg(obj, pipelined);
2695 * i915_gem_clear_fence_reg - clear out fence register info
2696 * @dev: drm device; @reg: the fence register to clear
2698 * Zeroes out the fence register itself and clears out the associated
2699 * data structures in dev_priv and obj.
2702 i915_gem_clear_fence_reg(struct drm_device *dev,
2703 struct drm_i915_fence_reg *reg)
2705 drm_i915_private_t *dev_priv = dev->dev_private;
2706 uint32_t fence_reg = reg - dev_priv->fence_regs;
2708 switch (INTEL_INFO(dev)->gen) {
2710 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2714 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2718 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2721 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2723 I915_WRITE(fence_reg, 0);
2727 list_del_init(&reg->lru_list);
2729 reg->setup_seqno = 0;
2733 * Finds free space in the GTT aperture and binds the object there.
2736 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2738 bool map_and_fenceable)
2740 struct drm_device *dev = obj->base.dev;
2741 drm_i915_private_t *dev_priv = dev->dev_private;
2742 struct drm_mm_node *free_space;
2743 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2744 u32 size, fence_size, fence_alignment, unfenced_alignment;
2745 bool mappable, fenceable;
2748 if (obj->madv != I915_MADV_WILLNEED) {
2749 DRM_ERROR("Attempting to bind a purgeable object\n");
2753 fence_size = i915_gem_get_gtt_size(obj);
2754 fence_alignment = i915_gem_get_gtt_alignment(obj);
2755 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
2758 alignment = map_and_fenceable ? fence_alignment :
2760 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2761 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2765 size = map_and_fenceable ? fence_size : obj->base.size;
2767 /* If the object is bigger than the entire aperture, reject it early
2768 * before evicting everything in a vain attempt to find space.
2770 if (obj->base.size >
2771 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2772 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2777 if (map_and_fenceable)
2779 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2781 dev_priv->mm.gtt_mappable_end,
2784 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2785 size, alignment, 0);
2787 if (free_space != NULL) {
2788 if (map_and_fenceable)
2790 drm_mm_get_block_range_generic(free_space,
2792 dev_priv->mm.gtt_mappable_end,
2796 drm_mm_get_block(free_space, size, alignment);
2798 if (obj->gtt_space == NULL) {
2799 /* If the gtt is empty and we're still having trouble
2800 * fitting our object in, we're out of memory.
2802 ret = i915_gem_evict_something(dev, size, alignment,
2810 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2812 drm_mm_put_block(obj->gtt_space);
2813 obj->gtt_space = NULL;
2815 if (ret == -ENOMEM) {
2816 /* first try to reclaim some memory by clearing the GTT */
2817 ret = i915_gem_evict_everything(dev, false);
2819 /* now try to shrink everyone else */
2834 ret = i915_gem_gtt_bind_object(obj);
2836 i915_gem_object_put_pages_gtt(obj);
2837 drm_mm_put_block(obj->gtt_space);
2838 obj->gtt_space = NULL;
2840 if (i915_gem_evict_everything(dev, false))
2846 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2847 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2849 /* Assert that the object is not currently in any GPU domain. As it
2850 * wasn't in the GTT, there shouldn't be any way it could have been in a GPU write domain.
2853 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2854 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2856 obj->gtt_offset = obj->gtt_space->start;
2859 obj->gtt_space->size == fence_size &&
2860 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2863 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2865 obj->map_and_fenceable = mappable && fenceable;
2867 trace_i915_gem_object_bind(obj, map_and_fenceable);
2872 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2874 /* If we don't have a page list set up, then we're not pinned
2875 * to GPU, and we can ignore the cache flush because it'll happen
2876 * again at bind time.
2878 if (obj->pages == NULL)
2881 trace_i915_gem_object_clflush(obj);
2883 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2886 /** Flushes any GPU write domain for the object if it's dirty. */
2888 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2890 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2893 /* Queue the GPU write cache flushing we need. */
2894 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2897 /** Flushes the GTT write domain for the object if it's dirty. */
2899 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2901 uint32_t old_write_domain;
2903 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2906 /* No actual flushing is required for the GTT write domain. Writes
2907 * to it immediately go to main memory as far as we know, so there's
2908 * no chipset flush. It also doesn't land in render cache.
2910 * However, we do have to enforce the order so that all writes through
2911 * the GTT land before any writes to the device, such as updates to the GTT itself.
2916 i915_gem_release_mmap(obj);
2918 old_write_domain = obj->base.write_domain;
2919 obj->base.write_domain = 0;
2921 trace_i915_gem_object_change_domain(obj,
2922 obj->base.read_domains,
2926 /** Flushes the CPU write domain for the object if it's dirty. */
2928 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2930 uint32_t old_write_domain;
2932 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2935 i915_gem_clflush_object(obj);
2936 intel_gtt_chipset_flush();
2937 old_write_domain = obj->base.write_domain;
2938 obj->base.write_domain = 0;
2940 trace_i915_gem_object_change_domain(obj,
2941 obj->base.read_domains,
2946 * Moves a single object to the GTT read, and possibly write domain.
2948 * This function returns when the move is complete, including waiting on flushes to occur.
2952 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2954 uint32_t old_write_domain, old_read_domains;
2957 /* Not valid to be called on unbound objects. */
2958 if (obj->gtt_space == NULL)
2961 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2964 ret = i915_gem_object_flush_gpu_write_domain(obj);
2968 if (obj->pending_gpu_write || write) {
2969 ret = i915_gem_object_wait_rendering(obj);
2974 i915_gem_object_flush_cpu_write_domain(obj);
2976 old_write_domain = obj->base.write_domain;
2977 old_read_domains = obj->base.read_domains;
2979 /* It should now be out of any other write domains, and we can update
2980 * the domain values for our changes.
2982 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2983 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2985 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2986 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2990 trace_i915_gem_object_change_domain(obj,
2998 * Prepare a buffer for use as a display plane. Wait uninterruptibly for any
2999 * required flush, since we must not be interrupted during modesetting.
3002 i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
3003 struct intel_ring_buffer *pipelined)
3005 uint32_t old_read_domains;
3008 /* Not valid to be called on unbound objects. */
3009 if (obj->gtt_space == NULL)
3012 ret = i915_gem_object_flush_gpu_write_domain(obj);
3017 /* Currently, we are always called from a non-interruptible context. */
3018 if (pipelined != obj->ring) {
3019 ret = i915_gem_object_wait_rendering(obj);
3024 i915_gem_object_flush_cpu_write_domain(obj);
3026 old_read_domains = obj->base.read_domains;
3027 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3029 trace_i915_gem_object_change_domain(obj,
3031 obj->base.write_domain);
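/*
 * Flush any pending GPU writes to @obj and wait for rendering to complete,
 * leaving the object idle from the GPU's point of view.
 */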
3037 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
3044 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3045 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3050 return i915_gem_object_wait_rendering(obj);
3054 * Moves a single object to the CPU read, and possibly write domain.
3056 * This function returns when the move is complete, including waiting on flushes to occur.
3060 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3062 uint32_t old_write_domain, old_read_domains;
3065 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3068 ret = i915_gem_object_flush_gpu_write_domain(obj);
3072 ret = i915_gem_object_wait_rendering(obj);
3076 i915_gem_object_flush_gtt_write_domain(obj);
3078 /* If we have a partially-valid cache of the object in the CPU,
3079 * finish invalidating it and free the per-page flags.
3081 i915_gem_object_set_to_full_cpu_read_domain(obj);
3083 old_write_domain = obj->base.write_domain;
3084 old_read_domains = obj->base.read_domains;
3086 /* Flush the CPU cache if it's still invalid. */
3087 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3088 i915_gem_clflush_object(obj);
3090 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3093 /* It should now be out of any other write domains, and we can update
3094 * the domain values for our changes.
3096 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3098 /* If we're writing through the CPU, then the GPU read domains will
3099 * need to be invalidated at next use.
3102 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3103 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3106 trace_i915_gem_object_change_domain(obj,
3114 * Moves the object from a partially CPU read to a full one.
3116 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3117 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3120 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3122 if (!obj->page_cpu_valid)
3125 /* If we're partially in the CPU read domain, finish moving it in.
3127 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3130 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3131 if (obj->page_cpu_valid[i])
3133 drm_clflush_pages(obj->pages + i, 1);
3137 /* Free the page_cpu_valid mappings which are now stale, whether
3138 * or not we've got I915_GEM_DOMAIN_CPU.
3140 kfree(obj->page_cpu_valid);
3141 obj->page_cpu_valid = NULL;
3145 * Set the CPU read domain on a range of the object.
3147 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3148 * not entirely valid. The page_cpu_valid member of the object flags which
3149 * pages have been flushed, and will be respected by
3150 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3151 * of the whole object.
3153 * This function returns when the move is complete, including waiting on flushes to occur.
3157 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3158 uint64_t offset, uint64_t size)
3160 uint32_t old_read_domains;
3163 if (offset == 0 && size == obj->base.size)
3164 return i915_gem_object_set_to_cpu_domain(obj, 0);
3166 ret = i915_gem_object_flush_gpu_write_domain(obj);
3170 ret = i915_gem_object_wait_rendering(obj);
3174 i915_gem_object_flush_gtt_write_domain(obj);
3176 /* If we're already fully in the CPU read domain, we're done. */
3177 if (obj->page_cpu_valid == NULL &&
3178 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3181 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3182 * newly adding I915_GEM_DOMAIN_CPU
3184 if (obj->page_cpu_valid == NULL) {
3185 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3187 if (obj->page_cpu_valid == NULL)
3189 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3190 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3192 /* Flush the cache on any pages that are still invalid from the CPU's
3195 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3197 if (obj->page_cpu_valid[i])
3200 drm_clflush_pages(obj->pages + i, 1);
3202 obj->page_cpu_valid[i] = 1;
3205 /* It should now be out of any other write domains, and we can update
3206 * the domain values for our changes.
3208 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3210 old_read_domains = obj->base.read_domains;
3211 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3213 trace_i915_gem_object_change_domain(obj,
3215 obj->base.write_domain);
3220 /* Throttle our rendering by waiting until the ring has completed our requests
3221 * emitted over 20 msec ago.
3223 * Note that if we were to use the current jiffies each time around the loop,
3224 * we wouldn't escape the function with any frames outstanding if the time to
3225 * render a frame was over 20ms.
3227 * This should get us reasonable parallelism between CPU and GPU but also
3228 * relatively low latency when blocking on a particular request to finish.
3231 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3233 struct drm_i915_private *dev_priv = dev->dev_private;
3234 struct drm_i915_file_private *file_priv = file->driver_priv;
3235 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3236 struct drm_i915_gem_request *request;
3237 struct intel_ring_buffer *ring = NULL;
3241 if (atomic_read(&dev_priv->mm.wedged))
3244 spin_lock(&file_priv->mm.lock);
3245 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3246 if (time_after_eq(request->emitted_jiffies, recent_enough))
3249 ring = request->ring;
3250 seqno = request->seqno;
3252 spin_unlock(&file_priv->mm.lock);
3258 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3259 /* And wait for the seqno passing without holding any locks and
3260 * causing extra latency for others. This is safe as the irq
3261 * generation is designed to be run atomically and so is
3264 if (ring->irq_get(ring)) {
3265 ret = wait_event_interruptible(ring->irq_queue,
3266 i915_seqno_passed(ring->get_seqno(ring), seqno)
3267 || atomic_read(&dev_priv->mm.wedged));
3268 ring->irq_put(ring);
3270 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3276 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
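/*
 * Pin @obj into the GTT, rebinding it first if its current placement does
 * not satisfy the requested alignment or mappability. A hypothetical
 * caller, sketched for illustration only:
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret == 0) {
 *		... program the hardware with obj->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 */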
3282 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3284 bool map_and_fenceable)
3286 struct drm_device *dev = obj->base.dev;
3287 struct drm_i915_private *dev_priv = dev->dev_private;
3290 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3291 WARN_ON(i915_verify_lists(dev));
3293 if (obj->gtt_space != NULL) {
3294 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3295 (map_and_fenceable && !obj->map_and_fenceable)) {
3296 WARN(obj->pin_count,
3297 "bo is already pinned with incorrect alignment:"
3298 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3299 " obj->map_and_fenceable=%d\n",
3300 obj->gtt_offset, alignment,
3302 obj->map_and_fenceable);
3303 ret = i915_gem_object_unbind(obj);
3309 if (obj->gtt_space == NULL) {
3310 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3316 if (obj->pin_count++ == 0) {
3318 list_move_tail(&obj->mm_list,
3319 &dev_priv->mm.pinned_list);
3321 obj->pin_mappable |= map_and_fenceable;
3323 WARN_ON(i915_verify_lists(dev));
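/*
 * Drop one pin reference; once the last pin is gone the object moves back
 * to the inactive list and becomes eligible for eviction again.
 */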
3328 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3330 struct drm_device *dev = obj->base.dev;
3331 drm_i915_private_t *dev_priv = dev->dev_private;
3333 WARN_ON(i915_verify_lists(dev));
3334 BUG_ON(obj->pin_count == 0);
3335 BUG_ON(obj->gtt_space == NULL);
3337 if (--obj->pin_count == 0) {
3339 list_move_tail(&obj->mm_list,
3340 &dev_priv->mm.inactive_list);
3341 obj->pin_mappable = false;
3343 WARN_ON(i915_verify_lists(dev));
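/*
 * ioctl: pin a userspace object into the mappable GTT and report its
 * offset. Only the file that first pinned an object may pin or unpin it.
 */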
3347 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3348 struct drm_file *file)
3350 struct drm_i915_gem_pin *args = data;
3351 struct drm_i915_gem_object *obj;
3354 ret = i915_mutex_lock_interruptible(dev);
3358 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3359 if (&obj->base == NULL) {
3364 if (obj->madv != I915_MADV_WILLNEED) {
3365 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3370 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3371 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3377 obj->user_pin_count++;
3378 obj->pin_filp = file;
3379 if (obj->user_pin_count == 1) {
3380 ret = i915_gem_object_pin(obj, args->alignment, true);
3385 /* XXX - flush the CPU caches for pinned objects
3386 * as the X server doesn't manage domains yet
3388 i915_gem_object_flush_cpu_write_domain(obj);
3389 args->offset = obj->gtt_offset;
3391 drm_gem_object_unreference(&obj->base);
3393 mutex_unlock(&dev->struct_mutex);
3398 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3399 struct drm_file *file)
3401 struct drm_i915_gem_pin *args = data;
3402 struct drm_i915_gem_object *obj;
3405 ret = i915_mutex_lock_interruptible(dev);
3409 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3410 if (&obj->base == NULL) {
3415 if (obj->pin_filp != file) {
3416 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3421 obj->user_pin_count--;
3422 if (obj->user_pin_count == 0) {
3423 obj->pin_filp = NULL;
3424 i915_gem_object_unpin(obj);
3428 drm_gem_object_unreference(&obj->base);
3430 mutex_unlock(&dev->struct_mutex);
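/*
 * ioctl: report whether an object is still busy on the GPU, flushing any
 * outstanding writes so that it will become idle without further calls.
 */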
3435 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3436 struct drm_file *file)
3438 struct drm_i915_gem_busy *args = data;
3439 struct drm_i915_gem_object *obj;
3442 ret = i915_mutex_lock_interruptible(dev);
3446 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3447 if (&obj->base == NULL) {
3452 /* Count all active objects as busy, even if they are currently not used
3453 * by the gpu. Users of this interface expect objects to eventually
3454 * become non-busy without any further actions, therefore emit any
3455 * necessary flushes here.
3457 args->busy = obj->active;
3459 /* Unconditionally flush objects, even when the gpu still uses this
3460 * object. Userspace calling this function indicates that it wants to
3461 * use this buffer sooner rather than later, so issuing the required
3462 * flush earlier is beneficial.
3464 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3465 ret = i915_gem_flush_ring(obj->ring,
3466 0, obj->base.write_domain);
3467 } else if (obj->ring->outstanding_lazy_request ==
3468 obj->last_rendering_seqno) {
3469 struct drm_i915_gem_request *request;
3471 /* This ring is not being cleared by active usage,
3472 * so emit a request to do so.
3474 request = kzalloc(sizeof(*request), GFP_KERNEL);
3476 ret = i915_add_request(obj->ring, NULL, request);
3481 /* Update the active list for the hardware's current position.
3482 * Otherwise this only updates on a delayed timer or when irqs
3483 * are actually unmasked, and our working set ends up being
3484 * larger than required.
3486 i915_gem_retire_requests_ring(obj->ring);
3488 args->busy = obj->active;
3491 drm_gem_object_unreference(&obj->base);
3493 mutex_unlock(&dev->struct_mutex);
3498 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3499 struct drm_file *file_priv)
3501 return i915_gem_ring_throttle(dev, file_priv);
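/*
 * ioctl: mark an object's backing storage as DONTNEED (purgeable) or
 * WILLNEED; unbound purgeable objects have their pages discarded at once.
 */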
3505 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3506 struct drm_file *file_priv)
3508 struct drm_i915_gem_madvise *args = data;
3509 struct drm_i915_gem_object *obj;
3512 switch (args->madv) {
3513 case I915_MADV_DONTNEED:
3514 case I915_MADV_WILLNEED:
3520 ret = i915_mutex_lock_interruptible(dev);
3524 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3525 if (&obj->base == NULL) {
3530 if (obj->pin_count) {
3535 if (obj->madv != __I915_MADV_PURGED)
3536 obj->madv = args->madv;
3538 /* if the object is no longer bound, discard its backing storage */
3539 if (i915_gem_object_is_purgeable(obj) &&
3540 obj->gtt_space == NULL)
3541 i915_gem_object_truncate(obj);
3543 args->retained = obj->madv != __I915_MADV_PURGED;
3546 drm_gem_object_unreference(&obj->base);
3548 mutex_unlock(&dev->struct_mutex);
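/*
 * Allocate a new GEM object of the requested size, starting life in the
 * CPU domain with no fence register assigned.
 */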
3552 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3555 struct drm_i915_private *dev_priv = dev->dev_private;
3556 struct drm_i915_gem_object *obj;
3558 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3562 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3567 i915_gem_info_add_obj(dev_priv, size);
3569 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3570 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3572 obj->agp_type = AGP_USER_MEMORY;
3573 obj->base.driver_private = NULL;
3574 obj->fence_reg = I915_FENCE_REG_NONE;
3575 INIT_LIST_HEAD(&obj->mm_list);
3576 INIT_LIST_HEAD(&obj->gtt_list);
3577 INIT_LIST_HEAD(&obj->ring_list);
3578 INIT_LIST_HEAD(&obj->exec_list);
3579 INIT_LIST_HEAD(&obj->gpu_write_list);
3580 obj->madv = I915_MADV_WILLNEED;
3581 /* Avoid an unnecessary call to unbind on the first bind. */
3582 obj->map_and_fenceable = true;
3587 int i915_gem_init_object(struct drm_gem_object *obj)
3594 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3596 struct drm_device *dev = obj->base.dev;
3597 drm_i915_private_t *dev_priv = dev->dev_private;
3600 ret = i915_gem_object_unbind(obj);
3601 if (ret == -ERESTARTSYS) {
3602 list_move(&obj->mm_list,
3603 &dev_priv->mm.deferred_free_list);
3607 trace_i915_gem_object_destroy(obj);
3609 if (obj->base.map_list.map)
3610 i915_gem_free_mmap_offset(obj);
3612 drm_gem_object_release(&obj->base);
3613 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3615 kfree(obj->page_cpu_valid);
3620 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3622 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3623 struct drm_device *dev = obj->base.dev;
3625 while (obj->pin_count > 0)
3626 i915_gem_object_unpin(obj);
3629 i915_gem_detach_phys_object(dev, obj);
3631 i915_gem_free_object_tail(obj);
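/*
 * Quiesce the GPU for suspend or VT switch: wait for outstanding rendering,
 * evict when running UMS, reset the fence registers and tear down the rings.
 */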
3635 i915_gem_idle(struct drm_device *dev)
3637 drm_i915_private_t *dev_priv = dev->dev_private;
3640 mutex_lock(&dev->struct_mutex);
3642 if (dev_priv->mm.suspended) {
3643 mutex_unlock(&dev->struct_mutex);
3647 ret = i915_gpu_idle(dev);
3649 mutex_unlock(&dev->struct_mutex);
3653 /* Under UMS, be paranoid and evict. */
3654 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3655 ret = i915_gem_evict_inactive(dev, false);
3657 mutex_unlock(&dev->struct_mutex);
3662 i915_gem_reset_fences(dev);
3664 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3665 * We need to replace this with a semaphore, or something.
3666 * And not confound mm.suspended!
3668 dev_priv->mm.suspended = 1;
3669 del_timer_sync(&dev_priv->hangcheck_timer);
3671 i915_kernel_lost_context(dev);
3672 i915_gem_cleanup_ringbuffer(dev);
3674 mutex_unlock(&dev->struct_mutex);
3676 /* Cancel the retire work handler, which should be idle now. */
3677 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3683 i915_gem_init_ringbuffer(struct drm_device *dev)
3685 drm_i915_private_t *dev_priv = dev->dev_private;
3688 ret = intel_init_render_ring_buffer(dev);
3693 ret = intel_init_bsd_ring_buffer(dev);
3695 goto cleanup_render_ring;
3699 ret = intel_init_blt_ring_buffer(dev);
3701 goto cleanup_bsd_ring;
3704 dev_priv->next_seqno = 1;
3709 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3710 cleanup_render_ring:
3711 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3716 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3718 drm_i915_private_t *dev_priv = dev->dev_private;
3721 for (i = 0; i < I915_NUM_RINGS; i++)
3722 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3726 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3727 struct drm_file *file_priv)
3729 drm_i915_private_t *dev_priv = dev->dev_private;
3732 if (drm_core_check_feature(dev, DRIVER_MODESET))
3735 if (atomic_read(&dev_priv->mm.wedged)) {
3736 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3737 atomic_set(&dev_priv->mm.wedged, 0);
3740 mutex_lock(&dev->struct_mutex);
3741 dev_priv->mm.suspended = 0;
3743 ret = i915_gem_init_ringbuffer(dev);
3745 mutex_unlock(&dev->struct_mutex);
3749 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3750 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3751 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3752 for (i = 0; i < I915_NUM_RINGS; i++) {
3753 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3754 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3756 mutex_unlock(&dev->struct_mutex);
3758 ret = drm_irq_install(dev);
3760 goto cleanup_ringbuffer;
3765 mutex_lock(&dev->struct_mutex);
3766 i915_gem_cleanup_ringbuffer(dev);
3767 dev_priv->mm.suspended = 1;
3768 mutex_unlock(&dev->struct_mutex);
3774 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3775 struct drm_file *file_priv)
3777 if (drm_core_check_feature(dev, DRIVER_MODESET))
3780 drm_irq_uninstall(dev);
3781 return i915_gem_idle(dev);
3785 i915_gem_lastclose(struct drm_device *dev)
3789 if (drm_core_check_feature(dev, DRIVER_MODESET))
3792 ret = i915_gem_idle(dev);
3794 DRM_ERROR("failed to idle hardware: %d\n", ret);
3798 init_ring_lists(struct intel_ring_buffer *ring)
3800 INIT_LIST_HEAD(&ring->active_list);
3801 INIT_LIST_HEAD(&ring->request_list);
3802 INIT_LIST_HEAD(&ring->gpu_write_list);
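/*
 * One-time GEM setup at driver load: initialise the bookkeeping lists and
 * work handlers, size and clear the fence registers, and register the
 * inactive-list shrinker.
 */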
3806 i915_gem_load(struct drm_device *dev)
3809 drm_i915_private_t *dev_priv = dev->dev_private;
3811 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3812 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3813 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3814 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3815 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3816 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3817 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3818 for (i = 0; i < I915_NUM_RINGS; i++)
3819 init_ring_lists(&dev_priv->ring[i]);
3820 for (i = 0; i < 16; i++)
3821 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3822 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3823 i915_gem_retire_work_handler);
3824 init_completion(&dev_priv->error_completion);
3826 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3828 u32 tmp = I915_READ(MI_ARB_STATE);
3829 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3830 /* arb state is a masked write, so set bit + bit in mask */
3831 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3832 I915_WRITE(MI_ARB_STATE, tmp);
3836 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3838 /* Old X drivers will take 0-2 for front, back, depth buffers */
3839 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3840 dev_priv->fence_reg_start = 3;
3842 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3843 dev_priv->num_fence_regs = 16;
3845 dev_priv->num_fence_regs = 8;
3847 /* Initialize fence registers to zero */
3848 switch (INTEL_INFO(dev)->gen) {
3850 for (i = 0; i < 16; i++)
3851 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
3855 for (i = 0; i < 16; i++)
3856 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
3859 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3860 for (i = 0; i < 8; i++)
3861 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
3863 for (i = 0; i < 8; i++)
3864 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
3867 i915_gem_detect_bit_6_swizzle(dev);
3868 init_waitqueue_head(&dev_priv->pending_flip_queue);
3870 dev_priv->mm.interruptible = true;
3872 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3873 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3874 register_shrinker(&dev_priv->mm.inactive_shrinker);
3878 * Create a physically contiguous memory object for this object
3879 * e.g. for cursor + overlay regs
3881 static int i915_gem_init_phys_object(struct drm_device *dev,
3882 int id, int size, int align)
3884 drm_i915_private_t *dev_priv = dev->dev_private;
3885 struct drm_i915_gem_phys_object *phys_obj;
3888 if (dev_priv->mm.phys_objs[id - 1] || !size)
3891 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3897 phys_obj->handle = drm_pci_alloc(dev, size, align);
3898 if (!phys_obj->handle) {
3903 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3906 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3914 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3916 drm_i915_private_t *dev_priv = dev->dev_private;
3917 struct drm_i915_gem_phys_object *phys_obj;
3919 if (!dev_priv->mm.phys_objs[id - 1])
3922 phys_obj = dev_priv->mm.phys_objs[id - 1];
3923 if (phys_obj->cur_obj) {
3924 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3928 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3930 drm_pci_free(dev, phys_obj->handle);
3932 dev_priv->mm.phys_objs[id - 1] = NULL;
3935 void i915_gem_free_all_phys_object(struct drm_device *dev)
3939 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3940 i915_gem_free_phys_object(dev, i);
3943 void i915_gem_detach_phys_object(struct drm_device *dev,
3944 struct drm_i915_gem_object *obj)
3946 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3953 vaddr = obj->phys_obj->handle->vaddr;
3955 page_count = obj->base.size / PAGE_SIZE;
3956 for (i = 0; i < page_count; i++) {
3957 struct page *page = read_cache_page_gfp(mapping, i,
3958 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3959 if (!IS_ERR(page)) {
3960 char *dst = kmap_atomic(page);
3961 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3964 drm_clflush_pages(&page, 1);
3966 set_page_dirty(page);
3967 mark_page_accessed(page);
3968 page_cache_release(page);
3971 intel_gtt_chipset_flush();
3973 obj->phys_obj->cur_obj = NULL;
3974 obj->phys_obj = NULL;
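/*
 * Copy an object's pages into a physically contiguous buffer (allocating
 * one for @id if needed) so it can be used by hardware that cannot address
 * the GTT, e.g. legacy cursor and overlay registers.
 */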
3978 i915_gem_attach_phys_object(struct drm_device *dev,
3979 struct drm_i915_gem_object *obj,
3983 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3984 drm_i915_private_t *dev_priv = dev->dev_private;
3989 if (id > I915_MAX_PHYS_OBJECT)
3992 if (obj->phys_obj) {
3993 if (obj->phys_obj->id == id)
3995 i915_gem_detach_phys_object(dev, obj);
3998 /* create a new object */
3999 if (!dev_priv->mm.phys_objs[id - 1]) {
4000 ret = i915_gem_init_phys_object(dev, id,
4001 obj->base.size, align);
4003 DRM_ERROR("failed to init phys object %d size: %zu\n",
4004 id, obj->base.size);
4009 /* bind to the object */
4010 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4011 obj->phys_obj->cur_obj = obj;
4013 page_count = obj->base.size / PAGE_SIZE;
4015 for (i = 0; i < page_count; i++) {
4019 page = read_cache_page_gfp(mapping, i,
4020 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4022 return PTR_ERR(page);
4024 src = kmap_atomic(page);
4025 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4026 memcpy(dst, src, PAGE_SIZE);
4029 mark_page_accessed(page);
4030 page_cache_release(page);
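/*
 * pwrite fast path for phys-object backed buffers: copy directly into the
 * contiguous buffer, dropping struct_mutex for a blocking copy_from_user
 * if the atomic, non-caching copy faults.
 */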
4037 i915_gem_phys_pwrite(struct drm_device *dev,
4038 struct drm_i915_gem_object *obj,
4039 struct drm_i915_gem_pwrite *args,
4040 struct drm_file *file_priv)
4042 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4043 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4045 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4046 unsigned long unwritten;
4048 /* The physical object once assigned is fixed for the lifetime
4049 * of the obj, so we can safely drop the lock and continue to access vaddr.
4052 mutex_unlock(&dev->struct_mutex);
4053 unwritten = copy_from_user(vaddr, user_data, args->size);
4054 mutex_lock(&dev->struct_mutex);
4059 intel_gtt_chipset_flush();
4063 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4065 struct drm_i915_file_private *file_priv = file->driver_priv;
4067 /* Clean up our request list when the client is going away, so that
4068 * later retire_requests won't dereference our soon-to-be-gone file_priv.
4071 spin_lock(&file_priv->mm.lock);
4072 while (!list_empty(&file_priv->mm.request_list)) {
4073 struct drm_i915_gem_request *request;
4075 request = list_first_entry(&file_priv->mm.request_list,
4076 struct drm_i915_gem_request,
4078 list_del(&request->client_list);
4079 request->file_priv = NULL;
4081 spin_unlock(&file_priv->mm.lock);
4085 i915_gpu_is_active(struct drm_device *dev)
4087 drm_i915_private_t *dev_priv = dev->dev_private;
4090 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4091 list_empty(&dev_priv->mm.active_list);
4093 return !lists_empty;
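/*
 * Shrinker callback: with nr_to_scan == 0 just report how many inactive
 * objects could be freed; otherwise unbind purgeable objects first, then
 * other inactive objects, idling the GPU only as a last resort.
 */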
4097 i915_gem_inactive_shrink(struct shrinker *shrinker,
4101 struct drm_i915_private *dev_priv =
4102 container_of(shrinker,
4103 struct drm_i915_private,
4104 mm.inactive_shrinker);
4105 struct drm_device *dev = dev_priv->dev;
4106 struct drm_i915_gem_object *obj, *next;
4109 if (!mutex_trylock(&dev->struct_mutex))
4112 /* "fast-path" to count number of available objects */
4113 if (nr_to_scan == 0) {
4115 list_for_each_entry(obj,
4116 &dev_priv->mm.inactive_list,
4119 mutex_unlock(&dev->struct_mutex);
4120 return cnt / 100 * sysctl_vfs_cache_pressure;
4124 /* first scan for clean buffers */
4125 i915_gem_retire_requests(dev);
4127 list_for_each_entry_safe(obj, next,
4128 &dev_priv->mm.inactive_list,
4130 if (i915_gem_object_is_purgeable(obj)) {
4131 if (i915_gem_object_unbind(obj) == 0 &&
4137 /* second pass, evict/count anything still on the inactive list */
4139 list_for_each_entry_safe(obj, next,
4140 &dev_priv->mm.inactive_list,
4143 i915_gem_object_unbind(obj) == 0)
4149 if (nr_to_scan && i915_gpu_is_active(dev)) {
4151 * We are desperate for pages, so as a last resort, wait
4152 * for the GPU to finish and discard whatever we can.
4153 * This dramatically reduces the number of
4154 * OOM-killer events whilst running the GPU aggressively.
4156 if (i915_gpu_idle(dev) == 0)
4159 mutex_unlock(&dev->struct_mutex);
4160 return cnt / 100 * sysctl_vfs_cache_pressure;