2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
38 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
39 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
41 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
46 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
47 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49 bool map_and_fenceable);
50 static void i915_gem_clear_fence_reg(struct drm_device *dev,
51 struct drm_i915_fence_reg *reg);
52 static int i915_gem_phys_pwrite(struct drm_device *dev,
53 struct drm_i915_gem_object *obj,
54 struct drm_i915_gem_pwrite *args,
55 struct drm_file *file);
56 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
59 struct shrink_control *sc);
61 /* some bookkeeping */
62 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
65 dev_priv->mm.object_count++;
66 dev_priv->mm.object_memory += size;
69 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
72 dev_priv->mm.object_count--;
73 dev_priv->mm.object_memory -= size;
77 i915_gem_wait_for_error(struct drm_device *dev)
79 struct drm_i915_private *dev_priv = dev->dev_private;
80 struct completion *x = &dev_priv->error_completion;
84 if (!atomic_read(&dev_priv->mm.wedged))
87 ret = wait_for_completion_interruptible(x);
91 if (atomic_read(&dev_priv->mm.wedged)) {
92 /* GPU is hung, bump the completion count to account for
93 * the token we just consumed so that we never hit zero and
94 * end up waiting upon a subsequent completion event that
97 spin_lock_irqsave(&x->wait.lock, flags);
99 spin_unlock_irqrestore(&x->wait.lock, flags);
104 int i915_mutex_lock_interruptible(struct drm_device *dev)
108 ret = i915_gem_wait_for_error(dev);
112 ret = mutex_lock_interruptible(&dev->struct_mutex);
116 WARN_ON(i915_verify_lists(dev));
121 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
123 return obj->gtt_space && !obj->active && obj->pin_count == 0;
126 void i915_gem_do_init(struct drm_device *dev,
128 unsigned long mappable_end,
131 drm_i915_private_t *dev_priv = dev->dev_private;
133 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135 dev_priv->mm.gtt_start = start;
136 dev_priv->mm.gtt_mappable_end = mappable_end;
137 dev_priv->mm.gtt_end = end;
138 dev_priv->mm.gtt_total = end - start;
139 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141 /* Take over this portion of the GTT */
142 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
146 i915_gem_init_ioctl(struct drm_device *dev, void *data,
147 struct drm_file *file)
149 struct drm_i915_gem_init *args = data;
151 if (args->gtt_start >= args->gtt_end ||
152 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
155 mutex_lock(&dev->struct_mutex);
156 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
157 mutex_unlock(&dev->struct_mutex);
163 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
164 struct drm_file *file)
166 struct drm_i915_private *dev_priv = dev->dev_private;
167 struct drm_i915_gem_get_aperture *args = data;
168 struct drm_i915_gem_object *obj;
171 if (!(dev->driver->driver_features & DRIVER_GEM))
175 mutex_lock(&dev->struct_mutex);
176 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
177 pinned += obj->gtt_space->size;
178 mutex_unlock(&dev->struct_mutex);
180 args->aper_size = dev_priv->mm.gtt_total;
181 args->aper_available_size = args->aper_size - pinned;
187 i915_gem_create(struct drm_file *file,
188 struct drm_device *dev,
192 struct drm_i915_gem_object *obj;
196 size = roundup(size, PAGE_SIZE);
198 /* Allocate the new object */
199 obj = i915_gem_alloc_object(dev, size);
203 ret = drm_gem_handle_create(file, &obj->base, &handle);
205 drm_gem_object_release(&obj->base);
206 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
211 /* drop reference from allocate - handle holds it now */
212 drm_gem_object_unreference(&obj->base);
213 trace_i915_gem_object_create(obj);
220 i915_gem_dumb_create(struct drm_file *file,
221 struct drm_device *dev,
222 struct drm_mode_create_dumb *args)
224 /* have to work out size/pitch and return them */
225 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
226 args->size = args->pitch * args->height;
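/* Worked example (not from the original source): a 1920x1080 XRGB8888
 * (bpp = 32) dumb buffer gets pitch = ALIGN(1920 * 4, 64) = 7680 bytes
 * and size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() then
 * rounds up to whole pages.
 */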
227 return i915_gem_create(file, dev,
228 args->size, &args->handle);
231 int i915_gem_dumb_destroy(struct drm_file *file,
232 struct drm_device *dev,
235 return drm_gem_handle_delete(file, handle);
239 * Creates a new mm object and returns a handle to it.
242 i915_gem_create_ioctl(struct drm_device *dev, void *data,
243 struct drm_file *file)
245 struct drm_i915_gem_create *args = data;
246 return i915_gem_create(file, dev,
247 args->size, &args->handle);
250 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
252 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
254 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
255 obj->tiling_mode != I915_TILING_NONE;
259 slow_shmem_copy(struct page *dst_page,
261 struct page *src_page,
265 char *dst_vaddr, *src_vaddr;
267 dst_vaddr = kmap(dst_page);
268 src_vaddr = kmap(src_page);
270 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
277 slow_shmem_bit17_copy(struct page *gpu_page,
279 struct page *cpu_page,
284 char *gpu_vaddr, *cpu_vaddr;
286 /* Use the unswizzled path if this page isn't affected. */
287 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
289 return slow_shmem_copy(cpu_page, cpu_offset,
290 gpu_page, gpu_offset, length);
292 return slow_shmem_copy(gpu_page, gpu_offset,
293 cpu_page, cpu_offset, length);
296 gpu_vaddr = kmap(gpu_page);
297 cpu_vaddr = kmap(cpu_page);
299 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
300 * XORing with the other bits (A9 for Y, A9 and A10 for X)
303 int cacheline_end = ALIGN(gpu_offset + 1, 64);
304 int this_length = min(cacheline_end - gpu_offset, length);
305 int swizzled_gpu_offset = gpu_offset ^ 64;
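/* Toggling bit 6 swaps adjacent 64-byte cachelines within the page,
 * compensating for the swizzle the memory controller applies when bit 17
 * of the physical address is set: e.g. data at gpu_offset 0 actually
 * lives at byte 64, and data at gpu_offset 128 at byte 192.
 */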
308 memcpy(cpu_vaddr + cpu_offset,
309 gpu_vaddr + swizzled_gpu_offset,
312 memcpy(gpu_vaddr + swizzled_gpu_offset,
313 cpu_vaddr + cpu_offset,
316 cpu_offset += this_length;
317 gpu_offset += this_length;
318 length -= this_length;
326 * This is the fast shmem pread path, which attempts to copy_to_user directly
327 * from the backing pages of the object to the user's address space. On a
328 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
331 i915_gem_shmem_pread_fast(struct drm_device *dev,
332 struct drm_i915_gem_object *obj,
333 struct drm_i915_gem_pread *args,
334 struct drm_file *file)
336 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
339 char __user *user_data;
340 int page_offset, page_length;
342 user_data = (char __user *) (uintptr_t) args->data_ptr;
345 offset = args->offset;
352 /* Operation in this page
354 * page_offset = offset within page
355 * page_length = bytes to copy for this page
357 page_offset = offset & (PAGE_SIZE-1);
358 page_length = remain;
359 if ((page_offset + remain) > PAGE_SIZE)
360 page_length = PAGE_SIZE - page_offset;
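/* i.e. page_length = min(remain, PAGE_SIZE - page_offset), so a single
 * copy never crosses a page boundary in the object's backing store.
 */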
362 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
363 GFP_HIGHUSER | __GFP_RECLAIMABLE);
365 return PTR_ERR(page);
367 vaddr = kmap_atomic(page);
368 ret = __copy_to_user_inatomic(user_data,
371 kunmap_atomic(vaddr);
373 mark_page_accessed(page);
374 page_cache_release(page);
378 remain -= page_length;
379 user_data += page_length;
380 offset += page_length;
387 * This is the fallback shmem pread path, which uses get_user_pages to pin
388 * the destination pages in the user's address space, so that we
389 * can copy out of the object's backing pages while holding the struct mutex
390 * without taking page faults.
393 i915_gem_shmem_pread_slow(struct drm_device *dev,
394 struct drm_i915_gem_object *obj,
395 struct drm_i915_gem_pread *args,
396 struct drm_file *file)
398 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
399 struct mm_struct *mm = current->mm;
400 struct page **user_pages;
402 loff_t offset, pinned_pages, i;
403 loff_t first_data_page, last_data_page, num_pages;
404 int shmem_page_offset;
405 int data_page_index, data_page_offset;
408 uint64_t data_ptr = args->data_ptr;
409 int do_bit17_swizzling;
413 /* Pin the user pages containing the data. We can't fault while
414 * holding the struct mutex, yet we want to hold it while
415 * dereferencing the user data.
417 first_data_page = data_ptr / PAGE_SIZE;
418 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
419 num_pages = last_data_page - first_data_page + 1;
421 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
422 if (user_pages == NULL)
425 mutex_unlock(&dev->struct_mutex);
426 down_read(&mm->mmap_sem);
427 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
428 num_pages, 1, 0, user_pages, NULL);
429 up_read(&mm->mmap_sem);
430 mutex_lock(&dev->struct_mutex);
431 if (pinned_pages < num_pages) {
436 ret = i915_gem_object_set_cpu_read_domain_range(obj,
442 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
444 offset = args->offset;
449 /* Operation in this page
451 * shmem_page_offset = offset within page in shmem file
452 * data_page_index = page number in get_user_pages return
453 * data_page_offset = offset within data_page_index page.
454 * page_length = bytes to copy for this page
456 shmem_page_offset = offset & ~PAGE_MASK;
457 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
458 data_page_offset = data_ptr & ~PAGE_MASK;
460 page_length = remain;
461 if ((shmem_page_offset + page_length) > PAGE_SIZE)
462 page_length = PAGE_SIZE - shmem_page_offset;
463 if ((data_page_offset + page_length) > PAGE_SIZE)
464 page_length = PAGE_SIZE - data_page_offset;
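/* page_length is clamped against both the shmem page boundary and the
 * pinned user page boundary, so each slow_shmem copy below touches at
 * most one kmapped page on either side.
 */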
466 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
467 GFP_HIGHUSER | __GFP_RECLAIMABLE);
469 return PTR_ERR(page);
471 if (do_bit17_swizzling) {
472 slow_shmem_bit17_copy(page,
474 user_pages[data_page_index],
479 slow_shmem_copy(user_pages[data_page_index],
486 mark_page_accessed(page);
487 page_cache_release(page);
489 remain -= page_length;
490 data_ptr += page_length;
491 offset += page_length;
495 for (i = 0; i < pinned_pages; i++) {
496 SetPageDirty(user_pages[i]);
497 mark_page_accessed(user_pages[i]);
498 page_cache_release(user_pages[i]);
500 drm_free_large(user_pages);
506 * Reads data from the object referenced by handle.
508 * On error, the contents of *data are undefined.
511 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
512 struct drm_file *file)
514 struct drm_i915_gem_pread *args = data;
515 struct drm_i915_gem_object *obj;
521 if (!access_ok(VERIFY_WRITE,
522 (char __user *)(uintptr_t)args->data_ptr,
526 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
531 ret = i915_mutex_lock_interruptible(dev);
535 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
536 if (&obj->base == NULL) {
541 /* Bounds check source. */
542 if (args->offset > obj->base.size ||
543 args->size > obj->base.size - args->offset) {
548 trace_i915_gem_object_pread(obj, args->offset, args->size);
550 ret = i915_gem_object_set_cpu_read_domain_range(obj,
557 if (!i915_gem_object_needs_bit17_swizzle(obj))
558 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
560 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
563 drm_gem_object_unreference(&obj->base);
565 mutex_unlock(&dev->struct_mutex);
569 /* This is the fast write path which cannot handle
570 * page faults in the source data
574 fast_user_write(struct io_mapping *mapping,
575 loff_t page_base, int page_offset,
576 char __user *user_data,
580 unsigned long unwritten;
582 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
583 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
585 io_mapping_unmap_atomic(vaddr_atomic);
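/* Any bytes left uncopied by the atomic copy indicate a fault on the
 * user's pages; the caller propagates a non-zero result as an error so
 * the ioctl can retry via the sleepable slow path.
 */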
589 /* Here's the write path which can sleep for
594 slow_kernel_write(struct io_mapping *mapping,
595 loff_t gtt_base, int gtt_offset,
596 struct page *user_page, int user_offset,
599 char __iomem *dst_vaddr;
602 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
603 src_vaddr = kmap(user_page);
605 memcpy_toio(dst_vaddr + gtt_offset,
606 src_vaddr + user_offset,
610 io_mapping_unmap(dst_vaddr);
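/* Unlike the fast path above, the source page here was already pinned
 * with get_user_pages(), so the kmap()/memcpy_toio() cannot fault even
 * though the caller still holds struct_mutex.
 */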
614 * This is the fast pwrite path, where we copy the data directly from the
615 * user into the GTT, uncached.
618 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
619 struct drm_i915_gem_object *obj,
620 struct drm_i915_gem_pwrite *args,
621 struct drm_file *file)
623 drm_i915_private_t *dev_priv = dev->dev_private;
625 loff_t offset, page_base;
626 char __user *user_data;
627 int page_offset, page_length;
629 user_data = (char __user *) (uintptr_t) args->data_ptr;
632 offset = obj->gtt_offset + args->offset;
635 /* Operation in this page
637 * page_base = page offset within aperture
638 * page_offset = offset within page
639 * page_length = bytes to copy for this page
641 page_base = (offset & ~(PAGE_SIZE-1));
642 page_offset = offset & (PAGE_SIZE-1);
643 page_length = remain;
644 if ((page_offset + remain) > PAGE_SIZE)
645 page_length = PAGE_SIZE - page_offset;
647 /* If we get a fault while copying data, then (presumably) our
648 * source page isn't available. Return the error and we'll
649 * retry in the slow path.
651 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
652 page_offset, user_data, page_length))
656 remain -= page_length;
657 user_data += page_length;
658 offset += page_length;
665 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
666 * the memory and maps it using kmap_atomic for copying.
668 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
669 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
672 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
673 struct drm_i915_gem_object *obj,
674 struct drm_i915_gem_pwrite *args,
675 struct drm_file *file)
677 drm_i915_private_t *dev_priv = dev->dev_private;
679 loff_t gtt_page_base, offset;
680 loff_t first_data_page, last_data_page, num_pages;
681 loff_t pinned_pages, i;
682 struct page **user_pages;
683 struct mm_struct *mm = current->mm;
684 int gtt_page_offset, data_page_offset, data_page_index, page_length;
686 uint64_t data_ptr = args->data_ptr;
690 /* Pin the user pages containing the data. We can't fault while
691 * holding the struct mutex, and all of the pwrite implementations
692 * want to hold it while dereferencing the user data.
694 first_data_page = data_ptr / PAGE_SIZE;
695 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
696 num_pages = last_data_page - first_data_page + 1;
698 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
699 if (user_pages == NULL)
702 mutex_unlock(&dev->struct_mutex);
703 down_read(&mm->mmap_sem);
704 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
705 num_pages, 0, 0, user_pages, NULL);
706 up_read(&mm->mmap_sem);
707 mutex_lock(&dev->struct_mutex);
708 if (pinned_pages < num_pages) {
710 goto out_unpin_pages;
713 ret = i915_gem_object_set_to_gtt_domain(obj, true);
715 goto out_unpin_pages;
717 ret = i915_gem_object_put_fence(obj);
719 goto out_unpin_pages;
721 offset = obj->gtt_offset + args->offset;
724 /* Operation in this page
726 * gtt_page_base = page offset within aperture
727 * gtt_page_offset = offset within page in aperture
728 * data_page_index = page number in get_user_pages return
729 * data_page_offset = offset within data_page_index page.
730 * page_length = bytes to copy for this page
732 gtt_page_base = offset & PAGE_MASK;
733 gtt_page_offset = offset & ~PAGE_MASK;
734 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
735 data_page_offset = data_ptr & ~PAGE_MASK;
737 page_length = remain;
738 if ((gtt_page_offset + page_length) > PAGE_SIZE)
739 page_length = PAGE_SIZE - gtt_page_offset;
740 if ((data_page_offset + page_length) > PAGE_SIZE)
741 page_length = PAGE_SIZE - data_page_offset;
743 slow_kernel_write(dev_priv->mm.gtt_mapping,
744 gtt_page_base, gtt_page_offset,
745 user_pages[data_page_index],
749 remain -= page_length;
750 offset += page_length;
751 data_ptr += page_length;
755 for (i = 0; i < pinned_pages; i++)
756 page_cache_release(user_pages[i]);
757 drm_free_large(user_pages);
763 * This is the fast shmem pwrite path, which attempts to directly
764 * copy_from_user into the kmapped pages backing the object.
767 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
768 struct drm_i915_gem_object *obj,
769 struct drm_i915_gem_pwrite *args,
770 struct drm_file *file)
772 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
775 char __user *user_data;
776 int page_offset, page_length;
778 user_data = (char __user *) (uintptr_t) args->data_ptr;
781 offset = args->offset;
789 /* Operation in this page
791 * page_offset = offset within page
792 * page_length = bytes to copy for this page
794 page_offset = offset & (PAGE_SIZE-1);
795 page_length = remain;
796 if ((page_offset + remain) > PAGE_SIZE)
797 page_length = PAGE_SIZE - page_offset;
799 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
800 GFP_HIGHUSER | __GFP_RECLAIMABLE);
802 return PTR_ERR(page);
804 vaddr = kmap_atomic(page);
805 ret = __copy_from_user_inatomic(vaddr + page_offset,
808 kunmap_atomic(vaddr);
810 set_page_dirty(page);
811 mark_page_accessed(page);
812 page_cache_release(page);
814 /* If we get a fault while copying data, then (presumably) our
815 * source page isn't available. Return the error and we'll
816 * retry in the slow path.
821 remain -= page_length;
822 user_data += page_length;
823 offset += page_length;
830 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
831 * the memory and maps it using kmap_atomic for copying.
833 * This avoids taking mmap_sem for faulting on the user's address while the
834 * struct_mutex is held.
837 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
838 struct drm_i915_gem_object *obj,
839 struct drm_i915_gem_pwrite *args,
840 struct drm_file *file)
842 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
843 struct mm_struct *mm = current->mm;
844 struct page **user_pages;
846 loff_t offset, pinned_pages, i;
847 loff_t first_data_page, last_data_page, num_pages;
848 int shmem_page_offset;
849 int data_page_index, data_page_offset;
852 uint64_t data_ptr = args->data_ptr;
853 int do_bit17_swizzling;
857 /* Pin the user pages containing the data. We can't fault while
858 * holding the struct mutex, and all of the pwrite implementations
859 * want to hold it while dereferencing the user data.
861 first_data_page = data_ptr / PAGE_SIZE;
862 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
863 num_pages = last_data_page - first_data_page + 1;
865 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
866 if (user_pages == NULL)
869 mutex_unlock(&dev->struct_mutex);
870 down_read(&mm->mmap_sem);
871 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
872 num_pages, 0, 0, user_pages, NULL);
873 up_read(&mm->mmap_sem);
874 mutex_lock(&dev->struct_mutex);
875 if (pinned_pages < num_pages) {
880 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
884 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886 offset = args->offset;
892 /* Operation in this page
894 * shmem_page_offset = offset within page in shmem file
895 * data_page_index = page number in get_user_pages return
896 * data_page_offset = offset within data_page_index page.
897 * page_length = bytes to copy for this page
899 shmem_page_offset = offset & ~PAGE_MASK;
900 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
901 data_page_offset = data_ptr & ~PAGE_MASK;
903 page_length = remain;
904 if ((shmem_page_offset + page_length) > PAGE_SIZE)
905 page_length = PAGE_SIZE - shmem_page_offset;
906 if ((data_page_offset + page_length) > PAGE_SIZE)
907 page_length = PAGE_SIZE - data_page_offset;
909 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
910 GFP_HIGHUSER | __GFP_RECLAIMABLE);
916 if (do_bit17_swizzling) {
917 slow_shmem_bit17_copy(page,
919 user_pages[data_page_index],
924 slow_shmem_copy(page,
926 user_pages[data_page_index],
931 set_page_dirty(page);
932 mark_page_accessed(page);
933 page_cache_release(page);
935 remain -= page_length;
936 data_ptr += page_length;
937 offset += page_length;
941 for (i = 0; i < pinned_pages; i++)
942 page_cache_release(user_pages[i]);
943 drm_free_large(user_pages);
949 * Writes data to the object referenced by handle.
951 * On error, the contents of the buffer that were to be modified are undefined.
954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
955 struct drm_file *file)
957 struct drm_i915_gem_pwrite *args = data;
958 struct drm_i915_gem_object *obj;
964 if (!access_ok(VERIFY_READ,
965 (char __user *)(uintptr_t)args->data_ptr,
969 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
974 ret = i915_mutex_lock_interruptible(dev);
978 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
979 if (&obj->base == NULL) {
984 /* Bounds check destination. */
985 if (args->offset > obj->base.size ||
986 args->size > obj->base.size - args->offset) {
991 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
993 /* We can only do the GTT pwrite on untiled buffers, as otherwise
994 * it would end up going through the fenced access, and we'll get
995 * different detiling behavior between reading and writing.
996 * pread/pwrite currently are reading and writing from the CPU
997 * perspective, requiring manual detiling by the client.
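/* Path selection below: the phys pwrite handles objects with dedicated
 * physical backing, GTT-bound objects outside the CPU write domain go
 * through the fast (and, on fault, slow) GTT paths, and everything else
 * falls back to the shmem paths.
 */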
1000 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1001 else if (obj->gtt_space &&
1002 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1003 ret = i915_gem_object_pin(obj, 0, true);
1007 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1011 ret = i915_gem_object_put_fence(obj);
1015 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1017 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1020 i915_gem_object_unpin(obj);
1022 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1027 if (!i915_gem_object_needs_bit17_swizzle(obj))
1028 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1030 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1034 drm_gem_object_unreference(&obj->base);
1036 mutex_unlock(&dev->struct_mutex);
1041 * Called when user space prepares to use an object with the CPU, either
1042 * through the mmap ioctl's mapping or a GTT mapping.
1045 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1046 struct drm_file *file)
1048 struct drm_i915_gem_set_domain *args = data;
1049 struct drm_i915_gem_object *obj;
1050 uint32_t read_domains = args->read_domains;
1051 uint32_t write_domain = args->write_domain;
1054 if (!(dev->driver->driver_features & DRIVER_GEM))
1057 /* Only handle setting domains to types used by the CPU. */
1058 if (write_domain & I915_GEM_GPU_DOMAINS)
1061 if (read_domains & I915_GEM_GPU_DOMAINS)
1064 /* Having something in the write domain implies it's in the read
1065 * domain, and only that read domain. Enforce that in the request.
1067 if (write_domain != 0 && read_domains != write_domain)
1070 ret = i915_mutex_lock_interruptible(dev);
1074 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1075 if (&obj->base == NULL) {
1080 if (read_domains & I915_GEM_DOMAIN_GTT) {
1081 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1083 /* Silently promote "you're not bound, there was nothing to do"
1084 * to success, since the client was just asking us to
1085 * make sure everything was done.
1090 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1093 drm_gem_object_unreference(&obj->base);
1095 mutex_unlock(&dev->struct_mutex);
1100 * Called when user space has done writes to this buffer
1103 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1104 struct drm_file *file)
1106 struct drm_i915_gem_sw_finish *args = data;
1107 struct drm_i915_gem_object *obj;
1110 if (!(dev->driver->driver_features & DRIVER_GEM))
1113 ret = i915_mutex_lock_interruptible(dev);
1117 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1118 if (&obj->base == NULL) {
1123 /* Pinned buffers may be scanout, so flush the cache */
1125 i915_gem_object_flush_cpu_write_domain(obj);
1127 drm_gem_object_unreference(&obj->base);
1129 mutex_unlock(&dev->struct_mutex);
1134 * Maps the contents of an object, returning the address it is mapped
1137 * While the mapping holds a reference on the contents of the object, it doesn't
1138 * imply a ref on the object itself.
1141 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1142 struct drm_file *file)
1144 struct drm_i915_private *dev_priv = dev->dev_private;
1145 struct drm_i915_gem_mmap *args = data;
1146 struct drm_gem_object *obj;
1149 if (!(dev->driver->driver_features & DRIVER_GEM))
1152 obj = drm_gem_object_lookup(dev, file, args->handle);
1156 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1157 drm_gem_object_unreference_unlocked(obj);
1161 down_write(&current->mm->mmap_sem);
1162 addr = do_mmap(obj->filp, 0, args->size,
1163 PROT_READ | PROT_WRITE, MAP_SHARED,
1165 up_write(&current->mm->mmap_sem);
1166 drm_gem_object_unreference_unlocked(obj);
1167 if (IS_ERR((void *)addr))
1170 args->addr_ptr = (uint64_t) addr;
1176 * i915_gem_fault - fault a page into the GTT
1177 * vma: VMA in question
1180 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1181 * from userspace. The fault handler takes care of binding the object to
1182 * the GTT (if needed), allocating and programming a fence register (again,
1183 * only if needed based on whether the old reg is still valid or the object
1184 * is tiled) and inserting a new PTE into the faulting process.
1186 * Note that the faulting process may involve evicting existing objects
1187 * from the GTT and/or fence registers to make room. So performance may
1188 * suffer if the GTT working set is large or there are few fence registers
1191 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1193 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1194 struct drm_device *dev = obj->base.dev;
1195 drm_i915_private_t *dev_priv = dev->dev_private;
1196 pgoff_t page_offset;
1199 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1201 /* We don't use vmf->pgoff since that has the fake offset */
1202 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1205 ret = i915_mutex_lock_interruptible(dev);
1209 trace_i915_gem_object_fault(obj, page_offset, true, write);
1211 /* Now bind it into the GTT if needed */
1212 if (!obj->map_and_fenceable) {
1213 ret = i915_gem_object_unbind(obj);
1217 if (!obj->gtt_space) {
1218 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1223 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1227 if (obj->tiling_mode == I915_TILING_NONE)
1228 ret = i915_gem_object_put_fence(obj);
1230 ret = i915_gem_object_get_fence(obj, NULL);
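/* A tiled object faulted through the GTT needs a live fence register so
 * that CPU access through the aperture sees correctly detiled data; an
 * untiled object can safely give its fence back to the pool.
 */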
1234 if (i915_gem_object_is_inactive(obj))
1235 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1237 obj->fault_mappable = true;
1239 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1242 /* Finally, remap it using the new GTT offset */
1243 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1245 mutex_unlock(&dev->struct_mutex);
1250 /* Give the error handler a chance to run and move the
1251 * objects off the GPU active list. Next time we service the
1252 * fault, we should be able to transition the page into the
1253 * GTT without touching the GPU (and so avoid further
1254 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1255 * with coherency, just lost writes.
1261 return VM_FAULT_NOPAGE;
1263 return VM_FAULT_OOM;
1265 return VM_FAULT_SIGBUS;
1270 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1271 * @obj: obj in question
1273 * GEM memory mapping works by handing back to userspace a fake mmap offset
1274 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1275 * up the object based on the offset and sets up the various memory mapping
1278 * This routine allocates and attaches a fake offset for @obj.
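/* Roughly: a drm_mm node is reserved in the DRM core's offset manager and
 * hashed by its start page; that start, shifted by PAGE_SHIFT, becomes the
 * fake offset userspace later passes to mmap(2).
 */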
1281 i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1283 struct drm_device *dev = obj->base.dev;
1284 struct drm_gem_mm *mm = dev->mm_private;
1285 struct drm_map_list *list;
1286 struct drm_local_map *map;
1289 /* Set the object up for mmap'ing */
1290 list = &obj->base.map_list;
1291 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1296 map->type = _DRM_GEM;
1297 map->size = obj->base.size;
1300 /* Get a DRM GEM mmap offset allocated... */
1301 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1302 obj->base.size / PAGE_SIZE,
1304 if (!list->file_offset_node) {
1305 DRM_ERROR("failed to allocate offset for bo %d\n",
1311 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1312 obj->base.size / PAGE_SIZE,
1314 if (!list->file_offset_node) {
1319 list->hash.key = list->file_offset_node->start;
1320 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1322 DRM_ERROR("failed to add to map hash\n");
1329 drm_mm_put_block(list->file_offset_node);
1338 * i915_gem_release_mmap - remove physical page mappings
1339 * @obj: obj in question
1341 * Preserve the reservation of the mmapping with the DRM core code, but
1342 * relinquish ownership of the pages back to the system.
1344 * It is vital that we remove the page mapping if we have mapped a tiled
1345 * object through the GTT and then lose the fence register due to
1346 * resource pressure. Similarly if the object has been moved out of the
1347 * aperture, then pages mapped into userspace must be revoked. Removing the
1348 * mapping will then trigger a page fault on the next user access, allowing
1349 * fixup by i915_gem_fault().
1352 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1354 if (!obj->fault_mappable)
1357 if (obj->base.dev->dev_mapping)
1358 unmap_mapping_range(obj->base.dev->dev_mapping,
1359 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1362 obj->fault_mappable = false;
1366 i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1368 struct drm_device *dev = obj->base.dev;
1369 struct drm_gem_mm *mm = dev->mm_private;
1370 struct drm_map_list *list = &obj->base.map_list;
1372 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1373 drm_mm_put_block(list->file_offset_node);
1379 i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1381 struct drm_device *dev = obj->base.dev;
1384 if (INTEL_INFO(dev)->gen >= 4 ||
1385 obj->tiling_mode == I915_TILING_NONE)
1386 return obj->base.size;
1388 /* Previous chips need a power-of-two fence region when tiling */
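/* On these chips a fence can only cover a power-of-two-sized, naturally
 * aligned region, so e.g. a 700KiB tiled object ends up with a 1MiB fence
 * region.
 */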
1389 if (INTEL_INFO(dev)->gen == 3)
1394 while (size < obj->base.size)
1401 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1402 * @obj: object to check
1404 * Return the required GTT alignment for an object, taking into account
1405 * potential fence register mapping.
1408 i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1410 struct drm_device *dev = obj->base.dev;
1413 * Minimum alignment is 4k (GTT page size), but might be greater
1414 * if a fence register is needed for the object.
1416 if (INTEL_INFO(dev)->gen >= 4 ||
1417 obj->tiling_mode == I915_TILING_NONE)
1421 * Previous chips need to be aligned to the size of the smallest
1422 * fence register that can contain the object.
1424 return i915_gem_get_gtt_size(obj);
1428 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1430 * @obj: object to check
1432 * Return the required GTT alignment for an object, only taking into account
1433 * unfenced tiled surface requirements.
1436 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1438 struct drm_device *dev = obj->base.dev;
1442 * Minimum alignment is 4k (GTT page size) for sane hw.
1444 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1445 obj->tiling_mode == I915_TILING_NONE)
1449 * Older chips need unfenced tiled buffers to be aligned to the left
1450 * edge of an even tile row (where tile rows are counted as if the bo is
1451 * placed in a fenced gtt region).
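/* Concretely, the alignment returned below is two tile rows' worth of
 * bytes (tile_height scanlines times obj->stride, doubled), so the object
 * starts on an even tile row as described above.
 */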
1454 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1459 return tile_height * obj->stride * 2;
1463 i915_gem_mmap_gtt(struct drm_file *file,
1464 struct drm_device *dev,
1468 struct drm_i915_private *dev_priv = dev->dev_private;
1469 struct drm_i915_gem_object *obj;
1472 if (!(dev->driver->driver_features & DRIVER_GEM))
1475 ret = i915_mutex_lock_interruptible(dev);
1479 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1480 if (&obj->base == NULL) {
1485 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1490 if (obj->madv != I915_MADV_WILLNEED) {
1491 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1496 if (!obj->base.map_list.map) {
1497 ret = i915_gem_create_mmap_offset(obj);
1502 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1505 drm_gem_object_unreference(&obj->base);
1507 mutex_unlock(&dev->struct_mutex);
1512 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1514 * @data: GTT mapping ioctl data
1515 * @file: GEM object info
1517 * Simply returns the fake offset to userspace so it can mmap it.
1518 * The mmap call will end up in drm_gem_mmap(), which will set things
1519 * up so we can get faults in the handler above.
1521 * The fault handler will take care of binding the object into the GTT
1522 * (since it may have been evicted to make room for something), allocating
1523 * a fence register, and mapping the appropriate aperture address into
1527 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1528 struct drm_file *file)
1530 struct drm_i915_gem_mmap_gtt *args = data;
1532 if (!(dev->driver->driver_features & DRIVER_GEM))
1535 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1540 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1544 struct address_space *mapping;
1545 struct inode *inode;
1548 /* Get the list of pages out of our struct file. They'll be pinned
1549 * at this point until we release them.
1551 page_count = obj->base.size / PAGE_SIZE;
1552 BUG_ON(obj->pages != NULL);
1553 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1554 if (obj->pages == NULL)
1557 inode = obj->base.filp->f_path.dentry->d_inode;
1558 mapping = inode->i_mapping;
1559 for (i = 0; i < page_count; i++) {
1560 page = read_cache_page_gfp(mapping, i,
1568 obj->pages[i] = page;
1571 if (obj->tiling_mode != I915_TILING_NONE)
1572 i915_gem_object_do_bit_17_swizzle(obj);
1578 page_cache_release(obj->pages[i]);
1580 drm_free_large(obj->pages);
1582 return PTR_ERR(page);
1586 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1588 int page_count = obj->base.size / PAGE_SIZE;
1591 BUG_ON(obj->madv == __I915_MADV_PURGED);
1593 if (obj->tiling_mode != I915_TILING_NONE)
1594 i915_gem_object_save_bit_17_swizzle(obj);
1596 if (obj->madv == I915_MADV_DONTNEED)
1599 for (i = 0; i < page_count; i++) {
1601 set_page_dirty(obj->pages[i]);
1603 if (obj->madv == I915_MADV_WILLNEED)
1604 mark_page_accessed(obj->pages[i]);
1606 page_cache_release(obj->pages[i]);
1610 drm_free_large(obj->pages);
1615 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1616 struct intel_ring_buffer *ring,
1619 struct drm_device *dev = obj->base.dev;
1620 struct drm_i915_private *dev_priv = dev->dev_private;
1622 BUG_ON(ring == NULL);
1625 /* Add a reference if we're newly entering the active list. */
1627 drm_gem_object_reference(&obj->base);
1631 /* Move from whatever list we were on to the tail of execution. */
1632 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1633 list_move_tail(&obj->ring_list, &ring->active_list);
1635 obj->last_rendering_seqno = seqno;
1636 if (obj->fenced_gpu_access) {
1637 struct drm_i915_fence_reg *reg;
1639 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1641 obj->last_fenced_seqno = seqno;
1642 obj->last_fenced_ring = ring;
1644 reg = &dev_priv->fence_regs[obj->fence_reg];
1645 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1650 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1652 list_del_init(&obj->ring_list);
1653 obj->last_rendering_seqno = 0;
1657 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1659 struct drm_device *dev = obj->base.dev;
1660 drm_i915_private_t *dev_priv = dev->dev_private;
1662 BUG_ON(!obj->active);
1663 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1665 i915_gem_object_move_off_active(obj);
1669 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1671 struct drm_device *dev = obj->base.dev;
1672 struct drm_i915_private *dev_priv = dev->dev_private;
1674 if (obj->pin_count != 0)
1675 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1677 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1679 BUG_ON(!list_empty(&obj->gpu_write_list));
1680 BUG_ON(!obj->active);
1683 i915_gem_object_move_off_active(obj);
1684 obj->fenced_gpu_access = false;
1687 obj->pending_gpu_write = false;
1688 drm_gem_object_unreference(&obj->base);
1690 WARN_ON(i915_verify_lists(dev));
1693 /* Immediately discard the backing storage */
1695 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1697 struct inode *inode;
1699 /* Our goal here is to return as much of the memory as
1700 * is possible back to the system as we are called from OOM.
1701 * To do this we must instruct the shmfs to drop all of its
1702 * backing pages, *now*. Here we mirror the actions taken
1703 * by shmem_delete_inode() to release the backing store.
1705 inode = obj->base.filp->f_path.dentry->d_inode;
1706 truncate_inode_pages(inode->i_mapping, 0);
1707 if (inode->i_op->truncate_range)
1708 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1710 obj->madv = __I915_MADV_PURGED;
1714 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1716 return obj->madv == I915_MADV_DONTNEED;
1720 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1721 uint32_t flush_domains)
1723 struct drm_i915_gem_object *obj, *next;
1725 list_for_each_entry_safe(obj, next,
1726 &ring->gpu_write_list,
1728 if (obj->base.write_domain & flush_domains) {
1729 uint32_t old_write_domain = obj->base.write_domain;
1731 obj->base.write_domain = 0;
1732 list_del_init(&obj->gpu_write_list);
1733 i915_gem_object_move_to_active(obj, ring,
1734 i915_gem_next_request_seqno(ring));
1736 trace_i915_gem_object_change_domain(obj,
1737 obj->base.read_domains,
1744 i915_add_request(struct intel_ring_buffer *ring,
1745 struct drm_file *file,
1746 struct drm_i915_gem_request *request)
1748 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1753 BUG_ON(request == NULL);
1755 ret = ring->add_request(ring, &seqno);
1759 trace_i915_gem_request_add(ring, seqno);
1761 request->seqno = seqno;
1762 request->ring = ring;
1763 request->emitted_jiffies = jiffies;
1764 was_empty = list_empty(&ring->request_list);
1765 list_add_tail(&request->list, &ring->request_list);
1768 struct drm_i915_file_private *file_priv = file->driver_priv;
1770 spin_lock(&file_priv->mm.lock);
1771 request->file_priv = file_priv;
1772 list_add_tail(&request->client_list,
1773 &file_priv->mm.request_list);
1774 spin_unlock(&file_priv->mm.lock);
1777 ring->outstanding_lazy_request = false;
1779 if (!dev_priv->mm.suspended) {
1780 mod_timer(&dev_priv->hangcheck_timer,
1781 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1783 queue_delayed_work(dev_priv->wq,
1784 &dev_priv->mm.retire_work, HZ);
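/* Each new request thus re-arms the hangcheck timer and ensures the
 * retire worker runs within roughly a second (HZ jiffies) to reap
 * completed requests even if nobody explicitly waits on them.
 */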
1790 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1792 struct drm_i915_file_private *file_priv = request->file_priv;
1797 spin_lock(&file_priv->mm.lock);
1798 if (request->file_priv) {
1799 list_del(&request->client_list);
1800 request->file_priv = NULL;
1802 spin_unlock(&file_priv->mm.lock);
1805 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1806 struct intel_ring_buffer *ring)
1808 while (!list_empty(&ring->request_list)) {
1809 struct drm_i915_gem_request *request;
1811 request = list_first_entry(&ring->request_list,
1812 struct drm_i915_gem_request,
1815 list_del(&request->list);
1816 i915_gem_request_remove_from_client(request);
1820 while (!list_empty(&ring->active_list)) {
1821 struct drm_i915_gem_object *obj;
1823 obj = list_first_entry(&ring->active_list,
1824 struct drm_i915_gem_object,
1827 obj->base.write_domain = 0;
1828 list_del_init(&obj->gpu_write_list);
1829 i915_gem_object_move_to_inactive(obj);
1833 static void i915_gem_reset_fences(struct drm_device *dev)
1835 struct drm_i915_private *dev_priv = dev->dev_private;
1838 for (i = 0; i < 16; i++) {
1839 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1840 struct drm_i915_gem_object *obj = reg->obj;
1845 if (obj->tiling_mode)
1846 i915_gem_release_mmap(obj);
1848 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1849 reg->obj->fenced_gpu_access = false;
1850 reg->obj->last_fenced_seqno = 0;
1851 reg->obj->last_fenced_ring = NULL;
1852 i915_gem_clear_fence_reg(dev, reg);
1856 void i915_gem_reset(struct drm_device *dev)
1858 struct drm_i915_private *dev_priv = dev->dev_private;
1859 struct drm_i915_gem_object *obj;
1862 for (i = 0; i < I915_NUM_RINGS; i++)
1863 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1865 /* Remove anything from the flushing lists. The GPU cache is likely
1866 * to be lost on reset along with the data, so simply move the
1867 * lost bo to the inactive list.
1869 while (!list_empty(&dev_priv->mm.flushing_list)) {
1870 obj = list_first_entry(&dev_priv->mm.flushing_list,
1871 struct drm_i915_gem_object,
1874 obj->base.write_domain = 0;
1875 list_del_init(&obj->gpu_write_list);
1876 i915_gem_object_move_to_inactive(obj);
1879 /* Move everything out of the GPU domains to ensure we do any
1880 * necessary invalidation upon reuse.
1882 list_for_each_entry(obj,
1883 &dev_priv->mm.inactive_list,
1886 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1889 /* The fence registers are invalidated so clear them out */
1890 i915_gem_reset_fences(dev);
1894 * This function clears the request list as sequence numbers are passed.
1897 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1902 if (list_empty(&ring->request_list))
1905 WARN_ON(i915_verify_lists(ring->dev));
1907 seqno = ring->get_seqno(ring);
1909 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1910 if (seqno >= ring->sync_seqno[i])
1911 ring->sync_seqno[i] = 0;
1913 while (!list_empty(&ring->request_list)) {
1914 struct drm_i915_gem_request *request;
1916 request = list_first_entry(&ring->request_list,
1917 struct drm_i915_gem_request,
1920 if (!i915_seqno_passed(seqno, request->seqno))
1923 trace_i915_gem_request_retire(ring, request->seqno);
1925 list_del(&request->list);
1926 i915_gem_request_remove_from_client(request);
1930 /* Move any buffers on the active list that are no longer referenced
1931 * by the ringbuffer to the flushing/inactive lists as appropriate.
1933 while (!list_empty(&ring->active_list)) {
1934 struct drm_i915_gem_object *obj;
1936 obj = list_first_entry(&ring->active_list,
1937 struct drm_i915_gem_object,
1940 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1943 if (obj->base.write_domain != 0)
1944 i915_gem_object_move_to_flushing(obj);
1946 i915_gem_object_move_to_inactive(obj);
1949 if (unlikely(ring->trace_irq_seqno &&
1950 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1951 ring->irq_put(ring);
1952 ring->trace_irq_seqno = 0;
1955 WARN_ON(i915_verify_lists(ring->dev));
1959 i915_gem_retire_requests(struct drm_device *dev)
1961 drm_i915_private_t *dev_priv = dev->dev_private;
1964 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1965 struct drm_i915_gem_object *obj, *next;
1967 /* We must be careful that during unbind() we do not
1968 * accidentally infinitely recurse into retire requests.
1970 * retire -> free -> unbind -> wait -> retire_ring
1972 list_for_each_entry_safe(obj, next,
1973 &dev_priv->mm.deferred_free_list,
1975 i915_gem_free_object_tail(obj);
1978 for (i = 0; i < I915_NUM_RINGS; i++)
1979 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1983 i915_gem_retire_work_handler(struct work_struct *work)
1985 drm_i915_private_t *dev_priv;
1986 struct drm_device *dev;
1990 dev_priv = container_of(work, drm_i915_private_t,
1991 mm.retire_work.work);
1992 dev = dev_priv->dev;
1994 /* Come back later if the device is busy... */
1995 if (!mutex_trylock(&dev->struct_mutex)) {
1996 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2000 i915_gem_retire_requests(dev);
2002 /* Send a periodic flush down the ring so we don't hold onto GEM
2003 * objects indefinitely.
2006 for (i = 0; i < I915_NUM_RINGS; i++) {
2007 struct intel_ring_buffer *ring = &dev_priv->ring[i];
2009 if (!list_empty(&ring->gpu_write_list)) {
2010 struct drm_i915_gem_request *request;
2013 ret = i915_gem_flush_ring(ring,
2014 0, I915_GEM_GPU_DOMAINS);
2015 request = kzalloc(sizeof(*request), GFP_KERNEL);
2016 if (ret || request == NULL ||
2017 i915_add_request(ring, NULL, request))
2021 idle &= list_empty(&ring->request_list);
2024 if (!dev_priv->mm.suspended && !idle)
2025 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2027 mutex_unlock(&dev->struct_mutex);
2031 * Waits for a sequence number to be signaled, and cleans up the
2032 * request and object lists appropriately for that event.
2035 i915_wait_request(struct intel_ring_buffer *ring,
2038 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2044 if (atomic_read(&dev_priv->mm.wedged)) {
2045 struct completion *x = &dev_priv->error_completion;
2046 bool recovery_complete;
2047 unsigned long flags;
2049 /* Give the error handler a chance to run. */
2050 spin_lock_irqsave(&x->wait.lock, flags);
2051 recovery_complete = x->done > 0;
2052 spin_unlock_irqrestore(&x->wait.lock, flags);
2054 return recovery_complete ? -EIO : -EAGAIN;
2057 if (seqno == ring->outstanding_lazy_request) {
2058 struct drm_i915_gem_request *request;
2060 request = kzalloc(sizeof(*request), GFP_KERNEL);
2061 if (request == NULL)
2064 ret = i915_add_request(ring, NULL, request);
2070 seqno = request->seqno;
2073 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2074 if (HAS_PCH_SPLIT(ring->dev))
2075 ier = I915_READ(DEIER) | I915_READ(GTIER);
2077 ier = I915_READ(IER);
2079 DRM_ERROR("something (likely vbetool) disabled "
2080 "interrupts, re-enabling\n");
2081 i915_driver_irq_preinstall(ring->dev);
2082 i915_driver_irq_postinstall(ring->dev);
2085 trace_i915_gem_request_wait_begin(ring, seqno);
2087 ring->waiting_seqno = seqno;
2088 if (ring->irq_get(ring)) {
2089 if (dev_priv->mm.interruptible)
2090 ret = wait_event_interruptible(ring->irq_queue,
2091 i915_seqno_passed(ring->get_seqno(ring), seqno)
2092 || atomic_read(&dev_priv->mm.wedged));
2094 wait_event(ring->irq_queue,
2095 i915_seqno_passed(ring->get_seqno(ring), seqno)
2096 || atomic_read(&dev_priv->mm.wedged));
2098 ring->irq_put(ring);
2099 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2101 atomic_read(&dev_priv->mm.wedged), 3000))
2103 ring->waiting_seqno = 0;
2105 trace_i915_gem_request_wait_end(ring, seqno);
2107 if (atomic_read(&dev_priv->mm.wedged))
2110 if (ret && ret != -ERESTARTSYS)
2111 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2112 __func__, ret, seqno, ring->get_seqno(ring),
2113 dev_priv->next_seqno);
2115 /* Directly dispatch request retiring. While we have the work queue
2116 * to handle this, the waiter on a request often wants an associated
2117 * buffer to have made it to the inactive list, and we would need
2118 * a separate wait queue to handle that.
2121 i915_gem_retire_requests_ring(ring);
2127 * Ensures that all rendering to the object has completed and the object is
2128 * safe to unbind from the GTT or access from the CPU.
2131 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2135 /* This function only exists to support waiting for existing rendering,
2136 * not for emitting required flushes.
2138 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2140 /* If there is rendering queued on the buffer being evicted, wait for
2144 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2153 * Unbinds an object from the GTT aperture.
2156 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2160 if (obj->gtt_space == NULL)
2163 if (obj->pin_count != 0) {
2164 DRM_ERROR("Attempting to unbind pinned buffer\n");
2168 /* blow away mappings if mapped through GTT */
2169 i915_gem_release_mmap(obj);
2171 /* Move the object to the CPU domain to ensure that
2172 * any possible CPU writes while it's not in the GTT
2173 * are flushed when we go to remap it. This will
2174 * also ensure that all pending GPU writes are finished
2177 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2178 if (ret == -ERESTARTSYS)
2180 /* Continue on if we fail due to EIO, the GPU is hung so we
2181 * should be safe and we need to cleanup or else we might
2182 * cause memory corruption through use-after-free.
2185 i915_gem_clflush_object(obj);
2186 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2189 /* release the fence reg _after_ flushing */
2190 ret = i915_gem_object_put_fence(obj);
2191 if (ret == -ERESTARTSYS)
2194 trace_i915_gem_object_unbind(obj);
2196 i915_gem_gtt_unbind_object(obj);
2197 i915_gem_object_put_pages_gtt(obj);
2199 list_del_init(&obj->gtt_list);
2200 list_del_init(&obj->mm_list);
2201 /* Avoid an unnecessary call to unbind on rebind. */
2202 obj->map_and_fenceable = true;
2204 drm_mm_put_block(obj->gtt_space);
2205 obj->gtt_space = NULL;
2206 obj->gtt_offset = 0;
2208 if (i915_gem_object_is_purgeable(obj))
2209 i915_gem_object_truncate(obj);
2215 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2216 uint32_t invalidate_domains,
2217 uint32_t flush_domains)
2221 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2224 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2226 ret = ring->flush(ring, invalidate_domains, flush_domains);
2230 if (flush_domains & I915_GEM_GPU_DOMAINS)
2231 i915_gem_process_flushing_list(ring, flush_domains);
2236 static int i915_ring_idle(struct intel_ring_buffer *ring)
2240 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2243 if (!list_empty(&ring->gpu_write_list)) {
2244 ret = i915_gem_flush_ring(ring,
2245 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2250 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2254 i915_gpu_idle(struct drm_device *dev)
2256 drm_i915_private_t *dev_priv = dev->dev_private;
2260 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2261 list_empty(&dev_priv->mm.active_list));
2265 /* Flush everything onto the inactive list. */
2266 for (i = 0; i < I915_NUM_RINGS; i++) {
2267 ret = i915_ring_idle(&dev_priv->ring[i]);
2275 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2276 struct intel_ring_buffer *pipelined)
2278 struct drm_device *dev = obj->base.dev;
2279 drm_i915_private_t *dev_priv = dev->dev_private;
2280 u32 size = obj->gtt_space->size;
2281 int regnum = obj->fence_reg;
2284 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2286 val |= obj->gtt_offset & 0xfffff000;
2287 val |= (uint64_t)((obj->stride / 128) - 1) <<
2288 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2290 if (obj->tiling_mode == I915_TILING_Y)
2291 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2292 val |= I965_FENCE_REG_VALID;
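/* The 64-bit value built above encodes the last and first pages of the
 * fenced GTT range, the pitch in 128-byte units (minus one), the Y-tiling
 * flag and the valid bit; it is either emitted via LOAD_REGISTER_IMM on
 * the given ring or written directly with MMIO below.
 */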
2295 int ret = intel_ring_begin(pipelined, 6);
2299 intel_ring_emit(pipelined, MI_NOOP);
2300 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2301 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2302 intel_ring_emit(pipelined, (u32)val);
2303 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2304 intel_ring_emit(pipelined, (u32)(val >> 32));
2305 intel_ring_advance(pipelined);
2307 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2312 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2313 struct intel_ring_buffer *pipelined)
2315 struct drm_device *dev = obj->base.dev;
2316 drm_i915_private_t *dev_priv = dev->dev_private;
2317 u32 size = obj->gtt_space->size;
2318 int regnum = obj->fence_reg;
2321 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2323 val |= obj->gtt_offset & 0xfffff000;
2324 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2325 if (obj->tiling_mode == I915_TILING_Y)
2326 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2327 val |= I965_FENCE_REG_VALID;
2330 int ret = intel_ring_begin(pipelined, 6);
2334 intel_ring_emit(pipelined, MI_NOOP);
2335 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2336 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2337 intel_ring_emit(pipelined, (u32)val);
2338 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2339 intel_ring_emit(pipelined, (u32)(val >> 32));
2340 intel_ring_advance(pipelined);
2342 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2347 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2348 struct intel_ring_buffer *pipelined)
2350 struct drm_device *dev = obj->base.dev;
2351 drm_i915_private_t *dev_priv = dev->dev_private;
2352 u32 size = obj->gtt_space->size;
2353 u32 fence_reg, val, pitch_val;
2356 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2357 (size & -size) != size ||
2358 (obj->gtt_offset & (size - 1)),
2359 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2360 obj->gtt_offset, obj->map_and_fenceable, size))
2363 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2368 /* Note: pitch better be a power of two tile widths */
2369 pitch_val = obj->stride / tile_width;
2370 pitch_val = ffs(pitch_val) - 1;
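/* i.e. the register wants log2 of the pitch in tile widths: for example,
 * a 2048-byte stride with 512-byte-wide X tiles gives
 * pitch_val = ffs(4) - 1 = 2.
 */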
2372 val = obj->gtt_offset;
2373 if (obj->tiling_mode == I915_TILING_Y)
2374 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2375 val |= I915_FENCE_SIZE_BITS(size);
2376 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2377 val |= I830_FENCE_REG_VALID;
2379 fence_reg = obj->fence_reg;
2381 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2383 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2386 int ret = intel_ring_begin(pipelined, 4);
2390 intel_ring_emit(pipelined, MI_NOOP);
2391 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2392 intel_ring_emit(pipelined, fence_reg);
2393 intel_ring_emit(pipelined, val);
2394 intel_ring_advance(pipelined);
2396 I915_WRITE(fence_reg, val);
2401 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2402 struct intel_ring_buffer *pipelined)
2404 struct drm_device *dev = obj->base.dev;
2405 drm_i915_private_t *dev_priv = dev->dev_private;
2406 u32 size = obj->gtt_space->size;
2407 int regnum = obj->fence_reg;
2411 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2412 (size & -size) != size ||
2413 (obj->gtt_offset & (size - 1)),
2414 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2415 obj->gtt_offset, size))
2418 pitch_val = obj->stride / 128;
2419 pitch_val = ffs(pitch_val) - 1;
2421 val = obj->gtt_offset;
2422 if (obj->tiling_mode == I915_TILING_Y)
2423 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2424 val |= I830_FENCE_SIZE_BITS(size);
2425 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2426 val |= I830_FENCE_REG_VALID;
2429 int ret = intel_ring_begin(pipelined, 4);
2433 intel_ring_emit(pipelined, MI_NOOP);
2434 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2435 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2436 intel_ring_emit(pipelined, val);
2437 intel_ring_advance(pipelined);
2439 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2444 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2446 return i915_seqno_passed(ring->get_seqno(ring), seqno);
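/* Flush any access still covered by the object's fence before the fence
* is changed: flush outstanding GPU writes, wait for the last fenced
* request if it was issued on a ring other than @pipelined, and order
* pending CPU accesses against the fence update.
*/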
2450 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2451 struct intel_ring_buffer *pipelined)
2455 if (obj->fenced_gpu_access) {
2456 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2457 ret = i915_gem_flush_ring(obj->last_fenced_ring,
2458 0, obj->base.write_domain);
2463 obj->fenced_gpu_access = false;
2466 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2467 if (!ring_passed_seqno(obj->last_fenced_ring,
2468 obj->last_fenced_seqno)) {
2469 ret = i915_wait_request(obj->last_fenced_ring,
2470 obj->last_fenced_seqno);
2475 obj->last_fenced_seqno = 0;
2476 obj->last_fenced_ring = NULL;
2479 /* Ensure that all CPU reads are completed before installing a fence
2480 * and all writes before removing the fence.
2482 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2489 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2493 if (obj->tiling_mode)
2494 i915_gem_release_mmap(obj);
2496 ret = i915_gem_object_flush_fence(obj, NULL);
2500 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2501 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2502 i915_gem_clear_fence_reg(obj->base.dev,
2503 &dev_priv->fence_regs[obj->fence_reg]);
2505 obj->fence_reg = I915_FENCE_REG_NONE;
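/* Fence allocation strategy: first look for a completely free register;
* failing that, steal the least-recently-used register whose object is
* not pinned, preferring one whose last fenced access was on @pipelined
* (or none at all) so that stealing it does not force a stall.
*/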
2511 static struct drm_i915_fence_reg *
2512 i915_find_fence_reg(struct drm_device *dev,
2513 struct intel_ring_buffer *pipelined)
2515 struct drm_i915_private *dev_priv = dev->dev_private;
2516 struct drm_i915_fence_reg *reg, *first, *avail;
2519 /* First try to find a free reg */
2521 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2522 reg = &dev_priv->fence_regs[i];
2526 if (!reg->obj->pin_count)
2533 /* None available, try to steal one or wait for a user to finish */
2534 avail = first = NULL;
2535 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2536 if (reg->obj->pin_count)
2543 !reg->obj->last_fenced_ring ||
2544 reg->obj->last_fenced_ring == pipelined) {
2557 * i915_gem_object_get_fence - set up a fence reg for an object
2558 * @obj: object to map through a fence reg
2559 * @pipelined: ring on which to queue the change, or NULL for CPU access
2562 * When mapping objects through the GTT, userspace wants to be able to write
2563 * to them without having to worry about swizzling if the object is tiled.
2565 * This function walks the fence regs looking for a free one for @obj,
2566 * stealing one if it can't find any.
2568 * It then sets up the reg based on the object's properties: address, pitch
2569 * and tiling format.
2572 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2573 struct intel_ring_buffer *pipelined)
2575 struct drm_device *dev = obj->base.dev;
2576 struct drm_i915_private *dev_priv = dev->dev_private;
2577 struct drm_i915_fence_reg *reg;
2580 /* XXX disable pipelining. There are bugs. Shocking. */
2583 /* Just update our place in the LRU if our fence is getting reused. */
2584 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2585 reg = &dev_priv->fence_regs[obj->fence_reg];
2586 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2588 if (obj->tiling_changed) {
2589 ret = i915_gem_object_flush_fence(obj, pipelined);
2593 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2597 reg->setup_seqno =
2598 i915_gem_next_request_seqno(pipelined);
2599 obj->last_fenced_seqno = reg->setup_seqno;
2600 obj->last_fenced_ring = pipelined;
2607 if (reg->setup_seqno) {
2608 if (!ring_passed_seqno(obj->last_fenced_ring,
2609 reg->setup_seqno)) {
2610 ret = i915_wait_request(obj->last_fenced_ring,
2616 reg->setup_seqno = 0;
2618 } else if (obj->last_fenced_ring &&
2619 obj->last_fenced_ring != pipelined) {
2620 ret = i915_gem_object_flush_fence(obj, pipelined);
2628 reg = i915_find_fence_reg(dev, pipelined);
2632 ret = i915_gem_object_flush_fence(obj, pipelined);
2637 struct drm_i915_gem_object *old = reg->obj;
2639 drm_gem_object_reference(&old->base);
2641 if (old->tiling_mode)
2642 i915_gem_release_mmap(old);
2644 ret = i915_gem_object_flush_fence(old, pipelined);
2646 drm_gem_object_unreference(&old->base);
2650 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2653 old->fence_reg = I915_FENCE_REG_NONE;
2654 old->last_fenced_ring = pipelined;
2655 old->last_fenced_seqno =
2656 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2658 drm_gem_object_unreference(&old->base);
2659 } else if (obj->last_fenced_seqno == 0)
2663 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2664 obj->fence_reg = reg - dev_priv->fence_regs;
2665 obj->last_fenced_ring = pipelined;
2667 reg->setup_seqno =
2668 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2669 obj->last_fenced_seqno = reg->setup_seqno;
2672 obj->tiling_changed = false;
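/* Write the fence in the layout each hardware generation expects:
* Sandybridge-style on gen6+, i965-style on gen4/5, i915-style on gen3
* and i830-style on gen2.
*/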
2673 switch (INTEL_INFO(dev)->gen) {
2676 ret = sandybridge_write_fence_reg(obj, pipelined);
2680 ret = i965_write_fence_reg(obj, pipelined);
2683 ret = i915_write_fence_reg(obj, pipelined);
2686 ret = i830_write_fence_reg(obj, pipelined);
2694 * i915_gem_clear_fence_reg - clear out fence register info
2695 * @dev: DRM device
2696 * @reg: fence register to clear
2697 * Zeroes out the fence register itself and clears out the associated
2698 * data structures in dev_priv and obj.
2701 i915_gem_clear_fence_reg(struct drm_device *dev,
2702 struct drm_i915_fence_reg *reg)
2704 drm_i915_private_t *dev_priv = dev->dev_private;
2705 uint32_t fence_reg = reg - dev_priv->fence_regs;
2707 switch (INTEL_INFO(dev)->gen) {
2710 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2714 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2718 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2721 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2723 I915_WRITE(fence_reg, 0);
2727 list_del_init(&reg->lru_list);
2729 reg->setup_seqno = 0;
2733 * Finds free space in the GTT aperture and binds the object there.
2736 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2738 bool map_and_fenceable)
2740 struct drm_device *dev = obj->base.dev;
2741 drm_i915_private_t *dev_priv = dev->dev_private;
2742 struct drm_mm_node *free_space;
2743 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2744 u32 size, fence_size, fence_alignment, unfenced_alignment;
2745 bool mappable, fenceable;
2748 if (obj->madv != I915_MADV_WILLNEED) {
2749 DRM_ERROR("Attempting to bind a purgeable object\n");
2753 fence_size = i915_gem_get_gtt_size(obj);
2754 fence_alignment = i915_gem_get_gtt_alignment(obj);
2755 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
2758 alignment = map_and_fenceable ? fence_alignment :
2759 unfenced_alignment;
2760 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2761 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2765 size = map_and_fenceable ? fence_size : obj->base.size;
2767 /* If the object is bigger than the entire aperture, reject it early
2768 * before evicting everything in a vain attempt to find space.
2770 if (obj->base.size >
2771 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2772 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2777 if (map_and_fenceable)
2779 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2781 dev_priv->mm.gtt_mappable_end,
2784 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2785 size, alignment, 0);
2787 if (free_space != NULL) {
2788 if (map_and_fenceable)
2790 drm_mm_get_block_range_generic(free_space,
2792 dev_priv->mm.gtt_mappable_end,
2796 drm_mm_get_block(free_space, size, alignment);
2798 if (obj->gtt_space == NULL) {
2799 /* If the gtt is empty and we're still having trouble
2800 * fitting our object in, we're out of memory.
2802 ret = i915_gem_evict_something(dev, size, alignment,
2810 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2812 drm_mm_put_block(obj->gtt_space);
2813 obj->gtt_space = NULL;
2815 if (ret == -ENOMEM) {
2816 /* first try to reclaim some memory by clearing the GTT */
2817 ret = i915_gem_evict_everything(dev, false);
2819 /* now try to shrink everyone else */
2834 ret = i915_gem_gtt_bind_object(obj);
2836 i915_gem_object_put_pages_gtt(obj);
2837 drm_mm_put_block(obj->gtt_space);
2838 obj->gtt_space = NULL;
2840 if (i915_gem_evict_everything(dev, false))
2846 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2847 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2849 /* Assert that the object is not currently in any GPU domain. As it
2850 * wasn't in the GTT, there shouldn't be any way it could have been in
2851 * a GPU write domain.
2852 */
2853 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2854 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2856 obj->gtt_offset = obj->gtt_space->start;
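/* "fenceable" means the allocated GTT node is sized and aligned so that
* a fence register could cover it; "mappable" means it lies entirely
* below the end of the CPU-mappable aperture. Both must hold for
* obj->map_and_fenceable to be set.
*/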
2858 fenceable =
2859 obj->gtt_space->size == fence_size &&
2860 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2862 mappable =
2863 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2865 obj->map_and_fenceable = mappable && fenceable;
2867 trace_i915_gem_object_bind(obj, map_and_fenceable);
2872 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2874 /* If we don't have a page list set up, then we're not pinned
2875 * to GPU, and we can ignore the cache flush because it'll happen
2876 * again at bind time.
2878 if (obj->pages == NULL)
2881 /* If the GPU is snooping the contents of the CPU cache,
2882 * we do not need to manually clear the CPU cache lines. However,
2883 * the caches are only snooped when the render cache is
2884 * flushed/invalidated. As we always have to emit invalidations
2885 * and flushes when moving into and out of the RENDER domain, correct
2886 * snooping behaviour occurs naturally as the result of our domain
2887 * tracking.
2888 */
2889 if (obj->cache_level != I915_CACHE_NONE)
2892 trace_i915_gem_object_clflush(obj);
2894 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2897 /** Flushes any GPU write domain for the object if it's dirty. */
2899 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2901 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2904 /* Queue the GPU write cache flushing we need. */
2905 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2908 /** Flushes the GTT write domain for the object if it's dirty. */
2910 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2912 uint32_t old_write_domain;
2914 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2917 /* No actual flushing is required for the GTT write domain. Writes
2918 * to it immediately go to main memory as far as we know, so there's
2919 * no chipset flush. It also doesn't land in render cache.
2921 * However, we do have to enforce the order so that all writes through
2922 * the GTT land before any writes to the device, such as updates to
2923 * the GATT itself.
2924 */
2927 i915_gem_release_mmap(obj);
2929 old_write_domain = obj->base.write_domain;
2930 obj->base.write_domain = 0;
2932 trace_i915_gem_object_change_domain(obj,
2933 obj->base.read_domains,
2937 /** Flushes the CPU write domain for the object if it's dirty. */
2939 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2941 uint32_t old_write_domain;
2943 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2946 i915_gem_clflush_object(obj);
2947 intel_gtt_chipset_flush();
2948 old_write_domain = obj->base.write_domain;
2949 obj->base.write_domain = 0;
2951 trace_i915_gem_object_change_domain(obj,
2952 obj->base.read_domains,
2957 * Moves a single object to the GTT read, and possibly write domain.
2959 * This function returns when the move is complete, including waiting on
2960 * flushes to occur.
2961 */
2963 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2965 uint32_t old_write_domain, old_read_domains;
2968 /* Not valid to be called on unbound objects. */
2969 if (obj->gtt_space == NULL)
2972 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2975 ret = i915_gem_object_flush_gpu_write_domain(obj);
2979 if (obj->pending_gpu_write || write) {
2980 ret = i915_gem_object_wait_rendering(obj);
2985 i915_gem_object_flush_cpu_write_domain(obj);
2987 old_write_domain = obj->base.write_domain;
2988 old_read_domains = obj->base.read_domains;
2990 /* It should now be out of any other write domains, and we can update
2991 * the domain values for our changes.
2993 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2994 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2996 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2997 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3001 trace_i915_gem_object_change_domain(obj,
3009 * Prepare buffer for display plane. Use uninterruptible for possible flush
3010 * wait, as during the modesetting process we're not supposed to be interrupted.
3013 i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
3014 struct intel_ring_buffer *pipelined)
3016 uint32_t old_read_domains;
3019 /* Not valid to be called on unbound objects. */
3020 if (obj->gtt_space == NULL)
3023 ret = i915_gem_object_flush_gpu_write_domain(obj);
3029 /* Currently, we are always called from a non-interruptible context. */
3029 if (pipelined != obj->ring) {
3030 ret = i915_gem_object_wait_rendering(obj);
3035 i915_gem_object_flush_cpu_write_domain(obj);
3037 old_read_domains = obj->base.read_domains;
3038 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3040 trace_i915_gem_object_change_domain(obj,
3042 obj->base.write_domain);
3048 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
3055 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3056 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3061 return i915_gem_object_wait_rendering(obj);
3065 * Moves a single object to the CPU read, and possibly write domain.
3067 * This function returns when the move is complete, including waiting on
3068 * flushes to occur.
3069 */
3071 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3073 uint32_t old_write_domain, old_read_domains;
3076 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3079 ret = i915_gem_object_flush_gpu_write_domain(obj);
3083 ret = i915_gem_object_wait_rendering(obj);
3087 i915_gem_object_flush_gtt_write_domain(obj);
3089 /* If we have a partially-valid cache of the object in the CPU,
3090 * finish invalidating it and free the per-page flags.
3092 i915_gem_object_set_to_full_cpu_read_domain(obj);
3094 old_write_domain = obj->base.write_domain;
3095 old_read_domains = obj->base.read_domains;
3097 /* Flush the CPU cache if it's still invalid. */
3098 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3099 i915_gem_clflush_object(obj);
3101 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3104 /* It should now be out of any other write domains, and we can update
3105 * the domain values for our changes.
3107 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3109 /* If we're writing through the CPU, then the GPU read domains will
3110 * need to be invalidated at next use.
3113 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3114 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3117 trace_i915_gem_object_change_domain(obj,
3125 * Moves the object from a partially valid CPU read domain to a fully valid one.
3127 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3128 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3131 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3133 if (!obj->page_cpu_valid)
3136 /* If we're partially in the CPU read domain, finish moving it in.
3138 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3141 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3142 if (obj->page_cpu_valid[i])
3144 drm_clflush_pages(obj->pages + i, 1);
3148 /* Free the page_cpu_valid mappings which are now stale, whether
3149 * or not we've got I915_GEM_DOMAIN_CPU.
3151 kfree(obj->page_cpu_valid);
3152 obj->page_cpu_valid = NULL;
3156 * Set the CPU read domain on a range of the object.
3158 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3159 * not entirely valid. The page_cpu_valid member of the object flags which
3160 * pages have been flushed, and will be respected by
3161 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3162 * of the whole object.
3164 * This function returns when the move is complete, including waiting on
3165 * flushes to occur.
3166 */
3168 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3169 uint64_t offset, uint64_t size)
3171 uint32_t old_read_domains;
3174 if (offset == 0 && size == obj->base.size)
3175 return i915_gem_object_set_to_cpu_domain(obj, 0);
3177 ret = i915_gem_object_flush_gpu_write_domain(obj);
3181 ret = i915_gem_object_wait_rendering(obj);
3185 i915_gem_object_flush_gtt_write_domain(obj);
3187 /* If we're already fully in the CPU read domain, we're done. */
3188 if (obj->page_cpu_valid == NULL &&
3189 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3192 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3193 * newly adding I915_GEM_DOMAIN_CPU
3195 if (obj->page_cpu_valid == NULL) {
3196 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3198 if (obj->page_cpu_valid == NULL)
3200 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3201 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3203 /* Flush the cache on any pages that are still invalid from the CPU's
3204 * perspective.
3205 */
3206 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3207 i++) {
3208 if (obj->page_cpu_valid[i])
3211 drm_clflush_pages(obj->pages + i, 1);
3213 obj->page_cpu_valid[i] = 1;
3216 /* It should now be out of any other write domains, and we can update
3217 * the domain values for our changes.
3219 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3221 old_read_domains = obj->base.read_domains;
3222 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3224 trace_i915_gem_object_change_domain(obj,
3226 obj->base.write_domain);
3231 /* Throttle our rendering by waiting until the ring has completed our requests
3232 * emitted over 20 msec ago.
3234 * Note that if we were to use the current jiffies each time around the loop,
3235 * we wouldn't escape the function with any frames outstanding if the time to
3236 * render a frame was over 20ms.
3238 * This should get us reasonable parallelism between CPU and GPU but also
3239 * relatively low latency when blocking on a particular request to finish.
3242 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3244 struct drm_i915_private *dev_priv = dev->dev_private;
3245 struct drm_i915_file_private *file_priv = file->driver_priv;
3246 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3247 struct drm_i915_gem_request *request;
3248 struct intel_ring_buffer *ring = NULL;
3252 if (atomic_read(&dev_priv->mm.wedged))
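/* Walk this client's request list and remember the most recent request
* that is already at least ~20ms old; that is the request waited upon
* below, outside of any locks.
*/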
3255 spin_lock(&file_priv->mm.lock);
3256 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3257 if (time_after_eq(request->emitted_jiffies, recent_enough))
3260 ring = request->ring;
3261 seqno = request->seqno;
3263 spin_unlock(&file_priv->mm.lock);
3269 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3270 /* And wait for the seqno passing without holding any locks and
3271 * causing extra latency for others. This is safe as the irq
3272 * generation is designed to be run atomically and so is
3273 * lockless.
3274 */
3275 if (ring->irq_get(ring)) {
3276 ret = wait_event_interruptible(ring->irq_queue,
3277 i915_seqno_passed(ring->get_seqno(ring), seqno)
3278 || atomic_read(&dev_priv->mm.wedged));
3279 ring->irq_put(ring);
3281 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3287 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
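/* Pinning binds the object into the GTT, honouring the requested
* alignment and the map_and_fenceable constraint (rebinding first if the
* current placement conflicts), and keeps it resident for as long as
* pin_count is non-zero; newly pinned objects are moved onto
* mm.pinned_list.
*/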
3293 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3295 bool map_and_fenceable)
3297 struct drm_device *dev = obj->base.dev;
3298 struct drm_i915_private *dev_priv = dev->dev_private;
3301 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3302 WARN_ON(i915_verify_lists(dev));
3304 if (obj->gtt_space != NULL) {
3305 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3306 (map_and_fenceable && !obj->map_and_fenceable)) {
3307 WARN(obj->pin_count,
3308 "bo is already pinned with incorrect alignment:"
3309 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3310 " obj->map_and_fenceable=%d\n",
3311 obj->gtt_offset, alignment,
3313 obj->map_and_fenceable);
3314 ret = i915_gem_object_unbind(obj);
3320 if (obj->gtt_space == NULL) {
3321 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3327 if (obj->pin_count++ == 0) {
3329 list_move_tail(&obj->mm_list,
3330 &dev_priv->mm.pinned_list);
3332 obj->pin_mappable |= map_and_fenceable;
3334 WARN_ON(i915_verify_lists(dev));
3339 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3341 struct drm_device *dev = obj->base.dev;
3342 drm_i915_private_t *dev_priv = dev->dev_private;
3344 WARN_ON(i915_verify_lists(dev));
3345 BUG_ON(obj->pin_count == 0);
3346 BUG_ON(obj->gtt_space == NULL);
3348 if (--obj->pin_count == 0) {
3350 list_move_tail(&obj->mm_list,
3351 &dev_priv->mm.inactive_list);
3352 obj->pin_mappable = false;
3354 WARN_ON(i915_verify_lists(dev));
3358 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3359 struct drm_file *file)
3361 struct drm_i915_gem_pin *args = data;
3362 struct drm_i915_gem_object *obj;
3365 ret = i915_mutex_lock_interruptible(dev);
3369 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3370 if (&obj->base == NULL) {
3375 if (obj->madv != I915_MADV_WILLNEED) {
3376 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3381 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3382 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3388 obj->user_pin_count++;
3389 obj->pin_filp = file;
3390 if (obj->user_pin_count == 1) {
3391 ret = i915_gem_object_pin(obj, args->alignment, true);
3396 /* XXX - flush the CPU caches for pinned objects
3397 * as the X server doesn't manage domains yet
3399 i915_gem_object_flush_cpu_write_domain(obj);
3400 args->offset = obj->gtt_offset;
3402 drm_gem_object_unreference(&obj->base);
3404 mutex_unlock(&dev->struct_mutex);
3409 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3410 struct drm_file *file)
3412 struct drm_i915_gem_pin *args = data;
3413 struct drm_i915_gem_object *obj;
3416 ret = i915_mutex_lock_interruptible(dev);
3420 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3421 if (&obj->base == NULL) {
3426 if (obj->pin_filp != file) {
3427 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3432 obj->user_pin_count--;
3433 if (obj->user_pin_count == 0) {
3434 obj->pin_filp = NULL;
3435 i915_gem_object_unpin(obj);
3439 drm_gem_object_unreference(&obj->base);
3441 mutex_unlock(&dev->struct_mutex);
3446 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3447 struct drm_file *file)
3449 struct drm_i915_gem_busy *args = data;
3450 struct drm_i915_gem_object *obj;
3453 ret = i915_mutex_lock_interruptible(dev);
3457 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3458 if (&obj->base == NULL) {
3463 /* Count all active objects as busy, even if they are currently not used
3464 * by the gpu. Users of this interface expect objects to eventually
3465 * become non-busy without any further actions, therefore emit any
3466 * necessary flushes here.
3468 args->busy = obj->active;
3470 /* Unconditionally flush objects, even when the gpu still uses this
3471 * object. Userspace calling this function indicates that it wants to
3472 * use this buffer rather sooner than later, so issuing the required
3473 * flush earlier is beneficial.
3475 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3476 ret = i915_gem_flush_ring(obj->ring,
3477 0, obj->base.write_domain);
3478 } else if (obj->ring->outstanding_lazy_request ==
3479 obj->last_rendering_seqno) {
3480 struct drm_i915_gem_request *request;
3482 /* This ring is not being cleared by active usage,
3483 * so emit a request to do so.
3485 request = kzalloc(sizeof(*request), GFP_KERNEL);
3487 ret = i915_add_request(obj->ring, NULL, request);
3492 /* Update the active list for the hardware's current position.
3493 * Otherwise this only updates on a delayed timer or when irqs
3494 * are actually unmasked, and our working set ends up being
3495 * larger than required.
3497 i915_gem_retire_requests_ring(obj->ring);
3499 args->busy = obj->active;
3502 drm_gem_object_unreference(&obj->base);
3504 mutex_unlock(&dev->struct_mutex);
3509 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3510 struct drm_file *file_priv)
3512 return i915_gem_ring_throttle(dev, file_priv);
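/* madvise lets userspace mark a buffer I915_MADV_DONTNEED, allowing the
* driver to discard its backing storage once it is unbound (immediately
* below if it is already unbound), or I915_MADV_WILLNEED to keep it.
*/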
3516 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3517 struct drm_file *file_priv)
3519 struct drm_i915_gem_madvise *args = data;
3520 struct drm_i915_gem_object *obj;
3523 switch (args->madv) {
3524 case I915_MADV_DONTNEED:
3525 case I915_MADV_WILLNEED:
3531 ret = i915_mutex_lock_interruptible(dev);
3535 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3536 if (&obj->base == NULL) {
3541 if (obj->pin_count) {
3546 if (obj->madv != __I915_MADV_PURGED)
3547 obj->madv = args->madv;
3549 /* if the object is no longer bound, discard its backing storage */
3550 if (i915_gem_object_is_purgeable(obj) &&
3551 obj->gtt_space == NULL)
3552 i915_gem_object_truncate(obj);
3554 args->retained = obj->madv != __I915_MADV_PURGED;
3557 drm_gem_object_unreference(&obj->base);
3559 mutex_unlock(&dev->struct_mutex);
3563 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3566 struct drm_i915_private *dev_priv = dev->dev_private;
3567 struct drm_i915_gem_object *obj;
3569 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3573 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3578 i915_gem_info_add_obj(dev_priv, size);
3580 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3581 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3583 obj->cache_level = I915_CACHE_NONE;
3584 obj->base.driver_private = NULL;
3585 obj->fence_reg = I915_FENCE_REG_NONE;
3586 INIT_LIST_HEAD(&obj->mm_list);
3587 INIT_LIST_HEAD(&obj->gtt_list);
3588 INIT_LIST_HEAD(&obj->ring_list);
3589 INIT_LIST_HEAD(&obj->exec_list);
3590 INIT_LIST_HEAD(&obj->gpu_write_list);
3591 obj->madv = I915_MADV_WILLNEED;
3592 /* Avoid an unnecessary call to unbind on the first bind. */
3593 obj->map_and_fenceable = true;
3598 int i915_gem_init_object(struct drm_gem_object *obj)
3605 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3607 struct drm_device *dev = obj->base.dev;
3608 drm_i915_private_t *dev_priv = dev->dev_private;
3611 ret = i915_gem_object_unbind(obj);
3612 if (ret == -ERESTARTSYS) {
3613 list_move(&obj->mm_list,
3614 &dev_priv->mm.deferred_free_list);
3618 trace_i915_gem_object_destroy(obj);
3620 if (obj->base.map_list.map)
3621 i915_gem_free_mmap_offset(obj);
3623 drm_gem_object_release(&obj->base);
3624 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3626 kfree(obj->page_cpu_valid);
3631 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3633 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3634 struct drm_device *dev = obj->base.dev;
3636 while (obj->pin_count > 0)
3637 i915_gem_object_unpin(obj);
3640 i915_gem_detach_phys_object(dev, obj);
3642 i915_gem_free_object_tail(obj);
3646 i915_gem_idle(struct drm_device *dev)
3648 drm_i915_private_t *dev_priv = dev->dev_private;
3651 mutex_lock(&dev->struct_mutex);
3653 if (dev_priv->mm.suspended) {
3654 mutex_unlock(&dev->struct_mutex);
3658 ret = i915_gpu_idle(dev);
3660 mutex_unlock(&dev->struct_mutex);
3664 /* Under UMS, be paranoid and evict. */
3665 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3666 ret = i915_gem_evict_inactive(dev, false);
3668 mutex_unlock(&dev->struct_mutex);
3673 i915_gem_reset_fences(dev);
3675 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3676 * We need to replace this with a semaphore, or something.
3677 * And not confound mm.suspended!
3679 dev_priv->mm.suspended = 1;
3680 del_timer_sync(&dev_priv->hangcheck_timer);
3682 i915_kernel_lost_context(dev);
3683 i915_gem_cleanup_ringbuffer(dev);
3685 mutex_unlock(&dev->struct_mutex);
3687 /* Cancel the retire work handler, which should be idle now. */
3688 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3694 i915_gem_init_ringbuffer(struct drm_device *dev)
3696 drm_i915_private_t *dev_priv = dev->dev_private;
3699 ret = intel_init_render_ring_buffer(dev);
3704 ret = intel_init_bsd_ring_buffer(dev);
3706 goto cleanup_render_ring;
3710 ret = intel_init_blt_ring_buffer(dev);
3712 goto cleanup_bsd_ring;
3715 dev_priv->next_seqno = 1;
3720 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3721 cleanup_render_ring:
3722 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3727 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3729 drm_i915_private_t *dev_priv = dev->dev_private;
3732 for (i = 0; i < I915_NUM_RINGS; i++)
3733 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3737 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3738 struct drm_file *file_priv)
3740 drm_i915_private_t *dev_priv = dev->dev_private;
3743 if (drm_core_check_feature(dev, DRIVER_MODESET))
3746 if (atomic_read(&dev_priv->mm.wedged)) {
3747 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3748 atomic_set(&dev_priv->mm.wedged, 0);
3751 mutex_lock(&dev->struct_mutex);
3752 dev_priv->mm.suspended = 0;
3754 ret = i915_gem_init_ringbuffer(dev);
3756 mutex_unlock(&dev->struct_mutex);
3760 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3761 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3762 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3763 for (i = 0; i < I915_NUM_RINGS; i++) {
3764 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3765 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3767 mutex_unlock(&dev->struct_mutex);
3769 ret = drm_irq_install(dev);
3771 goto cleanup_ringbuffer;
3776 mutex_lock(&dev->struct_mutex);
3777 i915_gem_cleanup_ringbuffer(dev);
3778 dev_priv->mm.suspended = 1;
3779 mutex_unlock(&dev->struct_mutex);
3785 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3786 struct drm_file *file_priv)
3788 if (drm_core_check_feature(dev, DRIVER_MODESET))
3791 drm_irq_uninstall(dev);
3792 return i915_gem_idle(dev);
3796 i915_gem_lastclose(struct drm_device *dev)
3800 if (drm_core_check_feature(dev, DRIVER_MODESET))
3803 ret = i915_gem_idle(dev);
3805 DRM_ERROR("failed to idle hardware: %d\n", ret);
3809 init_ring_lists(struct intel_ring_buffer *ring)
3811 INIT_LIST_HEAD(&ring->active_list);
3812 INIT_LIST_HEAD(&ring->request_list);
3813 INIT_LIST_HEAD(&ring->gpu_write_list);
3817 i915_gem_load(struct drm_device *dev)
3820 drm_i915_private_t *dev_priv = dev->dev_private;
3822 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3823 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3824 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3825 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3826 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3827 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3828 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3829 for (i = 0; i < I915_NUM_RINGS; i++)
3830 init_ring_lists(&dev_priv->ring[i]);
3831 for (i = 0; i < 16; i++)
3832 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3833 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3834 i915_gem_retire_work_handler);
3835 init_completion(&dev_priv->error_completion);
3837 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3839 u32 tmp = I915_READ(MI_ARB_STATE);
3840 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3841 /* arb state is a masked write, so set bit + bit in mask */
3842 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3843 I915_WRITE(MI_ARB_STATE, tmp);
3847 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3849 /* Old X drivers will take 0-2 for front, back, depth buffers */
3850 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3851 dev_priv->fence_reg_start = 3;
3853 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3854 dev_priv->num_fence_regs = 16;
3855 else
3856 dev_priv->num_fence_regs = 8;
3858 /* Initialize fence registers to zero */
3859 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3860 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3863 i915_gem_detect_bit_6_swizzle(dev);
3864 init_waitqueue_head(&dev_priv->pending_flip_queue);
3866 dev_priv->mm.interruptible = true;
3868 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3869 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3870 register_shrinker(&dev_priv->mm.inactive_shrinker);
3874 * Create a physically contiguous memory object for this object
3875 * e.g. for cursor + overlay regs
3877 static int i915_gem_init_phys_object(struct drm_device *dev,
3878 int id, int size, int align)
3880 drm_i915_private_t *dev_priv = dev->dev_private;
3881 struct drm_i915_gem_phys_object *phys_obj;
3884 if (dev_priv->mm.phys_objs[id - 1] || !size)
3887 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3893 phys_obj->handle = drm_pci_alloc(dev, size, align);
3894 if (!phys_obj->handle) {
3899 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3902 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3910 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3912 drm_i915_private_t *dev_priv = dev->dev_private;
3913 struct drm_i915_gem_phys_object *phys_obj;
3915 if (!dev_priv->mm.phys_objs[id - 1])
3918 phys_obj = dev_priv->mm.phys_objs[id - 1];
3919 if (phys_obj->cur_obj) {
3920 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3924 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3926 drm_pci_free(dev, phys_obj->handle);
3928 dev_priv->mm.phys_objs[id - 1] = NULL;
3931 void i915_gem_free_all_phys_object(struct drm_device *dev)
3935 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3936 i915_gem_free_phys_object(dev, i);
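/* Detaching copies the contents of the contiguous phys object back into
* the object's shmem pages, flushing and dirtying them, and then breaks
* the obj <-> phys_obj association; attach below performs the reverse
* copy from the shmem pages into the phys object.
*/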
3939 void i915_gem_detach_phys_object(struct drm_device *dev,
3940 struct drm_i915_gem_object *obj)
3942 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3949 vaddr = obj->phys_obj->handle->vaddr;
3951 page_count = obj->base.size / PAGE_SIZE;
3952 for (i = 0; i < page_count; i++) {
3953 struct page *page = read_cache_page_gfp(mapping, i,
3954 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3955 if (!IS_ERR(page)) {
3956 char *dst = kmap_atomic(page);
3957 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3960 drm_clflush_pages(&page, 1);
3962 set_page_dirty(page);
3963 mark_page_accessed(page);
3964 page_cache_release(page);
3967 intel_gtt_chipset_flush();
3969 obj->phys_obj->cur_obj = NULL;
3970 obj->phys_obj = NULL;
3974 i915_gem_attach_phys_object(struct drm_device *dev,
3975 struct drm_i915_gem_object *obj,
3979 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3980 drm_i915_private_t *dev_priv = dev->dev_private;
3985 if (id > I915_MAX_PHYS_OBJECT)
3988 if (obj->phys_obj) {
3989 if (obj->phys_obj->id == id)
3991 i915_gem_detach_phys_object(dev, obj);
3994 /* create a new object */
3995 if (!dev_priv->mm.phys_objs[id - 1]) {
3996 ret = i915_gem_init_phys_object(dev, id,
3997 obj->base.size, align);
3999 DRM_ERROR("failed to init phys object %d size: %zu\n",
4000 id, obj->base.size);
4005 /* bind to the object */
4006 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4007 obj->phys_obj->cur_obj = obj;
4009 page_count = obj->base.size / PAGE_SIZE;
4011 for (i = 0; i < page_count; i++) {
4015 page = read_cache_page_gfp(mapping, i,
4016 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4018 return PTR_ERR(page);
4020 src = kmap_atomic(page);
4021 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4022 memcpy(dst, src, PAGE_SIZE);
4025 mark_page_accessed(page);
4026 page_cache_release(page);
4033 i915_gem_phys_pwrite(struct drm_device *dev,
4034 struct drm_i915_gem_object *obj,
4035 struct drm_i915_gem_pwrite *args,
4036 struct drm_file *file_priv)
4038 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4039 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4041 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4042 unsigned long unwritten;
4044 /* The physical object once assigned is fixed for the lifetime
4045 * of the obj, so we can safely drop the lock and continue
4046 * to access vaddr.
4047 */
4048 mutex_unlock(&dev->struct_mutex);
4049 unwritten = copy_from_user(vaddr, user_data, args->size);
4050 mutex_lock(&dev->struct_mutex);
4055 intel_gtt_chipset_flush();
4059 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4061 struct drm_i915_file_private *file_priv = file->driver_priv;
4063 /* Clean up our request list when the client is going away, so that
4064 * later retire_requests won't dereference our soon-to-be-gone
4065 * file_priv.
4066 */
4067 spin_lock(&file_priv->mm.lock);
4068 while (!list_empty(&file_priv->mm.request_list)) {
4069 struct drm_i915_gem_request *request;
4071 request = list_first_entry(&file_priv->mm.request_list,
4072 struct drm_i915_gem_request,
4074 list_del(&request->client_list);
4075 request->file_priv = NULL;
4077 spin_unlock(&file_priv->mm.lock);
4081 i915_gpu_is_active(struct drm_device *dev)
4083 drm_i915_private_t *dev_priv = dev->dev_private;
4086 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4087 list_empty(&dev_priv->mm.active_list);
4089 return !lists_empty;
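/* Shrinker callback: with sc->nr_to_scan == 0 it only reports how many
* inactive objects could be freed; otherwise it unbinds purgeable
* objects first, then any other inactive objects, and as a last resort
* waits for the GPU to idle. The count returned is scaled by
* sysctl_vfs_cache_pressure.
*/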
4093 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4095 struct drm_i915_private *dev_priv =
4096 container_of(shrinker,
4097 struct drm_i915_private,
4098 mm.inactive_shrinker);
4099 struct drm_device *dev = dev_priv->dev;
4100 struct drm_i915_gem_object *obj, *next;
4101 int nr_to_scan = sc->nr_to_scan;
4104 if (!mutex_trylock(&dev->struct_mutex))
4107 /* "fast-path" to count number of available objects */
4108 if (nr_to_scan == 0) {
4110 list_for_each_entry(obj,
4111 &dev_priv->mm.inactive_list,
4114 mutex_unlock(&dev->struct_mutex);
4115 return cnt / 100 * sysctl_vfs_cache_pressure;
4119 /* first scan for clean buffers */
4120 i915_gem_retire_requests(dev);
4122 list_for_each_entry_safe(obj, next,
4123 &dev_priv->mm.inactive_list,
4125 if (i915_gem_object_is_purgeable(obj)) {
4126 if (i915_gem_object_unbind(obj) == 0 &&
4132 /* second pass, evict/count anything still on the inactive list */
4134 list_for_each_entry_safe(obj, next,
4135 &dev_priv->mm.inactive_list,
4138 i915_gem_object_unbind(obj) == 0)
4144 if (nr_to_scan && i915_gpu_is_active(dev)) {
4146 * We are desperate for pages, so as a last resort, wait
4147 * for the GPU to finish and discard whatever we can.
4148 * This dramatically reduces the number of
4149 * OOM-killer events whilst running the GPU aggressively.
4151 if (i915_gpu_idle(dev) == 0)
4154 mutex_unlock(&dev->struct_mutex);
4155 return cnt / 100 * sysctl_vfs_cache_pressure;