drivers/gpu/drm/i915/i915_gem.c (pandora-kernel.git, commit 3cac366b3053ab794ff13b40360ebb31c24029a1)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37
38 struct change_domains {
39         uint32_t invalidate_domains;
40         uint32_t flush_domains;
41         uint32_t flush_rings;
42 };
43
44 static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv);
45 static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv);
46
47 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
48                                                   bool pipelined);
49 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
50 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
51 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
52                                              int write);
53 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
54                                                      uint64_t offset,
55                                                      uint64_t size);
56 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
57 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
58                                           bool interruptible);
59 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
60                                        unsigned alignment,
61                                        bool map_and_fenceable);
62 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
63 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
64                                 struct drm_i915_gem_pwrite *args,
65                                 struct drm_file *file_priv);
66 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
67
68 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
69                                     int nr_to_scan,
70                                     gfp_t gfp_mask);
71
72
73 /* some bookkeeping */
74 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
75                                   size_t size)
76 {
77         dev_priv->mm.object_count++;
78         dev_priv->mm.object_memory += size;
79 }
80
81 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
82                                      size_t size)
83 {
84         dev_priv->mm.object_count--;
85         dev_priv->mm.object_memory -= size;
86 }
87
88 static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
89                                   struct drm_i915_gem_object *obj)
90 {
91         dev_priv->mm.gtt_count++;
92         dev_priv->mm.gtt_memory += obj->gtt_space->size;
93         if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
94                 dev_priv->mm.mappable_gtt_used +=
95                         min_t(size_t, obj->gtt_space->size,
96                               dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
97         }
98         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
99 }
100
101 static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
102                                      struct drm_i915_gem_object *obj)
103 {
104         dev_priv->mm.gtt_count--;
105         dev_priv->mm.gtt_memory -= obj->gtt_space->size;
106         if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
107                 dev_priv->mm.mappable_gtt_used -=
108                         min_t(size_t, obj->gtt_space->size,
109                               dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
110         }
111         list_del_init(&obj->gtt_list);
112 }
113
114 /**
115  * Update the mappable working set counters. Call _only_ when there is a change
116  * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
117  * @mappable: new state of the changed mappable flag (either pin_ or fault_).
118  */
119 static void
120 i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
121                               struct drm_i915_gem_object *obj,
122                               bool mappable)
123 {
124         if (mappable) {
125                 if (obj->pin_mappable && obj->fault_mappable)
126                         /* Combined state was already mappable. */
127                         return;
128                 dev_priv->mm.gtt_mappable_count++;
129                 dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
130         } else {
131                 if (obj->pin_mappable || obj->fault_mappable)
132                         /* Combined state still mappable. */
133                         return;
134                 dev_priv->mm.gtt_mappable_count--;
135                 dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
136         }
137 }
138
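/*
 * Editor's note with a minimal usage sketch (not driver code): an object is
 * counted in the mappable working set while either pin_mappable or
 * fault_mappable is set, so a caller flips its own flag first and then
 * reports the transition exactly once, e.g.
 *
 *	if (!obj->fault_mappable) {
 *		obj->fault_mappable = true;
 *		i915_gem_info_update_mappable(dev_priv, obj, true);
 *	}
 *
 * This mirrors the pin and page-fault call sites later in this file.
 */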
139 static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
140                                   struct drm_i915_gem_object *obj,
141                                   bool mappable)
142 {
143         dev_priv->mm.pin_count++;
144         dev_priv->mm.pin_memory += obj->gtt_space->size;
145         if (mappable) {
146                 obj->pin_mappable = true;
147                 i915_gem_info_update_mappable(dev_priv, obj, true);
148         }
149 }
150
151 static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
152                                      struct drm_i915_gem_object *obj)
153 {
154         dev_priv->mm.pin_count--;
155         dev_priv->mm.pin_memory -= obj->gtt_space->size;
156         if (obj->pin_mappable) {
157                 obj->pin_mappable = false;
158                 i915_gem_info_update_mappable(dev_priv, obj, false);
159         }
160 }
161
162 int
163 i915_gem_check_is_wedged(struct drm_device *dev)
164 {
165         struct drm_i915_private *dev_priv = dev->dev_private;
166         struct completion *x = &dev_priv->error_completion;
167         unsigned long flags;
168         int ret;
169
170         if (!atomic_read(&dev_priv->mm.wedged))
171                 return 0;
172
173         ret = wait_for_completion_interruptible(x);
174         if (ret)
175                 return ret;
176
177         /* Success, we reset the GPU! */
178         if (!atomic_read(&dev_priv->mm.wedged))
179                 return 0;
180
181         /* GPU is hung, bump the completion count to account for
182          * the token we just consumed so that we never hit zero and
183          * end up waiting upon a subsequent completion event that
184          * will never happen.
185          */
186         spin_lock_irqsave(&x->wait.lock, flags);
187         x->done++;
188         spin_unlock_irqrestore(&x->wait.lock, flags);
189         return -EIO;
190 }
191
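/*
 * Editor's sketch (not driver code) of the completion-token pattern used by
 * i915_gem_check_is_wedged() above, shown in isolation: a waiter that
 * consumed a token but still finds the GPU wedged must hand the token back
 * so later waiters are not stranded.  The function above does the
 * equivalent by bumping ->done directly under the waitqueue lock; this
 * sketch uses complete() instead.  still_wedged() is a hypothetical
 * predicate and demo_reset_done is assumed to be set up elsewhere with
 * init_completion().
 *
 *	#include <linux/completion.h>
 *
 *	static struct completion demo_reset_done;	// completed after each GPU reset
 *
 *	static int demo_wait_for_reset(void)
 *	{
 *		int ret = wait_for_completion_interruptible(&demo_reset_done);
 *		if (ret)
 *			return ret;		// interrupted, token not consumed
 *		if (!still_wedged())
 *			return 0;		// reset really succeeded
 *		complete(&demo_reset_done);	// hand the token back for others
 *		return -EIO;
 *	}
 */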
192 static int i915_mutex_lock_interruptible(struct drm_device *dev)
193 {
194         struct drm_i915_private *dev_priv = dev->dev_private;
195         int ret;
196
197         ret = i915_gem_check_is_wedged(dev);
198         if (ret)
199                 return ret;
200
201         ret = mutex_lock_interruptible(&dev->struct_mutex);
202         if (ret)
203                 return ret;
204
205         if (atomic_read(&dev_priv->mm.wedged)) {
206                 mutex_unlock(&dev->struct_mutex);
207                 return -EAGAIN;
208         }
209
210         WARN_ON(i915_verify_lists(dev));
211         return 0;
212 }
213
214 static inline bool
215 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
216 {
217         return obj_priv->gtt_space &&
218                 !obj_priv->active &&
219                 obj_priv->pin_count == 0;
220 }
221
222 int i915_gem_do_init(struct drm_device *dev,
223                      unsigned long start,
224                      unsigned long mappable_end,
225                      unsigned long end)
226 {
227         drm_i915_private_t *dev_priv = dev->dev_private;
228
229         if (start >= end ||
230             (start & (PAGE_SIZE - 1)) != 0 ||
231             (end & (PAGE_SIZE - 1)) != 0) {
232                 return -EINVAL;
233         }
234
235         drm_mm_init(&dev_priv->mm.gtt_space, start,
236                     end - start);
237
238         dev_priv->mm.gtt_total = end - start;
239         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
240         dev_priv->mm.gtt_mappable_end = mappable_end;
241
242         return 0;
243 }
244
245 int
246 i915_gem_init_ioctl(struct drm_device *dev, void *data,
247                     struct drm_file *file_priv)
248 {
249         struct drm_i915_gem_init *args = data;
250         int ret;
251
252         mutex_lock(&dev->struct_mutex);
253         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
254         mutex_unlock(&dev->struct_mutex);
255
256         return ret;
257 }
258
259 int
260 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
261                             struct drm_file *file_priv)
262 {
263         struct drm_i915_private *dev_priv = dev->dev_private;
264         struct drm_i915_gem_get_aperture *args = data;
265
266         if (!(dev->driver->driver_features & DRIVER_GEM))
267                 return -ENODEV;
268
269         mutex_lock(&dev->struct_mutex);
270         args->aper_size = dev_priv->mm.gtt_total;
271         args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
272         mutex_unlock(&dev->struct_mutex);
273
274         return 0;
275 }
276
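/*
 * Editor's sketch (not driver code): querying the aperture numbers reported
 * by the ioctl above from userspace, assuming the i915_drm.h uapi header
 * (include path depends on the setup) and an already-open DRM fd.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int demo_query_aperture(int drm_fd, uint64_t *total, uint64_t *avail)
 *	{
 *		struct drm_i915_gem_get_aperture ap;
 *
 *		memset(&ap, 0, sizeof(ap));
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap))
 *			return -1;
 *		*total = ap.aper_size;			// mm.gtt_total
 *		*avail = ap.aper_available_size;	// total minus pinned memory
 *		return 0;
 *	}
 */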
277
278 /**
279  * Creates a new mm object and returns a handle to it.
280  */
281 int
282 i915_gem_create_ioctl(struct drm_device *dev, void *data,
283                       struct drm_file *file_priv)
284 {
285         struct drm_i915_gem_create *args = data;
286         struct drm_gem_object *obj;
287         int ret;
288         u32 handle;
289
290         args->size = roundup(args->size, PAGE_SIZE);
291
292         /* Allocate the new object */
293         obj = i915_gem_alloc_object(dev, args->size);
294         if (obj == NULL)
295                 return -ENOMEM;
296
297         ret = drm_gem_handle_create(file_priv, obj, &handle);
298         if (ret) {
299                 drm_gem_object_release(obj);
300                 i915_gem_info_remove_obj(dev->dev_private, obj->size);
301                 kfree(obj);
302                 return ret;
303         }
304
305         /* drop reference from allocate - handle holds it now */
306         drm_gem_object_unreference(obj);
307         trace_i915_gem_object_create(obj);
308
309         args->handle = handle;
310         return 0;
311 }
312
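/*
 * Editor's sketch (not driver code): creating a GEM object from userspace
 * with the ioctl above, assuming the i915_drm.h uapi header and an
 * already-open DRM fd.  The kernel rounds the size up to a page multiple,
 * as seen in i915_gem_create_ioctl().
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int demo_gem_create(int drm_fd, uint64_t size, uint32_t *handle)
 *	{
 *		struct drm_i915_gem_create create;
 *
 *		memset(&create, 0, sizeof(create));
 *		create.size = size;
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *			return -1;
 *		*handle = create.handle;	// 0 is never a valid handle
 *		return 0;
 *	}
 */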
313 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
314 {
315         drm_i915_private_t *dev_priv = obj->dev->dev_private;
316         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
317
318         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
319                 obj_priv->tiling_mode != I915_TILING_NONE;
320 }
321
322 static inline void
323 slow_shmem_copy(struct page *dst_page,
324                 int dst_offset,
325                 struct page *src_page,
326                 int src_offset,
327                 int length)
328 {
329         char *dst_vaddr, *src_vaddr;
330
331         dst_vaddr = kmap(dst_page);
332         src_vaddr = kmap(src_page);
333
334         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
335
336         kunmap(src_page);
337         kunmap(dst_page);
338 }
339
340 static inline void
341 slow_shmem_bit17_copy(struct page *gpu_page,
342                       int gpu_offset,
343                       struct page *cpu_page,
344                       int cpu_offset,
345                       int length,
346                       int is_read)
347 {
348         char *gpu_vaddr, *cpu_vaddr;
349
350         /* Use the unswizzled path if this page isn't affected. */
351         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
352                 if (is_read)
353                         return slow_shmem_copy(cpu_page, cpu_offset,
354                                                gpu_page, gpu_offset, length);
355                 else
356                         return slow_shmem_copy(gpu_page, gpu_offset,
357                                                cpu_page, cpu_offset, length);
358         }
359
360         gpu_vaddr = kmap(gpu_page);
361         cpu_vaddr = kmap(cpu_page);
362
363         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
364          * XORing with the other bits (A9 for Y, A9 and A10 for X)
365          */
366         while (length > 0) {
367                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
368                 int this_length = min(cacheline_end - gpu_offset, length);
369                 int swizzled_gpu_offset = gpu_offset ^ 64;
370
371                 if (is_read) {
372                         memcpy(cpu_vaddr + cpu_offset,
373                                gpu_vaddr + swizzled_gpu_offset,
374                                this_length);
375                 } else {
376                         memcpy(gpu_vaddr + swizzled_gpu_offset,
377                                cpu_vaddr + cpu_offset,
378                                this_length);
379                 }
380                 cpu_offset += this_length;
381                 gpu_offset += this_length;
382                 length -= this_length;
383         }
384
385         kunmap(cpu_page);
386         kunmap(gpu_page);
387 }
388
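/*
 * Editor's sketch (not driver code) of the address transform applied by
 * slow_shmem_bit17_copy() above: with 9/10/17 swizzling, CPU access to a
 * tiled page whose physical address has bit 17 set must flip bit 6 of the
 * offset within the page; other pages are copied unswizzled.
 *
 *	static inline int demo_bit17_swizzle(unsigned long page_phys, int offset)
 *	{
 *		if (page_phys & (1 << 17))
 *			return offset ^ 64;	// e.g. 0x90 <-> 0xd0
 *		return offset;
 *	}
 */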
389 /**
390  * This is the fast shmem pread path, which attempts to copy_to_user directly
391  * from the backing pages of the object to the user's address space.  On a
392  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
393  */
394 static int
395 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
396                           struct drm_i915_gem_pread *args,
397                           struct drm_file *file_priv)
398 {
399         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
400         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
401         ssize_t remain;
402         loff_t offset;
403         char __user *user_data;
404         int page_offset, page_length;
405
406         user_data = (char __user *) (uintptr_t) args->data_ptr;
407         remain = args->size;
408
409         obj_priv = to_intel_bo(obj);
410         offset = args->offset;
411
412         while (remain > 0) {
413                 struct page *page;
414                 char *vaddr;
415                 int ret;
416
417                 /* Operation in this page
418                  *
419                  * page_offset = offset within page
420                  * page_length = bytes to copy for this page
421                  */
422                 page_offset = offset & (PAGE_SIZE-1);
423                 page_length = remain;
424                 if ((page_offset + remain) > PAGE_SIZE)
425                         page_length = PAGE_SIZE - page_offset;
426
427                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
428                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
429                 if (IS_ERR(page))
430                         return PTR_ERR(page);
431
432                 vaddr = kmap_atomic(page);
433                 ret = __copy_to_user_inatomic(user_data,
434                                               vaddr + page_offset,
435                                               page_length);
436                 kunmap_atomic(vaddr);
437
438                 mark_page_accessed(page);
439                 page_cache_release(page);
440                 if (ret)
441                         return -EFAULT;
442
443                 remain -= page_length;
444                 user_data += page_length;
445                 offset += page_length;
446         }
447
448         return 0;
449 }
450
451 /**
452  * This is the fallback shmem pread path, which uses get_user_pages to pin
453  * the destination pages up front.  This lets us copy out of the object's
454  * backing pages while holding the struct_mutex without taking page faults
455  * on the user's address.
456  */
457 static int
458 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
459                           struct drm_i915_gem_pread *args,
460                           struct drm_file *file_priv)
461 {
462         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
463         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
464         struct mm_struct *mm = current->mm;
465         struct page **user_pages;
466         ssize_t remain;
467         loff_t offset, pinned_pages, i;
468         loff_t first_data_page, last_data_page, num_pages;
469         int shmem_page_offset;
470         int data_page_index, data_page_offset;
471         int page_length;
472         int ret;
473         uint64_t data_ptr = args->data_ptr;
474         int do_bit17_swizzling;
475
476         remain = args->size;
477
478         /* Pin the user pages containing the data.  We can't fault while
479          * holding the struct mutex, yet we want to hold it while
480          * dereferencing the user data.
481          */
482         first_data_page = data_ptr / PAGE_SIZE;
483         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
484         num_pages = last_data_page - first_data_page + 1;
485
486         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
487         if (user_pages == NULL)
488                 return -ENOMEM;
489
490         mutex_unlock(&dev->struct_mutex);
491         down_read(&mm->mmap_sem);
492         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
493                                       num_pages, 1, 0, user_pages, NULL);
494         up_read(&mm->mmap_sem);
495         mutex_lock(&dev->struct_mutex);
496         if (pinned_pages < num_pages) {
497                 ret = -EFAULT;
498                 goto out;
499         }
500
501         ret = i915_gem_object_set_cpu_read_domain_range(obj,
502                                                         args->offset,
503                                                         args->size);
504         if (ret)
505                 goto out;
506
507         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
508
509         obj_priv = to_intel_bo(obj);
510         offset = args->offset;
511
512         while (remain > 0) {
513                 struct page *page;
514
515                 /* Operation in this page
516                  *
517                  * shmem_page_offset = offset within page in shmem file
518                  * data_page_index = page number in get_user_pages return
519                  * data_page_offset = offset within data_page_index page.
520                  * page_length = bytes to copy for this page
521                  */
522                 shmem_page_offset = offset & ~PAGE_MASK;
523                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
524                 data_page_offset = data_ptr & ~PAGE_MASK;
525
526                 page_length = remain;
527                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
528                         page_length = PAGE_SIZE - shmem_page_offset;
529                 if ((data_page_offset + page_length) > PAGE_SIZE)
530                         page_length = PAGE_SIZE - data_page_offset;
531
532                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
533                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
534                 if (IS_ERR(page)) {
535                         ret = PTR_ERR(page);
536                         goto out;
537                 }
537                 if (do_bit17_swizzling) {
538                         slow_shmem_bit17_copy(page,
539                                               shmem_page_offset,
540                                               user_pages[data_page_index],
541                                               data_page_offset,
542                                               page_length,
543                                               1);
544                 } else {
545                         slow_shmem_copy(user_pages[data_page_index],
546                                         data_page_offset,
547                                         page,
548                                         shmem_page_offset,
549                                         page_length);
550                 }
551
552                 mark_page_accessed(page);
553                 page_cache_release(page);
554
555                 remain -= page_length;
556                 data_ptr += page_length;
557                 offset += page_length;
558         }
559
560 out:
561         for (i = 0; i < pinned_pages; i++) {
562                 SetPageDirty(user_pages[i]);
563                 mark_page_accessed(user_pages[i]);
564                 page_cache_release(user_pages[i]);
565         }
566         drm_free_large(user_pages);
567
568         return ret;
569 }
570
571 /**
572  * Reads data from the object referenced by handle.
573  *
574  * On error, the contents of *data are undefined.
575  */
576 int
577 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
578                      struct drm_file *file_priv)
579 {
580         struct drm_i915_gem_pread *args = data;
581         struct drm_gem_object *obj;
582         struct drm_i915_gem_object *obj_priv;
583         int ret = 0;
584
585         if (args->size == 0)
586                 return 0;
587
588         if (!access_ok(VERIFY_WRITE,
589                        (char __user *)(uintptr_t)args->data_ptr,
590                        args->size))
591                 return -EFAULT;
592
593         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
594                                        args->size);
595         if (ret)
596                 return -EFAULT;
597
598         ret = i915_mutex_lock_interruptible(dev);
599         if (ret)
600                 return ret;
601
602         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
603         if (obj == NULL) {
604                 ret = -ENOENT;
605                 goto unlock;
606         }
607         obj_priv = to_intel_bo(obj);
608
609         /* Bounds check source.  */
610         if (args->offset > obj->size || args->size > obj->size - args->offset) {
611                 ret = -EINVAL;
612                 goto out;
613         }
614
615         ret = i915_gem_object_set_cpu_read_domain_range(obj,
616                                                         args->offset,
617                                                         args->size);
618         if (ret)
619                 goto out;
620
621         ret = -EFAULT;
622         if (!i915_gem_object_needs_bit17_swizzle(obj))
623                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
624         if (ret == -EFAULT)
625                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
626
627 out:
628         drm_gem_object_unreference(obj);
629 unlock:
630         mutex_unlock(&dev->struct_mutex);
631         return ret;
632 }
633
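/*
 * Editor's sketch (not driver code): reading an object from userspace with
 * the pread ioctl above, assuming the i915_drm.h uapi header and an open
 * DRM fd.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int demo_gem_pread(int drm_fd, uint32_t handle,
 *				  uint64_t offset, void *dst, uint64_t len)
 *	{
 *		struct drm_i915_gem_pread pread;
 *
 *		memset(&pread, 0, sizeof(pread));
 *		pread.handle = handle;
 *		pread.offset = offset;		// must stay within the object
 *		pread.size = len;
 *		pread.data_ptr = (uintptr_t)dst;
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 *	}
 */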
634 /* This is the fast write path which cannot handle
635  * page faults in the source data
636  */
637
638 static inline int
639 fast_user_write(struct io_mapping *mapping,
640                 loff_t page_base, int page_offset,
641                 char __user *user_data,
642                 int length)
643 {
644         char *vaddr_atomic;
645         unsigned long unwritten;
646
647         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
648         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
649                                                       user_data, length);
650         io_mapping_unmap_atomic(vaddr_atomic);
651         return unwritten;
652 }
653
654 /* Here's the write path which can sleep for
655  * page faults
656  */
657
658 static inline void
659 slow_kernel_write(struct io_mapping *mapping,
660                   loff_t gtt_base, int gtt_offset,
661                   struct page *user_page, int user_offset,
662                   int length)
663 {
664         char __iomem *dst_vaddr;
665         char *src_vaddr;
666
667         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
668         src_vaddr = kmap(user_page);
669
670         memcpy_toio(dst_vaddr + gtt_offset,
671                     src_vaddr + user_offset,
672                     length);
673
674         kunmap(user_page);
675         io_mapping_unmap(dst_vaddr);
676 }
677
678 /**
679  * This is the fast pwrite path, where we copy the data directly from the
680  * user into the GTT, uncached.
681  */
682 static int
683 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
684                          struct drm_i915_gem_pwrite *args,
685                          struct drm_file *file_priv)
686 {
687         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
688         drm_i915_private_t *dev_priv = dev->dev_private;
689         ssize_t remain;
690         loff_t offset, page_base;
691         char __user *user_data;
692         int page_offset, page_length;
693
694         user_data = (char __user *) (uintptr_t) args->data_ptr;
695         remain = args->size;
696
697         obj_priv = to_intel_bo(obj);
698         offset = obj_priv->gtt_offset + args->offset;
699
700         while (remain > 0) {
701                 /* Operation in this page
702                  *
703                  * page_base = page offset within aperture
704                  * page_offset = offset within page
705                  * page_length = bytes to copy for this page
706                  */
707                 page_base = (offset & ~(PAGE_SIZE-1));
708                 page_offset = offset & (PAGE_SIZE-1);
709                 page_length = remain;
710                 if ((page_offset + remain) > PAGE_SIZE)
711                         page_length = PAGE_SIZE - page_offset;
712
713                 /* If we get a fault while copying data, then (presumably) our
714                  * source page isn't available.  Return the error and we'll
715                  * retry in the slow path.
716                  */
717                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
718                                     page_offset, user_data, page_length))
719                         return -EFAULT;
720
721
722                 remain -= page_length;
723                 user_data += page_length;
724                 offset += page_length;
725         }
726
727         return 0;
728 }
729
730 /**
731  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
732  * the memory and maps it using kmap_atomic for copying.
733  *
734  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
735  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
736  */
737 static int
738 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
739                          struct drm_i915_gem_pwrite *args,
740                          struct drm_file *file_priv)
741 {
742         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
743         drm_i915_private_t *dev_priv = dev->dev_private;
744         ssize_t remain;
745         loff_t gtt_page_base, offset;
746         loff_t first_data_page, last_data_page, num_pages;
747         loff_t pinned_pages, i;
748         struct page **user_pages;
749         struct mm_struct *mm = current->mm;
750         int gtt_page_offset, data_page_offset, data_page_index, page_length;
751         int ret;
752         uint64_t data_ptr = args->data_ptr;
753
754         remain = args->size;
755
756         /* Pin the user pages containing the data.  We can't fault while
757          * holding the struct mutex, and all of the pwrite implementations
758          * want to hold it while dereferencing the user data.
759          */
760         first_data_page = data_ptr / PAGE_SIZE;
761         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
762         num_pages = last_data_page - first_data_page + 1;
763
764         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
765         if (user_pages == NULL)
766                 return -ENOMEM;
767
768         mutex_unlock(&dev->struct_mutex);
769         down_read(&mm->mmap_sem);
770         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
771                                       num_pages, 0, 0, user_pages, NULL);
772         up_read(&mm->mmap_sem);
773         mutex_lock(&dev->struct_mutex);
774         if (pinned_pages < num_pages) {
775                 ret = -EFAULT;
776                 goto out_unpin_pages;
777         }
778
779         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
780         if (ret)
781                 goto out_unpin_pages;
782
783         obj_priv = to_intel_bo(obj);
784         offset = obj_priv->gtt_offset + args->offset;
785
786         while (remain > 0) {
787                 /* Operation in this page
788                  *
789                  * gtt_page_base = page offset within aperture
790                  * gtt_page_offset = offset within page in aperture
791                  * data_page_index = page number in get_user_pages return
792                  * data_page_offset = offset within data_page_index page.
793                  * page_length = bytes to copy for this page
794                  */
795                 gtt_page_base = offset & PAGE_MASK;
796                 gtt_page_offset = offset & ~PAGE_MASK;
797                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
798                 data_page_offset = data_ptr & ~PAGE_MASK;
799
800                 page_length = remain;
801                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
802                         page_length = PAGE_SIZE - gtt_page_offset;
803                 if ((data_page_offset + page_length) > PAGE_SIZE)
804                         page_length = PAGE_SIZE - data_page_offset;
805
806                 slow_kernel_write(dev_priv->mm.gtt_mapping,
807                                   gtt_page_base, gtt_page_offset,
808                                   user_pages[data_page_index],
809                                   data_page_offset,
810                                   page_length);
811
812                 remain -= page_length;
813                 offset += page_length;
814                 data_ptr += page_length;
815         }
816
817 out_unpin_pages:
818         for (i = 0; i < pinned_pages; i++)
819                 page_cache_release(user_pages[i]);
820         drm_free_large(user_pages);
821
822         return ret;
823 }
824
825 /**
826  * This is the fast shmem pwrite path, which attempts to directly
827  * copy_from_user into the kmapped pages backing the object.
828  */
829 static int
830 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
831                            struct drm_i915_gem_pwrite *args,
832                            struct drm_file *file_priv)
833 {
834         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
835         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
836         ssize_t remain;
837         loff_t offset;
838         char __user *user_data;
839         int page_offset, page_length;
840
841         user_data = (char __user *) (uintptr_t) args->data_ptr;
842         remain = args->size;
843
844         obj_priv = to_intel_bo(obj);
845         offset = args->offset;
846         obj_priv->dirty = 1;
847
848         while (remain > 0) {
849                 struct page *page;
850                 char *vaddr;
851                 int ret;
852
853                 /* Operation in this page
854                  *
855                  * page_offset = offset within page
856                  * page_length = bytes to copy for this page
857                  */
858                 page_offset = offset & (PAGE_SIZE-1);
859                 page_length = remain;
860                 if ((page_offset + remain) > PAGE_SIZE)
861                         page_length = PAGE_SIZE - page_offset;
862
863                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
864                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
865                 if (IS_ERR(page))
866                         return PTR_ERR(page);
867
868                 vaddr = kmap_atomic(page, KM_USER0);
869                 ret = __copy_from_user_inatomic(vaddr + page_offset,
870                                                 user_data,
871                                                 page_length);
872                 kunmap_atomic(vaddr, KM_USER0);
873
874                 set_page_dirty(page);
875                 mark_page_accessed(page);
876                 page_cache_release(page);
877
878                 /* If we get a fault while copying data, then (presumably) our
879                  * source page isn't available.  Return the error and we'll
880                  * retry in the slow path.
881                  */
882                 if (ret)
883                         return -EFAULT;
884
885                 remain -= page_length;
886                 user_data += page_length;
887                 offset += page_length;
888         }
889
890         return 0;
891 }
892
893 /**
894  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
895  * the memory and maps it using kmap_atomic for copying.
896  *
897  * This avoids taking mmap_sem for faulting on the user's address while the
898  * struct_mutex is held.
899  */
900 static int
901 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
902                            struct drm_i915_gem_pwrite *args,
903                            struct drm_file *file_priv)
904 {
905         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
906         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
907         struct mm_struct *mm = current->mm;
908         struct page **user_pages;
909         ssize_t remain;
910         loff_t offset, pinned_pages, i;
911         loff_t first_data_page, last_data_page, num_pages;
912         int shmem_page_offset;
913         int data_page_index,  data_page_offset;
914         int page_length;
915         int ret;
916         uint64_t data_ptr = args->data_ptr;
917         int do_bit17_swizzling;
918
919         remain = args->size;
920
921         /* Pin the user pages containing the data.  We can't fault while
922          * holding the struct mutex, and all of the pwrite implementations
923          * want to hold it while dereferencing the user data.
924          */
925         first_data_page = data_ptr / PAGE_SIZE;
926         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
927         num_pages = last_data_page - first_data_page + 1;
928
929         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
930         if (user_pages == NULL)
931                 return -ENOMEM;
932
933         mutex_unlock(&dev->struct_mutex);
934         down_read(&mm->mmap_sem);
935         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
936                                       num_pages, 0, 0, user_pages, NULL);
937         up_read(&mm->mmap_sem);
938         mutex_lock(&dev->struct_mutex);
939         if (pinned_pages < num_pages) {
940                 ret = -EFAULT;
941                 goto out;
942         }
943
944         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
945         if (ret)
946                 goto out;
947
948         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
949
950         obj_priv = to_intel_bo(obj);
951         offset = args->offset;
952         obj_priv->dirty = 1;
953
954         while (remain > 0) {
955                 struct page *page;
956
957                 /* Operation in this page
958                  *
959                  * shmem_page_offset = offset within page in shmem file
960                  * data_page_index = page number in get_user_pages return
961                  * data_page_offset = offset within data_page_index page.
962                  * page_length = bytes to copy for this page
963                  */
964                 shmem_page_offset = offset & ~PAGE_MASK;
965                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
966                 data_page_offset = data_ptr & ~PAGE_MASK;
967
968                 page_length = remain;
969                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
970                         page_length = PAGE_SIZE - shmem_page_offset;
971                 if ((data_page_offset + page_length) > PAGE_SIZE)
972                         page_length = PAGE_SIZE - data_page_offset;
973
974                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
975                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
976                 if (IS_ERR(page)) {
977                         ret = PTR_ERR(page);
978                         goto out;
979                 }
980
981                 if (do_bit17_swizzling) {
982                         slow_shmem_bit17_copy(page,
983                                               shmem_page_offset,
984                                               user_pages[data_page_index],
985                                               data_page_offset,
986                                               page_length,
987                                               0);
988                 } else {
989                         slow_shmem_copy(page,
990                                         shmem_page_offset,
991                                         user_pages[data_page_index],
992                                         data_page_offset,
993                                         page_length);
994                 }
995
996                 set_page_dirty(page);
997                 mark_page_accessed(page);
998                 page_cache_release(page);
999
1000                 remain -= page_length;
1001                 data_ptr += page_length;
1002                 offset += page_length;
1003         }
1004
1005 out:
1006         for (i = 0; i < pinned_pages; i++)
1007                 page_cache_release(user_pages[i]);
1008         drm_free_large(user_pages);
1009
1010         return ret;
1011 }
1012
1013 /**
1014  * Writes data to the object referenced by handle.
1015  *
1016  * On error, the contents of the buffer that were to be modified are undefined.
1017  */
1018 int
1019 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1020                       struct drm_file *file)
1021 {
1022         struct drm_i915_gem_pwrite *args = data;
1023         struct drm_gem_object *obj;
1024         struct drm_i915_gem_object *obj_priv;
1025         int ret;
1026
1027         if (args->size == 0)
1028                 return 0;
1029
1030         if (!access_ok(VERIFY_READ,
1031                        (char __user *)(uintptr_t)args->data_ptr,
1032                        args->size))
1033                 return -EFAULT;
1034
1035         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
1036                                       args->size);
1037         if (ret)
1038                 return -EFAULT;
1039
1040         ret = i915_mutex_lock_interruptible(dev);
1041         if (ret)
1042                 return ret;
1043
1044         obj = drm_gem_object_lookup(dev, file, args->handle);
1045         if (obj == NULL) {
1046                 ret = -ENOENT;
1047                 goto unlock;
1048         }
1049         obj_priv = to_intel_bo(obj);
1050
1051         /* Bounds check destination. */
1052         if (args->offset > obj->size || args->size > obj->size - args->offset) {
1053                 ret = -EINVAL;
1054                 goto out;
1055         }
1056
1057         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1058          * it would end up going through the fenced access, and we'll get
1059          * different detiling behavior between reading and writing.
1060          * pread/pwrite currently are reading and writing from the CPU
1061          * perspective, requiring manual detiling by the client.
1062          */
1063         if (obj_priv->phys_obj)
1064                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1065         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
1066                  obj_priv->gtt_space &&
1067                  obj->write_domain != I915_GEM_DOMAIN_CPU) {
1068                 ret = i915_gem_object_pin(obj, 0, true);
1069                 if (ret)
1070                         goto out;
1071
1072                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1073                 if (ret)
1074                         goto out_unpin;
1075
1076                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1077                 if (ret == -EFAULT)
1078                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1079
1080 out_unpin:
1081                 i915_gem_object_unpin(obj);
1082         } else {
1083                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1084                 if (ret)
1085                         goto out;
1086
1087                 ret = -EFAULT;
1088                 if (!i915_gem_object_needs_bit17_swizzle(obj))
1089                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1090                 if (ret == -EFAULT)
1091                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1092         }
1093
1094 out:
1095         drm_gem_object_unreference(obj);
1096 unlock:
1097         mutex_unlock(&dev->struct_mutex);
1098         return ret;
1099 }
1100
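/*
 * Editor's sketch (not driver code): writing into an object from userspace
 * with the pwrite ioctl above; which kernel path (phys, GTT or shmem) is
 * used is the driver's choice, per i915_gem_pwrite_ioctl().  Assumes the
 * i915_drm.h uapi header and an open DRM fd.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int demo_gem_pwrite(int drm_fd, uint32_t handle,
 *				   uint64_t offset, const void *src, uint64_t len)
 *	{
 *		struct drm_i915_gem_pwrite pwrite;
 *
 *		memset(&pwrite, 0, sizeof(pwrite));
 *		pwrite.handle = handle;
 *		pwrite.offset = offset;
 *		pwrite.size = len;
 *		pwrite.data_ptr = (uintptr_t)src;
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *	}
 */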
1101 /**
1102  * Called when user space prepares to use an object with the CPU, either
1103  * through the mmap ioctl's mapping or a GTT mapping.
1104  */
1105 int
1106 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1107                           struct drm_file *file_priv)
1108 {
1109         struct drm_i915_private *dev_priv = dev->dev_private;
1110         struct drm_i915_gem_set_domain *args = data;
1111         struct drm_gem_object *obj;
1112         struct drm_i915_gem_object *obj_priv;
1113         uint32_t read_domains = args->read_domains;
1114         uint32_t write_domain = args->write_domain;
1115         int ret;
1116
1117         if (!(dev->driver->driver_features & DRIVER_GEM))
1118                 return -ENODEV;
1119
1120         /* Only handle setting domains to types used by the CPU. */
1121         if (write_domain & I915_GEM_GPU_DOMAINS)
1122                 return -EINVAL;
1123
1124         if (read_domains & I915_GEM_GPU_DOMAINS)
1125                 return -EINVAL;
1126
1127         /* Having something in the write domain implies it's in the read
1128          * domain, and only that read domain.  Enforce that in the request.
1129          */
1130         if (write_domain != 0 && read_domains != write_domain)
1131                 return -EINVAL;
1132
1133         ret = i915_mutex_lock_interruptible(dev);
1134         if (ret)
1135                 return ret;
1136
1137         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1138         if (obj == NULL) {
1139                 ret = -ENOENT;
1140                 goto unlock;
1141         }
1142         obj_priv = to_intel_bo(obj);
1143
1144         intel_mark_busy(dev, obj);
1145
1146         if (read_domains & I915_GEM_DOMAIN_GTT) {
1147                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1148
1149                 /* Update the LRU on the fence for the CPU access that's
1150                  * about to occur.
1151                  */
1152                 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1153                         struct drm_i915_fence_reg *reg =
1154                                 &dev_priv->fence_regs[obj_priv->fence_reg];
1155                         list_move_tail(&reg->lru_list,
1156                                        &dev_priv->mm.fence_list);
1157                 }
1158
1159                 /* Silently promote "you're not bound, there was nothing to do"
1160                  * to success, since the client was just asking us to
1161                  * make sure everything was done.
1162                  */
1163                 if (ret == -EINVAL)
1164                         ret = 0;
1165         } else {
1166                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1167         }
1168
1169         /* Maintain LRU order of "inactive" objects */
1170         if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1171                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1172
1173         drm_gem_object_unreference(obj);
1174 unlock:
1175         mutex_unlock(&dev->struct_mutex);
1176         return ret;
1177 }
1178
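/*
 * Editor's sketch (not driver code): moving an object to the GTT domain for
 * writing before CPU access through a GTT mmap, per the rules enforced
 * above (no GPU domains; a non-zero write domain must equal the read
 * domains).  Assumes the i915_drm.h uapi header and an open DRM fd.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int demo_set_domain_gtt(int drm_fd, uint32_t handle)
 *	{
 *		struct drm_i915_gem_set_domain sd;
 *
 *		memset(&sd, 0, sizeof(sd));
 *		sd.handle = handle;
 *		sd.read_domains = I915_GEM_DOMAIN_GTT;
 *		sd.write_domain = I915_GEM_DOMAIN_GTT;
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	}
 */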
1179 /**
1180  * Called when user space has done writes to this buffer
1181  */
1182 int
1183 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1184                       struct drm_file *file_priv)
1185 {
1186         struct drm_i915_gem_sw_finish *args = data;
1187         struct drm_gem_object *obj;
1188         int ret = 0;
1189
1190         if (!(dev->driver->driver_features & DRIVER_GEM))
1191                 return -ENODEV;
1192
1193         ret = i915_mutex_lock_interruptible(dev);
1194         if (ret)
1195                 return ret;
1196
1197         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1198         if (obj == NULL) {
1199                 ret = -ENOENT;
1200                 goto unlock;
1201         }
1202
1203         /* Pinned buffers may be scanout, so flush the cache */
1204         if (to_intel_bo(obj)->pin_count)
1205                 i915_gem_object_flush_cpu_write_domain(obj);
1206
1207         drm_gem_object_unreference(obj);
1208 unlock:
1209         mutex_unlock(&dev->struct_mutex);
1210         return ret;
1211 }
1212
1213 /**
1214  * Maps the contents of an object, returning the address it is mapped
1215  * into.
1216  *
1217  * While the mapping holds a reference on the contents of the object, it doesn't
1218  * imply a ref on the object itself.
1219  */
1220 int
1221 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1222                    struct drm_file *file_priv)
1223 {
1224         struct drm_i915_private *dev_priv = dev->dev_private;
1225         struct drm_i915_gem_mmap *args = data;
1226         struct drm_gem_object *obj;
1227         loff_t offset;
1228         unsigned long addr;
1229
1230         if (!(dev->driver->driver_features & DRIVER_GEM))
1231                 return -ENODEV;
1232
1233         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1234         if (obj == NULL)
1235                 return -ENOENT;
1236
1237         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1238                 drm_gem_object_unreference_unlocked(obj);
1239                 return -E2BIG;
1240         }
1241
1242         offset = args->offset;
1243
1244         down_write(&current->mm->mmap_sem);
1245         addr = do_mmap(obj->filp, 0, args->size,
1246                        PROT_READ | PROT_WRITE, MAP_SHARED,
1247                        args->offset);
1248         up_write(&current->mm->mmap_sem);
1249         drm_gem_object_unreference_unlocked(obj);
1250         if (IS_ERR((void *)addr))
1251                 return addr;
1252
1253         args->addr_ptr = (uint64_t) addr;
1254
1255         return 0;
1256 }
1257
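/*
 * Editor's sketch (not driver code): the CPU mmap ioctl above performs the
 * mmap on the caller's behalf and returns the address in addr_ptr, so
 * userspace does not call mmap(2) itself for this path.  Assumes the
 * i915_drm.h uapi header and an open DRM fd.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static void *demo_gem_mmap_cpu(int drm_fd, uint32_t handle, uint64_t size)
 *	{
 *		struct drm_i915_gem_mmap arg;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.handle = handle;
 *		arg.offset = 0;
 *		arg.size = size;
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
 *			return NULL;
 *		return (void *)(uintptr_t)arg.addr_ptr;
 *	}
 */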
1258 /**
1259  * i915_gem_fault - fault a page into the GTT
1260  * @vma: VMA in question
1261  * @vmf: fault info
1262  *
1263  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1264  * from userspace.  The fault handler takes care of binding the object to
1265  * the GTT (if needed), allocating and programming a fence register (again,
1266  * only if needed based on whether the old reg is still valid or the object
1267  * is tiled) and inserting a new PTE into the faulting process.
1268  *
1269  * Note that the faulting process may involve evicting existing objects
1270  * from the GTT and/or fence registers to make room.  So performance may
1271  * suffer if the GTT working set is large or there are few fence registers
1272  * left.
1273  */
1274 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1275 {
1276         struct drm_gem_object *obj = vma->vm_private_data;
1277         struct drm_device *dev = obj->dev;
1278         drm_i915_private_t *dev_priv = dev->dev_private;
1279         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1280         pgoff_t page_offset;
1281         unsigned long pfn;
1282         int ret = 0;
1283         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1284
1285         /* We don't use vmf->pgoff since that has the fake offset */
1286         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1287                 PAGE_SHIFT;
1288
1289         /* Now bind it into the GTT if needed */
1290         mutex_lock(&dev->struct_mutex);
1291         BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
1292
1293         if (obj_priv->gtt_space) {
1294                 if (!obj_priv->map_and_fenceable) {
1295                         ret = i915_gem_object_unbind(obj);
1296                         if (ret)
1297                                 goto unlock;
1298                 }
1299         }
1300
1301         if (!obj_priv->gtt_space) {
1302                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1303                 if (ret)
1304                         goto unlock;
1305         }
1306
1307         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1308         if (ret)
1309                 goto unlock;
1310
1311         if (!obj_priv->fault_mappable) {
1312                 obj_priv->fault_mappable = true;
1313                 i915_gem_info_update_mappable(dev_priv, obj_priv, true);
1314         }
1315
1316         /* Need a new fence register? */
1317         if (obj_priv->tiling_mode != I915_TILING_NONE) {
1318                 ret = i915_gem_object_get_fence_reg(obj, true);
1319                 if (ret)
1320                         goto unlock;
1321         }
1322
1323         if (i915_gem_object_is_inactive(obj_priv))
1324                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1325
1326         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1327                 page_offset;
1328
1329         /* Finally, remap it using the new GTT offset */
1330         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1331 unlock:
1332         mutex_unlock(&dev->struct_mutex);
1333
1334         switch (ret) {
1335         case -EAGAIN:
1336                 set_need_resched();
1337         case 0:
1338         case -ERESTARTSYS:
1339                 return VM_FAULT_NOPAGE;
1340         case -ENOMEM:
1341                 return VM_FAULT_OOM;
1342         default:
1343                 return VM_FAULT_SIGBUS;
1344         }
1345 }
1346
1347 /**
1348  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1349  * @obj: obj in question
1350  *
1351  * GEM memory mapping works by handing back to userspace a fake mmap offset
1352  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1353  * up the object based on the offset and sets up the various memory mapping
1354  * structures.
1355  *
1356  * This routine allocates and attaches a fake offset for @obj.
1357  */
1358 static int
1359 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1360 {
1361         struct drm_device *dev = obj->dev;
1362         struct drm_gem_mm *mm = dev->mm_private;
1363         struct drm_map_list *list;
1364         struct drm_local_map *map;
1365         int ret = 0;
1366
1367         /* Set the object up for mmap'ing */
1368         list = &obj->map_list;
1369         list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1370         if (!list->map)
1371                 return -ENOMEM;
1372
1373         map = list->map;
1374         map->type = _DRM_GEM;
1375         map->size = obj->size;
1376         map->handle = obj;
1377
1378         /* Get a DRM GEM mmap offset allocated... */
1379         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1380                                                     obj->size / PAGE_SIZE, 0, 0);
1381         if (!list->file_offset_node) {
1382                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1383                 ret = -ENOSPC;
1384                 goto out_free_list;
1385         }
1386
1387         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1388                                                   obj->size / PAGE_SIZE, 0);
1389         if (!list->file_offset_node) {
1390                 ret = -ENOMEM;
1391                 goto out_free_list;
1392         }
1393
1394         list->hash.key = list->file_offset_node->start;
1395         ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1396         if (ret) {
1397                 DRM_ERROR("failed to add to map hash\n");
1398                 goto out_free_mm;
1399         }
1400
1401         return 0;
1402
1403 out_free_mm:
1404         drm_mm_put_block(list->file_offset_node);
1405 out_free_list:
1406         kfree(list->map);
1407         list->map = NULL;
1408
1409         return ret;
1410 }
1411
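/*
 * Editor's sketch (not driver code) of the fake-offset flow described above:
 * userspace asks for the object's fake mmap offset and then mmaps the DRM fd
 * at that offset; subsequent accesses fault into i915_gem_fault().  Assumes
 * the i915_drm.h uapi header and an open DRM fd.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <i915_drm.h>
 *
 *	static void *demo_gem_mmap_gtt(int drm_fd, uint32_t handle, uint64_t size)
 *	{
 *		struct drm_i915_gem_mmap_gtt arg;
 *		void *ptr;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.handle = handle;
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
 *			return NULL;
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   drm_fd, arg.offset);
 *		return ptr == MAP_FAILED ? NULL : ptr;
 *	}
 */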
1412 /**
1413  * i915_gem_release_mmap - remove physical page mappings
1414  * @obj: obj in question
1415  *
1416  * Preserve the reservation of the mmapping with the DRM core code, but
1417  * relinquish ownership of the pages back to the system.
1418  *
1419  * It is vital that we remove the page mapping if we have mapped a tiled
1420  * object through the GTT and then lose the fence register due to
1421  * resource pressure. Similarly if the object has been moved out of the
1422  * aperture, then pages mapped into userspace must be revoked. Removing the
1423  * mapping will then trigger a page fault on the next user access, allowing
1424  * fixup by i915_gem_fault().
1425  */
1426 void
1427 i915_gem_release_mmap(struct drm_gem_object *obj)
1428 {
1429         struct drm_device *dev = obj->dev;
1430         struct drm_i915_private *dev_priv = dev->dev_private;
1431         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1432
1433         if (unlikely(obj->map_list.map && dev->dev_mapping))
1434                 unmap_mapping_range(dev->dev_mapping,
1435                                     (loff_t)obj->map_list.hash.key<<PAGE_SHIFT,
1436                                     obj->size, 1);
1437
1438         if (obj_priv->fault_mappable) {
1439                 obj_priv->fault_mappable = false;
1440                 i915_gem_info_update_mappable(dev_priv, obj_priv, false);
1441         }
1442 }
1443
1444 static void
1445 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1446 {
1447         struct drm_device *dev = obj->dev;
1448         struct drm_gem_mm *mm = dev->mm_private;
1449         struct drm_map_list *list = &obj->map_list;
1450
1451         drm_ht_remove_item(&mm->offset_hash, &list->hash);
1452         drm_mm_put_block(list->file_offset_node);
1453         kfree(list->map);
1454         list->map = NULL;
1455 }
1456
1457 /**
1458  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1459  * @obj: object to check
1460  *
1461  * Return the required GTT alignment for an object, taking into account
1462  * potential fence register mapping.
1463  */
1464 static uint32_t
1465 i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
1466 {
1467         struct drm_device *dev = obj_priv->base.dev;
1468
1469         /*
1470          * Minimum alignment is 4k (GTT page size), but might be greater
1471          * if a fence register is needed for the object.
1472          */
1473         if (INTEL_INFO(dev)->gen >= 4 ||
1474             obj_priv->tiling_mode == I915_TILING_NONE)
1475                 return 4096;
1476
1477         /*
1478          * Previous chips need to be aligned to the size of the smallest
1479          * fence register that can contain the object.
1480          */
1481         return i915_gem_get_gtt_size(obj_priv);
1482 }
1483
1484 /**
1485  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1486  *                                       unfenced object
1487  * @obj: object to check
1488  *
1489  * Return the required GTT alignment for an object, only taking into account
1490  * unfenced tiled surface requirements.
1491  */
1492 static uint32_t
1493 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv)
1494 {
1495         struct drm_device *dev = obj_priv->base.dev;
1496         int tile_height;
1497
1498         /*
1499          * Minimum alignment is 4k (GTT page size) for sane hw.
1500          */
1501         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1502             obj_priv->tiling_mode == I915_TILING_NONE)
1503                 return 4096;
1504
1505         /*
1506          * Older chips need unfenced tiled buffers to be aligned to the left
1507          * edge of an even tile row (where tile rows are counted as if the bo is
1508          * placed in a fenced gtt region).
1509          */
1510         if (IS_GEN2(dev) ||
1511             (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1512                 tile_height = 32;
1513         else
1514                 tile_height = 8;
1515
1516         return tile_height * obj_priv->stride * 2;
1517 }
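
/*
 * Worked example (illustrative numbers, not taken from the code above): a
 * gen3 X-tiled buffer with a 2048-byte stride falls into the 8-row case, so
 * the unfenced alignment works out to 8 * 2048 * 2 = 32768 bytes, i.e. the
 * object must start on an even 32KB tile-row boundary.
 */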
1518
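/**
 * i915_gem_get_gtt_size - return the size of the GTT region a fence would cover
 * @obj_priv: object to check
 *
 * On gen4+ this is simply the object size.  Older chips fence a power-of-two
 * sized region (at least 1MB on gen3, 512KB on gen2) large enough to contain
 * the object; for example (illustrative), a 768KB object would be covered by
 * a 1MB fence region on both gen3 and gen2.
 */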
1519 static uint32_t
1520 i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
1521 {
1522         struct drm_device *dev = obj_priv->base.dev;
1523         uint32_t size;
1524
1525         /*
1526          * On gen4+ a fence can track the object at its natural size,
1527          * so no power-of-two rounding of the region is required.
1528          */
1529         if (INTEL_INFO(dev)->gen >= 4)
1530                 return obj_priv->base.size;
1531
1532         /*
1533          * Older chips fence a power-of-two sized region large enough
1534          * to contain the object.
1535          */
1536         if (INTEL_INFO(dev)->gen == 3)
1537                 size = 1024*1024;
1538         else
1539                 size = 512*1024;
1540
1541         while (size < obj_priv->base.size)
1542                 size <<= 1;
1543
1544         return size;
1545 }
1546
1547 /**
1548  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1549  * @dev: DRM device
1550  * @data: GTT mapping ioctl data
1551  * @file_priv: GEM object info
1552  *
1553  * Simply returns the fake offset to userspace so it can mmap it.
1554  * The mmap call will end up in drm_gem_mmap(), which will set things
1555  * up so we can get faults in the handler above.
1556  *
1557  * The fault handler will take care of binding the object into the GTT
1558  * (since it may have been evicted to make room for something), allocating
1559  * a fence register, and mapping the appropriate aperture address into
1560  * userspace.
1561  */
1562 int
1563 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1564                         struct drm_file *file_priv)
1565 {
1566         struct drm_i915_private *dev_priv = dev->dev_private;
1567         struct drm_i915_gem_mmap_gtt *args = data;
1568         struct drm_gem_object *obj;
1569         struct drm_i915_gem_object *obj_priv;
1570         int ret;
1571
1572         if (!(dev->driver->driver_features & DRIVER_GEM))
1573                 return -ENODEV;
1574
1575         ret = i915_mutex_lock_interruptible(dev);
1576         if (ret)
1577                 return ret;
1578
1579         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1580         if (obj == NULL) {
1581                 ret = -ENOENT;
1582                 goto unlock;
1583         }
1584         obj_priv = to_intel_bo(obj);
1585
1586         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1587                 ret = -E2BIG;
1588                 goto out;
1589         }
1590
1591         if (obj_priv->madv != I915_MADV_WILLNEED) {
1592                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1593                 ret = -EINVAL;
1594                 goto out;
1595         }
1596
1597         if (!obj->map_list.map) {
1598                 ret = i915_gem_create_mmap_offset(obj);
1599                 if (ret)
1600                         goto out;
1601         }
1602
1603         args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
1604
1605 out:
1606         drm_gem_object_unreference(obj);
1607 unlock:
1608         mutex_unlock(&dev->struct_mutex);
1609         return ret;
1610 }
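
/*
 * Example (illustrative, not part of the driver): userspace typically passes
 * the fake offset returned by the ioctl above straight to mmap() on the same
 * DRM fd; "fd", "handle" and "size" are assumed to come from earlier calls.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */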
1611
1612 static int
1613 i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
1614                               gfp_t gfpmask)
1615 {
1616         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1617         int page_count, i;
1618         struct address_space *mapping;
1619         struct inode *inode;
1620         struct page *page;
1621
1622         /* Get the list of pages out of our struct file.  They'll be pinned
1623          * at this point until we release them.
1624          */
1625         page_count = obj->size / PAGE_SIZE;
1626         BUG_ON(obj_priv->pages != NULL);
1627         obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1628         if (obj_priv->pages == NULL)
1629                 return -ENOMEM;
1630
1631         inode = obj->filp->f_path.dentry->d_inode;
1632         mapping = inode->i_mapping;
1633         for (i = 0; i < page_count; i++) {
1634                 page = read_cache_page_gfp(mapping, i,
1635                                            GFP_HIGHUSER |
1636                                            __GFP_COLD |
1637                                            __GFP_RECLAIMABLE |
1638                                            gfpmask);
1639                 if (IS_ERR(page))
1640                         goto err_pages;
1641
1642                 obj_priv->pages[i] = page;
1643         }
1644
1645         if (obj_priv->tiling_mode != I915_TILING_NONE)
1646                 i915_gem_object_do_bit_17_swizzle(obj);
1647
1648         return 0;
1649
1650 err_pages:
1651         while (i--)
1652                 page_cache_release(obj_priv->pages[i]);
1653
1654         drm_free_large(obj_priv->pages);
1655         obj_priv->pages = NULL;
1656         return PTR_ERR(page);
1657 }
1658
1659 static void
1660 i915_gem_object_put_pages_gtt(struct drm_gem_object *obj)
1661 {
1662         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1663         int page_count = obj->size / PAGE_SIZE;
1664         int i;
1665
1666         BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1667
1668         if (obj_priv->tiling_mode != I915_TILING_NONE)
1669                 i915_gem_object_save_bit_17_swizzle(obj);
1670
1671         if (obj_priv->madv == I915_MADV_DONTNEED)
1672                 obj_priv->dirty = 0;
1673
1674         for (i = 0; i < page_count; i++) {
1675                 if (obj_priv->dirty)
1676                         set_page_dirty(obj_priv->pages[i]);
1677
1678                 if (obj_priv->madv == I915_MADV_WILLNEED)
1679                         mark_page_accessed(obj_priv->pages[i]);
1680
1681                 page_cache_release(obj_priv->pages[i]);
1682         }
1683         obj_priv->dirty = 0;
1684
1685         drm_free_large(obj_priv->pages);
1686         obj_priv->pages = NULL;
1687 }
1688
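/*
 * Peek at the seqno that the next request emitted on this ring will use and
 * record it as the ring's outstanding lazy request; the request itself is
 * only emitted later, e.g. by i915_add_request().
 */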
1689 static uint32_t
1690 i915_gem_next_request_seqno(struct drm_device *dev,
1691                             struct intel_ring_buffer *ring)
1692 {
1693         drm_i915_private_t *dev_priv = dev->dev_private;
1694         return ring->outstanding_lazy_request = dev_priv->next_seqno;
1695 }
1696
1697 static void
1698 i915_gem_object_move_to_active(struct drm_gem_object *obj,
1699                                struct intel_ring_buffer *ring)
1700 {
1701         struct drm_device *dev = obj->dev;
1702         struct drm_i915_private *dev_priv = dev->dev_private;
1703         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1704         uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
1705
1706         BUG_ON(ring == NULL);
1707         obj_priv->ring = ring;
1708
1709         /* Add a reference if we're newly entering the active list. */
1710         if (!obj_priv->active) {
1711                 drm_gem_object_reference(obj);
1712                 obj_priv->active = 1;
1713         }
1714
1715         /* Move from whatever list we were on to the tail of execution. */
1716         list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
1717         list_move_tail(&obj_priv->ring_list, &ring->active_list);
1718         obj_priv->last_rendering_seqno = seqno;
1719 }
1720
1721 static void
1722 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1723 {
1724         struct drm_device *dev = obj->dev;
1725         drm_i915_private_t *dev_priv = dev->dev_private;
1726         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1727
1728         BUG_ON(!obj_priv->active);
1729         list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
1730         list_del_init(&obj_priv->ring_list);
1731         obj_priv->last_rendering_seqno = 0;
1732 }
1733
1734 /* Immediately discard the backing storage */
1735 static void
1736 i915_gem_object_truncate(struct drm_gem_object *obj)
1737 {
1738         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1739         struct inode *inode;
1740
1741         /* Our goal here is to return as much of the memory as
1742          * possible back to the system, as we are called from OOM.
1743          * To do this we must instruct the shmfs to drop all of its
1744          * backing pages, *now*. Here we mirror the actions taken
1745          * by shmem_delete_inode() to release the backing store.
1746          */
1747         inode = obj->filp->f_path.dentry->d_inode;
1748         truncate_inode_pages(inode->i_mapping, 0);
1749         if (inode->i_op->truncate_range)
1750                 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1751
1752         obj_priv->madv = __I915_MADV_PURGED;
1753 }
1754
1755 static inline int
1756 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1757 {
1758         return obj_priv->madv == I915_MADV_DONTNEED;
1759 }
1760
1761 static void
1762 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1763 {
1764         struct drm_device *dev = obj->dev;
1765         drm_i915_private_t *dev_priv = dev->dev_private;
1766         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1767
1768         if (obj_priv->pin_count != 0)
1769                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
1770         else
1771                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1772         list_del_init(&obj_priv->ring_list);
1773
1774         BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1775
1776         obj_priv->last_rendering_seqno = 0;
1777         obj_priv->ring = NULL;
1778         if (obj_priv->active) {
1779                 obj_priv->active = 0;
1780                 drm_gem_object_unreference(obj);
1781         }
1782         WARN_ON(i915_verify_lists(dev));
1783 }
1784
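/*
 * For every object on the ring's gpu_write_list whose pending GPU write lies
 * in the flushed domains, clear the write domain (the flush for it has just
 * been queued), move the object onto the active list, and bump any fence it
 * holds in the fence LRU.
 */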
1785 static void
1786 i915_gem_process_flushing_list(struct drm_device *dev,
1787                                uint32_t flush_domains,
1788                                struct intel_ring_buffer *ring)
1789 {
1790         drm_i915_private_t *dev_priv = dev->dev_private;
1791         struct drm_i915_gem_object *obj_priv, *next;
1792
1793         list_for_each_entry_safe(obj_priv, next,
1794                                  &ring->gpu_write_list,
1795                                  gpu_write_list) {
1796                 struct drm_gem_object *obj = &obj_priv->base;
1797
1798                 if (obj->write_domain & flush_domains) {
1799                         uint32_t old_write_domain = obj->write_domain;
1800
1801                         obj->write_domain = 0;
1802                         list_del_init(&obj_priv->gpu_write_list);
1803                         i915_gem_object_move_to_active(obj, ring);
1804
1805                         /* update the fence lru list */
1806                         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1807                                 struct drm_i915_fence_reg *reg =
1808                                         &dev_priv->fence_regs[obj_priv->fence_reg];
1809                                 list_move_tail(&reg->lru_list,
1810                                                 &dev_priv->mm.fence_list);
1811                         }
1812
1813                         trace_i915_gem_object_change_domain(obj,
1814                                                             obj->read_domains,
1815                                                             old_write_domain);
1816                 }
1817         }
1818 }
1819
1820 int
1821 i915_add_request(struct drm_device *dev,
1822                  struct drm_file *file,
1823                  struct drm_i915_gem_request *request,
1824                  struct intel_ring_buffer *ring)
1825 {
1826         drm_i915_private_t *dev_priv = dev->dev_private;
1827         struct drm_i915_file_private *file_priv = NULL;
1828         uint32_t seqno;
1829         int was_empty;
1830         int ret;
1831
1832         BUG_ON(request == NULL);
1833
1834         if (file != NULL)
1835                 file_priv = file->driver_priv;
1836
1837         ret = ring->add_request(ring, &seqno);
1838         if (ret)
1839                 return ret;
1840
1841         ring->outstanding_lazy_request = false;
1842
1843         request->seqno = seqno;
1844         request->ring = ring;
1845         request->emitted_jiffies = jiffies;
1846         was_empty = list_empty(&ring->request_list);
1847         list_add_tail(&request->list, &ring->request_list);
1848
1849         if (file_priv) {
1850                 spin_lock(&file_priv->mm.lock);
1851                 request->file_priv = file_priv;
1852                 list_add_tail(&request->client_list,
1853                               &file_priv->mm.request_list);
1854                 spin_unlock(&file_priv->mm.lock);
1855         }
1856
1857         if (!dev_priv->mm.suspended) {
1858                 mod_timer(&dev_priv->hangcheck_timer,
1859                           jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1860                 if (was_empty)
1861                         queue_delayed_work(dev_priv->wq,
1862                                            &dev_priv->mm.retire_work, HZ);
1863         }
1864         return 0;
1865 }
1866
1867 /**
1868  * Command execution barrier
1869  *
1870  * Ensures that all commands in the ring are finished
1871  * before signalling the CPU
1872  */
1873 static void
1874 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1875 {
1876         uint32_t flush_domains = 0;
1877
1878         /* The sampler always gets flushed on i965 (sigh) */
1879         if (INTEL_INFO(dev)->gen >= 4)
1880                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1881
1882         ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
1883 }
1884
1885 static inline void
1886 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1887 {
1888         struct drm_i915_file_private *file_priv = request->file_priv;
1889
1890         if (!file_priv)
1891                 return;
1892
1893         spin_lock(&file_priv->mm.lock);
1894         list_del(&request->client_list);
1895         request->file_priv = NULL;
1896         spin_unlock(&file_priv->mm.lock);
1897 }
1898
1899 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1900                                       struct intel_ring_buffer *ring)
1901 {
1902         while (!list_empty(&ring->request_list)) {
1903                 struct drm_i915_gem_request *request;
1904
1905                 request = list_first_entry(&ring->request_list,
1906                                            struct drm_i915_gem_request,
1907                                            list);
1908
1909                 list_del(&request->list);
1910                 i915_gem_request_remove_from_client(request);
1911                 kfree(request);
1912         }
1913
1914         while (!list_empty(&ring->active_list)) {
1915                 struct drm_i915_gem_object *obj_priv;
1916
1917                 obj_priv = list_first_entry(&ring->active_list,
1918                                             struct drm_i915_gem_object,
1919                                             ring_list);
1920
1921                 obj_priv->base.write_domain = 0;
1922                 list_del_init(&obj_priv->gpu_write_list);
1923                 i915_gem_object_move_to_inactive(&obj_priv->base);
1924         }
1925 }
1926
1927 void i915_gem_reset(struct drm_device *dev)
1928 {
1929         struct drm_i915_private *dev_priv = dev->dev_private;
1930         struct drm_i915_gem_object *obj_priv;
1931         int i;
1932
1933         i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
1934         i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
1935         i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
1936
1937         /* Remove anything from the flushing lists. The GPU cache is likely
1938          * to be lost on reset along with the data, so simply move the
1939          * lost bo to the inactive list.
1940          */
1941         while (!list_empty(&dev_priv->mm.flushing_list)) {
1942                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1943                                             struct drm_i915_gem_object,
1944                                             mm_list);
1945
1946                 obj_priv->base.write_domain = 0;
1947                 list_del_init(&obj_priv->gpu_write_list);
1948                 i915_gem_object_move_to_inactive(&obj_priv->base);
1949         }
1950
1951         /* Move everything out of the GPU domains to ensure we do any
1952          * necessary invalidation upon reuse.
1953          */
1954         list_for_each_entry(obj_priv,
1955                             &dev_priv->mm.inactive_list,
1956                             mm_list)
1957         {
1958                 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1959         }
1960
1961         /* The fence registers are invalidated so clear them out */
1962         for (i = 0; i < 16; i++) {
1963                 struct drm_i915_fence_reg *reg;
1964
1965                 reg = &dev_priv->fence_regs[i];
1966                 if (!reg->obj)
1967                         continue;
1968
1969                 i915_gem_clear_fence_reg(reg->obj);
1970         }
1971 }
1972
1973 /**
1974  * This function clears the request list as sequence numbers are passed.
1975  */
1976 static void
1977 i915_gem_retire_requests_ring(struct drm_device *dev,
1978                               struct intel_ring_buffer *ring)
1979 {
1980         drm_i915_private_t *dev_priv = dev->dev_private;
1981         uint32_t seqno;
1982
1983         if (!ring->status_page.page_addr ||
1984             list_empty(&ring->request_list))
1985                 return;
1986
1987         WARN_ON(i915_verify_lists(dev));
1988
1989         seqno = ring->get_seqno(ring);
1990         while (!list_empty(&ring->request_list)) {
1991                 struct drm_i915_gem_request *request;
1992
1993                 request = list_first_entry(&ring->request_list,
1994                                            struct drm_i915_gem_request,
1995                                            list);
1996
1997                 if (!i915_seqno_passed(seqno, request->seqno))
1998                         break;
1999
2000                 trace_i915_gem_request_retire(dev, request->seqno);
2001
2002                 list_del(&request->list);
2003                 i915_gem_request_remove_from_client(request);
2004                 kfree(request);
2005         }
2006
2007         /* Move any buffers on the active list that are no longer referenced
2008          * by the ringbuffer to the flushing/inactive lists as appropriate.
2009          */
2010         while (!list_empty(&ring->active_list)) {
2011                 struct drm_gem_object *obj;
2012                 struct drm_i915_gem_object *obj_priv;
2013
2014                 obj_priv = list_first_entry(&ring->active_list,
2015                                             struct drm_i915_gem_object,
2016                                             ring_list);
2017
2018                 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
2019                         break;
2020
2021                 obj = &obj_priv->base;
2022                 if (obj->write_domain != 0)
2023                         i915_gem_object_move_to_flushing(obj);
2024                 else
2025                         i915_gem_object_move_to_inactive(obj);
2026         }
2027
2028         if (unlikely(dev_priv->trace_irq_seqno &&
2029                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
2030                 ring->user_irq_put(ring);
2031                 dev_priv->trace_irq_seqno = 0;
2032         }
2033
2034         WARN_ON(i915_verify_lists(dev));
2035 }
2036
2037 void
2038 i915_gem_retire_requests(struct drm_device *dev)
2039 {
2040         drm_i915_private_t *dev_priv = dev->dev_private;
2041
2042         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
2043             struct drm_i915_gem_object *obj_priv, *tmp;
2044
2045             /* We must be careful that during unbind() we do not
2046              * accidentally infinitely recurse into retire requests.
2047              * Currently:
2048              *   retire -> free -> unbind -> wait -> retire_ring
2049              */
2050             list_for_each_entry_safe(obj_priv, tmp,
2051                                      &dev_priv->mm.deferred_free_list,
2052                                      mm_list)
2053                     i915_gem_free_object_tail(&obj_priv->base);
2054         }
2055
2056         i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
2057         i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
2058         i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
2059 }
2060
2061 static void
2062 i915_gem_retire_work_handler(struct work_struct *work)
2063 {
2064         drm_i915_private_t *dev_priv;
2065         struct drm_device *dev;
2066
2067         dev_priv = container_of(work, drm_i915_private_t,
2068                                 mm.retire_work.work);
2069         dev = dev_priv->dev;
2070
2071         /* Come back later if the device is busy... */
2072         if (!mutex_trylock(&dev->struct_mutex)) {
2073                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2074                 return;
2075         }
2076
2077         i915_gem_retire_requests(dev);
2078
2079         if (!dev_priv->mm.suspended &&
2080                 (!list_empty(&dev_priv->render_ring.request_list) ||
2081                  !list_empty(&dev_priv->bsd_ring.request_list) ||
2082                  !list_empty(&dev_priv->blt_ring.request_list)))
2083                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2084         mutex_unlock(&dev->struct_mutex);
2085 }
2086
2087 int
2088 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2089                      bool interruptible, struct intel_ring_buffer *ring)
2090 {
2091         drm_i915_private_t *dev_priv = dev->dev_private;
2092         u32 ier;
2093         int ret = 0;
2094
2095         BUG_ON(seqno == 0);
2096
2097         if (atomic_read(&dev_priv->mm.wedged))
2098                 return -EAGAIN;
2099
2100         if (seqno == ring->outstanding_lazy_request) {
2101                 struct drm_i915_gem_request *request;
2102
2103                 request = kzalloc(sizeof(*request), GFP_KERNEL);
2104                 if (request == NULL)
2105                         return -ENOMEM;
2106
2107                 ret = i915_add_request(dev, NULL, request, ring);
2108                 if (ret) {
2109                         kfree(request);
2110                         return ret;
2111                 }
2112
2113                 seqno = request->seqno;
2114         }
2115
2116         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2117                 if (HAS_PCH_SPLIT(dev))
2118                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2119                 else
2120                         ier = I915_READ(IER);
2121                 if (!ier) {
2122                         DRM_ERROR("something (likely vbetool) disabled "
2123                                   "interrupts, re-enabling\n");
2124                         i915_driver_irq_preinstall(dev);
2125                         i915_driver_irq_postinstall(dev);
2126                 }
2127
2128                 trace_i915_gem_request_wait_begin(dev, seqno);
2129
2130                 ring->waiting_seqno = seqno;
2131                 ring->user_irq_get(ring);
2132                 if (interruptible)
2133                         ret = wait_event_interruptible(ring->irq_queue,
2134                                 i915_seqno_passed(ring->get_seqno(ring), seqno)
2135                                 || atomic_read(&dev_priv->mm.wedged));
2136                 else
2137                         wait_event(ring->irq_queue,
2138                                 i915_seqno_passed(ring->get_seqno(ring), seqno)
2139                                 || atomic_read(&dev_priv->mm.wedged));
2140
2141                 ring->user_irq_put(ring);
2142                 ring->waiting_seqno = 0;
2143
2144                 trace_i915_gem_request_wait_end(dev, seqno);
2145         }
2146         if (atomic_read(&dev_priv->mm.wedged))
2147                 ret = -EAGAIN;
2148
2149         if (ret && ret != -ERESTARTSYS)
2150                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2151                           __func__, ret, seqno, ring->get_seqno(ring),
2152                           dev_priv->next_seqno);
2153
2154         /* Directly dispatch request retiring.  While we have the work queue
2155          * to handle this, the waiter on a request often wants an associated
2156          * buffer to have made it to the inactive list, and we would need
2157          * a separate wait queue to handle that.
2158          */
2159         if (ret == 0)
2160                 i915_gem_retire_requests_ring(dev, ring);
2161
2162         return ret;
2163 }
2164
2165 /**
2166  * Waits for a sequence number to be signaled, and cleans up the
2167  * request and object lists appropriately for that event.
2168  */
2169 static int
2170 i915_wait_request(struct drm_device *dev, uint32_t seqno,
2171                   struct intel_ring_buffer *ring)
2172 {
2173         return i915_do_wait_request(dev, seqno, 1, ring);
2174 }
2175
2176 static void
2177 i915_gem_flush_ring(struct drm_device *dev,
2178                     struct drm_file *file_priv,
2179                     struct intel_ring_buffer *ring,
2180                     uint32_t invalidate_domains,
2181                     uint32_t flush_domains)
2182 {
2183         ring->flush(ring, invalidate_domains, flush_domains);
2184         i915_gem_process_flushing_list(dev, flush_domains, ring);
2185 }
2186
2187 static void
2188 i915_gem_flush(struct drm_device *dev,
2189                struct drm_file *file_priv,
2190                uint32_t invalidate_domains,
2191                uint32_t flush_domains,
2192                uint32_t flush_rings)
2193 {
2194         drm_i915_private_t *dev_priv = dev->dev_private;
2195
2196         if (flush_domains & I915_GEM_DOMAIN_CPU)
2197                 intel_gtt_chipset_flush();
2198
2199         if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2200                 if (flush_rings & RING_RENDER)
2201                         i915_gem_flush_ring(dev, file_priv,
2202                                             &dev_priv->render_ring,
2203                                             invalidate_domains, flush_domains);
2204                 if (flush_rings & RING_BSD)
2205                         i915_gem_flush_ring(dev, file_priv,
2206                                             &dev_priv->bsd_ring,
2207                                             invalidate_domains, flush_domains);
2208                 if (flush_rings & RING_BLT)
2209                         i915_gem_flush_ring(dev, file_priv,
2210                                             &dev_priv->blt_ring,
2211                                             invalidate_domains, flush_domains);
2212         }
2213 }
2214
2215 /**
2216  * Ensures that all rendering to the object has completed and the object is
2217  * safe to unbind from the GTT or access from the CPU.
2218  */
2219 static int
2220 i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2221                                bool interruptible)
2222 {
2223         struct drm_device *dev = obj->dev;
2224         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2225         int ret;
2226
2227         /* This function only exists to support waiting for existing rendering,
2228          * not for emitting required flushes.
2229          */
2230         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
2231
2232         /* If there is rendering queued on the buffer being evicted, wait for
2233          * it.
2234          */
2235         if (obj_priv->active) {
2236                 ret = i915_do_wait_request(dev,
2237                                            obj_priv->last_rendering_seqno,
2238                                            interruptible,
2239                                            obj_priv->ring);
2240                 if (ret)
2241                         return ret;
2242         }
2243
2244         return 0;
2245 }
2246
2247 /**
2248  * Unbinds an object from the GTT aperture.
2249  */
2250 int
2251 i915_gem_object_unbind(struct drm_gem_object *obj)
2252 {
2253         struct drm_device *dev = obj->dev;
2254         struct drm_i915_private *dev_priv = dev->dev_private;
2255         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2256         int ret = 0;
2257
2258         if (obj_priv->gtt_space == NULL)
2259                 return 0;
2260
2261         if (obj_priv->pin_count != 0) {
2262                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2263                 return -EINVAL;
2264         }
2265
2266         /* blow away mappings if mapped through GTT */
2267         i915_gem_release_mmap(obj);
2268
2269         /* Move the object to the CPU domain to ensure that
2270          * any possible CPU writes while it's not in the GTT
2271          * are flushed when we go to remap it. This will
2272          * also ensure that all pending GPU writes are finished
2273          * before we unbind.
2274          */
2275         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2276         if (ret == -ERESTARTSYS)
2277                 return ret;
2278         /* Continue on if we fail due to EIO: the GPU is hung, so we
2279          * should be safe, and we need to clean up or else we might
2280          * cause memory corruption through use-after-free.
2281          */
2282         if (ret) {
2283                 i915_gem_clflush_object(obj);
2284                 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
2285         }
2286
2287         /* release the fence reg _after_ flushing */
2288         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2289                 i915_gem_clear_fence_reg(obj);
2290
2291         i915_gem_gtt_unbind_object(obj);
2292
2293         i915_gem_object_put_pages_gtt(obj);
2294
2295         i915_gem_info_remove_gtt(dev_priv, obj_priv);
2296         list_del_init(&obj_priv->mm_list);
2297         /* Avoid an unnecessary call to unbind on rebind. */
2298         obj_priv->map_and_fenceable = true;
2299
2300         drm_mm_put_block(obj_priv->gtt_space);
2301         obj_priv->gtt_space = NULL;
2302         obj_priv->gtt_offset = 0;
2303
2304         if (i915_gem_object_is_purgeable(obj_priv))
2305                 i915_gem_object_truncate(obj);
2306
2307         trace_i915_gem_object_unbind(obj);
2308
2309         return ret;
2310 }
2311
2312 static int i915_ring_idle(struct drm_device *dev,
2313                           struct intel_ring_buffer *ring)
2314 {
2315         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2316                 return 0;
2317
2318         i915_gem_flush_ring(dev, NULL, ring,
2319                             I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2320         return i915_wait_request(dev,
2321                                  i915_gem_next_request_seqno(dev, ring),
2322                                  ring);
2323 }
2324
2325 int
2326 i915_gpu_idle(struct drm_device *dev)
2327 {
2328         drm_i915_private_t *dev_priv = dev->dev_private;
2329         bool lists_empty;
2330         int ret;
2331
2332         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2333                        list_empty(&dev_priv->mm.active_list));
2334         if (lists_empty)
2335                 return 0;
2336
2337         /* Flush everything onto the inactive list. */
2338         ret = i915_ring_idle(dev, &dev_priv->render_ring);
2339         if (ret)
2340                 return ret;
2341
2342         ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2343         if (ret)
2344                 return ret;
2345
2346         ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2347         if (ret)
2348                 return ret;
2349
2350         return 0;
2351 }
2352
2353 static void sandybridge_write_fence_reg(struct drm_gem_object *obj)
2354 {
2355         struct drm_device *dev = obj->dev;
2356         drm_i915_private_t *dev_priv = dev->dev_private;
2357         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2358         u32 size = i915_gem_get_gtt_size(obj_priv);
2359         int regnum = obj_priv->fence_reg;
2360         uint64_t val;
2361
2362         val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
2363                     0xfffff000) << 32;
2364         val |= obj_priv->gtt_offset & 0xfffff000;
2365         val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2366                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2367
2368         if (obj_priv->tiling_mode == I915_TILING_Y)
2369                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2370         val |= I965_FENCE_REG_VALID;
2371
2372         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2373 }
2374
2375 static void i965_write_fence_reg(struct drm_gem_object *obj)
2376 {
2377         struct drm_device *dev = obj->dev;
2378         drm_i915_private_t *dev_priv = dev->dev_private;
2379         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2380         u32 size = i915_gem_get_gtt_size(obj_priv);
2381         int regnum = obj_priv->fence_reg;
2382         uint64_t val;
2383
2384         val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
2385                     0xfffff000) << 32;
2386         val |= obj_priv->gtt_offset & 0xfffff000;
2387         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2388         if (obj_priv->tiling_mode == I915_TILING_Y)
2389                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2390         val |= I965_FENCE_REG_VALID;
2391
2392         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2393 }
2394
2395 static void i915_write_fence_reg(struct drm_gem_object *obj)
2396 {
2397         struct drm_device *dev = obj->dev;
2398         drm_i915_private_t *dev_priv = dev->dev_private;
2399         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2400         u32 size = i915_gem_get_gtt_size(obj_priv);
2401         uint32_t fence_reg, val, pitch_val;
2402         int tile_width;
2403
2404         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2405             (obj_priv->gtt_offset & (size - 1))) {
2406                 WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
2407                      __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size,
2408                      obj_priv->gtt_space->start, obj_priv->gtt_space->size);
2409                 return;
2410         }
2411
2412         if (obj_priv->tiling_mode == I915_TILING_Y &&
2413             HAS_128_BYTE_Y_TILING(dev))
2414                 tile_width = 128;
2415         else
2416                 tile_width = 512;
2417
2418         /* Note: the pitch must be a power-of-two multiple of the tile width */
2419         pitch_val = obj_priv->stride / tile_width;
2420         pitch_val = ffs(pitch_val) - 1;
2421
2422         if (obj_priv->tiling_mode == I915_TILING_Y &&
2423             HAS_128_BYTE_Y_TILING(dev))
2424                 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2425         else
2426                 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2427
2428         val = obj_priv->gtt_offset;
2429         if (obj_priv->tiling_mode == I915_TILING_Y)
2430                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2431         val |= I915_FENCE_SIZE_BITS(size);
2432         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2433         val |= I830_FENCE_REG_VALID;
2434
2435         fence_reg = obj_priv->fence_reg;
2436         if (fence_reg < 8)
2437                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2438         else
2439                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2440         I915_WRITE(fence_reg, val);
2441 }
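
/*
 * Worked example (illustrative): an X-tiled object with a 2048-byte stride
 * uses 512-byte wide tiles here, so pitch_val = ffs(2048 / 512) - 1 = 2,
 * which is the value shifted in at I830_FENCE_PITCH_SHIFT above.
 */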
2442
2443 static void i830_write_fence_reg(struct drm_gem_object *obj)
2444 {
2445         struct drm_device *dev = obj->dev;
2446         drm_i915_private_t *dev_priv = dev->dev_private;
2447         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2448         u32 size = i915_gem_get_gtt_size(obj_priv);
2449         int regnum = obj_priv->fence_reg;
2450         uint32_t val;
2451         uint32_t pitch_val;
2452         uint32_t fence_size_bits;
2453
2454         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2455             (obj_priv->gtt_offset & (obj->size - 1))) {
2456                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2457                      __func__, obj_priv->gtt_offset);
2458                 return;
2459         }
2460
2461         pitch_val = obj_priv->stride / 128;
2462         pitch_val = ffs(pitch_val) - 1;
2463         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2464
2465         val = obj_priv->gtt_offset;
2466         if (obj_priv->tiling_mode == I915_TILING_Y)
2467                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2468         fence_size_bits = I830_FENCE_SIZE_BITS(size);
2469         WARN_ON(fence_size_bits & ~0x00000f00);
2470         val |= fence_size_bits;
2471         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2472         val |= I830_FENCE_REG_VALID;
2473
2474         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2475 }
2476
2477 static int i915_find_fence_reg(struct drm_device *dev,
2478                                bool interruptible)
2479 {
2480         struct drm_i915_private *dev_priv = dev->dev_private;
2481         struct drm_i915_fence_reg *reg;
2482         struct drm_i915_gem_object *obj_priv = NULL;
2483         int i, avail, ret;
2484
2485         /* First try to find a free reg */
2486         avail = 0;
2487         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2488                 reg = &dev_priv->fence_regs[i];
2489                 if (!reg->obj)
2490                         return i;
2491
2492                 obj_priv = to_intel_bo(reg->obj);
2493                 if (!obj_priv->pin_count)
2494                     avail++;
2495         }
2496
2497         if (avail == 0)
2498                 return -ENOSPC;
2499
2500         /* None available, try to steal one or wait for a user to finish */
2501         avail = I915_FENCE_REG_NONE;
2502         list_for_each_entry(reg, &dev_priv->mm.fence_list,
2503                             lru_list) {
2504                 obj_priv = to_intel_bo(reg->obj);
2505                 if (obj_priv->pin_count)
2506                         continue;
2507
2508                 /* found one! */
2509                 avail = obj_priv->fence_reg;
2510                 break;
2511         }
2512
2513         BUG_ON(avail == I915_FENCE_REG_NONE);
2514
2515         /* We only have a reference on obj from the active list. put_fence_reg
2516          * might drop that one, causing a use-after-free in it. So hold a
2517          * private reference to obj like the other callers of put_fence_reg
2518          * (set_tiling ioctl) do. */
2519         drm_gem_object_reference(&obj_priv->base);
2520         ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible);
2521         drm_gem_object_unreference(&obj_priv->base);
2522         if (ret != 0)
2523                 return ret;
2524
2525         return avail;
2526 }
2527
2528 /**
2529  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2530  * @obj: object to map through a fence reg
2531  *
2532  * When mapping objects through the GTT, userspace wants to be able to write
2533  * to them without having to worry about swizzling if the object is tiled.
2534  *
2535  * This function walks the fence regs looking for a free one for @obj,
2536  * stealing one if it can't find any.
2537  *
2538  * It then sets up the reg based on the object's properties: address, pitch
2539  * and tiling format.
2540  */
2541 int
2542 i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2543                               bool interruptible)
2544 {
2545         struct drm_device *dev = obj->dev;
2546         struct drm_i915_private *dev_priv = dev->dev_private;
2547         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2548         struct drm_i915_fence_reg *reg = NULL;
2549         int ret;
2550
2551         /* Just update our place in the LRU if our fence is getting used. */
2552         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2553                 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2554                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2555                 return 0;
2556         }
2557
2558         switch (obj_priv->tiling_mode) {
2559         case I915_TILING_NONE:
2560                 WARN(1, "allocating a fence for non-tiled object?\n");
2561                 break;
2562         case I915_TILING_X:
2563                 if (!obj_priv->stride)
2564                         return -EINVAL;
2565                 WARN((obj_priv->stride & (512 - 1)),
2566                      "object 0x%08x is X tiled but has non-512B pitch\n",
2567                      obj_priv->gtt_offset);
2568                 break;
2569         case I915_TILING_Y:
2570                 if (!obj_priv->stride)
2571                         return -EINVAL;
2572                 WARN((obj_priv->stride & (128 - 1)),
2573                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2574                      obj_priv->gtt_offset);
2575                 break;
2576         }
2577
2578         ret = i915_find_fence_reg(dev, interruptible);
2579         if (ret < 0)
2580                 return ret;
2581
2582         obj_priv->fence_reg = ret;
2583         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2584         list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2585
2586         reg->obj = obj;
2587
2588         switch (INTEL_INFO(dev)->gen) {
2589         case 6:
2590                 sandybridge_write_fence_reg(obj);
2591                 break;
2592         case 5:
2593         case 4:
2594                 i965_write_fence_reg(obj);
2595                 break;
2596         case 3:
2597                 i915_write_fence_reg(obj);
2598                 break;
2599         case 2:
2600                 i830_write_fence_reg(obj);
2601                 break;
2602         }
2603
2604         trace_i915_gem_object_get_fence(obj,
2605                                         obj_priv->fence_reg,
2606                                         obj_priv->tiling_mode);
2607
2608         return 0;
2609 }
2610
2611 /**
2612  * i915_gem_clear_fence_reg - clear out fence register info
2613  * @obj: object to clear
2614  *
2615  * Zeroes out the fence register itself and clears out the associated
2616  * data structures in dev_priv and obj_priv.
2617  */
2618 static void
2619 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2620 {
2621         struct drm_device *dev = obj->dev;
2622         drm_i915_private_t *dev_priv = dev->dev_private;
2623         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2624         struct drm_i915_fence_reg *reg =
2625                 &dev_priv->fence_regs[obj_priv->fence_reg];
2626         uint32_t fence_reg;
2627
2628         switch (INTEL_INFO(dev)->gen) {
2629         case 6:
2630                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2631                              (obj_priv->fence_reg * 8), 0);
2632                 break;
2633         case 5:
2634         case 4:
2635                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2636                 break;
2637         case 3:
2638         case 2:
2639                 if (INTEL_INFO(dev)->gen == 3 && obj_priv->fence_reg >= 8)
2640                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2641                 else
2642                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2643
2644                 I915_WRITE(fence_reg, 0);
2645                 break;
2646         }
2647
2648         reg->obj = NULL;
2649         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2650         list_del_init(&reg->lru_list);
2651 }
2652
2653 /**
2654  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2655  * to the buffer to finish, and then resets the fence register.
2656  * @obj: tiled object holding a fence register.
2657  * @interruptible: whether the wait upon the fence is interruptible
2658  *
2659  * Zeroes out the fence register itself and clears out the associated
2660  * data structures in dev_priv and obj_priv.
2661  */
2662 int
2663 i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2664                               bool interruptible)
2665 {
2666         struct drm_device *dev = obj->dev;
2667         struct drm_i915_private *dev_priv = dev->dev_private;
2668         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2669         struct drm_i915_fence_reg *reg;
2670
2671         if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2672                 return 0;
2673
2674         /* If we've changed tiling, GTT-mappings of the object
2675          * need to re-fault to ensure that the correct fence register
2676          * setup is in place.
2677          */
2678         i915_gem_release_mmap(obj);
2679
2680         /* On the i915, GPU access to tiled buffers is via a fence,
2681          * therefore we must wait for any outstanding access to complete
2682          * before clearing the fence.
2683          */
2684         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2685         if (reg->gpu) {
2686                 int ret;
2687
2688                 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2689                 if (ret)
2690                         return ret;
2691
2692                 ret = i915_gem_object_wait_rendering(obj, interruptible);
2693                 if (ret)
2694                         return ret;
2695
2696                 reg->gpu = false;
2697         }
2698
2699         i915_gem_object_flush_gtt_write_domain(obj);
2700         i915_gem_clear_fence_reg(obj);
2701
2702         return 0;
2703 }
2704
2705 /**
2706  * Finds free space in the GTT aperture and binds the object there.
2707  */
2708 static int
2709 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2710                             unsigned alignment,
2711                             bool map_and_fenceable)
2712 {
2713         struct drm_device *dev = obj->dev;
2714         drm_i915_private_t *dev_priv = dev->dev_private;
2715         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2716         struct drm_mm_node *free_space;
2717         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2718         u32 size, fence_size, fence_alignment, unfenced_alignment;
2719         bool mappable, fenceable;
2720         int ret;
2721
2722         if (obj_priv->madv != I915_MADV_WILLNEED) {
2723                 DRM_ERROR("Attempting to bind a purgeable object\n");
2724                 return -EINVAL;
2725         }
2726
2727         fence_size = i915_gem_get_gtt_size(obj_priv);
2728         fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
2729         unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv);
2730
2731         if (alignment == 0)
2732                 alignment = map_and_fenceable ? fence_alignment :
2733                                                 unfenced_alignment;
2734         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2735                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2736                 return -EINVAL;
2737         }
2738
2739         size = map_and_fenceable ? fence_size : obj->size;
2740
2741         /* If the object is bigger than the entire aperture, reject it early
2742          * before evicting everything in a vain attempt to find space.
2743          */
2744         if (obj->size >
2745             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2746                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2747                 return -E2BIG;
2748         }
2749
2750  search_free:
2751         if (map_and_fenceable)
2752                 free_space =
2753                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2754                                                     size, alignment, 0,
2755                                                     dev_priv->mm.gtt_mappable_end,
2756                                                     0);
2757         else
2758                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2759                                                 size, alignment, 0);
2760
2761         if (free_space != NULL) {
2762                 if (map_and_fenceable)
2763                         obj_priv->gtt_space =
2764                                 drm_mm_get_block_range_generic(free_space,
2765                                                                size, alignment, 0,
2766                                                                dev_priv->mm.gtt_mappable_end,
2767                                                                0);
2768                 else
2769                         obj_priv->gtt_space =
2770                                 drm_mm_get_block(free_space, size, alignment);
2771         }
2772         if (obj_priv->gtt_space == NULL) {
2773                 /* If the gtt is empty and we're still having trouble
2774                  * fitting our object in, we're out of memory.
2775                  */
2776                 ret = i915_gem_evict_something(dev, size, alignment,
2777                                                map_and_fenceable);
2778                 if (ret)
2779                         return ret;
2780
2781                 goto search_free;
2782         }
2783
2784         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2785         if (ret) {
2786                 drm_mm_put_block(obj_priv->gtt_space);
2787                 obj_priv->gtt_space = NULL;
2788
2789                 if (ret == -ENOMEM) {
2790                         /* first try to clear up some space from the GTT */
2791                         ret = i915_gem_evict_something(dev, size,
2792                                                        alignment,
2793                                                        map_and_fenceable);
2794                         if (ret) {
2795                                 /* now try to shrink everyone else */
2796                                 if (gfpmask) {
2797                                         gfpmask = 0;
2798                                         goto search_free;
2799                                 }
2800
2801                                 return ret;
2802                         }
2803
2804                         goto search_free;
2805                 }
2806
2807                 return ret;
2808         }
2809
2810         ret = i915_gem_gtt_bind_object(obj);
2811         if (ret) {
2812                 i915_gem_object_put_pages_gtt(obj);
2813                 drm_mm_put_block(obj_priv->gtt_space);
2814                 obj_priv->gtt_space = NULL;
2815
2816                 ret = i915_gem_evict_something(dev, size,
2817                                                alignment, map_and_fenceable);
2818                 if (ret)
2819                         return ret;
2820
2821                 goto search_free;
2822         }
2823
2824         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2825
2826         /* keep track of the bound object by adding it to the inactive list */
2827         list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
2828         i915_gem_info_add_gtt(dev_priv, obj_priv);
2829
2830         /* Assert that the object is not currently in any GPU domain. As it
2831          * wasn't in the GTT, there shouldn't be any way it could have been in
2832          * a GPU cache
2833          */
2834         BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2835         BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2836
2837         trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable);
2838
2839         fenceable =
2840                 obj_priv->gtt_space->size == fence_size &&
2841                 (obj_priv->gtt_space->start & (fence_alignment - 1)) == 0;
2842
2843         mappable =
2844                 obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
2845
2846         obj_priv->map_and_fenceable = mappable && fenceable;
2847
2848         return 0;
2849 }
2850
2851 void
2852 i915_gem_clflush_object(struct drm_gem_object *obj)
2853 {
2854         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
2855
2856         /* If we don't have a page list set up, then we're not pinned
2857          * to GPU, and we can ignore the cache flush because it'll happen
2858          * again at bind time.
2859          */
2860         if (obj_priv->pages == NULL)
2861                 return;
2862
2863         trace_i915_gem_object_clflush(obj);
2864
2865         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2866 }
2867
2868 /** Flushes any GPU write domain for the object if it's dirty. */
2869 static int
2870 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2871                                        bool pipelined)
2872 {
2873         struct drm_device *dev = obj->dev;
2874
2875         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2876                 return 0;
2877
2878         /* Queue the GPU write cache flushing we need. */
2879         i915_gem_flush_ring(dev, NULL,
2880                             to_intel_bo(obj)->ring,
2881                             0, obj->write_domain);
2882         BUG_ON(obj->write_domain);
2883
2884         if (pipelined)
2885                 return 0;
2886
2887         return i915_gem_object_wait_rendering(obj, true);
2888 }
2889
2890 /** Flushes the GTT write domain for the object if it's dirty. */
2891 static void
2892 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2893 {
2894         uint32_t old_write_domain;
2895
2896         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2897                 return;
2898
2899         /* No actual flushing is required for the GTT write domain.  Writes
2900          * to it immediately go to main memory as far as we know, so there's
2901          * no chipset flush.  Nor does the data land in the render cache.
2902          */
2903         i915_gem_release_mmap(obj);
2904
2905         old_write_domain = obj->write_domain;
2906         obj->write_domain = 0;
2907
2908         trace_i915_gem_object_change_domain(obj,
2909                                             obj->read_domains,
2910                                             old_write_domain);
2911 }
2912
2913 /** Flushes the CPU write domain for the object if it's dirty. */
2914 static void
2915 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2916 {
2917         uint32_t old_write_domain;
2918
2919         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2920                 return;
2921
2922         i915_gem_clflush_object(obj);
2923         intel_gtt_chipset_flush();
2924         old_write_domain = obj->write_domain;
2925         obj->write_domain = 0;
2926
2927         trace_i915_gem_object_change_domain(obj,
2928                                             obj->read_domains,
2929                                             old_write_domain);
2930 }
2931
2932 /**
2933  * Moves a single object to the GTT read, and possibly write domain.
2934  *
2935  * This function returns when the move is complete, including waiting on
2936  * flushes to occur.
2937  */
2938 int
2939 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2940 {
2941         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2942         uint32_t old_write_domain, old_read_domains;
2943         int ret;
2944
2945         /* Not valid to be called on unbound objects. */
2946         if (obj_priv->gtt_space == NULL)
2947                 return -EINVAL;
2948
2949         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
2950         if (ret != 0)
2951                 return ret;
2952
2953         i915_gem_object_flush_cpu_write_domain(obj);
2954
2955         if (write) {
2956                 ret = i915_gem_object_wait_rendering(obj, true);
2957                 if (ret)
2958                         return ret;
2959         }
2960
2961         old_write_domain = obj->write_domain;
2962         old_read_domains = obj->read_domains;
2963
2964         /* It should now be out of any other write domains, and we can update
2965          * the domain values for our changes.
2966          */
2967         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2968         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2969         if (write) {
2970                 obj->read_domains = I915_GEM_DOMAIN_GTT;
2971                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2972                 obj_priv->dirty = 1;
2973         }
2974
2975         trace_i915_gem_object_change_domain(obj,
2976                                             old_read_domains,
2977                                             old_write_domain);
2978
2979         return 0;
2980 }
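
/*
 * A minimal sketch of the usual caller pattern for the function above; the
 * helper name is hypothetical, but real users such as the pwrite and fault
 * paths follow the same order: pin the object into the GTT first, then
 * claim the GTT domain before touching it through the aperture.
 */
static int __maybe_unused
i915_gem_example_prepare_gtt_write(struct drm_gem_object *obj)
{
        int ret;

        /* Bind (and keep) the object in the mappable GTT. */
        ret = i915_gem_object_pin(obj, 0, true);
        if (ret)
                return ret;

        /* Flush other write domains and mark the object GTT-writable. */
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                i915_gem_object_unpin(obj);

        return ret;
}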
2981
2982 /*
2983  * Prepare the buffer for use by a display plane. Use an uninterruptible
2984  * wait for any flush, as the modesetting process must not be interrupted.
2985  */
2986 int
2987 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2988                                      bool pipelined)
2989 {
2990         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2991         uint32_t old_read_domains;
2992         int ret;
2993
2994         /* Not valid to be called on unbound objects. */
2995         if (obj_priv->gtt_space == NULL)
2996                 return -EINVAL;
2997
2998         ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2999         if (ret)
3000                 return ret;
3001
3002         /* Currently, we are always called from a non-interruptible context. */
3003         if (!pipelined) {
3004                 ret = i915_gem_object_wait_rendering(obj, false);
3005                 if (ret)
3006                         return ret;
3007         }
3008
3009         i915_gem_object_flush_cpu_write_domain(obj);
3010
3011         old_read_domains = obj->read_domains;
3012         obj->read_domains |= I915_GEM_DOMAIN_GTT;
3013
3014         trace_i915_gem_object_change_domain(obj,
3015                                             old_read_domains,
3016                                             obj->write_domain);
3017
3018         return 0;
3019 }
3020
3021 int
3022 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
3023                           bool interruptible)
3024 {
3025         if (!obj->active)
3026                 return 0;
3027
3028         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
3029                 i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
3030                                     0, obj->base.write_domain);
3031
3032         return i915_gem_object_wait_rendering(&obj->base, interruptible);
3033 }
3034
3035 /**
3036  * Moves a single object to the CPU read, and possibly write domain.
3037  *
3038  * This function returns when the move is complete, including waiting on
3039  * flushes to occur.
3040  */
3041 static int
3042 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
3043 {
3044         uint32_t old_write_domain, old_read_domains;
3045         int ret;
3046
3047         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
3048         if (ret != 0)
3049                 return ret;
3050
3051         i915_gem_object_flush_gtt_write_domain(obj);
3052
3053         /* If we have a partially-valid cache of the object in the CPU,
3054          * finish invalidating it and free the per-page flags.
3055          */
3056         i915_gem_object_set_to_full_cpu_read_domain(obj);
3057
3058         if (write) {
3059                 ret = i915_gem_object_wait_rendering(obj, true);
3060                 if (ret)
3061                         return ret;
3062         }
3063
3064         old_write_domain = obj->write_domain;
3065         old_read_domains = obj->read_domains;
3066
3067         /* Flush the CPU cache if it's still invalid. */
3068         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3069                 i915_gem_clflush_object(obj);
3070
3071                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3072         }
3073
3074         /* It should now be out of any other write domains, and we can update
3075          * the domain values for our changes.
3076          */
3077         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3078
3079         /* If we're writing through the CPU, then the GPU read domains will
3080          * need to be invalidated at next use.
3081          */
3082         if (write) {
3083                 obj->read_domains = I915_GEM_DOMAIN_CPU;
3084                 obj->write_domain = I915_GEM_DOMAIN_CPU;
3085         }
3086
3087         trace_i915_gem_object_change_domain(obj,
3088                                             old_read_domains,
3089                                             old_write_domain);
3090
3091         return 0;
3092 }
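
/*
 * In the (read_domains, write_domain) shorthand used by the comment below,
 * the domain-setting functions above behave as follows for an object
 * currently in (GTT, GTT): set_to_gtt_domain(obj, 0) leaves it in
 * (GTT, GTT), while set_to_cpu_domain(obj, 1) drops the GTT write domain
 * (releasing any GTT mmaps), clflushes the pages and leaves the object in
 * (CPU, CPU), so the GPU domains must be invalidated again on its next use.
 */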
3093
3094 /*
3095  * Set the next domain for the specified object. This
3096  * may not actually perform the necessary flushing/invalidating though,
3097  * as that may want to be batched with other set_domain operations.
3098  *
3099  * This is (we hope) the only really tricky part of gem. The goal
3100  * is fairly simple -- track which caches hold bits of the object
3101  * and make sure they remain coherent. A few concrete examples may
3102  * help to explain how it works. For shorthand, we use the notation
3103  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
3104  * a pair of read and write domain masks.
3105  *
3106  * Case 1: the batch buffer
3107  *
3108  *      1. Allocated
3109  *      2. Written by CPU
3110  *      3. Mapped to GTT
3111  *      4. Read by GPU
3112  *      5. Unmapped from GTT
3113  *      6. Freed
3114  *
3115  *      Let's take these a step at a time
3116  *
3117  *      1. Allocated
3118  *              Pages allocated from the kernel may still have
3119  *              cache contents, so we set them to (CPU, CPU) always.
3120  *      2. Written by CPU (using pwrite)
3121  *              The pwrite function calls set_domain (CPU, CPU) and
3122  *              this function does nothing (as nothing changes)
3123  *      3. Mapped to GTT
3124  *              This function asserts that the object is not
3125  *              currently in any GPU-based read or write domains
3126  *      4. Read by GPU
3127  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
3128  *              As write_domain is zero, this function adds in the
3129  *              current read domains (CPU+COMMAND, 0).
3130  *              flush_domains is set to CPU.
3131  *              invalidate_domains is set to COMMAND
3132  *              clflush is run to get data out of the CPU caches
3133  *              then i915_dev_set_domain calls i915_gem_flush to
3134  *              emit an MI_FLUSH and drm_agp_chipset_flush
3135  *      5. Unmapped from GTT
3136  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
3137  *              flush_domains and invalidate_domains end up both zero
3138  *              so no flushing/invalidating happens
3139  *      6. Freed
3140  *              yay, done
3141  *
3142  * Case 2: The shared render buffer
3143  *
3144  *      1. Allocated
3145  *      2. Mapped to GTT
3146  *      3. Read/written by GPU
3147  *      4. set_domain to (CPU,CPU)
3148  *      5. Read/written by CPU
3149  *      6. Read/written by GPU
3150  *
3151  *      1. Allocated
3152  *              Same as last example, (CPU, CPU)
3153  *      2. Mapped to GTT
3154  *              Nothing changes (assertions find that it is not in the GPU)
3155  *      3. Read/written by GPU
3156  *              execbuffer calls set_domain (RENDER, RENDER)
3157  *              flush_domains gets CPU
3158  *              invalidate_domains gets GPU
3159  *              clflush (obj)
3160  *              MI_FLUSH and drm_agp_chipset_flush
3161  *      4. set_domain (CPU, CPU)
3162  *              flush_domains gets GPU
3163  *              invalidate_domains gets CPU
3164  *              wait_rendering (obj) to make sure all drawing is complete.
3165  *              This will include an MI_FLUSH to get the data from GPU
3166  *              to memory
3167  *              clflush (obj) to invalidate the CPU cache
3168  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3169  *      5. Read/written by CPU
3170  *              cache lines are loaded and dirtied
3171  *      6. Read/written by GPU
3172  *              Same as last GPU access
3173  *
3174  * Case 3: The constant buffer
3175  *
3176  *      1. Allocated
3177  *      2. Written by CPU
3178  *      3. Read by GPU
3179  *      4. Updated (written) by CPU again
3180  *      5. Read by GPU
3181  *
3182  *      1. Allocated
3183  *              (CPU, CPU)
3184  *      2. Written by CPU
3185  *              (CPU, CPU)
3186  *      3. Read by GPU
3187  *              (CPU+RENDER, 0)
3188  *              flush_domains = CPU
3189  *              invalidate_domains = RENDER
3190  *              clflush (obj)
3191  *              MI_FLUSH
3192  *              drm_agp_chipset_flush
3193  *      4. Updated (written) by CPU again
3194  *              (CPU, CPU)
3195  *              flush_domains = 0 (no previous write domain)
3196  *              invalidate_domains = 0 (no new read domains)
3197  *      5. Read by GPU
3198  *              (CPU+RENDER, 0)
3199  *              flush_domains = CPU
3200  *              invalidate_domains = RENDER
3201  *              clflush (obj)
3202  *              MI_FLUSH
3203  *              drm_agp_chipset_flush
3204  */
3205 static void
3206 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3207                                   struct intel_ring_buffer *ring,
3208                                   struct change_domains *cd)
3209 {
3210         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
3211         uint32_t                        invalidate_domains = 0;
3212         uint32_t                        flush_domains = 0;
3213
3214         /*
3215          * If the object isn't moving to a new write domain,
3216          * let the object stay in multiple read domains
3217          */
3218         if (obj->pending_write_domain == 0)
3219                 obj->pending_read_domains |= obj->read_domains;
3220
3221         /*
3222          * Flush the current write domain if
3223          * the new read domains don't match. Invalidate
3224          * any read domains which differ from the old
3225          * write domain
3226          */
3227         if (obj->write_domain &&
3228             (obj->write_domain != obj->pending_read_domains ||
3229              obj_priv->ring != ring)) {
3230                 flush_domains |= obj->write_domain;
3231                 invalidate_domains |=
3232                         obj->pending_read_domains & ~obj->write_domain;
3233         }
3234         /*
3235          * Invalidate any read caches which may have
3236          * stale data. That is, any new read domains.
3237          */
3238         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3239         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
3240                 i915_gem_clflush_object(obj);
3241
3242         /* blow away mappings if mapped through GTT */
3243         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
3244                 i915_gem_release_mmap(obj);
3245
3246         /* The actual obj->write_domain will be updated with
3247          * pending_write_domain after we emit the accumulated flush for all
3248          * of our domain changes in execbuffers (which clears objects'
3249          * write_domains).  So if we have a current write domain that we
3250          * aren't changing, set pending_write_domain to that.
3251          */
3252         if (flush_domains == 0 && obj->pending_write_domain == 0)
3253                 obj->pending_write_domain = obj->write_domain;
3254
3255         cd->invalidate_domains |= invalidate_domains;
3256         cd->flush_domains |= flush_domains;
3257         if (flush_domains & I915_GEM_GPU_DOMAINS)
3258                 cd->flush_rings |= obj_priv->ring->id;
3259         if (invalidate_domains & I915_GEM_GPU_DOMAINS)
3260                 cd->flush_rings |= ring->id;
3261 }
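
/*
 * A worked instance of the function above, using the domain bit values
 * from i915_drm.h (CPU = 0x1, RENDER = 0x2): an object last written by the
 * CPU, i.e. (read_domains, write_domain) = (0x1, 0x1), is submitted with
 * pending_read_domains = RENDER and no pending write.  Since there is no
 * new write domain, the pending reads grow to CPU | RENDER = 0x3; the CPU
 * write domain no longer matches them, so flush_domains |= 0x1, and the
 * newly added RENDER read gives invalidate_domains |= 0x2.  CPU appears in
 * the combined mask, so the object is clflushed, and RENDER being a GPU
 * domain marks the target ring in cd->flush_rings.
 */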
3262
3263 /**
3264  * Moves the object from a partially valid CPU read domain to a fully valid one.
3265  *
3266  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3267  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3268  */
3269 static void
3270 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3271 {
3272         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3273
3274         if (!obj_priv->page_cpu_valid)
3275                 return;
3276
3277         /* If we're partially in the CPU read domain, finish moving it in.
3278          */
3279         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3280                 int i;
3281
3282                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3283                         if (obj_priv->page_cpu_valid[i])
3284                                 continue;
3285                         drm_clflush_pages(obj_priv->pages + i, 1);
3286                 }
3287         }
3288
3289         /* Free the page_cpu_valid mappings which are now stale, whether
3290          * or not we've got I915_GEM_DOMAIN_CPU.
3291          */
3292         kfree(obj_priv->page_cpu_valid);
3293         obj_priv->page_cpu_valid = NULL;
3294 }
3295
3296 /**
3297  * Set the CPU read domain on a range of the object.
3298  *
3299  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3300  * not entirely valid.  The page_cpu_valid member of the object flags which
3301  * pages have been flushed, and will be respected by
3302  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid
3303  * mapping of the whole object.
3304  *
3305  * This function returns when the move is complete, including waiting on
3306  * flushes to occur.
3307  */
3308 static int
3309 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3310                                           uint64_t offset, uint64_t size)
3311 {
3312         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3313         uint32_t old_read_domains;
3314         int i, ret;
3315
3316         if (offset == 0 && size == obj->size)
3317                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3318
3319         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
3320         if (ret != 0)
3321                 return ret;
3322         i915_gem_object_flush_gtt_write_domain(obj);
3323
3324         /* If we're already fully in the CPU read domain, we're done. */
3325         if (obj_priv->page_cpu_valid == NULL &&
3326             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3327                 return 0;
3328
3329         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3330          * newly adding I915_GEM_DOMAIN_CPU
3331          */
3332         if (obj_priv->page_cpu_valid == NULL) {
3333                 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3334                                                    GFP_KERNEL);
3335                 if (obj_priv->page_cpu_valid == NULL)
3336                         return -ENOMEM;
3337         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3338                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3339
3340         /* Flush the cache on any pages that are still invalid from the CPU's
3341          * perspective.
3342          */
3343         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3344              i++) {
3345                 if (obj_priv->page_cpu_valid[i])
3346                         continue;
3347
3348                 drm_clflush_pages(obj_priv->pages + i, 1);
3349
3350                 obj_priv->page_cpu_valid[i] = 1;
3351         }
3352
3353         /* It should now be out of any other write domains, and we can update
3354          * the domain values for our changes.
3355          */
3356         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3357
3358         old_read_domains = obj->read_domains;
3359         obj->read_domains |= I915_GEM_DOMAIN_CPU;
3360
3361         trace_i915_gem_object_change_domain(obj,
3362                                             old_read_domains,
3363                                             obj->write_domain);
3364
3365         return 0;
3366 }
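
/*
 * Page-range arithmetic used above, with illustrative numbers: for
 * offset = 5000 and size = 8192 on 4096-byte pages, the loop runs from
 * i = 5000 / 4096 = 1 up to (5000 + 8192 - 1) / 4096 = 3, so pages 1..3
 * are clflushed and marked valid in page_cpu_valid, while page 0 is left
 * untouched.
 */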
3367
3368 static int
3369 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
3370                                    struct drm_file *file_priv,
3371                                    struct drm_i915_gem_exec_object2 *entry,
3372                                    struct drm_i915_gem_relocation_entry *reloc)
3373 {
3374         struct drm_device *dev = obj->base.dev;
3375         struct drm_gem_object *target_obj;
3376         uint32_t target_offset;
3377         int ret = -EINVAL;
3378
3379         target_obj = drm_gem_object_lookup(dev, file_priv,
3380                                            reloc->target_handle);
3381         if (target_obj == NULL)
3382                 return -ENOENT;
3383
3384         target_offset = to_intel_bo(target_obj)->gtt_offset;
3385
3386 #if WATCH_RELOC
3387         DRM_INFO("%s: obj %p offset %08x target %d "
3388                  "read %08x write %08x gtt %08x "
3389                  "presumed %08x delta %08x\n",
3390                  __func__,
3391                  obj,
3392                  (int) reloc->offset,
3393                  (int) reloc->target_handle,
3394                  (int) reloc->read_domains,
3395                  (int) reloc->write_domain,
3396                  (int) target_offset,
3397                  (int) reloc->presumed_offset,
3398                  reloc->delta);
3399 #endif
3400
3401         /* The target buffer should have appeared before us in the
3402          * exec_object list, so it should have a GTT space bound by now.
3403          */
3404         if (target_offset == 0) {
3405                 DRM_ERROR("No GTT space found for object %d\n",
3406                           reloc->target_handle);
3407                 goto err;
3408         }
3409
3410         /* Validate that the target is in a valid r/w GPU domain */
3411         if (reloc->write_domain & (reloc->write_domain - 1)) {
3412                 DRM_ERROR("reloc with multiple write domains: "
3413                           "obj %p target %d offset %d "
3414                           "read %08x write %08x",
3415                           obj, reloc->target_handle,
3416                           (int) reloc->offset,
3417                           reloc->read_domains,
3418                           reloc->write_domain);
3419                 goto err;
3420         }
3421         if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3422             reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3423                 DRM_ERROR("reloc with read/write CPU domains: "
3424                           "obj %p target %d offset %d "
3425                           "read %08x write %08x",
3426                           obj, reloc->target_handle,
3427                           (int) reloc->offset,
3428                           reloc->read_domains,
3429                           reloc->write_domain);
3430                 goto err;
3431         }
3432         if (reloc->write_domain && target_obj->pending_write_domain &&
3433             reloc->write_domain != target_obj->pending_write_domain) {
3434                 DRM_ERROR("Write domain conflict: "
3435                           "obj %p target %d offset %d "
3436                           "new %08x old %08x\n",
3437                           obj, reloc->target_handle,
3438                           (int) reloc->offset,
3439                           reloc->write_domain,
3440                           target_obj->pending_write_domain);
3441                 goto err;
3442         }
3443
3444         target_obj->pending_read_domains |= reloc->read_domains;
3445         target_obj->pending_write_domain |= reloc->write_domain;
3446
3447         /* If the relocation already has the right value in it, no
3448          * more work needs to be done.
3449          */
3450         if (target_offset == reloc->presumed_offset)
3451                 goto out;
3452
3453         /* Check that the relocation address is valid... */
3454         if (reloc->offset > obj->base.size - 4) {
3455                 DRM_ERROR("Relocation beyond object bounds: "
3456                           "obj %p target %d offset %d size %d.\n",
3457                           obj, reloc->target_handle,
3458                           (int) reloc->offset,
3459                           (int) obj->base.size);
3460                 goto err;
3461         }
3462         if (reloc->offset & 3) {
3463                 DRM_ERROR("Relocation not 4-byte aligned: "
3464                           "obj %p target %d offset %d.\n",
3465                           obj, reloc->target_handle,
3466                           (int) reloc->offset);
3467                 goto err;
3468         }
3469
3470         /* and points to somewhere within the target object. */
3471         if (reloc->delta >= target_obj->size) {
3472                 DRM_ERROR("Relocation beyond target object bounds: "
3473                           "obj %p target %d delta %d size %d.\n",
3474                           obj, reloc->target_handle,
3475                           (int) reloc->delta,
3476                           (int) target_obj->size);
3477                 goto err;
3478         }
3479
3480         reloc->delta += target_offset;
3481         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3482                 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
3483                 char *vaddr;
3484
3485                 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
3486                 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
3487                 kunmap_atomic(vaddr);
3488         } else {
3489                 struct drm_i915_private *dev_priv = dev->dev_private;
3490                 uint32_t __iomem *reloc_entry;
3491                 void __iomem *reloc_page;
3492
3493                 ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
3494                 if (ret)
3495                         goto err;
3496
3497                 /* Map the page containing the relocation we're going to perform.  */
3498                 reloc->offset += obj->gtt_offset;
3499                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3500                                                       reloc->offset & PAGE_MASK);
3501                 reloc_entry = (uint32_t __iomem *)
3502                         (reloc_page + (reloc->offset & ~PAGE_MASK));
3503                 iowrite32(reloc->delta, reloc_entry);
3504                 io_mapping_unmap_atomic(reloc_page);
3505         }
3506
3507         /* and update the user's relocation entry */
3508         reloc->presumed_offset = target_offset;
3509
3510 out:
3511         ret = 0;
3512 err:
3513         drm_gem_object_unreference(target_obj);
3514         return ret;
3515 }
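
/*
 * A concrete (made-up) relocation as processed by the function above: the
 * batch object contains a dword at reloc->offset = 0x40 that must point
 * reloc->delta = 0x100 bytes into the target object.  Userspace presumed
 * the target at GTT offset 0x01000000, but it is actually bound at
 * 0x02000000, so the dword is rewritten to 0x02000000 + 0x100 = 0x02000100
 * (via kmap if the object is in the CPU write domain, otherwise through a
 * GTT mapping) and reloc->presumed_offset is updated so that an identical
 * future execbuffer can skip the rewrite.
 */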
3516
3517 static int
3518 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
3519                                     struct drm_file *file_priv,
3520                                     struct drm_i915_gem_exec_object2 *entry)
3521 {
3522         struct drm_i915_gem_relocation_entry __user *user_relocs;
3523         int i, ret;
3524
3525         user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
3526         for (i = 0; i < entry->relocation_count; i++) {
3527                 struct drm_i915_gem_relocation_entry reloc;
3528
3529                 if (__copy_from_user_inatomic(&reloc,
3530                                               user_relocs+i,
3531                                               sizeof(reloc)))
3532                         return -EFAULT;
3533
3534                 ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
3535                 if (ret)
3536                         return ret;
3537
3538                 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
3539                                             &reloc.presumed_offset,
3540                                             sizeof(reloc.presumed_offset)))
3541                         return -EFAULT;
3542         }
3543
3544         return 0;
3545 }
3546
3547 static int
3548 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
3549                                          struct drm_file *file_priv,
3550                                          struct drm_i915_gem_exec_object2 *entry,
3551                                          struct drm_i915_gem_relocation_entry *relocs)
3552 {
3553         int i, ret;
3554
3555         for (i = 0; i < entry->relocation_count; i++) {
3556                 ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
3557                 if (ret)
3558                         return ret;
3559         }
3560
3561         return 0;
3562 }
3563
3564 static int
3565 i915_gem_execbuffer_relocate(struct drm_device *dev,
3566                              struct drm_file *file,
3567                              struct drm_gem_object **object_list,
3568                              struct drm_i915_gem_exec_object2 *exec_list,
3569                              int count)
3570 {
3571         int i, ret;
3572
3573         for (i = 0; i < count; i++) {
3574                 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3575                 obj->base.pending_read_domains = 0;
3576                 obj->base.pending_write_domain = 0;
3577                 ret = i915_gem_execbuffer_relocate_object(obj, file,
3578                                                           &exec_list[i]);
3579                 if (ret)
3580                         return ret;
3581         }
3582
3583         return 0;
3584 }
3585
3586 static int
3587 i915_gem_execbuffer_reserve(struct drm_device *dev,
3588                             struct drm_file *file,
3589                             struct drm_gem_object **object_list,
3590                             struct drm_i915_gem_exec_object2 *exec_list,
3591                             int count)
3592 {
3593         struct drm_i915_private *dev_priv = dev->dev_private;
3594         int ret, i, retry;
3595
3596         /* attempt to pin all of the buffers into the GTT */
3597         retry = 0;
3598         do {
3599                 ret = 0;
3600                 for (i = 0; i < count; i++) {
3601                         struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
3602                         struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3603                         bool need_fence =
3604                                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3605                                 obj->tiling_mode != I915_TILING_NONE;
3606
3607                         /* g33/pnv can't fence buffers in the unmappable part */
3608                         bool need_mappable =
3609                                 entry->relocation_count ? true : need_fence;
3610
3611                         /* Check fence reg constraints and rebind if necessary */
3612                         if (need_mappable && !obj->map_and_fenceable) {
3613                                 ret = i915_gem_object_unbind(&obj->base);
3614                                 if (ret)
3615                                         break;
3616                         }
3617
3618                         ret = i915_gem_object_pin(&obj->base,
3619                                                   entry->alignment,
3620                                                   need_mappable);
3621                         if (ret)
3622                                 break;
3623
3624                         /*
3625                          * Pre-965 chips need a fence register set up in order
3626                          * to properly handle blits to/from tiled surfaces.
3627                          */
3628                         if (need_fence) {
3629                                 ret = i915_gem_object_get_fence_reg(&obj->base, true);
3630                                 if (ret) {
3631                                         i915_gem_object_unpin(&obj->base);
3632                                         break;
3633                                 }
3634
3635                                 dev_priv->fence_regs[obj->fence_reg].gpu = true;
3636                         }
3637
3638                         entry->offset = obj->gtt_offset;
3639                 }
3640
3641                 while (i--)
3642                         i915_gem_object_unpin(object_list[i]);
3643
3644                 if (ret != -ENOSPC || retry > 1)
3645                         return ret;
3646
3647                 /* First attempt, just clear anything that is purgeable.
3648                  * Second attempt, clear the entire GTT.
3649                  */
3650                 ret = i915_gem_evict_everything(dev, retry == 0);
3651                 if (ret)
3652                         return ret;
3653
3654                 retry++;
3655         } while (1);
3656 }
3657
3658 static int
3659 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
3660                                   struct drm_file *file,
3661                                   struct drm_gem_object **object_list,
3662                                   struct drm_i915_gem_exec_object2 *exec_list,
3663                                   int count)
3664 {
3665         struct drm_i915_gem_relocation_entry *reloc;
3666         int i, total, ret;
3667
3668         for (i = 0; i < count; i++) {
3669                 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3670                 obj->in_execbuffer = false;
3671         }
3672
3673         mutex_unlock(&dev->struct_mutex);
3674
3675         total = 0;
3676         for (i = 0; i < count; i++)
3677                 total += exec_list[i].relocation_count;
3678
3679         reloc = drm_malloc_ab(total, sizeof(*reloc));
3680         if (reloc == NULL) {
3681                 mutex_lock(&dev->struct_mutex);
3682                 return -ENOMEM;
3683         }
3684
3685         total = 0;
3686         for (i = 0; i < count; i++) {
3687                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3688
3689                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3690
3691                 if (copy_from_user(reloc+total, user_relocs,
3692                                    exec_list[i].relocation_count *
3693                                    sizeof(*reloc))) {
3694                         ret = -EFAULT;
3695                         mutex_lock(&dev->struct_mutex);
3696                         goto err;
3697                 }
3698
3699                 total += exec_list[i].relocation_count;
3700         }
3701
3702         ret = i915_mutex_lock_interruptible(dev);
3703         if (ret) {
3704                 mutex_lock(&dev->struct_mutex);
3705                 goto err;
3706         }
3707
3708         ret = i915_gem_execbuffer_reserve(dev, file,
3709                                           object_list, exec_list,
3710                                           count);
3711         if (ret)
3712                 goto err;
3713
3714         total = 0;
3715         for (i = 0; i < count; i++) {
3716                 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3717                 obj->base.pending_read_domains = 0;
3718                 obj->base.pending_write_domain = 0;
3719                 ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
3720                                                                &exec_list[i],
3721                                                                reloc + total);
3722                 if (ret)
3723                         goto err;
3724
3725                 total += exec_list[i].relocation_count;
3726         }
3727
3728         /* Leave the user relocations as they are; this is the painfully slow
3729          * path, and we want to avoid the complication of dropping the lock
3730          * whilst having buffers reserved in the aperture and so causing
3731          * spurious ENOSPC for random operations.
3732          */
3733
3734 err:
3735         drm_free_large(reloc);
3736         return ret;
3737 }
3738
3739 static int
3740 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3741                                 struct drm_file *file,
3742                                 struct intel_ring_buffer *ring,
3743                                 struct drm_gem_object **objects,
3744                                 int count)
3745 {
3746         struct change_domains cd;
3747         int ret, i;
3748
3749         cd.invalidate_domains = 0;
3750         cd.flush_domains = 0;
3751         cd.flush_rings = 0;
3752         for (i = 0; i < count; i++)
3753                 i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd);
3754
3755         if (cd.invalidate_domains | cd.flush_domains) {
3756 #if WATCH_EXEC
3757                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3758                           __func__,
3759                          cd.invalidate_domains,
3760                          cd.flush_domains);
3761 #endif
3762                 i915_gem_flush(dev, file,
3763                                cd.invalidate_domains,
3764                                cd.flush_domains,
3765                                cd.flush_rings);
3766         }
3767
3768         for (i = 0; i < count; i++) {
3769                 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3770                 /* XXX replace with semaphores */
3771                 if (obj->ring && ring != obj->ring) {
3772                         ret = i915_gem_object_wait_rendering(&obj->base, true);
3773                         if (ret)
3774                                 return ret;
3775                 }
3776         }
3777
3778         return 0;
3779 }
3780
3781 /* Throttle our rendering by waiting until the ring has completed our requests
3782  * emitted over 20 msec ago.
3783  *
3784  * Note that if we were to use the current jiffies each time around the loop,
3785  * we wouldn't escape the function with any frames outstanding if the time to
3786  * render a frame was over 20ms.
3787  *
3788  * This should get us reasonable parallelism between CPU and GPU but also
3789  * relatively low latency when blocking on a particular request to finish.
3790  */
3791 static int
3792 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3793 {
3794         struct drm_i915_private *dev_priv = dev->dev_private;
3795         struct drm_i915_file_private *file_priv = file->driver_priv;
3796         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3797         struct drm_i915_gem_request *request;
3798         struct intel_ring_buffer *ring = NULL;
3799         u32 seqno = 0;
3800         int ret;
3801
3802         spin_lock(&file_priv->mm.lock);
3803         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3804                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3805                         break;
3806
3807                 ring = request->ring;
3808                 seqno = request->seqno;
3809         }
3810         spin_unlock(&file_priv->mm.lock);
3811
3812         if (seqno == 0)
3813                 return 0;
3814
3815         ret = 0;
3816         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3817                 /* And wait for the seqno to pass without holding any locks and
3818                  * thus without causing extra latency for others. This is safe as
3819                  * the irq generation is designed to run atomically and so is
3820                  * lockless.
3821                  */
3822                 ring->user_irq_get(ring);
3823                 ret = wait_event_interruptible(ring->irq_queue,
3824                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
3825                                                || atomic_read(&dev_priv->mm.wedged));
3826                 ring->user_irq_put(ring);
3827
3828                 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3829                         ret = -EIO;
3830         }
3831
3832         if (ret == 0)
3833                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3834
3835         return ret;
3836 }
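
/*
 * Example of the throttle window above, assuming HZ = 1000 so one jiffy is
 * 1ms: at jiffies = 10000, recent_enough = 9980.  Requests this client
 * emitted at jiffies 9950 and 9970 fall outside the 20ms window, so the
 * loop records the ring and seqno of the 9970 request and waits for it,
 * while a request emitted at 9990 is recent enough not to be waited on.
 */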
3837
3838 static int
3839 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3840                           uint64_t exec_offset)
3841 {
3842         uint32_t exec_start, exec_len;
3843
3844         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3845         exec_len = (uint32_t) exec->batch_len;
3846
3847         if ((exec_start | exec_len) & 0x7)
3848                 return -EINVAL;
3849
3850         if (!exec_start)
3851                 return -EINVAL;
3852
3853         return 0;
3854 }
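
/*
 * Example of the batch sanity check above: with the batch object bound at
 * exec_offset = 0x100000, batch_start_offset = 0x10 and batch_len = 0x7f8,
 * exec_start = 0x100010 and (0x100010 | 0x7f8) & 0x7 == 0, so the batch is
 * accepted; a batch_start_offset of 0x12 would fail the 8-byte alignment
 * test, and an exec_start of 0 is rejected outright.
 */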
3855
3856 static int
3857 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3858                    int count)
3859 {
3860         int i;
3861
3862         for (i = 0; i < count; i++) {
3863                 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
3864                 int length; /* limited by fault_in_pages_readable() */
3865
3866                 /* First check for malicious input causing overflow */
3867                 if (exec[i].relocation_count >
3868                     INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
3869                         return -EINVAL;
3870
3871                 length = exec[i].relocation_count *
3872                         sizeof(struct drm_i915_gem_relocation_entry);
3873                 if (!access_ok(VERIFY_READ, ptr, length))
3874                         return -EFAULT;
3875
3876                 /* we may also need to update the presumed offsets */
3877                 if (!access_ok(VERIFY_WRITE, ptr, length))
3878                         return -EFAULT;
3879
3880                 if (fault_in_pages_readable(ptr, length))
3881                         return -EFAULT;
3882         }
3883
3884         return 0;
3885 }
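
/*
 * Example of the overflow guard above: each relocation entry is 32 bytes,
 * so INT_MAX / sizeof(struct drm_i915_gem_relocation_entry) is roughly
 * 67 million.  A malicious relocation_count of 0x08000000 (134 million)
 * would make "length" wrap a signed 32-bit int (134M * 32 = 4GiB), so it
 * is rejected before the access_ok()/fault_in checks ever see it.
 */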
3886
3887 static int
3888 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3889                        struct drm_file *file,
3890                        struct drm_i915_gem_execbuffer2 *args,
3891                        struct drm_i915_gem_exec_object2 *exec_list)
3892 {
3893         drm_i915_private_t *dev_priv = dev->dev_private;
3894         struct drm_gem_object **object_list = NULL;
3895         struct drm_gem_object *batch_obj;
3896         struct drm_clip_rect *cliprects = NULL;
3897         struct drm_i915_gem_request *request = NULL;
3898         int ret, i, flips;
3899         uint64_t exec_offset;
3900
3901         struct intel_ring_buffer *ring = NULL;
3902
3903         ret = i915_gem_check_is_wedged(dev);
3904         if (ret)
3905                 return ret;
3906
3907         ret = validate_exec_list(exec_list, args->buffer_count);
3908         if (ret)
3909                 return ret;
3910
3911 #if WATCH_EXEC
3912         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3913                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3914 #endif
3915         switch (args->flags & I915_EXEC_RING_MASK) {
3916         case I915_EXEC_DEFAULT:
3917         case I915_EXEC_RENDER:
3918                 ring = &dev_priv->render_ring;
3919                 break;
3920         case I915_EXEC_BSD:
3921                 if (!HAS_BSD(dev)) {
3922                         DRM_ERROR("execbuf with invalid ring (BSD)\n");
3923                         return -EINVAL;
3924                 }
3925                 ring = &dev_priv->bsd_ring;
3926                 break;
3927         case I915_EXEC_BLT:
3928                 if (!HAS_BLT(dev)) {
3929                         DRM_ERROR("execbuf with invalid ring (BLT)\n");
3930                         return -EINVAL;
3931                 }
3932                 ring = &dev_priv->blt_ring;
3933                 break;
3934         default:
3935                 DRM_ERROR("execbuf with unknown ring: %d\n",
3936                           (int)(args->flags & I915_EXEC_RING_MASK));
3937                 return -EINVAL;
3938         }
3939
3940         if (args->buffer_count < 1) {
3941                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3942                 return -EINVAL;
3943         }
3944         object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3945         if (object_list == NULL) {
3946                 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3947                           args->buffer_count);
3948                 ret = -ENOMEM;
3949                 goto pre_mutex_err;
3950         }
3951
3952         if (args->num_cliprects != 0) {
3953                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3954                                     GFP_KERNEL);
3955                 if (cliprects == NULL) {
3956                         ret = -ENOMEM;
3957                         goto pre_mutex_err;
3958                 }
3959
3960                 ret = copy_from_user(cliprects,
3961                                      (struct drm_clip_rect __user *)
3962                                      (uintptr_t) args->cliprects_ptr,
3963                                      sizeof(*cliprects) * args->num_cliprects);
3964                 if (ret != 0) {
3965                         DRM_ERROR("copy %d cliprects failed: %d\n",
3966                                   args->num_cliprects, ret);
3967                         ret = -EFAULT;
3968                         goto pre_mutex_err;
3969                 }
3970         }
3971
3972         request = kzalloc(sizeof(*request), GFP_KERNEL);
3973         if (request == NULL) {
3974                 ret = -ENOMEM;
3975                 goto pre_mutex_err;
3976         }
3977
3978         ret = i915_mutex_lock_interruptible(dev);
3979         if (ret)
3980                 goto pre_mutex_err;
3981
3982         if (dev_priv->mm.suspended) {
3983                 mutex_unlock(&dev->struct_mutex);
3984                 ret = -EBUSY;
3985                 goto pre_mutex_err;
3986         }
3987
3988         /* Look up object handles */
3989         for (i = 0; i < args->buffer_count; i++) {
3990                 struct drm_i915_gem_object *obj_priv;
3991
3992                 object_list[i] = drm_gem_object_lookup(dev, file,
3993                                                        exec_list[i].handle);
3994                 if (object_list[i] == NULL) {
3995                         DRM_ERROR("Invalid object handle %d at index %d\n",
3996                                    exec_list[i].handle, i);
3997                         /* prevent error path from reading uninitialized data */
3998                         args->buffer_count = i + 1;
3999                         ret = -ENOENT;
4000                         goto err;
4001                 }
4002
4003                 obj_priv = to_intel_bo(object_list[i]);
4004                 if (obj_priv->in_execbuffer) {
4005                         DRM_ERROR("Object %p appears more than once in object list\n",
4006                                    object_list[i]);
4007                         /* prevent error path from reading uninitialized data */
4008                         args->buffer_count = i + 1;
4009                         ret = -EINVAL;
4010                         goto err;
4011                 }
4012                 obj_priv->in_execbuffer = true;
4013         }
4014
4015         /* Move the objects en masse into the GTT, evicting if necessary. */
4016         ret = i915_gem_execbuffer_reserve(dev, file,
4017                                           object_list, exec_list,
4018                                           args->buffer_count);
4019         if (ret)
4020                 goto err;
4021
4022         /* The objects are in their final locations, apply the relocations. */
4023         ret = i915_gem_execbuffer_relocate(dev, file,
4024                                            object_list, exec_list,
4025                                            args->buffer_count);
4026         if (ret) {
4027                 if (ret == -EFAULT) {
4028                         ret = i915_gem_execbuffer_relocate_slow(dev, file,
4029                                                                 object_list,
4030                                                                 exec_list,
4031                                                                 args->buffer_count);
4032                         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
4033                 }
4034                 if (ret)
4035                         goto err;
4036         }
4037
4038         /* Set the pending read domains for the batch buffer to COMMAND */
4039         batch_obj = object_list[args->buffer_count-1];
4040         if (batch_obj->pending_write_domain) {
4041                 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
4042                 ret = -EINVAL;
4043                 goto err;
4044         }
4045         batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
4046
4047         /* Sanity check the batch buffer */
4048         exec_offset = to_intel_bo(batch_obj)->gtt_offset;
4049         ret = i915_gem_check_execbuffer(args, exec_offset);
4050         if (ret != 0) {
4051                 DRM_ERROR("execbuf with invalid offset/length\n");
4052                 goto err;
4053         }
4054
4055         ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
4056                                               object_list, args->buffer_count);
4057         if (ret)
4058                 goto err;
4059
4060 #if WATCH_COHERENCY
4061         for (i = 0; i < args->buffer_count; i++) {
4062                 i915_gem_object_check_coherency(object_list[i],
4063                                                 exec_list[i].handle);
4064         }
4065 #endif
4066
4067 #if WATCH_EXEC
4068         i915_gem_dump_object(batch_obj,
4069                               args->batch_len,
4070                               __func__,
4071                               ~0);
4072 #endif
4073
4074         /* Check for any pending flips. As we only maintain a flip queue depth
4075          * of 1, we can simply insert a WAIT for the next display flip prior
4076          * to executing the batch and avoid stalling the CPU.
4077          */
4078         flips = 0;
4079         for (i = 0; i < args->buffer_count; i++) {
4080                 if (object_list[i]->write_domain)
4081                         flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
4082         }
4083         if (flips) {
4084                 int plane, flip_mask;
4085
4086                 for (plane = 0; flips >> plane; plane++) {
4087                         if (((flips >> plane) & 1) == 0)
4088                                 continue;
4089
4090                         if (plane)
4091                                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
4092                         else
4093                                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
4094
4095                         ret = intel_ring_begin(ring, 2);
4096                         if (ret)
4097                                 goto err;
4098
4099                         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
4100                         intel_ring_emit(ring, MI_NOOP);
4101                         intel_ring_advance(ring);
4102                 }
4103         }
4104
4105         /* Exec the batchbuffer */
4106         ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
4107         if (ret) {
4108                 DRM_ERROR("dispatch failed %d\n", ret);
4109                 goto err;
4110         }
4111
4112         for (i = 0; i < args->buffer_count; i++) {
4113                 struct drm_gem_object *obj = object_list[i];
4114
4115                 obj->read_domains = obj->pending_read_domains;
4116                 obj->write_domain = obj->pending_write_domain;
4117
4118                 i915_gem_object_move_to_active(obj, ring);
4119                 if (obj->write_domain) {
4120                         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4121                         obj_priv->dirty = 1;
4122                         list_move_tail(&obj_priv->gpu_write_list,
4123                                        &ring->gpu_write_list);
4124                         intel_mark_busy(dev, obj);
4125                 }
4126
4127                 trace_i915_gem_object_change_domain(obj,
4128                                                     obj->read_domains,
4129                                                     obj->write_domain);
4130         }
4131
4132         /*
4133          * Ensure that the commands in the batch buffer are
4134          * finished before the interrupt fires
4135          */
4136         i915_retire_commands(dev, ring);
4137
4138         if (i915_add_request(dev, file, request, ring))
4139                 i915_gem_next_request_seqno(dev, ring);
4140         else
4141                 request = NULL;
4142
4143 err:
4144         for (i = 0; i < args->buffer_count; i++) {
4145                 if (object_list[i] == NULL)
4146                         break;
4147
4148                 to_intel_bo(object_list[i])->in_execbuffer = false;
4149                 drm_gem_object_unreference(object_list[i]);
4150         }
4151
4152         mutex_unlock(&dev->struct_mutex);
4153
4154 pre_mutex_err:
4155         drm_free_large(object_list);
4156         kfree(cliprects);
4157         kfree(request);
4158
4159         return ret;
4160 }
4161
4162 /*
4163  * Legacy execbuffer just creates an exec2 list from the original exec object
4164  * list array and passes it to the real function.
4165  */
4166 int
4167 i915_gem_execbuffer(struct drm_device *dev, void *data,
4168                     struct drm_file *file_priv)
4169 {
4170         struct drm_i915_gem_execbuffer *args = data;
4171         struct drm_i915_gem_execbuffer2 exec2;
4172         struct drm_i915_gem_exec_object *exec_list = NULL;
4173         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4174         int ret, i;
4175
4176 #if WATCH_EXEC
4177         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4178                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4179 #endif
4180
4181         if (args->buffer_count < 1) {
4182                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4183                 return -EINVAL;
4184         }
4185
4186         /* Copy in the exec list from userland */
4187         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4188         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4189         if (exec_list == NULL || exec2_list == NULL) {
4190                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4191                           args->buffer_count);
4192                 drm_free_large(exec_list);
4193                 drm_free_large(exec2_list);
4194                 return -ENOMEM;
4195         }
4196         ret = copy_from_user(exec_list,
4197                              (struct drm_i915_relocation_entry __user *)
4198                              (uintptr_t) args->buffers_ptr,
4199                              sizeof(*exec_list) * args->buffer_count);
4200         if (ret != 0) {
4201                 DRM_ERROR("copy %d exec entries failed %d\n",
4202                           args->buffer_count, ret);
4203                 drm_free_large(exec_list);
4204                 drm_free_large(exec2_list);
4205                 return -EFAULT;
4206         }
4207
4208         for (i = 0; i < args->buffer_count; i++) {
4209                 exec2_list[i].handle = exec_list[i].handle;
4210                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4211                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4212                 exec2_list[i].alignment = exec_list[i].alignment;
4213                 exec2_list[i].offset = exec_list[i].offset;
4214                 if (INTEL_INFO(dev)->gen < 4)
4215                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4216                 else
4217                         exec2_list[i].flags = 0;
4218         }
4219
4220         exec2.buffers_ptr = args->buffers_ptr;
4221         exec2.buffer_count = args->buffer_count;
4222         exec2.batch_start_offset = args->batch_start_offset;
4223         exec2.batch_len = args->batch_len;
4224         exec2.DR1 = args->DR1;
4225         exec2.DR4 = args->DR4;
4226         exec2.num_cliprects = args->num_cliprects;
4227         exec2.cliprects_ptr = args->cliprects_ptr;
4228         exec2.flags = I915_EXEC_RENDER;
4229
4230         ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4231         if (!ret) {
4232                 /* Copy the new buffer offsets back to the user's exec list. */
4233                 for (i = 0; i < args->buffer_count; i++)
4234                         exec_list[i].offset = exec2_list[i].offset;
4235                 /* ... and back out to userspace */
4236                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4237                                    (uintptr_t) args->buffers_ptr,
4238                                    exec_list,
4239                                    sizeof(*exec_list) * args->buffer_count);
4240                 if (ret) {
4241                         ret = -EFAULT;
4242                         DRM_ERROR("failed to copy %d exec entries "
4243                                   "back to user (%d)\n",
4244                                   args->buffer_count, ret);
4245                 }
4246         }
4247
4248         drm_free_large(exec_list);
4249         drm_free_large(exec2_list);
4250         return ret;
4251 }
4252
4253 int
4254 i915_gem_execbuffer2(struct drm_device *dev, void *data,
4255                      struct drm_file *file_priv)
4256 {
4257         struct drm_i915_gem_execbuffer2 *args = data;
4258         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4259         int ret;
4260
4261 #if WATCH_EXEC
4262         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4263                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4264 #endif
4265
4266         if (args->buffer_count < 1) {
4267                 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4268                 return -EINVAL;
4269         }
4270
4271         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4272         if (exec2_list == NULL) {
4273                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4274                           args->buffer_count);
4275                 return -ENOMEM;
4276         }
4277         ret = copy_from_user(exec2_list,
4278                              (struct drm_i915_gem_exec_object2 __user *)
4279                              (uintptr_t) args->buffers_ptr,
4280                              sizeof(*exec2_list) * args->buffer_count);
4281         if (ret != 0) {
4282                 DRM_ERROR("copy %d exec entries failed %d\n",
4283                           args->buffer_count, ret);
4284                 drm_free_large(exec2_list);
4285                 return -EFAULT;
4286         }
4287
4288         ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4289         if (!ret) {
4290                 /* Copy the new buffer offsets back to the user's exec list. */
4291                 ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
4292                                    (uintptr_t) args->buffers_ptr,
4293                                    exec2_list,
4294                                    sizeof(*exec2_list) * args->buffer_count);
4295                 if (ret) {
4296                         ret = -EFAULT;
4297                         DRM_ERROR("failed to copy %d exec entries "
4298                                   "back to user (%d)\n",
4299                                   args->buffer_count, ret);
4300                 }
4301         }
4302
4303         drm_free_large(exec2_list);
4304         return ret;
4305 }
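
/*
 * Illustrative sketch, not part of the driver: roughly how userspace is
 * expected to drive the execbuffer2 ioctl handled above.  Assumes libdrm's
 * drmIoctl() and the structures from i915_drm.h; "fd", "target_handle",
 * "batch_handle", "relocs", "nrelocs" and "batch_bytes" are hypothetical,
 * and error handling is omitted.  Note that the batch buffer must be the
 * last entry in the object list.
 *
 *	struct drm_i915_gem_exec_object2 objs[2] = {
 *		{ .handle = target_handle },
 *		{ .handle = batch_handle,
 *		  .relocation_count = nrelocs,
 *		  .relocs_ptr = (uintptr_t)relocs },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr  = (uintptr_t)objs,
 *		.buffer_count = 2,
 *		.batch_len    = batch_bytes,
 *		.flags        = I915_EXEC_RENDER,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) == 0)
 *		each objs[i].offset now reports that buffer's GTT offset
 */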
4306
4307 int
4308 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
4309                     bool map_and_fenceable)
4310 {
4311         struct drm_device *dev = obj->dev;
4312         struct drm_i915_private *dev_priv = dev->dev_private;
4313         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4314         int ret;
4315
4316         BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4317         BUG_ON(map_and_fenceable && !map_and_fenceable);
4318         WARN_ON(i915_verify_lists(dev));
4319
4320         if (obj_priv->gtt_space != NULL) {
4321                 if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
4322                     (map_and_fenceable && !obj_priv->map_and_fenceable)) {
4323                         WARN(obj_priv->pin_count,
4324                              "bo is already pinned with incorrect alignment:"
4325                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
4326                              " obj->map_and_fenceable=%d\n",
4327                              obj_priv->gtt_offset, alignment,
4328                              map_and_fenceable,
4329                              obj_priv->map_and_fenceable);
4330                         ret = i915_gem_object_unbind(obj);
4331                         if (ret)
4332                                 return ret;
4333                 }
4334         }
4335
4336         if (obj_priv->gtt_space == NULL) {
4337                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
4338                                                   map_and_fenceable);
4339                 if (ret)
4340                         return ret;
4341         }
4342
4343         if (obj_priv->pin_count++ == 0) {
4344                 i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable);
4345                 if (!obj_priv->active)
4346                         list_move_tail(&obj_priv->mm_list,
4347                                        &dev_priv->mm.pinned_list);
4348         }
4349         BUG_ON(!obj_priv->pin_mappable && map_and_fenceable);
4350
4351         WARN_ON(i915_verify_lists(dev));
4352         return 0;
4353 }
4354
4355 void
4356 i915_gem_object_unpin(struct drm_gem_object *obj)
4357 {
4358         struct drm_device *dev = obj->dev;
4359         drm_i915_private_t *dev_priv = dev->dev_private;
4360         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4361
4362         WARN_ON(i915_verify_lists(dev));
4363         BUG_ON(obj_priv->pin_count == 0);
4364         BUG_ON(obj_priv->gtt_space == NULL);
4365
4366         if (--obj_priv->pin_count == 0) {
4367                 if (!obj_priv->active)
4368                         list_move_tail(&obj_priv->mm_list,
4369                                        &dev_priv->mm.inactive_list);
4370                 i915_gem_info_remove_pin(dev_priv, obj_priv);
4371         }
4372         WARN_ON(i915_verify_lists(dev));
4373 }
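
/*
 * Illustrative sketch, not part of the driver: the usual in-kernel pairing
 * of the two helpers above.  "obj" is a hypothetical object created with
 * i915_gem_alloc_object(); struct_mutex must already be held and error
 * handling is abbreviated.
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret)
 *		return ret;
 *
 *	gtt_offset = to_intel_bo(obj)->gtt_offset;
 *	... program the hardware with gtt_offset; it stays valid while pinned ...
 *
 *	i915_gem_object_unpin(obj);
 */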
4374
4375 int
4376 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4377                    struct drm_file *file_priv)
4378 {
4379         struct drm_i915_gem_pin *args = data;
4380         struct drm_gem_object *obj;
4381         struct drm_i915_gem_object *obj_priv;
4382         int ret;
4383
4384         ret = i915_mutex_lock_interruptible(dev);
4385         if (ret)
4386                 return ret;
4387
4388         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4389         if (obj == NULL) {
4390                 ret = -ENOENT;
4391                 goto unlock;
4392         }
4393         obj_priv = to_intel_bo(obj);
4394
4395         if (obj_priv->madv != I915_MADV_WILLNEED) {
4396                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4397                 ret = -EINVAL;
4398                 goto out;
4399         }
4400
4401         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4402                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4403                           args->handle);
4404                 ret = -EINVAL;
4405                 goto out;
4406         }
4407
4408         obj_priv->user_pin_count++;
4409         obj_priv->pin_filp = file_priv;
4410         if (obj_priv->user_pin_count == 1) {
4411                 ret = i915_gem_object_pin(obj, args->alignment, true);
4412                 if (ret)
4413                         goto out;
4414         }
4415
4416         /* XXX - flush the CPU caches for pinned objects
4417          * as the X server doesn't manage domains yet
4418          */
4419         i915_gem_object_flush_cpu_write_domain(obj);
4420         args->offset = obj_priv->gtt_offset;
4421 out:
4422         drm_gem_object_unreference(obj);
4423 unlock:
4424         mutex_unlock(&dev->struct_mutex);
4425         return ret;
4426 }
4427
4428 int
4429 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4430                      struct drm_file *file_priv)
4431 {
4432         struct drm_i915_gem_pin *args = data;
4433         struct drm_gem_object *obj;
4434         struct drm_i915_gem_object *obj_priv;
4435         int ret;
4436
4437         ret = i915_mutex_lock_interruptible(dev);
4438         if (ret)
4439                 return ret;
4440
4441         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4442         if (obj == NULL) {
4443                 ret = -ENOENT;
4444                 goto unlock;
4445         }
4446         obj_priv = to_intel_bo(obj);
4447
4448         if (obj_priv->pin_filp != file_priv) {
4449                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
4450                           args->handle);
4451                 ret = -EINVAL;
4452                 goto out;
4453         }
4454         obj_priv->user_pin_count--;
4455         if (obj_priv->user_pin_count == 0) {
4456                 obj_priv->pin_filp = NULL;
4457                 i915_gem_object_unpin(obj);
4458         }
4459
4460 out:
4461         drm_gem_object_unreference(obj);
4462 unlock:
4463         mutex_unlock(&dev->struct_mutex);
4464         return ret;
4465 }
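
/*
 * Illustrative sketch, not part of the driver: how a privileged client such
 * as the X server might use the pin/unpin ioctls above to obtain a fixed
 * GTT offset (e.g. for a scanout or cursor buffer).  Assumes libdrm's
 * drmIoctl() and i915_drm.h; "fd" and "handle" are hypothetical and error
 * handling is omitted.
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
 *	struct drm_i915_gem_unpin unpin = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0) {
 *		... program the hardware with the stable offset in pin.offset ...
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
 *	}
 */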
4466
4467 int
4468 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4469                     struct drm_file *file_priv)
4470 {
4471         struct drm_i915_gem_busy *args = data;
4472         struct drm_gem_object *obj;
4473         struct drm_i915_gem_object *obj_priv;
4474         int ret;
4475
4476         ret = i915_mutex_lock_interruptible(dev);
4477         if (ret)
4478                 return ret;
4479
4480         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4481         if (obj == NULL) {
4482                 ret = -ENOENT;
4483                 goto unlock;
4484         }
4485         obj_priv = to_intel_bo(obj);
4486
4487         /* Count all active objects as busy, even if they are currently not used
4488          * by the gpu. Users of this interface expect objects to eventually
4489          * become non-busy without any further actions, therefore emit any
4490          * necessary flushes here.
4491          */
4492         args->busy = obj_priv->active;
4493         if (args->busy) {
4494                 /* Unconditionally flush objects, even when the gpu still uses this
4495                  * object. Userspace calling this function indicates that it wants to
4496                  * use this buffer sooner rather than later, so issuing the required
4497                  * flush earlier is beneficial.
4498                  */
4499                 if (obj->write_domain & I915_GEM_GPU_DOMAINS)
4500                         i915_gem_flush_ring(dev, file_priv,
4501                                             obj_priv->ring,
4502                                             0, obj->write_domain);
4503
4504                 /* Update the active list for the hardware's current position.
4505                  * Otherwise this only updates on a delayed timer or when irqs
4506                  * are actually unmasked, and our working set ends up being
4507                  * larger than required.
4508                  */
4509                 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4510
4511                 args->busy = obj_priv->active;
4512         }
4513
4514         drm_gem_object_unreference(obj);
4515 unlock:
4516         mutex_unlock(&dev->struct_mutex);
4517         return ret;
4518 }
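
/*
 * Illustrative sketch, not part of the driver: polling the busy ioctl above
 * from userspace to decide whether a buffer can be reused without stalling.
 * Assumes libdrm's drmIoctl() and i915_drm.h; "fd" and "handle" are
 * hypothetical and error handling is omitted.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy)
 *		the GPU is done with the buffer - it is safe to reuse now
 */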
4519
4520 int
4521 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4522                         struct drm_file *file_priv)
4523 {
4524         return i915_gem_ring_throttle(dev, file_priv);
4525 }
4526
4527 int
4528 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4529                        struct drm_file *file_priv)
4530 {
4531         struct drm_i915_gem_madvise *args = data;
4532         struct drm_gem_object *obj;
4533         struct drm_i915_gem_object *obj_priv;
4534         int ret;
4535
4536         switch (args->madv) {
4537         case I915_MADV_DONTNEED:
4538         case I915_MADV_WILLNEED:
4539                 break;
4540         default:
4541                 return -EINVAL;
4542         }
4543
4544         ret = i915_mutex_lock_interruptible(dev);
4545         if (ret)
4546                 return ret;
4547
4548         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4549         if (obj == NULL) {
4550                 ret = -ENOENT;
4551                 goto unlock;
4552         }
4553         obj_priv = to_intel_bo(obj);
4554
4555         if (obj_priv->pin_count) {
4556                 ret = -EINVAL;
4557                 goto out;
4558         }
4559
4560         if (obj_priv->madv != __I915_MADV_PURGED)
4561                 obj_priv->madv = args->madv;
4562
4563         /* if the object is no longer bound, discard its backing storage */
4564         if (i915_gem_object_is_purgeable(obj_priv) &&
4565             obj_priv->gtt_space == NULL)
4566                 i915_gem_object_truncate(obj);
4567
4568         args->retained = obj_priv->madv != __I915_MADV_PURGED;
4569
4570 out:
4571         drm_gem_object_unreference(obj);
4572 unlock:
4573         mutex_unlock(&dev->struct_mutex);
4574         return ret;
4575 }
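
/*
 * Illustrative sketch, not part of the driver: the userspace buffer-cache
 * pattern the madvise ioctl above is designed for.  Assumes libdrm's
 * drmIoctl() and i915_drm.h; "fd" and "handle" are hypothetical and error
 * handling is omitted.
 *
 *	when putting an idle buffer into a userspace cache:
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv   = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	when taking it back out of the cache:
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		the backing pages were purged - treat the contents as lost
 */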
4576
4577 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4578                                               size_t size)
4579 {
4580         struct drm_i915_private *dev_priv = dev->dev_private;
4581         struct drm_i915_gem_object *obj;
4582
4583         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4584         if (obj == NULL)
4585                 return NULL;
4586
4587         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4588                 kfree(obj);
4589                 return NULL;
4590         }
4591
4592         i915_gem_info_add_obj(dev_priv, size);
4593
4594         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4595         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4596
4597         obj->agp_type = AGP_USER_MEMORY;
4598         obj->base.driver_private = NULL;
4599         obj->fence_reg = I915_FENCE_REG_NONE;
4600         INIT_LIST_HEAD(&obj->mm_list);
4601         INIT_LIST_HEAD(&obj->gtt_list);
4602         INIT_LIST_HEAD(&obj->ring_list);
4603         INIT_LIST_HEAD(&obj->gpu_write_list);
4604         obj->madv = I915_MADV_WILLNEED;
4605         /* Avoid an unnecessary call to unbind on the first bind. */
4606         obj->map_and_fenceable = true;
4607
4608         return &obj->base;
4609 }
4610
4611 int i915_gem_init_object(struct drm_gem_object *obj)
4612 {
4613         BUG();
4614
4615         return 0;
4616 }
4617
4618 static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4619 {
4620         struct drm_device *dev = obj->dev;
4621         drm_i915_private_t *dev_priv = dev->dev_private;
4622         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4623         int ret;
4624
4625         ret = i915_gem_object_unbind(obj);
4626         if (ret == -ERESTARTSYS) {
4627                 list_move(&obj_priv->mm_list,
4628                           &dev_priv->mm.deferred_free_list);
4629                 return;
4630         }
4631
4632         if (obj->map_list.map)
4633                 i915_gem_free_mmap_offset(obj);
4634
4635         drm_gem_object_release(obj);
4636         i915_gem_info_remove_obj(dev_priv, obj->size);
4637
4638         kfree(obj_priv->page_cpu_valid);
4639         kfree(obj_priv->bit_17);
4640         kfree(obj_priv);
4641 }
4642
4643 void i915_gem_free_object(struct drm_gem_object *obj)
4644 {
4645         struct drm_device *dev = obj->dev;
4646         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4647
4648         trace_i915_gem_object_destroy(obj);
4649
4650         while (obj_priv->pin_count > 0)
4651                 i915_gem_object_unpin(obj);
4652
4653         if (obj_priv->phys_obj)
4654                 i915_gem_detach_phys_object(dev, obj);
4655
4656         i915_gem_free_object_tail(obj);
4657 }
4658
4659 int
4660 i915_gem_idle(struct drm_device *dev)
4661 {
4662         drm_i915_private_t *dev_priv = dev->dev_private;
4663         int ret;
4664
4665         mutex_lock(&dev->struct_mutex);
4666
4667         if (dev_priv->mm.suspended) {
4668                 mutex_unlock(&dev->struct_mutex);
4669                 return 0;
4670         }
4671
4672         ret = i915_gpu_idle(dev);
4673         if (ret) {
4674                 mutex_unlock(&dev->struct_mutex);
4675                 return ret;
4676         }
4677
4678         /* Under UMS, be paranoid and evict. */
4679         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4680                 ret = i915_gem_evict_inactive(dev, false);
4681                 if (ret) {
4682                         mutex_unlock(&dev->struct_mutex);
4683                         return ret;
4684                 }
4685         }
4686
4687         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4688          * We need to replace this with a semaphore, or something.
4689          * And not confound mm.suspended!
4690          */
4691         dev_priv->mm.suspended = 1;
4692         del_timer_sync(&dev_priv->hangcheck_timer);
4693
4694         i915_kernel_lost_context(dev);
4695         i915_gem_cleanup_ringbuffer(dev);
4696
4697         mutex_unlock(&dev->struct_mutex);
4698
4699         /* Cancel the retire work handler, which should be idle now. */
4700         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4701
4702         return 0;
4703 }
4704
4705 /*
4706  * 965+ support PIPE_CONTROL commands, which provide finer grained control
4707  * over cache flushing.
4708  */
4709 static int
4710 i915_gem_init_pipe_control(struct drm_device *dev)
4711 {
4712         drm_i915_private_t *dev_priv = dev->dev_private;
4713         struct drm_gem_object *obj;
4714         struct drm_i915_gem_object *obj_priv;
4715         int ret;
4716
4717         obj = i915_gem_alloc_object(dev, 4096);
4718         if (obj == NULL) {
4719                 DRM_ERROR("Failed to allocate seqno page\n");
4720                 ret = -ENOMEM;
4721                 goto err;
4722         }
4723         obj_priv = to_intel_bo(obj);
4724         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4725
4726         ret = i915_gem_object_pin(obj, 4096, true);
4727         if (ret)
4728                 goto err_unref;
4729
4730         dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4731         dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4732         if (dev_priv->seqno_page == NULL) {
4733                 ret = -ENOMEM;
4734                 goto err_unpin;
4735         }
4734
4735         dev_priv->seqno_obj = obj;
4736         memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4737
4738         return 0;
4739
4740 err_unpin:
4741         i915_gem_object_unpin(obj);
4742 err_unref:
4743         drm_gem_object_unreference(obj);
4744 err:
4745         return ret;
4746 }
4747
4748
4749 static void
4750 i915_gem_cleanup_pipe_control(struct drm_device *dev)
4751 {
4752         drm_i915_private_t *dev_priv = dev->dev_private;
4753         struct drm_gem_object *obj;
4754         struct drm_i915_gem_object *obj_priv;
4755
4756         obj = dev_priv->seqno_obj;
4757         obj_priv = to_intel_bo(obj);
4758         kunmap(obj_priv->pages[0]);
4759         i915_gem_object_unpin(obj);
4760         drm_gem_object_unreference(obj);
4761         dev_priv->seqno_obj = NULL;
4762
4763         dev_priv->seqno_page = NULL;
4764 }
4765
4766 int
4767 i915_gem_init_ringbuffer(struct drm_device *dev)
4768 {
4769         drm_i915_private_t *dev_priv = dev->dev_private;
4770         int ret;
4771
4772         if (HAS_PIPE_CONTROL(dev)) {
4773                 ret = i915_gem_init_pipe_control(dev);
4774                 if (ret)
4775                         return ret;
4776         }
4777
4778         ret = intel_init_render_ring_buffer(dev);
4779         if (ret)
4780                 goto cleanup_pipe_control;
4781
4782         if (HAS_BSD(dev)) {
4783                 ret = intel_init_bsd_ring_buffer(dev);
4784                 if (ret)
4785                         goto cleanup_render_ring;
4786         }
4787
4788         if (HAS_BLT(dev)) {
4789                 ret = intel_init_blt_ring_buffer(dev);
4790                 if (ret)
4791                         goto cleanup_bsd_ring;
4792         }
4793
4794         dev_priv->next_seqno = 1;
4795
4796         return 0;
4797
4798 cleanup_bsd_ring:
4799         intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
4800 cleanup_render_ring:
4801         intel_cleanup_ring_buffer(&dev_priv->render_ring);
4802 cleanup_pipe_control:
4803         if (HAS_PIPE_CONTROL(dev))
4804                 i915_gem_cleanup_pipe_control(dev);
4805         return ret;
4806 }
4807
4808 void
4809 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4810 {
4811         drm_i915_private_t *dev_priv = dev->dev_private;
4812
4813         intel_cleanup_ring_buffer(&dev_priv->render_ring);
4814         intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
4815         intel_cleanup_ring_buffer(&dev_priv->blt_ring);
4816         if (HAS_PIPE_CONTROL(dev))
4817                 i915_gem_cleanup_pipe_control(dev);
4818 }
4819
4820 int
4821 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4822                        struct drm_file *file_priv)
4823 {
4824         drm_i915_private_t *dev_priv = dev->dev_private;
4825         int ret;
4826
4827         if (drm_core_check_feature(dev, DRIVER_MODESET))
4828                 return 0;
4829
4830         if (atomic_read(&dev_priv->mm.wedged)) {
4831                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4832                 atomic_set(&dev_priv->mm.wedged, 0);
4833         }
4834
4835         mutex_lock(&dev->struct_mutex);
4836         dev_priv->mm.suspended = 0;
4837
4838         ret = i915_gem_init_ringbuffer(dev);
4839         if (ret != 0) {
4840                 mutex_unlock(&dev->struct_mutex);
4841                 return ret;
4842         }
4843
4844         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4845         BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4846         BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
4847         BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
4848         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4849         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4850         BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4851         BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
4852         BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
4853         mutex_unlock(&dev->struct_mutex);
4854
4855         ret = drm_irq_install(dev);
4856         if (ret)
4857                 goto cleanup_ringbuffer;
4858
4859         return 0;
4860
4861 cleanup_ringbuffer:
4862         mutex_lock(&dev->struct_mutex);
4863         i915_gem_cleanup_ringbuffer(dev);
4864         dev_priv->mm.suspended = 1;
4865         mutex_unlock(&dev->struct_mutex);
4866
4867         return ret;
4868 }
4869
4870 int
4871 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4872                        struct drm_file *file_priv)
4873 {
4874         if (drm_core_check_feature(dev, DRIVER_MODESET))
4875                 return 0;
4876
4877         drm_irq_uninstall(dev);
4878         return i915_gem_idle(dev);
4879 }
4880
4881 void
4882 i915_gem_lastclose(struct drm_device *dev)
4883 {
4884         int ret;
4885
4886         if (drm_core_check_feature(dev, DRIVER_MODESET))
4887                 return;
4888
4889         ret = i915_gem_idle(dev);
4890         if (ret)
4891                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4892 }
4893
4894 static void
4895 init_ring_lists(struct intel_ring_buffer *ring)
4896 {
4897         INIT_LIST_HEAD(&ring->active_list);
4898         INIT_LIST_HEAD(&ring->request_list);
4899         INIT_LIST_HEAD(&ring->gpu_write_list);
4900 }
4901
4902 void
4903 i915_gem_load(struct drm_device *dev)
4904 {
4905         int i;
4906         drm_i915_private_t *dev_priv = dev->dev_private;
4907
4908         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4909         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4910         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4911         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
4912         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4913         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4914         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
4915         init_ring_lists(&dev_priv->render_ring);
4916         init_ring_lists(&dev_priv->bsd_ring);
4917         init_ring_lists(&dev_priv->blt_ring);
4918         for (i = 0; i < 16; i++)
4919                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4920         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4921                           i915_gem_retire_work_handler);
4922         init_completion(&dev_priv->error_completion);
4923
4924         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4925         if (IS_GEN3(dev)) {
4926                 u32 tmp = I915_READ(MI_ARB_STATE);
4927                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4928                         /* arb state is a masked write, so set bit + bit in mask */
4929                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4930                         I915_WRITE(MI_ARB_STATE, tmp);
4931                 }
4932         }
4933
4934         /* Old X drivers will take 0-2 for front, back, depth buffers */
4935         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4936                 dev_priv->fence_reg_start = 3;
4937
4938         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4939                 dev_priv->num_fence_regs = 16;
4940         else
4941                 dev_priv->num_fence_regs = 8;
4942
4943         /* Initialize fence registers to zero */
4944         switch (INTEL_INFO(dev)->gen) {
4945         case 6:
4946                 for (i = 0; i < 16; i++)
4947                         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
4948                 break;
4949         case 5:
4950         case 4:
4951                 for (i = 0; i < 16; i++)
4952                         I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4953                 break;
4954         case 3:
4955                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4956                         for (i = 0; i < 8; i++)
4957                                 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4958         case 2: /* gen3 falls through to clear these too */
4959                 for (i = 0; i < 8; i++)
4960                         I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4961                 break;
4962         }
4963         i915_gem_detect_bit_6_swizzle(dev);
4964         init_waitqueue_head(&dev_priv->pending_flip_queue);
4965
4966         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4967         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4968         register_shrinker(&dev_priv->mm.inactive_shrinker);
4969 }
4970
4971 /*
4972  * Create a physically contiguous memory object for this object
4973  * e.g. for cursor + overlay regs
4974  */
4975 static int i915_gem_init_phys_object(struct drm_device *dev,
4976                                      int id, int size, int align)
4977 {
4978         drm_i915_private_t *dev_priv = dev->dev_private;
4979         struct drm_i915_gem_phys_object *phys_obj;
4980         int ret;
4981
4982         if (dev_priv->mm.phys_objs[id - 1] || !size)
4983                 return 0;
4984
4985         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4986         if (!phys_obj)
4987                 return -ENOMEM;
4988
4989         phys_obj->id = id;
4990
4991         phys_obj->handle = drm_pci_alloc(dev, size, align);
4992         if (!phys_obj->handle) {
4993                 ret = -ENOMEM;
4994                 goto kfree_obj;
4995         }
4996 #ifdef CONFIG_X86
4997         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4998 #endif
4999
5000         dev_priv->mm.phys_objs[id - 1] = phys_obj;
5001
5002         return 0;
5003 kfree_obj:
5004         kfree(phys_obj);
5005         return ret;
5006 }
5007
5008 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
5009 {
5010         drm_i915_private_t *dev_priv = dev->dev_private;
5011         struct drm_i915_gem_phys_object *phys_obj;
5012
5013         if (!dev_priv->mm.phys_objs[id - 1])
5014                 return;
5015
5016         phys_obj = dev_priv->mm.phys_objs[id - 1];
5017         if (phys_obj->cur_obj) {
5018                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
5019         }
5020
5021 #ifdef CONFIG_X86
5022         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
5023 #endif
5024         drm_pci_free(dev, phys_obj->handle);
5025         kfree(phys_obj);
5026         dev_priv->mm.phys_objs[id - 1] = NULL;
5027 }
5028
5029 void i915_gem_free_all_phys_object(struct drm_device *dev)
5030 {
5031         int i;
5032
5033         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
5034                 i915_gem_free_phys_object(dev, i);
5035 }
5036
5037 void i915_gem_detach_phys_object(struct drm_device *dev,
5038                                  struct drm_gem_object *obj)
5039 {
5040         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
5041         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5042         char *vaddr;
5043         int i;
5044         int page_count;
5045
5046         if (!obj_priv->phys_obj)
5047                 return;
5048         vaddr = obj_priv->phys_obj->handle->vaddr;
5049
5050         page_count = obj->size / PAGE_SIZE;
5051
5052         for (i = 0; i < page_count; i++) {
5053                 struct page *page = read_cache_page_gfp(mapping, i,
5054                                                         GFP_HIGHUSER | __GFP_RECLAIMABLE);
5055                 if (!IS_ERR(page)) {
5056                         char *dst = kmap_atomic(page);
5057                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
5058                         kunmap_atomic(dst);
5059
5060                         drm_clflush_pages(&page, 1);
5061
5062                         set_page_dirty(page);
5063                         mark_page_accessed(page);
5064                         page_cache_release(page);
5065                 }
5066         }
5067         intel_gtt_chipset_flush();
5068
5069         obj_priv->phys_obj->cur_obj = NULL;
5070         obj_priv->phys_obj = NULL;
5071 }
5072
5073 int
5074 i915_gem_attach_phys_object(struct drm_device *dev,
5075                             struct drm_gem_object *obj,
5076                             int id,
5077                             int align)
5078 {
5079         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
5080         drm_i915_private_t *dev_priv = dev->dev_private;
5081         struct drm_i915_gem_object *obj_priv;
5082         int ret = 0;
5083         int page_count;
5084         int i;
5085
5086         if (id > I915_MAX_PHYS_OBJECT)
5087                 return -EINVAL;
5088
5089         obj_priv = to_intel_bo(obj);
5090
5091         if (obj_priv->phys_obj) {
5092                 if (obj_priv->phys_obj->id == id)
5093                         return 0;
5094                 i915_gem_detach_phys_object(dev, obj);
5095         }
5096
5097         /* create a new object */
5098         if (!dev_priv->mm.phys_objs[id - 1]) {
5099                 ret = i915_gem_init_phys_object(dev, id,
5100                                                 obj->size, align);
5101                 if (ret) {
5102                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
5103                         return ret;
5104                 }
5105         }
5106
5107         /* bind to the object */
5108         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
5109         obj_priv->phys_obj->cur_obj = obj;
5110
5111         page_count = obj->size / PAGE_SIZE;
5112
5113         for (i = 0; i < page_count; i++) {
5114                 struct page *page;
5115                 char *dst, *src;
5116
5117                 page = read_cache_page_gfp(mapping, i,
5118                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
5119                 if (IS_ERR(page))
5120                         return PTR_ERR(page);
5121
5122                 src = kmap_atomic(page);
5123                 dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
5124                 memcpy(dst, src, PAGE_SIZE);
5125                 kunmap_atomic(src);
5126
5127                 mark_page_accessed(page);
5128                 page_cache_release(page);
5129         }
5130
5131         return 0;
5132 }
5133
5134 static int
5135 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5136                      struct drm_i915_gem_pwrite *args,
5137                      struct drm_file *file_priv)
5138 {
5139         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5140         void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
5141         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
5142
5143         DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
5144
5145         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
5146                 unsigned long unwritten;
5147
5148                 /* The physical object once assigned is fixed for the lifetime
5149                  * of the obj, so we can safely drop the lock and continue
5150                  * to access vaddr.
5151                  */
5152                 mutex_unlock(&dev->struct_mutex);
5153                 unwritten = copy_from_user(vaddr, user_data, args->size);
5154                 mutex_lock(&dev->struct_mutex);
5155                 if (unwritten)
5156                         return -EFAULT;
5157         }
5158
5159         intel_gtt_chipset_flush();
5160         return 0;
5161 }
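
/*
 * Illustrative sketch, not part of the driver: a userspace pwrite that ends
 * up in i915_gem_phys_pwrite() above once the object has been attached to a
 * physically contiguous backing (e.g. a cursor on older chipsets).  Assumes
 * libdrm's drmIoctl() and i915_drm.h; "fd", "cursor_handle" and "cursor_data"
 * are hypothetical and error handling is omitted.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = cursor_handle,
 *		.offset   = 0,
 *		.size     = sizeof(cursor_data),
 *		.data_ptr = (uintptr_t)cursor_data,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */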
5162
5163 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5164 {
5165         struct drm_i915_file_private *file_priv = file->driver_priv;
5166
5167         /* Clean up our request list when the client is going away, so that
5168          * later retire_requests won't dereference our soon-to-be-gone
5169          * file_priv.
5170          */
5171         spin_lock(&file_priv->mm.lock);
5172         while (!list_empty(&file_priv->mm.request_list)) {
5173                 struct drm_i915_gem_request *request;
5174
5175                 request = list_first_entry(&file_priv->mm.request_list,
5176                                            struct drm_i915_gem_request,
5177                                            client_list);
5178                 list_del(&request->client_list);
5179                 request->file_priv = NULL;
5180         }
5181         spin_unlock(&file_priv->mm.lock);
5182 }
5183
5184 static int
5185 i915_gpu_is_active(struct drm_device *dev)
5186 {
5187         drm_i915_private_t *dev_priv = dev->dev_private;
5188         int lists_empty;
5189
5190         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5191                       list_empty(&dev_priv->mm.active_list);
5192
5193         return !lists_empty;
5194 }
5195
5196 static int
5197 i915_gem_inactive_shrink(struct shrinker *shrinker,
5198                          int nr_to_scan,
5199                          gfp_t gfp_mask)
5200 {
5201         struct drm_i915_private *dev_priv =
5202                 container_of(shrinker,
5203                              struct drm_i915_private,
5204                              mm.inactive_shrinker);
5205         struct drm_device *dev = dev_priv->dev;
5206         struct drm_i915_gem_object *obj, *next;
5207         int cnt;
5208
5209         if (!mutex_trylock(&dev->struct_mutex))
5210                 return 0;
5211
5212         /* "fast-path" to count number of available objects */
5213         if (nr_to_scan == 0) {
5214                 cnt = 0;
5215                 list_for_each_entry(obj,
5216                                     &dev_priv->mm.inactive_list,
5217                                     mm_list)
5218                         cnt++;
5219                 mutex_unlock(&dev->struct_mutex);
5220                 return cnt / 100 * sysctl_vfs_cache_pressure;
5221         }
5222
5223 rescan:
5224         /* first scan for clean buffers */
5225         i915_gem_retire_requests(dev);
5226
5227         list_for_each_entry_safe(obj, next,
5228                                  &dev_priv->mm.inactive_list,
5229                                  mm_list) {
5230                 if (i915_gem_object_is_purgeable(obj)) {
5231                         i915_gem_object_unbind(&obj->base);
5232                         if (--nr_to_scan == 0)
5233                                 break;
5234                 }
5235         }
5236
5237         /* second pass, evict/count anything still on the inactive list */
5238         cnt = 0;
5239         list_for_each_entry_safe(obj, next,
5240                                  &dev_priv->mm.inactive_list,
5241                                  mm_list) {
5242                 if (nr_to_scan) {
5243                         i915_gem_object_unbind(&obj->base);
5244                         nr_to_scan--;
5245                 } else
5246                         cnt++;
5247         }
5248
5249         if (nr_to_scan && i915_gpu_is_active(dev)) {
5250                 /*
5251                  * We are desperate for pages, so as a last resort, wait
5252                  * for the GPU to finish and discard whatever we can.
5253                  * This has a dramatic impact to reduce the number of
5254                  * OOM-killer events whilst running the GPU aggressively.
5255                  */
5256                 if (i915_gpu_idle(dev) == 0)
5257                         goto rescan;
5258         }
5259         mutex_unlock(&dev->struct_mutex);
5260         return cnt / 100 * sysctl_vfs_cache_pressure;
5261 }