drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
42                                                    bool force);
43 static __must_check int
44 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
45                            struct i915_address_space *vm,
46                            unsigned alignment,
47                            bool map_and_fenceable,
48                            bool nonblocking);
49 static int i915_gem_phys_pwrite(struct drm_device *dev,
50                                 struct drm_i915_gem_object *obj,
51                                 struct drm_i915_gem_pwrite *args,
52                                 struct drm_file *file);
53
54 static void i915_gem_write_fence(struct drm_device *dev, int reg,
55                                  struct drm_i915_gem_object *obj);
56 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
57                                          struct drm_i915_fence_reg *fence,
58                                          bool enable);
59
60 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
61                                              struct shrink_control *sc);
62 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
63                                             struct shrink_control *sc);
64 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
65 static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
66 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
67
68 static bool cpu_cache_is_coherent(struct drm_device *dev,
69                                   enum i915_cache_level level)
70 {
71         return HAS_LLC(dev) || level != I915_CACHE_NONE;
72 }
73
74 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
75 {
76         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
77                 return true;
78
79         return obj->pin_display;
80 }
81
82 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
83 {
84         if (obj->tiling_mode)
85                 i915_gem_release_mmap(obj);
86
87         /* As we do not have an associated fence register, we will force
88          * a tiling change if we ever need to acquire one.
89          */
90         obj->fence_dirty = false;
91         obj->fence_reg = I915_FENCE_REG_NONE;
92 }
93
94 /* some bookkeeping */
95 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
96                                   size_t size)
97 {
98         spin_lock(&dev_priv->mm.object_stat_lock);
99         dev_priv->mm.object_count++;
100         dev_priv->mm.object_memory += size;
101         spin_unlock(&dev_priv->mm.object_stat_lock);
102 }
103
104 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
105                                      size_t size)
106 {
107         spin_lock(&dev_priv->mm.object_stat_lock);
108         dev_priv->mm.object_count--;
109         dev_priv->mm.object_memory -= size;
110         spin_unlock(&dev_priv->mm.object_stat_lock);
111 }
112
113 static int
114 i915_gem_wait_for_error(struct i915_gpu_error *error)
115 {
116         int ret;
117
118 #define EXIT_COND (!i915_reset_in_progress(error) || \
119                    i915_terminally_wedged(error))
120         if (EXIT_COND)
121                 return 0;
122
123         /*
124          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
125          * userspace. If it takes that long something really bad is going on and
126          * we should simply try to bail out and fail as gracefully as possible.
127          */
128         ret = wait_event_interruptible_timeout(error->reset_queue,
129                                                EXIT_COND,
130                                                10*HZ);
131         if (ret == 0) {
132                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
133                 return -EIO;
134         } else if (ret < 0) {
135                 return ret;
136         }
137 #undef EXIT_COND
138
139         return 0;
140 }
141
142 int i915_mutex_lock_interruptible(struct drm_device *dev)
143 {
144         struct drm_i915_private *dev_priv = dev->dev_private;
145         int ret;
146
147         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
148         if (ret)
149                 return ret;
150
151         ret = mutex_lock_interruptible(&dev->struct_mutex);
152         if (ret)
153                 return ret;
154
155         WARN_ON(i915_verify_lists(dev));
156         return 0;
157 }
158
159 static inline bool
160 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
161 {
162         return i915_gem_obj_bound_any(obj) && !obj->active;
163 }
164
165 int
166 i915_gem_init_ioctl(struct drm_device *dev, void *data,
167                     struct drm_file *file)
168 {
169         struct drm_i915_private *dev_priv = dev->dev_private;
170         struct drm_i915_gem_init *args = data;
171
172         if (drm_core_check_feature(dev, DRIVER_MODESET))
173                 return -ENODEV;
174
175         if (args->gtt_start >= args->gtt_end ||
176             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
177                 return -EINVAL;
178
179         /* GEM with user mode setting was never supported on ilk and later. */
180         if (INTEL_INFO(dev)->gen >= 5)
181                 return -ENODEV;
182
183         mutex_lock(&dev->struct_mutex);
184         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
185                                   args->gtt_end);
186         dev_priv->gtt.mappable_end = args->gtt_end;
187         mutex_unlock(&dev->struct_mutex);
188
189         return 0;
190 }
191
192 int
193 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
194                             struct drm_file *file)
195 {
196         struct drm_i915_private *dev_priv = dev->dev_private;
197         struct drm_i915_gem_get_aperture *args = data;
198         struct drm_i915_gem_object *obj;
199         size_t pinned;
200
201         pinned = 0;
202         mutex_lock(&dev->struct_mutex);
203         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
204                 if (obj->pin_count)
205                         pinned += i915_gem_obj_ggtt_size(obj);
206         mutex_unlock(&dev->struct_mutex);
207
208         args->aper_size = dev_priv->gtt.base.total;
209         args->aper_available_size = args->aper_size - pinned;
210
211         return 0;
212 }
213
214 void *i915_gem_object_alloc(struct drm_device *dev)
215 {
216         struct drm_i915_private *dev_priv = dev->dev_private;
217         return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
218 }
219
220 void i915_gem_object_free(struct drm_i915_gem_object *obj)
221 {
222         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
223         kmem_cache_free(dev_priv->slab, obj);
224 }
225
226 static int
227 i915_gem_create(struct drm_file *file,
228                 struct drm_device *dev,
229                 uint64_t size,
230                 uint32_t *handle_p)
231 {
232         struct drm_i915_gem_object *obj;
233         int ret;
234         u32 handle;
235
236         size = roundup(size, PAGE_SIZE);
237         if (size == 0)
238                 return -EINVAL;
239
240         /* Allocate the new object */
241         obj = i915_gem_alloc_object(dev, size);
242         if (obj == NULL)
243                 return -ENOMEM;
244
245         ret = drm_gem_handle_create(file, &obj->base, &handle);
246         /* drop reference from allocate - handle holds it now */
247         drm_gem_object_unreference_unlocked(&obj->base);
248         if (ret)
249                 return ret;
250
251         *handle_p = handle;
252         return 0;
253 }
254
255 int
256 i915_gem_dumb_create(struct drm_file *file,
257                      struct drm_device *dev,
258                      struct drm_mode_create_dumb *args)
259 {
260         /* have to work out size/pitch and return them */
261         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
262         args->size = args->pitch * args->height;
263         return i915_gem_create(file, dev,
264                                args->size, &args->handle);
265 }
266
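/*
 * Illustrative userspace sequence (a sketch only, assuming libdrm's
 * drmIoctl() wrapper; not part of this file): allocate a buffer and
 * receive its handle back from the create ioctl handled below.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 * On success create.handle names the new object.
 */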
267 /**
268  * Creates a new mm object and returns a handle to it.
269  */
270 int
271 i915_gem_create_ioctl(struct drm_device *dev, void *data,
272                       struct drm_file *file)
273 {
274         struct drm_i915_gem_create *args = data;
275
276         return i915_gem_create(file, dev,
277                                args->size, &args->handle);
278 }
279
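/*
 * Helpers for copying between a bit-17-swizzled object and user memory.
 * On affected chipsets, bit 17 of a page's physical address is XORed
 * into bit 6 of the tiled address, which swaps the two 64-byte
 * cachelines inside every 128-byte block as seen by the CPU.  The loops
 * below therefore advance in at-most-one-cacheline chunks and flip the
 * GPU offset with "^ 64" to compensate for the swap.
 */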
280 static inline int
281 __copy_to_user_swizzled(char __user *cpu_vaddr,
282                         const char *gpu_vaddr, int gpu_offset,
283                         int length)
284 {
285         int ret, cpu_offset = 0;
286
287         while (length > 0) {
288                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
289                 int this_length = min(cacheline_end - gpu_offset, length);
290                 int swizzled_gpu_offset = gpu_offset ^ 64;
291
292                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
293                                      gpu_vaddr + swizzled_gpu_offset,
294                                      this_length);
295                 if (ret)
296                         return ret + length;
297
298                 cpu_offset += this_length;
299                 gpu_offset += this_length;
300                 length -= this_length;
301         }
302
303         return 0;
304 }
305
306 static inline int
307 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
308                           const char __user *cpu_vaddr,
309                           int length)
310 {
311         int ret, cpu_offset = 0;
312
313         while (length > 0) {
314                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
315                 int this_length = min(cacheline_end - gpu_offset, length);
316                 int swizzled_gpu_offset = gpu_offset ^ 64;
317
318                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
319                                        cpu_vaddr + cpu_offset,
320                                        this_length);
321                 if (ret)
322                         return ret + length;
323
324                 cpu_offset += this_length;
325                 gpu_offset += this_length;
326                 length -= this_length;
327         }
328
329         return 0;
330 }
331
332 /* Per-page copy function for the shmem pread fastpath.
333  * Flushes invalid cachelines before reading the target if
334  * needs_clflush is set. */
335 static int
336 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
337                  char __user *user_data,
338                  bool page_do_bit17_swizzling, bool needs_clflush)
339 {
340         char *vaddr;
341         int ret;
342
343         if (unlikely(page_do_bit17_swizzling))
344                 return -EINVAL;
345
346         vaddr = kmap_atomic(page);
347         if (needs_clflush)
348                 drm_clflush_virt_range(vaddr + shmem_page_offset,
349                                        page_length);
350         ret = __copy_to_user_inatomic(user_data,
351                                       vaddr + shmem_page_offset,
352                                       page_length);
353         kunmap_atomic(vaddr);
354
355         return ret ? -EFAULT : 0;
356 }
357
358 static void
359 shmem_clflush_swizzled_range(char *addr, unsigned long length,
360                              bool swizzled)
361 {
362         if (unlikely(swizzled)) {
363                 unsigned long start = (unsigned long) addr;
364                 unsigned long end = (unsigned long) addr + length;
365
366                 /* For swizzling simply ensure that we always flush both
367                  * channels. Lame, but simple and it works. Swizzled
368                  * pwrite/pread is far from a hotpath - current userspace
369                  * doesn't use it at all. */
370                 start = round_down(start, 128);
371                 end = round_up(end, 128);
372
373                 drm_clflush_virt_range((void *)start, end - start);
374         } else {
375                 drm_clflush_virt_range(addr, length);
376         }
377
378 }
379
380 /* Only difference to the fast-path function is that this can handle bit17
381  * and uses non-atomic copy and kmap functions. */
382 static int
383 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
384                  char __user *user_data,
385                  bool page_do_bit17_swizzling, bool needs_clflush)
386 {
387         char *vaddr;
388         int ret;
389
390         vaddr = kmap(page);
391         if (needs_clflush)
392                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
393                                              page_length,
394                                              page_do_bit17_swizzling);
395
396         if (page_do_bit17_swizzling)
397                 ret = __copy_to_user_swizzled(user_data,
398                                               vaddr, shmem_page_offset,
399                                               page_length);
400         else
401                 ret = __copy_to_user(user_data,
402                                      vaddr + shmem_page_offset,
403                                      page_length);
404         kunmap(page);
405
 406         return ret ? -EFAULT : 0;
407 }
408
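/*
 * Copy object contents out to userspace page by page.  While holding
 * struct_mutex the non-faulting fastpath (kmap_atomic + inatomic copy)
 * is tried first; if it fails, the mutex is dropped, the user buffer is
 * prefaulted once, and the sleeping slowpath is used before retaking
 * the lock.
 */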
409 static int
410 i915_gem_shmem_pread(struct drm_device *dev,
411                      struct drm_i915_gem_object *obj,
412                      struct drm_i915_gem_pread *args,
413                      struct drm_file *file)
414 {
415         char __user *user_data;
416         ssize_t remain;
417         loff_t offset;
418         int shmem_page_offset, page_length, ret = 0;
419         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
420         int prefaulted = 0;
421         int needs_clflush = 0;
422         struct sg_page_iter sg_iter;
423
424         user_data = to_user_ptr(args->data_ptr);
425         remain = args->size;
426
427         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
428
429         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
 430                 /* If we're not in the cpu read domain, set ourselves into the gtt
431                  * read domain and manually flush cachelines (if required). This
432                  * optimizes for the case when the gpu will dirty the data
433                  * anyway again before the next pread happens. */
434                 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
435                 if (i915_gem_obj_bound_any(obj)) {
436                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
437                         if (ret)
438                                 return ret;
439                 }
440         }
441
442         ret = i915_gem_object_get_pages(obj);
443         if (ret)
444                 return ret;
445
446         i915_gem_object_pin_pages(obj);
447
448         offset = args->offset;
449
450         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
451                          offset >> PAGE_SHIFT) {
452                 struct page *page = sg_page_iter_page(&sg_iter);
453
454                 if (remain <= 0)
455                         break;
456
457                 /* Operation in this page
458                  *
459                  * shmem_page_offset = offset within page in shmem file
460                  * page_length = bytes to copy for this page
461                  */
462                 shmem_page_offset = offset_in_page(offset);
463                 page_length = remain;
464                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
465                         page_length = PAGE_SIZE - shmem_page_offset;
466
467                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
468                         (page_to_phys(page) & (1 << 17)) != 0;
469
470                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
471                                        user_data, page_do_bit17_swizzling,
472                                        needs_clflush);
473                 if (ret == 0)
474                         goto next_page;
475
476                 mutex_unlock(&dev->struct_mutex);
477
478                 if (likely(!i915_prefault_disable) && !prefaulted) {
479                         ret = fault_in_multipages_writeable(user_data, remain);
480                         /* Userspace is tricking us, but we've already clobbered
481                          * its pages with the prefault and promised to write the
482                          * data up to the first fault. Hence ignore any errors
483                          * and just continue. */
484                         (void)ret;
485                         prefaulted = 1;
486                 }
487
488                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
489                                        user_data, page_do_bit17_swizzling,
490                                        needs_clflush);
491
492                 mutex_lock(&dev->struct_mutex);
493
494 next_page:
495                 mark_page_accessed(page);
496
497                 if (ret)
498                         goto out;
499
500                 remain -= page_length;
501                 user_data += page_length;
502                 offset += page_length;
503         }
504
505 out:
506         i915_gem_object_unpin_pages(obj);
507
508         return ret;
509 }
510
511 /**
512  * Reads data from the object referenced by handle.
513  *
514  * On error, the contents of *data are undefined.
515  */
516 int
517 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
518                      struct drm_file *file)
519 {
520         struct drm_i915_gem_pread *args = data;
521         struct drm_i915_gem_object *obj;
522         int ret = 0;
523
524         if (args->size == 0)
525                 return 0;
526
527         if (!access_ok(VERIFY_WRITE,
528                        to_user_ptr(args->data_ptr),
529                        args->size))
530                 return -EFAULT;
531
532         ret = i915_mutex_lock_interruptible(dev);
533         if (ret)
534                 return ret;
535
536         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
537         if (&obj->base == NULL) {
538                 ret = -ENOENT;
539                 goto unlock;
540         }
541
542         /* Bounds check source.  */
543         if (args->offset > obj->base.size ||
544             args->size > obj->base.size - args->offset) {
545                 ret = -EINVAL;
546                 goto out;
547         }
548
549         /* prime objects have no backing filp to GEM pread/pwrite
550          * pages from.
551          */
552         if (!obj->base.filp) {
553                 ret = -EINVAL;
554                 goto out;
555         }
556
557         trace_i915_gem_object_pread(obj, args->offset, args->size);
558
559         ret = i915_gem_shmem_pread(dev, obj, args, file);
560
561 out:
562         drm_gem_object_unreference(&obj->base);
563 unlock:
564         mutex_unlock(&dev->struct_mutex);
565         return ret;
566 }
567
568 /* This is the fast write path which cannot handle
569  * page faults in the source data
570  */
571
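/*
 * The copy goes through an atomic write-combining mapping of the
 * aperture page; the inatomic nocache copy returns the number of bytes
 * it could not copy, so a non-zero result means the source page was not
 * resident and the caller has to fall back to a path that may fault.
 */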
572 static inline int
573 fast_user_write(struct io_mapping *mapping,
574                 loff_t page_base, int page_offset,
575                 char __user *user_data,
576                 int length)
577 {
578         void __iomem *vaddr_atomic;
579         void *vaddr;
580         unsigned long unwritten;
581
582         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
583         /* We can use the cpu mem copy function because this is X86. */
584         vaddr = (void __force*)vaddr_atomic + page_offset;
585         unwritten = __copy_from_user_inatomic_nocache(vaddr,
586                                                       user_data, length);
587         io_mapping_unmap_atomic(vaddr_atomic);
588         return unwritten;
589 }
590
591 /**
592  * This is the fast pwrite path, where we copy the data directly from the
593  * user into the GTT, uncached.
594  */
595 static int
596 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
597                          struct drm_i915_gem_object *obj,
598                          struct drm_i915_gem_pwrite *args,
599                          struct drm_file *file)
600 {
601         drm_i915_private_t *dev_priv = dev->dev_private;
602         ssize_t remain;
603         loff_t offset, page_base;
604         char __user *user_data;
605         int page_offset, page_length, ret;
606
607         ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
608         if (ret)
609                 goto out;
610
611         ret = i915_gem_object_set_to_gtt_domain(obj, true);
612         if (ret)
613                 goto out_unpin;
614
615         ret = i915_gem_object_put_fence(obj);
616         if (ret)
617                 goto out_unpin;
618
619         user_data = to_user_ptr(args->data_ptr);
620         remain = args->size;
621
622         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
623
624         while (remain > 0) {
625                 /* Operation in this page
626                  *
627                  * page_base = page offset within aperture
628                  * page_offset = offset within page
629                  * page_length = bytes to copy for this page
630                  */
631                 page_base = offset & PAGE_MASK;
632                 page_offset = offset_in_page(offset);
633                 page_length = remain;
634                 if ((page_offset + remain) > PAGE_SIZE)
635                         page_length = PAGE_SIZE - page_offset;
636
637                 /* If we get a fault while copying data, then (presumably) our
638                  * source page isn't available.  Return the error and we'll
639                  * retry in the slow path.
640                  */
641                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
642                                     page_offset, user_data, page_length)) {
643                         ret = -EFAULT;
644                         goto out_unpin;
645                 }
646
647                 remain -= page_length;
648                 user_data += page_length;
649                 offset += page_length;
650         }
651
652 out_unpin:
653         i915_gem_object_unpin(obj);
654 out:
655         return ret;
656 }
657
658 /* Per-page copy function for the shmem pwrite fastpath.
659  * Flushes invalid cachelines before writing to the target if
660  * needs_clflush_before is set and flushes out any written cachelines after
661  * writing if needs_clflush is set. */
662 static int
663 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
664                   char __user *user_data,
665                   bool page_do_bit17_swizzling,
666                   bool needs_clflush_before,
667                   bool needs_clflush_after)
668 {
669         char *vaddr;
670         int ret;
671
672         if (unlikely(page_do_bit17_swizzling))
673                 return -EINVAL;
674
675         vaddr = kmap_atomic(page);
676         if (needs_clflush_before)
677                 drm_clflush_virt_range(vaddr + shmem_page_offset,
678                                        page_length);
679         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
680                                                 user_data,
681                                                 page_length);
682         if (needs_clflush_after)
683                 drm_clflush_virt_range(vaddr + shmem_page_offset,
684                                        page_length);
685         kunmap_atomic(vaddr);
686
687         return ret ? -EFAULT : 0;
688 }
689
690 /* Only difference to the fast-path function is that this can handle bit17
691  * and uses non-atomic copy and kmap functions. */
692 static int
693 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
694                   char __user *user_data,
695                   bool page_do_bit17_swizzling,
696                   bool needs_clflush_before,
697                   bool needs_clflush_after)
698 {
699         char *vaddr;
700         int ret;
701
702         vaddr = kmap(page);
703         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
704                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
705                                              page_length,
706                                              page_do_bit17_swizzling);
707         if (page_do_bit17_swizzling)
708                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
709                                                 user_data,
710                                                 page_length);
711         else
712                 ret = __copy_from_user(vaddr + shmem_page_offset,
713                                        user_data,
714                                        page_length);
715         if (needs_clflush_after)
716                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
717                                              page_length,
718                                              page_do_bit17_swizzling);
719         kunmap(page);
720
721         return ret ? -EFAULT : 0;
722 }
723
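/*
 * Shmem pwrite mirrors the pread path above: the non-faulting fastpath
 * is tried under struct_mutex and the sleeping slowpath runs with the
 * mutex dropped.  needs_clflush_before flushes stale cachelines when a
 * non-coherent object is only partially overwritten within a cacheline;
 * needs_clflush_after pushes the freshly written lines out so the GPU
 * sees them.
 */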
724 static int
725 i915_gem_shmem_pwrite(struct drm_device *dev,
726                       struct drm_i915_gem_object *obj,
727                       struct drm_i915_gem_pwrite *args,
728                       struct drm_file *file)
729 {
730         ssize_t remain;
731         loff_t offset;
732         char __user *user_data;
733         int shmem_page_offset, page_length, ret = 0;
734         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
735         int hit_slowpath = 0;
736         int needs_clflush_after = 0;
737         int needs_clflush_before = 0;
738         struct sg_page_iter sg_iter;
739
740         user_data = to_user_ptr(args->data_ptr);
741         remain = args->size;
742
743         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
744
745         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 746                 /* If we're not in the cpu write domain, set ourselves into the gtt
747                  * write domain and manually flush cachelines (if required). This
748                  * optimizes for the case when the gpu will use the data
749                  * right away and we therefore have to clflush anyway. */
750                 needs_clflush_after = cpu_write_needs_clflush(obj);
751                 if (i915_gem_obj_bound_any(obj)) {
752                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
753                         if (ret)
754                                 return ret;
755                 }
756         }
757         /* Same trick applies to invalidate partially written cachelines read
758          * before writing. */
759         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
760                 needs_clflush_before =
761                         !cpu_cache_is_coherent(dev, obj->cache_level);
762
763         ret = i915_gem_object_get_pages(obj);
764         if (ret)
765                 return ret;
766
767         i915_gem_object_pin_pages(obj);
768
769         offset = args->offset;
770         obj->dirty = 1;
771
772         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
773                          offset >> PAGE_SHIFT) {
774                 struct page *page = sg_page_iter_page(&sg_iter);
775                 int partial_cacheline_write;
776
777                 if (remain <= 0)
778                         break;
779
780                 /* Operation in this page
781                  *
782                  * shmem_page_offset = offset within page in shmem file
783                  * page_length = bytes to copy for this page
784                  */
785                 shmem_page_offset = offset_in_page(offset);
786
787                 page_length = remain;
788                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
789                         page_length = PAGE_SIZE - shmem_page_offset;
790
791                 /* If we don't overwrite a cacheline completely we need to be
792                  * careful to have up-to-date data by first clflushing. Don't
 793                  * overcomplicate things and flush the entire range. */
794                 partial_cacheline_write = needs_clflush_before &&
795                         ((shmem_page_offset | page_length)
796                                 & (boot_cpu_data.x86_clflush_size - 1));
797
798                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
799                         (page_to_phys(page) & (1 << 17)) != 0;
800
801                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
802                                         user_data, page_do_bit17_swizzling,
803                                         partial_cacheline_write,
804                                         needs_clflush_after);
805                 if (ret == 0)
806                         goto next_page;
807
808                 hit_slowpath = 1;
809                 mutex_unlock(&dev->struct_mutex);
810                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
811                                         user_data, page_do_bit17_swizzling,
812                                         partial_cacheline_write,
813                                         needs_clflush_after);
814
815                 mutex_lock(&dev->struct_mutex);
816
817 next_page:
818                 set_page_dirty(page);
819                 mark_page_accessed(page);
820
821                 if (ret)
822                         goto out;
823
824                 remain -= page_length;
825                 user_data += page_length;
826                 offset += page_length;
827         }
828
829 out:
830         i915_gem_object_unpin_pages(obj);
831
832         if (hit_slowpath) {
833                 /*
834                  * Fixup: Flush cpu caches in case we didn't flush the dirty
835                  * cachelines in-line while writing and the object moved
836                  * out of the cpu write domain while we've dropped the lock.
837                  */
838                 if (!needs_clflush_after &&
839                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
840                         if (i915_gem_clflush_object(obj, obj->pin_display))
841                                 i915_gem_chipset_flush(dev);
842                 }
843         }
844
845         if (needs_clflush_after)
846                 i915_gem_chipset_flush(dev);
847
848         return ret;
849 }
850
851 /**
852  * Writes data to the object referenced by handle.
853  *
854  * On error, the contents of the buffer that were to be modified are undefined.
855  */
856 int
857 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
858                       struct drm_file *file)
859 {
860         struct drm_i915_gem_pwrite *args = data;
861         struct drm_i915_gem_object *obj;
862         int ret;
863
864         if (args->size == 0)
865                 return 0;
866
867         if (!access_ok(VERIFY_READ,
868                        to_user_ptr(args->data_ptr),
869                        args->size))
870                 return -EFAULT;
871
872         if (likely(!i915_prefault_disable)) {
873                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
874                                                    args->size);
875                 if (ret)
876                         return -EFAULT;
877         }
878
879         ret = i915_mutex_lock_interruptible(dev);
880         if (ret)
881                 return ret;
882
883         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
884         if (&obj->base == NULL) {
885                 ret = -ENOENT;
886                 goto unlock;
887         }
888
889         /* Bounds check destination. */
890         if (args->offset > obj->base.size ||
891             args->size > obj->base.size - args->offset) {
892                 ret = -EINVAL;
893                 goto out;
894         }
895
896         /* prime objects have no backing filp to GEM pread/pwrite
897          * pages from.
898          */
899         if (!obj->base.filp) {
900                 ret = -EINVAL;
901                 goto out;
902         }
903
904         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
905
906         ret = -EFAULT;
907         /* We can only do the GTT pwrite on untiled buffers, as otherwise
908          * it would end up going through the fenced access, and we'll get
909          * different detiling behavior between reading and writing.
910          * pread/pwrite currently are reading and writing from the CPU
911          * perspective, requiring manual detiling by the client.
912          */
913         if (obj->phys_obj) {
914                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
915                 goto out;
916         }
917
918         if (obj->tiling_mode == I915_TILING_NONE &&
919             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
920             cpu_write_needs_clflush(obj)) {
921                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
922                 /* Note that the gtt paths might fail with non-page-backed user
923                  * pointers (e.g. gtt mappings when moving data between
924                  * textures). Fallback to the shmem path in that case. */
925         }
926
927         if (ret == -EFAULT || ret == -ENOSPC)
928                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
929
930 out:
931         drm_gem_object_unreference(&obj->base);
932 unlock:
933         mutex_unlock(&dev->struct_mutex);
934         return ret;
935 }
936
937 int
938 i915_gem_check_wedge(struct i915_gpu_error *error,
939                      bool interruptible)
940 {
941         if (i915_reset_in_progress(error)) {
942                 /* Non-interruptible callers can't handle -EAGAIN, hence return
943                  * -EIO unconditionally for these. */
944                 if (!interruptible)
945                         return -EIO;
946
947                 /* Recovery complete, but the reset failed ... */
948                 if (i915_terminally_wedged(error))
949                         return -EIO;
950
951                 return -EAGAIN;
952         }
953
954         return 0;
955 }
956
957 /*
958  * Compare seqno against outstanding lazy request. Emit a request if they are
959  * equal.
960  */
961 static int
962 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
963 {
964         int ret;
965
966         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
967
968         ret = 0;
969         if (seqno == ring->outstanding_lazy_request)
970                 ret = i915_add_request(ring, NULL);
971
972         return ret;
973 }
974
975 /**
976  * __wait_seqno - wait until execution of seqno has finished
977  * @ring: the ring expected to report seqno
 978  * @seqno: the seqno to wait for
979  * @reset_counter: reset sequence associated with the given seqno
980  * @interruptible: do an interruptible wait (normally yes)
981  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
982  *
983  * Note: It is of utmost importance that the passed in seqno and reset_counter
984  * values have been read by the caller in an smp safe manner. Where read-side
985  * locks are involved, it is sufficient to read the reset_counter before
986  * unlocking the lock that protects the seqno. For lockless tricks, the
987  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
988  * inserted.
989  *
 990  * Returns 0 if the seqno was found within the allotted time. Else returns the
991  * errno with remaining time filled in timeout argument.
992  */
993 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
994                         unsigned reset_counter,
995                         bool interruptible, struct timespec *timeout)
996 {
997         drm_i915_private_t *dev_priv = ring->dev->dev_private;
998         struct timespec before, now, wait_time={1,0};
999         unsigned long timeout_jiffies;
1000         long end;
1001         bool wait_forever = true;
1002         int ret;
1003
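         /*
          * Note: with no caller-supplied timeout the wait below is re-armed
          * in one-second slices (wait_time = {1, 0} above), so EXIT_COND is
          * re-evaluated at least once a second even if no wake-up arrives.
          */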
1004         WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1005
1006         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1007                 return 0;
1008
1009         trace_i915_gem_request_wait_begin(ring, seqno);
1010
1011         if (timeout != NULL) {
1012                 wait_time = *timeout;
1013                 wait_forever = false;
1014         }
1015
1016         timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1017
1018         if (WARN_ON(!ring->irq_get(ring)))
1019                 return -ENODEV;
1020
 1021         /* Record current time in case interrupted by signal, or wedged */
1022         getrawmonotonic(&before);
1023
1024 #define EXIT_COND \
1025         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1026          i915_reset_in_progress(&dev_priv->gpu_error) || \
1027          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1028         do {
1029                 if (interruptible)
1030                         end = wait_event_interruptible_timeout(ring->irq_queue,
1031                                                                EXIT_COND,
1032                                                                timeout_jiffies);
1033                 else
1034                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1035                                                  timeout_jiffies);
1036
1037                 /* We need to check whether any gpu reset happened in between
1038                  * the caller grabbing the seqno and now ... */
1039                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1040                         end = -EAGAIN;
1041
 1042                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1043                  * gone. */
1044                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1045                 if (ret)
1046                         end = ret;
1047         } while (end == 0 && wait_forever);
1048
1049         getrawmonotonic(&now);
1050
1051         ring->irq_put(ring);
1052         trace_i915_gem_request_wait_end(ring, seqno);
1053 #undef EXIT_COND
1054
1055         if (timeout) {
1056                 struct timespec sleep_time = timespec_sub(now, before);
1057                 *timeout = timespec_sub(*timeout, sleep_time);
1058                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1059                         set_normalized_timespec(timeout, 0, 0);
1060         }
1061
1062         switch (end) {
1063         case -EIO:
1064         case -EAGAIN: /* Wedged */
1065         case -ERESTARTSYS: /* Signal */
1066                 return (int)end;
1067         case 0: /* Timeout */
1068                 return -ETIME;
1069         default: /* Completed */
1070                 WARN_ON(end < 0); /* We're not aware of other errors */
1071                 return 0;
1072         }
1073 }
1074
1075 /**
1076  * Waits for a sequence number to be signaled, and cleans up the
1077  * request and object lists appropriately for that event.
1078  */
1079 int
1080 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1081 {
1082         struct drm_device *dev = ring->dev;
1083         struct drm_i915_private *dev_priv = dev->dev_private;
1084         bool interruptible = dev_priv->mm.interruptible;
1085         int ret;
1086
1087         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1088         BUG_ON(seqno == 0);
1089
1090         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1091         if (ret)
1092                 return ret;
1093
1094         ret = i915_gem_check_olr(ring, seqno);
1095         if (ret)
1096                 return ret;
1097
1098         return __wait_seqno(ring, seqno,
1099                             atomic_read(&dev_priv->gpu_error.reset_counter),
1100                             interruptible, NULL);
1101 }
1102
1103 static int
1104 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1105                                      struct intel_ring_buffer *ring)
1106 {
1107         i915_gem_retire_requests_ring(ring);
1108
1109         /* Manually manage the write flush as we may have not yet
1110          * retired the buffer.
1111          *
1112          * Note that the last_write_seqno is always the earlier of
 1113          * the two (read/write) seqno, so if we have successfully waited,
1114          * we know we have passed the last write.
1115          */
1116         obj->last_write_seqno = 0;
1117         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1118
1119         return 0;
1120 }
1121
1122 /**
1123  * Ensures that all rendering to the object has completed and the object is
1124  * safe to unbind from the GTT or access from the CPU.
1125  */
1126 static __must_check int
1127 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1128                                bool readonly)
1129 {
1130         struct intel_ring_buffer *ring = obj->ring;
1131         u32 seqno;
1132         int ret;
1133
1134         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1135         if (seqno == 0)
1136                 return 0;
1137
1138         ret = i915_wait_seqno(ring, seqno);
1139         if (ret)
1140                 return ret;
1141
1142         return i915_gem_object_wait_rendering__tail(obj, ring);
1143 }
1144
1145 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1146  * as the object state may change during this call.
1147  */
1148 static __must_check int
1149 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1150                                             bool readonly)
1151 {
1152         struct drm_device *dev = obj->base.dev;
1153         struct drm_i915_private *dev_priv = dev->dev_private;
1154         struct intel_ring_buffer *ring = obj->ring;
1155         unsigned reset_counter;
1156         u32 seqno;
1157         int ret;
1158
1159         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1160         BUG_ON(!dev_priv->mm.interruptible);
1161
1162         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1163         if (seqno == 0)
1164                 return 0;
1165
1166         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1167         if (ret)
1168                 return ret;
1169
1170         ret = i915_gem_check_olr(ring, seqno);
1171         if (ret)
1172                 return ret;
1173
1174         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1175         mutex_unlock(&dev->struct_mutex);
1176         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1177         mutex_lock(&dev->struct_mutex);
1178         if (ret)
1179                 return ret;
1180
1181         return i915_gem_object_wait_rendering__tail(obj, ring);
1182 }
1183
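/*
 * Illustrative userspace usage (a sketch only, assuming libdrm's
 * drmIoctl(); not part of this file): move an object into the GTT
 * domain for writing before touching it through a GTT mmap.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */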
1184 /**
1185  * Called when user space prepares to use an object with the CPU, either
1186  * through the mmap ioctl's mapping or a GTT mapping.
1187  */
1188 int
1189 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1190                           struct drm_file *file)
1191 {
1192         struct drm_i915_gem_set_domain *args = data;
1193         struct drm_i915_gem_object *obj;
1194         uint32_t read_domains = args->read_domains;
1195         uint32_t write_domain = args->write_domain;
1196         int ret;
1197
1198         /* Only handle setting domains to types used by the CPU. */
1199         if (write_domain & I915_GEM_GPU_DOMAINS)
1200                 return -EINVAL;
1201
1202         if (read_domains & I915_GEM_GPU_DOMAINS)
1203                 return -EINVAL;
1204
1205         /* Having something in the write domain implies it's in the read
1206          * domain, and only that read domain.  Enforce that in the request.
1207          */
1208         if (write_domain != 0 && read_domains != write_domain)
1209                 return -EINVAL;
1210
1211         ret = i915_mutex_lock_interruptible(dev);
1212         if (ret)
1213                 return ret;
1214
1215         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1216         if (&obj->base == NULL) {
1217                 ret = -ENOENT;
1218                 goto unlock;
1219         }
1220
1221         /* Try to flush the object off the GPU without holding the lock.
1222          * We will repeat the flush holding the lock in the normal manner
1223          * to catch cases where we are gazumped.
1224          */
1225         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1226         if (ret)
1227                 goto unref;
1228
1229         if (read_domains & I915_GEM_DOMAIN_GTT) {
1230                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1231
1232                 /* Silently promote "you're not bound, there was nothing to do"
1233                  * to success, since the client was just asking us to
1234                  * make sure everything was done.
1235                  */
1236                 if (ret == -EINVAL)
1237                         ret = 0;
1238         } else {
1239                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1240         }
1241
1242 unref:
1243         drm_gem_object_unreference(&obj->base);
1244 unlock:
1245         mutex_unlock(&dev->struct_mutex);
1246         return ret;
1247 }
1248
1249 /**
1250  * Called when user space has done writes to this buffer
1251  */
1252 int
1253 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1254                          struct drm_file *file)
1255 {
1256         struct drm_i915_gem_sw_finish *args = data;
1257         struct drm_i915_gem_object *obj;
1258         int ret = 0;
1259
1260         ret = i915_mutex_lock_interruptible(dev);
1261         if (ret)
1262                 return ret;
1263
1264         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1265         if (&obj->base == NULL) {
1266                 ret = -ENOENT;
1267                 goto unlock;
1268         }
1269
1270         /* Pinned buffers may be scanout, so flush the cache */
1271         if (obj->pin_display)
1272                 i915_gem_object_flush_cpu_write_domain(obj, true);
1273
1274         drm_gem_object_unreference(&obj->base);
1275 unlock:
1276         mutex_unlock(&dev->struct_mutex);
1277         return ret;
1278 }
1279
1280 /**
1281  * Maps the contents of an object, returning the address it is mapped
1282  * into.
1283  *
1284  * While the mapping holds a reference on the contents of the object, it doesn't
1285  * imply a ref on the object itself.
1286  */
1287 int
1288 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1289                     struct drm_file *file)
1290 {
1291         struct drm_i915_gem_mmap *args = data;
1292         struct drm_gem_object *obj;
1293         unsigned long addr;
1294
1295         obj = drm_gem_object_lookup(dev, file, args->handle);
1296         if (obj == NULL)
1297                 return -ENOENT;
1298
1299         /* prime objects have no backing filp to GEM mmap
1300          * pages from.
1301          */
1302         if (!obj->filp) {
1303                 drm_gem_object_unreference_unlocked(obj);
1304                 return -EINVAL;
1305         }
1306
1307         addr = vm_mmap(obj->filp, 0, args->size,
1308                        PROT_READ | PROT_WRITE, MAP_SHARED,
1309                        args->offset);
1310         drm_gem_object_unreference_unlocked(obj);
1311         if (IS_ERR((void *)addr))
1312                 return addr;
1313
1314         args->addr_ptr = (uint64_t) addr;
1315
1316         return 0;
1317 }
1318
1319 /**
1320  * i915_gem_fault - fault a page into the GTT
 1321  * @vma: VMA in question
 1322  * @vmf: fault info
1323  *
 1324  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1325  * from userspace.  The fault handler takes care of binding the object to
1326  * the GTT (if needed), allocating and programming a fence register (again,
1327  * only if needed based on whether the old reg is still valid or the object
1328  * is tiled) and inserting a new PTE into the faulting process.
1329  *
1330  * Note that the faulting process may involve evicting existing objects
1331  * from the GTT and/or fence registers to make room.  So performance may
1332  * suffer if the GTT working set is large or there are few fence registers
1333  * left.
1334  */
1335 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1336 {
1337         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1338         struct drm_device *dev = obj->base.dev;
1339         drm_i915_private_t *dev_priv = dev->dev_private;
1340         pgoff_t page_offset;
1341         unsigned long pfn;
1342         int ret = 0;
1343         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1344
1345         /* We don't use vmf->pgoff since that has the fake offset */
1346         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1347                 PAGE_SHIFT;
1348
1349         ret = i915_mutex_lock_interruptible(dev);
1350         if (ret)
1351                 goto out;
1352
1353         trace_i915_gem_object_fault(obj, page_offset, true, write);
1354
1355         /* Access to snoopable pages through the GTT is incoherent. */
1356         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1357                 ret = -EINVAL;
1358                 goto unlock;
1359         }
1360
1361         /* Now bind it into the GTT if needed */
1362         ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
1363         if (ret)
1364                 goto unlock;
1365
1366         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1367         if (ret)
1368                 goto unpin;
1369
1370         ret = i915_gem_object_get_fence(obj);
1371         if (ret)
1372                 goto unpin;
1373
1374         obj->fault_mappable = true;
1375
1376         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1377         pfn >>= PAGE_SHIFT;
1378         pfn += page_offset;
1379
1380         /* Finally, remap it using the new GTT offset */
1381         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1382 unpin:
1383         i915_gem_object_unpin(obj);
1384 unlock:
1385         mutex_unlock(&dev->struct_mutex);
1386 out:
1387         switch (ret) {
1388         case -EIO:
1389                 /* If this -EIO is due to a gpu hang, give the reset code a
1390                  * chance to clean up the mess. Otherwise return the proper
1391                  * SIGBUS. */
1392                 if (i915_terminally_wedged(&dev_priv->gpu_error))
1393                         return VM_FAULT_SIGBUS;
1394         case -EAGAIN:
1395                 /*
1396                  * EAGAIN means the gpu is hung and we'll wait for the error
1397                  * handler to reset everything when re-faulting in
1398                  * i915_mutex_lock_interruptible.
1399                  */
1400         case 0:
1401         case -ERESTARTSYS:
1402         case -EINTR:
1403         case -EBUSY:
1404                 /*
1405                  * EBUSY is ok: this just means that another thread
1406                  * already did the job.
1407                  */
1408                 return VM_FAULT_NOPAGE;
1409         case -ENOMEM:
1410                 return VM_FAULT_OOM;
1411         case -ENOSPC:
1412                 return VM_FAULT_SIGBUS;
1413         default:
1414                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1415                 return VM_FAULT_SIGBUS;
1416         }
1417 }
1418
1419 /**
1420  * i915_gem_release_mmap - remove physical page mappings
1421  * @obj: obj in question
1422  *
1423  * Preserve the reservation of the mmapping with the DRM core code, but
1424  * relinquish ownership of the pages back to the system.
1425  *
1426  * It is vital that we remove the page mapping if we have mapped a tiled
1427  * object through the GTT and then lose the fence register due to
1428  * resource pressure. Similarly if the object has been moved out of the
 1429  * aperture, then pages mapped into userspace must be revoked. Removing the
1430  * mapping will then trigger a page fault on the next user access, allowing
1431  * fixup by i915_gem_fault().
1432  */
1433 void
1434 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1435 {
1436         if (!obj->fault_mappable)
1437                 return;
1438
1439         drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1440         obj->fault_mappable = false;
1441 }
1442
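/*
 * Worked example for the fence sizing below: a 300KiB tiled object
 * needs no extra padding on gen4+, but must occupy a power-of-two
 * fence region on older chips, i.e. 512KiB on gen2 and 1MiB on gen3.
 */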
1443 uint32_t
1444 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1445 {
1446         uint32_t gtt_size;
1447
1448         if (INTEL_INFO(dev)->gen >= 4 ||
1449             tiling_mode == I915_TILING_NONE)
1450                 return size;
1451
1452         /* Previous chips need a power-of-two fence region when tiling */
1453         if (INTEL_INFO(dev)->gen == 3)
1454                 gtt_size = 1024*1024;
1455         else
1456                 gtt_size = 512*1024;
1457
1458         while (gtt_size < size)
1459                 gtt_size <<= 1;
1460
1461         return gtt_size;
1462 }
1463
1464 /**
1465  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1466  * @obj: object to check
1467  *
1468  * Return the required GTT alignment for an object, taking into account
1469  * potential fence register mapping.
1470  */
1471 uint32_t
1472 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1473                            int tiling_mode, bool fenced)
1474 {
1475         /*
1476          * Minimum alignment is 4k (GTT page size), but might be greater
1477          * if a fence register is needed for the object.
1478          */
1479         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1480             tiling_mode == I915_TILING_NONE)
1481                 return 4096;
1482
1483         /*
1484          * Previous chips need to be aligned to the size of the smallest
1485          * fence register that can contain the object.
1486          */
1487         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1488 }
1489
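/* Allocate the fake mmap offset for this object if it does not already
 * have one. mmap offsets must remain stable for the lifetime of the
 * object, so on -ENOSPC we try to free up address space by purging
 * purgeable objects and, as a last resort, shrinking everything before
 * retrying the allocation.
 */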
1490 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1491 {
1492         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1493         int ret;
1494
1495         if (drm_vma_node_has_offset(&obj->base.vma_node))
1496                 return 0;
1497
1498         dev_priv->mm.shrinker_no_lock_stealing = true;
1499
1500         ret = drm_gem_create_mmap_offset(&obj->base);
1501         if (ret != -ENOSPC)
1502                 goto out;
1503
1504         /* Badly fragmented mmap space? The only way we can recover
1505          * space is by destroying unwanted objects. We can't randomly release
1506          * mmap_offsets as userspace expects them to be persistent for the
1507          * lifetime of the objects. The closest we can do is to release the
1508          * offsets on purgeable objects by truncating them and marking them purged,
1509          * which prevents userspace from ever using that object again.
1510          */
1511         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1512         ret = drm_gem_create_mmap_offset(&obj->base);
1513         if (ret != -ENOSPC)
1514                 goto out;
1515
1516         i915_gem_shrink_all(dev_priv);
1517         ret = drm_gem_create_mmap_offset(&obj->base);
1518 out:
1519         dev_priv->mm.shrinker_no_lock_stealing = false;
1520
1521         return ret;
1522 }
1523
1524 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1525 {
1526         drm_gem_free_mmap_offset(&obj->base);
1527 }
1528
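/* Look up the object for @handle, check that it is small enough to fit
 * in the mappable aperture and has not been marked purgeable, make sure
 * it has a fake mmap offset and return that offset to the caller.
 */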
1529 int
1530 i915_gem_mmap_gtt(struct drm_file *file,
1531                   struct drm_device *dev,
1532                   uint32_t handle,
1533                   uint64_t *offset)
1534 {
1535         struct drm_i915_private *dev_priv = dev->dev_private;
1536         struct drm_i915_gem_object *obj;
1537         int ret;
1538
1539         ret = i915_mutex_lock_interruptible(dev);
1540         if (ret)
1541                 return ret;
1542
1543         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1544         if (&obj->base == NULL) {
1545                 ret = -ENOENT;
1546                 goto unlock;
1547         }
1548
1549         if (obj->base.size > dev_priv->gtt.mappable_end) {
1550                 ret = -E2BIG;
1551                 goto out;
1552         }
1553
1554         if (obj->madv != I915_MADV_WILLNEED) {
1555                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1556                 ret = -EINVAL;
1557                 goto out;
1558         }
1559
1560         ret = i915_gem_object_create_mmap_offset(obj);
1561         if (ret)
1562                 goto out;
1563
1564         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1565
1566 out:
1567         drm_gem_object_unreference(&obj->base);
1568 unlock:
1569         mutex_unlock(&dev->struct_mutex);
1570         return ret;
1571 }
1572
1573 /**
1574  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1575  * @dev: DRM device
1576  * @data: GTT mapping ioctl data
1577  * @file: GEM object info
1578  *
1579  * Simply returns the fake offset to userspace so it can mmap it.
1580  * The mmap call will end up in drm_gem_mmap(), which will set things
1581  * up so we can get faults in the handler above.
1582  *
1583  * The fault handler will take care of binding the object into the GTT
1584  * (since it may have been evicted to make room for something), allocating
1585  * a fence register, and mapping the appropriate aperture address into
1586  * userspace.
1587  */
1588 int
1589 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1590                         struct drm_file *file)
1591 {
1592         struct drm_i915_gem_mmap_gtt *args = data;
1593
1594         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1595 }
1596
1597 /* Immediately discard the backing storage */
1598 static void
1599 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1600 {
1601         struct inode *inode;
1602
1603         i915_gem_object_free_mmap_offset(obj);
1604
1605         if (obj->base.filp == NULL)
1606                 return;
1607
1608         /* Our goal here is to return as much of the memory as possible
1609          * back to the system, as we are called from the OOM path.
1610          * To do this we must instruct the shmfs to drop all of its
1611          * backing pages, *now*.
1612          */
1613         inode = file_inode(obj->base.filp);
1614         shmem_truncate_range(inode, 0, (loff_t)-1);
1615
1616         obj->madv = __I915_MADV_PURGED;
1617 }
1618
1619 static inline int
1620 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1621 {
1622         return obj->madv == I915_MADV_DONTNEED;
1623 }
1624
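/* Release the shmemfs pages backing this object: move it to the CPU
 * domain (clflushing as a fallback if that fails), preserve the bit-17
 * swizzle state where required, write back any dirty pages and drop our
 * page references so they can be reclaimed.
 */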
1625 static void
1626 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1627 {
1628         struct sg_page_iter sg_iter;
1629         int ret;
1630
1631         BUG_ON(obj->madv == __I915_MADV_PURGED);
1632
1633         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1634         if (ret) {
1635                 /* In the event of a disaster, abandon all caches and
1636                  * hope for the best.
1637                  */
1638                 WARN_ON(ret != -EIO);
1639                 i915_gem_clflush_object(obj, true);
1640                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1641         }
1642
1643         if (i915_gem_object_needs_bit17_swizzle(obj))
1644                 i915_gem_object_save_bit_17_swizzle(obj);
1645
1646         if (obj->madv == I915_MADV_DONTNEED)
1647                 obj->dirty = 0;
1648
1649         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1650                 struct page *page = sg_page_iter_page(&sg_iter);
1651
1652                 if (obj->dirty)
1653                         set_page_dirty(page);
1654
1655                 if (obj->madv == I915_MADV_WILLNEED)
1656                         mark_page_accessed(page);
1657
1658                 page_cache_release(page);
1659         }
1660         obj->dirty = 0;
1661
1662         sg_free_table(obj->pages);
1663         kfree(obj->pages);
1664 }
1665
1666 int
1667 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1668 {
1669         const struct drm_i915_gem_object_ops *ops = obj->ops;
1670
1671         if (obj->pages == NULL)
1672                 return 0;
1673
1674         if (obj->pages_pin_count)
1675                 return -EBUSY;
1676
1677         BUG_ON(i915_gem_obj_bound_any(obj));
1678
1679         /* ->put_pages might need to allocate memory for the bit17 swizzle
1680          * array, hence protect them from being reaped by removing them from gtt
1681          * lists early. */
1682         list_del(&obj->global_list);
1683
1684         ops->put_pages(obj);
1685         obj->pages = NULL;
1686
1687         if (i915_gem_object_is_purgeable(obj))
1688                 i915_gem_object_truncate(obj);
1689
1690         return 0;
1691 }
1692
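/* Try to release up to @target pages. Unbound objects are reaped first;
 * if that is not enough, bound objects are unbound one at a time and
 * their pages released as well. With @purgeable_only set, only objects
 * marked I915_MADV_DONTNEED are touched. Returns the number of pages
 * actually released.
 */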
1693 static long
1694 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1695                   bool purgeable_only)
1696 {
1697         struct list_head still_bound_list;
1698         struct drm_i915_gem_object *obj, *next;
1699         long count = 0;
1700
1701         list_for_each_entry_safe(obj, next,
1702                                  &dev_priv->mm.unbound_list,
1703                                  global_list) {
1704                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1705                     i915_gem_object_put_pages(obj) == 0) {
1706                         count += obj->base.size >> PAGE_SHIFT;
1707                         if (count >= target)
1708                                 return count;
1709                 }
1710         }
1711
1712         /*
1713          * As we may completely rewrite the bound list whilst unbinding
1714          * (due to retiring requests) we have to strictly process only
1715          * one element of the list at a time, and recheck the list
1716          * on every iteration.
1717          */
1718         INIT_LIST_HEAD(&still_bound_list);
1719         while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1720                 struct i915_vma *vma, *v;
1721
1722                 obj = list_first_entry(&dev_priv->mm.bound_list,
1723                                        typeof(*obj), global_list);
1724                 list_move_tail(&obj->global_list, &still_bound_list);
1725
1726                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1727                         continue;
1728
1729                 /*
1730                  * Hold a reference whilst we unbind this object, as we may
1731                  * end up waiting for and retiring requests. This might
1732                  * release the final reference (held by the active list)
1733                  * and result in the object being freed from under us.
1735                  *
1736                  * Note 1: Shrinking the bound list is special since only active
1737                  * (and hence bound objects) can contain such limbo objects, so
1738                  * we don't need special tricks for shrinking the unbound list.
1739                  * The only other place where we have to be careful with active
1740                  * objects suddenly disappearing due to retiring requests is the
1741                  * eviction code.
1742                  *
1743                  * Note 2: Even though the bound list doesn't hold a reference
1744                  * to the object we can safely grab one here: The final object
1745                  * unreferencing and the bound_list are both protected by the
1746                  * dev->struct_mutex and so we won't ever be able to observe an
1747                  * object on the bound_list with a reference count equals 0.
1748                  * object on the bound_list with a reference count of 0.
1749                 drm_gem_object_reference(&obj->base);
1750
1751                 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1752                         if (i915_vma_unbind(vma))
1753                                 break;
1754
1755                 if (i915_gem_object_put_pages(obj) == 0)
1756                         count += obj->base.size >> PAGE_SHIFT;
1757
1758                 drm_gem_object_unreference(&obj->base);
1759         }
1760         list_splice(&still_bound_list, &dev_priv->mm.bound_list);
1761
1762         return count;
1763 }
1764
1765 static long
1766 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1767 {
1768         return __i915_gem_shrink(dev_priv, target, true);
1769 }
1770
1771 static long
1772 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1773 {
1774         struct drm_i915_gem_object *obj, *next;
1775         long freed = 0;
1776
1777         i915_gem_evict_everything(dev_priv->dev);
1778
1779         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1780                                  global_list) {
1781                 if (obj->pages_pin_count == 0)
1782                         freed += obj->base.size >> PAGE_SHIFT;
1783                 i915_gem_object_put_pages(obj);
1784         }
1785         return freed;
1786 }
1787
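/* Populate obj->pages with backing pages from shmemfs. Allocation is
 * first attempted with lightweight gfp flags; on failure we reclaim our
 * own buffers (purge, then shrink everything) and finally retry with the
 * full gfp mask, letting the VM go after other memory if it must.
 * Physically contiguous pages are coalesced into larger scatterlist
 * entries unless swiotlb is in use.
 */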
1788 static int
1789 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1790 {
1791         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1792         int page_count, i;
1793         struct address_space *mapping;
1794         struct sg_table *st;
1795         struct scatterlist *sg;
1796         struct sg_page_iter sg_iter;
1797         struct page *page;
1798         unsigned long last_pfn = 0;     /* suppress gcc warning */
1799         gfp_t gfp;
1800
1801         /* Assert that the object is not currently in any GPU domain. As it
1802          * wasn't in the GTT, there shouldn't be any way it could have been in
1803          * a GPU cache
1804          */
1805         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1806         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1807
1808         st = kmalloc(sizeof(*st), GFP_KERNEL);
1809         if (st == NULL)
1810                 return -ENOMEM;
1811
1812         page_count = obj->base.size / PAGE_SIZE;
1813         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1814                 kfree(st);
1815                 return -ENOMEM;
1816         }
1817
1818         /* Get the list of pages out of our struct file.  They'll be pinned
1819          * at this point until we release them.
1820          *
1821          * Fail silently without starting the shrinker
1822          */
1823         mapping = file_inode(obj->base.filp)->i_mapping;
1824         gfp = mapping_gfp_mask(mapping);
1825         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1826         gfp &= ~(__GFP_IO | __GFP_WAIT);
1827         sg = st->sgl;
1828         st->nents = 0;
1829         for (i = 0; i < page_count; i++) {
1830                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1831                 if (IS_ERR(page)) {
1832                         i915_gem_purge(dev_priv, page_count);
1833                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1834                 }
1835                 if (IS_ERR(page)) {
1836                         /* We've tried hard to allocate the memory by reaping
1837                          * our own buffer, now let the real VM do its job and
1838                          * go down in flames if truly OOM.
1839                          */
1840                         gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1841                         gfp |= __GFP_IO | __GFP_WAIT;
1842
1843                         i915_gem_shrink_all(dev_priv);
1844                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1845                         if (IS_ERR(page))
1846                                 goto err_pages;
1847
1848                         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1849                         gfp &= ~(__GFP_IO | __GFP_WAIT);
1850                 }
1851 #ifdef CONFIG_SWIOTLB
1852                 if (swiotlb_nr_tbl()) {
1853                         st->nents++;
1854                         sg_set_page(sg, page, PAGE_SIZE, 0);
1855                         sg = sg_next(sg);
1856                         continue;
1857                 }
1858 #endif
1859                 if (!i || page_to_pfn(page) != last_pfn + 1) {
1860                         if (i)
1861                                 sg = sg_next(sg);
1862                         st->nents++;
1863                         sg_set_page(sg, page, PAGE_SIZE, 0);
1864                 } else {
1865                         sg->length += PAGE_SIZE;
1866                 }
1867                 last_pfn = page_to_pfn(page);
1868         }
1869 #ifdef CONFIG_SWIOTLB
1870         if (!swiotlb_nr_tbl())
1871 #endif
1872                 sg_mark_end(sg);
1873         obj->pages = st;
1874
1875         if (i915_gem_object_needs_bit17_swizzle(obj))
1876                 i915_gem_object_do_bit_17_swizzle(obj);
1877
1878         return 0;
1879
1880 err_pages:
1881         sg_mark_end(sg);
1882         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1883                 page_cache_release(sg_page_iter_page(&sg_iter));
1884         sg_free_table(st);
1885         kfree(st);
1886         return PTR_ERR(page);
1887 }
1888
1889 /* Ensure that the associated pages are gathered from the backing storage
1890  * and pinned into our object. i915_gem_object_get_pages() may be called
1891  * multiple times before they are released by a single call to
1892  * i915_gem_object_put_pages() - once the pages are no longer referenced
1893  * either as a result of memory pressure (reaping pages under the shrinker)
1894  * or as the object is itself released.
1895  */
1896 int
1897 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1898 {
1899         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1900         const struct drm_i915_gem_object_ops *ops = obj->ops;
1901         int ret;
1902
1903         if (obj->pages)
1904                 return 0;
1905
1906         if (obj->madv != I915_MADV_WILLNEED) {
1907                 DRM_ERROR("Attempting to obtain a purgeable object\n");
1908                 return -EINVAL;
1909         }
1910
1911         BUG_ON(obj->pages_pin_count);
1912
1913         ret = ops->get_pages(obj);
1914         if (ret)
1915                 return ret;
1916
1917         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1918         return 0;
1919 }
1920
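/* Mark the object as in use by @ring: take a reference the first time it
 * becomes active, move it onto the ring's active list and record the
 * seqnos that must pass before the object (and any fence it is using)
 * becomes idle again.
 */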
1921 void
1922 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1923                                struct intel_ring_buffer *ring)
1924 {
1925         struct drm_device *dev = obj->base.dev;
1926         struct drm_i915_private *dev_priv = dev->dev_private;
1927         u32 seqno = intel_ring_get_seqno(ring);
1928
1929         BUG_ON(ring == NULL);
1930         if (obj->ring != ring && obj->last_write_seqno) {
1931                 /* Keep the seqno relative to the current ring */
1932                 obj->last_write_seqno = seqno;
1933         }
1934         obj->ring = ring;
1935
1936         /* Add a reference if we're newly entering the active list. */
1937         if (!obj->active) {
1938                 drm_gem_object_reference(&obj->base);
1939                 obj->active = 1;
1940         }
1941
1942         list_move_tail(&obj->ring_list, &ring->active_list);
1943
1944         obj->last_read_seqno = seqno;
1945
1946         if (obj->fenced_gpu_access) {
1947                 obj->last_fenced_seqno = seqno;
1948
1949                 /* Bump MRU to take account of the delayed flush */
1950                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1951                         struct drm_i915_fence_reg *reg;
1952
1953                         reg = &dev_priv->fence_regs[obj->fence_reg];
1954                         list_move_tail(&reg->lru_list,
1955                                        &dev_priv->mm.fence_list);
1956                 }
1957         }
1958 }
1959
1960 static void
1961 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1962 {
1963         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1964         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1965         struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1966
1967         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1968         BUG_ON(!obj->active);
1969
1970         list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
1971
1972         list_del_init(&obj->ring_list);
1973         obj->ring = NULL;
1974
1975         obj->last_read_seqno = 0;
1976         obj->last_write_seqno = 0;
1977         obj->base.write_domain = 0;
1978
1979         obj->last_fenced_seqno = 0;
1980         obj->fenced_gpu_access = false;
1981
1982         obj->active = 0;
1983         drm_gem_object_unreference(&obj->base);
1984
1985         WARN_ON(i915_verify_lists(dev));
1986 }
1987
1988 static int
1989 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1990 {
1991         struct drm_i915_private *dev_priv = dev->dev_private;
1992         struct intel_ring_buffer *ring;
1993         int ret, i, j;
1994
1995         /* Carefully retire all requests without writing to the rings */
1996         for_each_ring(ring, dev_priv, i) {
1997                 ret = intel_ring_idle(ring);
1998                 if (ret)
1999                         return ret;
2000         }
2001         i915_gem_retire_requests(dev);
2002
2003         /* Finally reset hw state */
2004         for_each_ring(ring, dev_priv, i) {
2005                 intel_ring_init_seqno(ring, seqno);
2006
2007                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2008                         ring->sync_seqno[j] = 0;
2009         }
2010
2011         return 0;
2012 }
2013
2014 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2015 {
2016         struct drm_i915_private *dev_priv = dev->dev_private;
2017         int ret;
2018
2019         if (seqno == 0)
2020                 return -EINVAL;
2021
2022         /* The seqno recorded in the HWS page needs to be set to one less
2023          * than the value we will inject into the ring.
2024          */
2025         ret = i915_gem_init_seqno(dev, seqno - 1);
2026         if (ret)
2027                 return ret;
2028
2029         /* Carefully set the last_seqno value so that wrap
2030          * detection still works
2031          */
2032         dev_priv->next_seqno = seqno;
2033         dev_priv->last_seqno = seqno - 1;
2034         if (dev_priv->last_seqno == 0)
2035                 dev_priv->last_seqno--;
2036
2037         return 0;
2038 }
2039
2040 int
2041 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2042 {
2043         struct drm_i915_private *dev_priv = dev->dev_private;
2044
2045         /* reserve 0 for non-seqno */
2046         if (dev_priv->next_seqno == 0) {
2047                 int ret = i915_gem_init_seqno(dev, 0);
2048                 if (ret)
2049                         return ret;
2050
2051                 dev_priv->next_seqno = 1;
2052         }
2053
2054         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2055         return 0;
2056 }
2057
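/* Queue a new request on @ring: flush any outstanding GPU caches, record
 * the ring positions that bracket the work, emit the request via
 * ring->add_request() and book-keep it against the submitting file and
 * context so it can be retired (or blamed for a hang) later on.
 */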
2058 int __i915_add_request(struct intel_ring_buffer *ring,
2059                        struct drm_file *file,
2060                        struct drm_i915_gem_object *obj,
2061                        u32 *out_seqno)
2062 {
2063         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2064         struct drm_i915_gem_request *request;
2065         u32 request_ring_position, request_start;
2066         int was_empty;
2067         int ret;
2068
2069         request_start = intel_ring_get_tail(ring);
2070         /*
2071          * Emit any outstanding flushes - execbuf can fail to emit the flush
2072          * after having emitted the batchbuffer command. Hence we need to fix
2073          * things up similar to emitting the lazy request. The difference here
2074          * is that the flush _must_ happen before the next request, no matter
2075          * what.
2076          */
2077         ret = intel_ring_flush_all_caches(ring);
2078         if (ret)
2079                 return ret;
2080
2081         request = kmalloc(sizeof(*request), GFP_KERNEL);
2082         if (request == NULL)
2083                 return -ENOMEM;
2084
2086         /* Record the position of the start of the request so that
2087          * should we detect the updated seqno part-way through the
2088          * GPU processing the request, we never over-estimate the
2089          * position of the head.
2090          */
2091         request_ring_position = intel_ring_get_tail(ring);
2092
2093         ret = ring->add_request(ring);
2094         if (ret) {
2095                 kfree(request);
2096                 return ret;
2097         }
2098
2099         request->seqno = intel_ring_get_seqno(ring);
2100         request->ring = ring;
2101         request->head = request_start;
2102         request->tail = request_ring_position;
2103         request->ctx = ring->last_context;
2104         request->batch_obj = obj;
2105
2106         /* Whilst this request exists, batch_obj will be on the
2107          * active_list, and so will hold the active reference. Only when this
2108          * request is retired will the batch_obj be moved onto the
2109          * inactive_list and lose its active reference. Hence we do not need
2110          * to explicitly hold another reference here.
2111          */
2112
2113         if (request->ctx)
2114                 i915_gem_context_reference(request->ctx);
2115
2116         request->emitted_jiffies = jiffies;
2117         was_empty = list_empty(&ring->request_list);
2118         list_add_tail(&request->list, &ring->request_list);
2119         request->file_priv = NULL;
2120
2121         if (file) {
2122                 struct drm_i915_file_private *file_priv = file->driver_priv;
2123
2124                 spin_lock(&file_priv->mm.lock);
2125                 request->file_priv = file_priv;
2126                 list_add_tail(&request->client_list,
2127                               &file_priv->mm.request_list);
2128                 spin_unlock(&file_priv->mm.lock);
2129         }
2130
2131         trace_i915_gem_request_add(ring, request->seqno);
2132         ring->outstanding_lazy_request = 0;
2133
2134         if (!dev_priv->ums.mm_suspended) {
2135                 i915_queue_hangcheck(ring->dev);
2136
2137                 if (was_empty) {
2138                         queue_delayed_work(dev_priv->wq,
2139                                            &dev_priv->mm.retire_work,
2140                                            round_jiffies_up_relative(HZ));
2141                         intel_mark_busy(dev_priv->dev);
2142                 }
2143         }
2144
2145         if (out_seqno)
2146                 *out_seqno = request->seqno;
2147         return 0;
2148 }
2149
2150 static inline void
2151 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2152 {
2153         struct drm_i915_file_private *file_priv = request->file_priv;
2154
2155         if (!file_priv)
2156                 return;
2157
2158         spin_lock(&file_priv->mm.lock);
2159         if (request->file_priv) {
2160                 list_del(&request->client_list);
2161                 request->file_priv = NULL;
2162         }
2163         spin_unlock(&file_priv->mm.lock);
2164 }
2165
2166 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2167                                     struct i915_address_space *vm)
2168 {
2169         if (acthd >= i915_gem_obj_offset(obj, vm) &&
2170             acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2171                 return true;
2172
2173         return false;
2174 }
2175
2176 static bool i915_head_inside_request(const u32 acthd_unmasked,
2177                                      const u32 request_start,
2178                                      const u32 request_end)
2179 {
2180         const u32 acthd = acthd_unmasked & HEAD_ADDR;
2181
2182         if (request_start < request_end) {
2183                 if (acthd >= request_start && acthd < request_end)
2184                         return true;
2185         } else if (request_start > request_end) {
2186                 if (acthd >= request_start || acthd < request_end)
2187                         return true;
2188         }
2189
2190         return false;
2191 }
2192
2193 static struct i915_address_space *
2194 request_to_vm(struct drm_i915_gem_request *request)
2195 {
2196         struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2197         struct i915_address_space *vm;
2198
2199         vm = &dev_priv->gtt.base;
2200
2201         return vm;
2202 }
2203
2204 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2205                                 const u32 acthd, bool *inside)
2206 {
2207         /* There is a possibility that the unmasked head address, while
2208          * pointing inside the ring, also matches the batch_obj address range.
2209          * However, this is extremely unlikely.
2210          */
2211         if (request->batch_obj) {
2212                 if (i915_head_inside_object(acthd, request->batch_obj,
2213                                             request_to_vm(request))) {
2214                         *inside = true;
2215                         return true;
2216                 }
2217         }
2218
2219         if (i915_head_inside_request(acthd, request->head, request->tail)) {
2220                 *inside = false;
2221                 return true;
2222         }
2223
2224         return false;
2225 }
2226
2227 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2228                                   struct drm_i915_gem_request *request,
2229                                   u32 acthd)
2230 {
2231         struct i915_ctx_hang_stats *hs = NULL;
2232         bool inside, guilty;
2233         unsigned long offset = 0;
2234
2235         /* Innocent until proven guilty */
2236         guilty = false;
2237
2238         if (request->batch_obj)
2239                 offset = i915_gem_obj_offset(request->batch_obj,
2240                                              request_to_vm(request));
2241
2242         if (ring->hangcheck.action != HANGCHECK_WAIT &&
2243             i915_request_guilty(request, acthd, &inside)) {
2244                 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2245                           ring->name,
2246                           inside ? "inside" : "flushing",
2247                           offset,
2248                           request->ctx ? request->ctx->id : 0,
2249                           acthd);
2250
2251                 guilty = true;
2252         }
2253
2254         /* If contexts are disabled or this is the default context, use
2255          * the per-file hang statistics in file_priv->hang_stats.
2256          */
2257         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2258                 hs = &request->ctx->hang_stats;
2259         else if (request->file_priv)
2260                 hs = &request->file_priv->hang_stats;
2261
2262         if (hs) {
2263                 if (guilty)
2264                         hs->batch_active++;
2265                 else
2266                         hs->batch_pending++;
2267         }
2268 }
2269
2270 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2271 {
2272         list_del(&request->list);
2273         i915_gem_request_remove_from_client(request);
2274
2275         if (request->ctx)
2276                 i915_gem_context_unreference(request->ctx);
2277
2278         kfree(request);
2279 }
2280
2281 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2282                                       struct intel_ring_buffer *ring)
2283 {
2284         u32 completed_seqno;
2285         u32 acthd;
2286
2287         acthd = intel_ring_get_active_head(ring);
2288         completed_seqno = ring->get_seqno(ring, false);
2289
2290         while (!list_empty(&ring->request_list)) {
2291                 struct drm_i915_gem_request *request;
2292
2293                 request = list_first_entry(&ring->request_list,
2294                                            struct drm_i915_gem_request,
2295                                            list);
2296
2297                 if (request->seqno > completed_seqno)
2298                         i915_set_reset_status(ring, request, acthd);
2299
2300                 i915_gem_free_request(request);
2301         }
2302
2303         while (!list_empty(&ring->active_list)) {
2304                 struct drm_i915_gem_object *obj;
2305
2306                 obj = list_first_entry(&ring->active_list,
2307                                        struct drm_i915_gem_object,
2308                                        ring_list);
2309
2310                 i915_gem_object_move_to_inactive(obj);
2311         }
2312 }
2313
2314 void i915_gem_restore_fences(struct drm_device *dev)
2315 {
2316         struct drm_i915_private *dev_priv = dev->dev_private;
2317         int i;
2318
2319         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2320                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2321
2322                 /*
2323                  * Commit delayed tiling changes if we have an object still
2324                  * attached to the fence, otherwise just clear the fence.
2325                  */
2326                 if (reg->obj) {
2327                         i915_gem_object_update_fence(reg->obj, reg,
2328                                                      reg->obj->tiling_mode);
2329                 } else {
2330                         i915_gem_write_fence(dev, i, NULL);
2331                 }
2332         }
2333 }
2334
2335 void i915_gem_reset(struct drm_device *dev)
2336 {
2337         struct drm_i915_private *dev_priv = dev->dev_private;
2338         struct intel_ring_buffer *ring;
2339         int i;
2340
2341         for_each_ring(ring, dev_priv, i)
2342                 i915_gem_reset_ring_lists(dev_priv, ring);
2343
2344         i915_gem_restore_fences(dev);
2345 }
2346
2347 /**
2348  * This function clears the request list as sequence numbers are passed.
2349  */
2350 void
2351 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2352 {
2353         uint32_t seqno;
2354
2355         if (list_empty(&ring->request_list))
2356                 return;
2357
2358         WARN_ON(i915_verify_lists(ring->dev));
2359
2360         seqno = ring->get_seqno(ring, true);
2361
2362         while (!list_empty(&ring->request_list)) {
2363                 struct drm_i915_gem_request *request;
2364
2365                 request = list_first_entry(&ring->request_list,
2366                                            struct drm_i915_gem_request,
2367                                            list);
2368
2369                 if (!i915_seqno_passed(seqno, request->seqno))
2370                         break;
2371
2372                 trace_i915_gem_request_retire(ring, request->seqno);
2373                 /* We know the GPU must have read the request to have
2374                  * sent us the seqno + interrupt, so use the position
2375                  * of tail of the request to update the last known position
2376                  * of the GPU head.
2377                  */
2378                 ring->last_retired_head = request->tail;
2379
2380                 i915_gem_free_request(request);
2381         }
2382
2383         /* Move any buffers on the active list that are no longer referenced
2384          * by the ringbuffer to the flushing/inactive lists as appropriate.
2385          */
2386         while (!list_empty(&ring->active_list)) {
2387                 struct drm_i915_gem_object *obj;
2388
2389                 obj = list_first_entry(&ring->active_list,
2390                                       struct drm_i915_gem_object,
2391                                       ring_list);
2392
2393                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2394                         break;
2395
2396                 i915_gem_object_move_to_inactive(obj);
2397         }
2398
2399         if (unlikely(ring->trace_irq_seqno &&
2400                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2401                 ring->irq_put(ring);
2402                 ring->trace_irq_seqno = 0;
2403         }
2404
2405         WARN_ON(i915_verify_lists(ring->dev));
2406 }
2407
2408 void
2409 i915_gem_retire_requests(struct drm_device *dev)
2410 {
2411         drm_i915_private_t *dev_priv = dev->dev_private;
2412         struct intel_ring_buffer *ring;
2413         int i;
2414
2415         for_each_ring(ring, dev_priv, i)
2416                 i915_gem_retire_requests_ring(ring);
2417 }
2418
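/* Periodic work that retires completed requests. If struct_mutex is
 * contended we simply reschedule ourselves. Otherwise we retire what we
 * can, flush any dirty ring caches by adding an empty request, and either
 * re-arm the work or mark the GPU as idle.
 */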
2419 static void
2420 i915_gem_retire_work_handler(struct work_struct *work)
2421 {
2422         drm_i915_private_t *dev_priv;
2423         struct drm_device *dev;
2424         struct intel_ring_buffer *ring;
2425         bool idle;
2426         int i;
2427
2428         dev_priv = container_of(work, drm_i915_private_t,
2429                                 mm.retire_work.work);
2430         dev = dev_priv->dev;
2431
2432         /* Come back later if the device is busy... */
2433         if (!mutex_trylock(&dev->struct_mutex)) {
2434                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2435                                    round_jiffies_up_relative(HZ));
2436                 return;
2437         }
2438
2439         i915_gem_retire_requests(dev);
2440
2441         /* Send a periodic flush down the ring so we don't hold onto GEM
2442          * objects indefinitely.
2443          */
2444         idle = true;
2445         for_each_ring(ring, dev_priv, i) {
2446                 if (ring->gpu_caches_dirty)
2447                         i915_add_request(ring, NULL);
2448
2449                 idle &= list_empty(&ring->request_list);
2450         }
2451
2452         if (!dev_priv->ums.mm_suspended && !idle)
2453                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2454                                    round_jiffies_up_relative(HZ));
2455         if (idle)
2456                 intel_mark_idle(dev);
2457
2458         mutex_unlock(&dev->struct_mutex);
2459 }
2460
2461 /**
2462  * Ensures that an object will eventually get non-busy by flushing any required
2463  * write domains, emitting any outstanding lazy request and retiring and
2464  * completed requests.
2465  */
2466 static int
2467 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2468 {
2469         int ret;
2470
2471         if (obj->active) {
2472                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2473                 if (ret)
2474                         return ret;
2475
2476                 i915_gem_retire_requests_ring(obj->ring);
2477         }
2478
2479         return 0;
2480 }
2481
2482 /**
2483  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2484  * @DRM_IOCTL_ARGS: standard ioctl arguments
2485  *
2486  * Returns 0 if successful, else an error is returned with the remaining time in
2487  * the timeout parameter.
2488  *  -ETIME: object is still busy after timeout
2489  *  -ERESTARTSYS: signal interrupted the wait
2490  *  -ENOENT: object doesn't exist
2491  * Also possible, but rare:
2492  *  -EAGAIN: GPU wedged
2493  *  -ENOMEM: damn
2494  *  -ENODEV: Internal IRQ fail
2495  *  -E?: The add request failed
2496  *
2497  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2498  * non-zero timeout parameter the wait ioctl will wait for the given number of
2499  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2500  * without holding struct_mutex the object may become re-busied before this
2501  * function completes. A similar but shorter race condition exists in the busy
2502  * ioctl.
2503  */
2504 int
2505 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2506 {
2507         drm_i915_private_t *dev_priv = dev->dev_private;
2508         struct drm_i915_gem_wait *args = data;
2509         struct drm_i915_gem_object *obj;
2510         struct intel_ring_buffer *ring = NULL;
2511         struct timespec timeout_stack, *timeout = NULL;
2512         unsigned reset_counter;
2513         u32 seqno = 0;
2514         int ret = 0;
2515
2516         if (args->timeout_ns >= 0) {
2517                 timeout_stack = ns_to_timespec(args->timeout_ns);
2518                 timeout = &timeout_stack;
2519         }
2520
2521         ret = i915_mutex_lock_interruptible(dev);
2522         if (ret)
2523                 return ret;
2524
2525         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2526         if (&obj->base == NULL) {
2527                 mutex_unlock(&dev->struct_mutex);
2528                 return -ENOENT;
2529         }
2530
2531         /* Need to make sure the object gets inactive eventually. */
2532         ret = i915_gem_object_flush_active(obj);
2533         if (ret)
2534                 goto out;
2535
2536         if (obj->active) {
2537                 seqno = obj->last_read_seqno;
2538                 ring = obj->ring;
2539         }
2540
2541         if (seqno == 0)
2542                  goto out;
2543
2544         /* Do this after OLR check to make sure we make forward progress polling
2545          * on this IOCTL with a 0 timeout (like busy ioctl)
2546          */
2547         if (!args->timeout_ns) {
2548                 ret = -ETIME;
2549                 goto out;
2550         }
2551
2552         drm_gem_object_unreference(&obj->base);
2553         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2554         mutex_unlock(&dev->struct_mutex);
2555
2556         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2557         if (timeout)
2558                 args->timeout_ns = timespec_to_ns(timeout);
2559         return ret;
2560
2561 out:
2562         drm_gem_object_unreference(&obj->base);
2563         mutex_unlock(&dev->struct_mutex);
2564         return ret;
2565 }
2566
2567 /**
2568  * i915_gem_object_sync - sync an object to a ring.
2569  *
2570  * @obj: object which may be in use on another ring.
2571  * @to: ring we wish to use the object on. May be NULL.
2572  *
2573  * This code is meant to abstract object synchronization with the GPU.
2574  * Calling with NULL implies synchronizing the object with the CPU
2575  * rather than a particular GPU ring.
2576  *
2577  * Returns 0 if successful, else propagates up the lower layer error.
2578  */
2579 int
2580 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2581                      struct intel_ring_buffer *to)
2582 {
2583         struct intel_ring_buffer *from = obj->ring;
2584         u32 seqno;
2585         int ret, idx;
2586
2587         if (from == NULL || to == from)
2588                 return 0;
2589
2590         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2591                 return i915_gem_object_wait_rendering(obj, false);
2592
2593         idx = intel_ring_sync_index(from, to);
2594
2595         seqno = obj->last_read_seqno;
2596         if (seqno <= from->sync_seqno[idx])
2597                 return 0;
2598
2599         ret = i915_gem_check_olr(obj->ring, seqno);
2600         if (ret)
2601                 return ret;
2602
2603         ret = to->sync_to(to, from, seqno);
2604         if (!ret)
2605                 /* We use last_read_seqno because sync_to()
2606                  * might have just caused seqno wrap under
2607                  * the radar.
2608                  */
2609                 from->sync_seqno[idx] = obj->last_read_seqno;
2610
2611         return ret;
2612 }
2613
2614 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2615 {
2616         u32 old_write_domain, old_read_domains;
2617
2618         /* Force a pagefault for domain tracking on next user access */
2619         i915_gem_release_mmap(obj);
2620
2621         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2622                 return;
2623
2624         /* Wait for any direct GTT access to complete */
2625         mb();
2626
2627         old_read_domains = obj->base.read_domains;
2628         old_write_domain = obj->base.write_domain;
2629
2630         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2631         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2632
2633         trace_i915_gem_object_change_domain(obj,
2634                                             old_read_domains,
2635                                             old_write_domain);
2636 }
2637
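/* Tear down a single VMA: wait for outstanding rendering, flush GTT
 * access and drop any fence register, remove the global and/or aliasing
 * ppgtt PTEs and release the drm_mm node. Pinned objects cannot be
 * unbound and return -EBUSY. Once the last VMA is gone the object moves
 * to the unbound list.
 */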
2638 int i915_vma_unbind(struct i915_vma *vma)
2639 {
2640         struct drm_i915_gem_object *obj = vma->obj;
2641         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2642         int ret;
2643
2644         if (list_empty(&vma->vma_link))
2645                 return 0;
2646
2647         if (!drm_mm_node_allocated(&vma->node))
2648                 goto destroy;
2649
2650         if (obj->pin_count)
2651                 return -EBUSY;
2652
2653         BUG_ON(obj->pages == NULL);
2654
2655         ret = i915_gem_object_finish_gpu(obj);
2656         if (ret)
2657                 return ret;
2658         /* Continue on if we fail due to EIO, the GPU is hung so we
2659          * should be safe and we need to cleanup or else we might
2660          * cause memory corruption through use-after-free.
2661          */
2662
2663         i915_gem_object_finish_gtt(obj);
2664
2665         /* release the fence reg _after_ flushing */
2666         ret = i915_gem_object_put_fence(obj);
2667         if (ret)
2668                 return ret;
2669
2670         trace_i915_vma_unbind(vma);
2671
2672         if (obj->has_global_gtt_mapping)
2673                 i915_gem_gtt_unbind_object(obj);
2674         if (obj->has_aliasing_ppgtt_mapping) {
2675                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2676                 obj->has_aliasing_ppgtt_mapping = 0;
2677         }
2678         i915_gem_gtt_finish_object(obj);
2679         i915_gem_object_unpin_pages(obj);
2680
2681         list_del(&vma->mm_list);
2682         /* Avoid an unnecessary call to unbind on rebind. */
2683         if (i915_is_ggtt(vma->vm))
2684                 obj->map_and_fenceable = true;
2685
2686         drm_mm_remove_node(&vma->node);
2687
2688 destroy:
2689         i915_gem_vma_destroy(vma);
2690
2691         /* Since the unbound list is global, only move to that list if
2692          * no more VMAs exist.
2693          * NB: Until we have real VMAs there will only ever be one */
2694         WARN_ON(!list_empty(&obj->vma_list));
2695         if (list_empty(&obj->vma_list))
2696                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2697
2698         return 0;
2699 }
2700
2701 /**
2702  * Unbinds an object from the global GTT aperture.
2703  */
2704 int
2705 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2706 {
2707         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2708         struct i915_address_space *ggtt = &dev_priv->gtt.base;
2709
2710         if (!i915_gem_obj_ggtt_bound(obj))
2711                 return 0;
2712
2713         if (obj->pin_count)
2714                 return -EBUSY;
2715
2716         BUG_ON(obj->pages == NULL);
2717
2718         return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2719 }
2720
2721 int i915_gpu_idle(struct drm_device *dev)
2722 {
2723         drm_i915_private_t *dev_priv = dev->dev_private;
2724         struct intel_ring_buffer *ring;
2725         int ret, i;
2726
2727         /* Flush everything onto the inactive list. */
2728         for_each_ring(ring, dev_priv, i) {
2729                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2730                 if (ret)
2731                         return ret;
2732
2733                 ret = intel_ring_idle(ring);
2734                 if (ret)
2735                         return ret;
2736         }
2737
2738         return 0;
2739 }
2740
2741 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2742                                  struct drm_i915_gem_object *obj)
2743 {
2744         drm_i915_private_t *dev_priv = dev->dev_private;
2745         int fence_reg;
2746         int fence_pitch_shift;
2747
2748         if (INTEL_INFO(dev)->gen >= 6) {
2749                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2750                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2751         } else {
2752                 fence_reg = FENCE_REG_965_0;
2753                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2754         }
2755
2756         fence_reg += reg * 8;
2757
2758         /* To w/a incoherency with non-atomic 64-bit register updates,
2759          * we split the 64-bit update into two 32-bit writes. In order
2760          * for a partial fence not to be evaluated between writes, we
2761          * precede the update with write to turn off the fence register,
2762          * and only enable the fence as the last step.
2763          *
2764          * For extra levels of paranoia, we make sure each step lands
2765          * before applying the next step.
2766          */
2767         I915_WRITE(fence_reg, 0);
2768         POSTING_READ(fence_reg);
2769
2770         if (obj) {
2771                 u32 size = i915_gem_obj_ggtt_size(obj);
2772                 uint64_t val;
2773
2774                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2775                                  0xfffff000) << 32;
2776                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2777                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2778                 if (obj->tiling_mode == I915_TILING_Y)
2779                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2780                 val |= I965_FENCE_REG_VALID;
2781
2782                 I915_WRITE(fence_reg + 4, val >> 32);
2783                 POSTING_READ(fence_reg + 4);
2784
2785                 I915_WRITE(fence_reg + 0, val);
2786                 POSTING_READ(fence_reg);
2787         } else {
2788                 I915_WRITE(fence_reg + 4, 0);
2789                 POSTING_READ(fence_reg + 4);
2790         }
2791 }
2792
2793 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2794                                  struct drm_i915_gem_object *obj)
2795 {
2796         drm_i915_private_t *dev_priv = dev->dev_private;
2797         u32 val;
2798
2799         if (obj) {
2800                 u32 size = i915_gem_obj_ggtt_size(obj);
2801                 int pitch_val;
2802                 int tile_width;
2803
2804                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2805                      (size & -size) != size ||
2806                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2807                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2808                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2809
2810                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2811                         tile_width = 128;
2812                 else
2813                         tile_width = 512;
2814
2815                 /* Note: pitch better be a power of two tile widths */
2816                 pitch_val = obj->stride / tile_width;
2817                 pitch_val = ffs(pitch_val) - 1;
2818
2819                 val = i915_gem_obj_ggtt_offset(obj);
2820                 if (obj->tiling_mode == I915_TILING_Y)
2821                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2822                 val |= I915_FENCE_SIZE_BITS(size);
2823                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2824                 val |= I830_FENCE_REG_VALID;
2825         } else
2826                 val = 0;
2827
2828         if (reg < 8)
2829                 reg = FENCE_REG_830_0 + reg * 4;
2830         else
2831                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2832
2833         I915_WRITE(reg, val);
2834         POSTING_READ(reg);
2835 }
2836
2837 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2838                                 struct drm_i915_gem_object *obj)
2839 {
2840         drm_i915_private_t *dev_priv = dev->dev_private;
2841         uint32_t val;
2842
2843         if (obj) {
2844                 u32 size = i915_gem_obj_ggtt_size(obj);
2845                 uint32_t pitch_val;
2846
2847                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2848                      (size & -size) != size ||
2849                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2850                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2851                      i915_gem_obj_ggtt_offset(obj), size);
2852
2853                 pitch_val = obj->stride / 128;
2854                 pitch_val = ffs(pitch_val) - 1;
2855
2856                 val = i915_gem_obj_ggtt_offset(obj);
2857                 if (obj->tiling_mode == I915_TILING_Y)
2858                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2859                 val |= I830_FENCE_SIZE_BITS(size);
2860                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2861                 val |= I830_FENCE_REG_VALID;
2862         } else
2863                 val = 0;
2864
2865         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2866         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2867 }
2868
2869 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2870 {
2871         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2872 }
2873
2874 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2875                                  struct drm_i915_gem_object *obj)
2876 {
2877         struct drm_i915_private *dev_priv = dev->dev_private;
2878
2879         /* Ensure that all CPU reads are completed before installing a fence
2880          * and all writes before removing the fence.
2881          */
2882         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2883                 mb();
2884
2885         WARN(obj && (!obj->stride || !obj->tiling_mode),
2886              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2887              obj->stride, obj->tiling_mode);
2888
2889         switch (INTEL_INFO(dev)->gen) {
2890         case 7:
2891         case 6:
2892         case 5:
2893         case 4: i965_write_fence_reg(dev, reg, obj); break;
2894         case 3: i915_write_fence_reg(dev, reg, obj); break;
2895         case 2: i830_write_fence_reg(dev, reg, obj); break;
2896         default: BUG();
2897         }
2898
2899         /* And similarly be paranoid that no direct access to this region
2900          * is reordered to before the fence is installed.
2901          */
2902         if (i915_gem_object_needs_mb(obj))
2903                 mb();
2904 }
2905
2906 static inline int fence_number(struct drm_i915_private *dev_priv,
2907                                struct drm_i915_fence_reg *fence)
2908 {
2909         return fence - dev_priv->fence_regs;
2910 }
2911
2912 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2913                                          struct drm_i915_fence_reg *fence,
2914                                          bool enable)
2915 {
2916         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2917         int reg = fence_number(dev_priv, fence);
2918
2919         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2920
2921         if (enable) {
2922                 obj->fence_reg = reg;
2923                 fence->obj = obj;
2924                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2925         } else {
2926                 obj->fence_reg = I915_FENCE_REG_NONE;
2927                 fence->obj = NULL;
2928                 list_del_init(&fence->lru_list);
2929         }
2930         obj->fence_dirty = false;
2931 }
2932
2933 static int
2934 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2935 {
2936         if (obj->last_fenced_seqno) {
2937                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2938                 if (ret)
2939                         return ret;
2940
2941                 obj->last_fenced_seqno = 0;
2942         }
2943
2944         obj->fenced_gpu_access = false;
2945         return 0;
2946 }
2947
2948 int
2949 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2950 {
2951         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2952         struct drm_i915_fence_reg *fence;
2953         int ret;
2954
2955         ret = i915_gem_object_wait_fence(obj);
2956         if (ret)
2957                 return ret;
2958
2959         if (obj->fence_reg == I915_FENCE_REG_NONE)
2960                 return 0;
2961
2962         fence = &dev_priv->fence_regs[obj->fence_reg];
2963
2964         i915_gem_object_fence_lost(obj);
2965         i915_gem_object_update_fence(obj, fence, false);
2966
2967         return 0;
2968 }
2969
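/* Pick a fence register for reuse: prefer a register with no object
 * attached, otherwise fall back to the least-recently-used register that
 * is not pinned. Returns NULL if every register is pinned.
 */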
2970 static struct drm_i915_fence_reg *
2971 i915_find_fence_reg(struct drm_device *dev)
2972 {
2973         struct drm_i915_private *dev_priv = dev->dev_private;
2974         struct drm_i915_fence_reg *reg, *avail;
2975         int i;
2976
2977         /* First try to find a free reg */
2978         avail = NULL;
2979         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2980                 reg = &dev_priv->fence_regs[i];
2981                 if (!reg->obj)
2982                         return reg;
2983
2984                 if (!reg->pin_count)
2985                         avail = reg;
2986         }
2987
2988         if (avail == NULL)
2989                 return NULL;
2990
2991         /* None available, try to steal one or wait for a user to finish */
2992         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2993                 if (reg->pin_count)
2994                         continue;
2995
2996                 return reg;
2997         }
2998
2999         return NULL;
3000 }
3001
3002 /**
3003  * i915_gem_object_get_fence - set up fencing for an object
3004  * @obj: object to map through a fence reg
3005  *
3006  * When mapping objects through the GTT, userspace wants to be able to write
3007  * to them without having to worry about swizzling if the object is tiled.
3008  * This function walks the fence regs looking for a free one for @obj,
3009  * stealing one if it can't find any.
3010  *
3011  * It then sets up the reg based on the object's properties: address, pitch
3012  * and tiling format.
3013  *
3014  * For an untiled surface, this removes any existing fence.
3015  */
3016 int
3017 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3018 {
3019         struct drm_device *dev = obj->base.dev;
3020         struct drm_i915_private *dev_priv = dev->dev_private;
3021         bool enable = obj->tiling_mode != I915_TILING_NONE;
3022         struct drm_i915_fence_reg *reg;
3023         int ret;
3024
3025         /* Have we updated the tiling parameters on the object such that we
3026          * will need to serialise the write to the associated fence register?
3027          */
3028         if (obj->fence_dirty) {
3029                 ret = i915_gem_object_wait_fence(obj);
3030                 if (ret)
3031                         return ret;
3032         }
3033
3034         /* Just update our place in the LRU if our fence is getting reused. */
3035         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3036                 reg = &dev_priv->fence_regs[obj->fence_reg];
3037                 if (!obj->fence_dirty) {
3038                         list_move_tail(&reg->lru_list,
3039                                        &dev_priv->mm.fence_list);
3040                         return 0;
3041                 }
3042         } else if (enable) {
3043                 reg = i915_find_fence_reg(dev);
3044                 if (reg == NULL)
3045                         return -EDEADLK;
3046
3047                 if (reg->obj) {
3048                         struct drm_i915_gem_object *old = reg->obj;
3049
3050                         ret = i915_gem_object_wait_fence(old);
3051                         if (ret)
3052                                 return ret;
3053
3054                         i915_gem_object_fence_lost(old);
3055                 }
3056         } else
3057                 return 0;
3058
3059         i915_gem_object_update_fence(obj, reg, enable);
3060
3061         return 0;
3062 }
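
/*
 * Illustrative sketch of a typical caller (roughly the pattern used by the
 * GTT fault path): pin the object into the mappable aperture first, then ask
 * for a fence before allowing tiled CPU access through the GTT:
 *
 *      ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 *      if (ret == 0)
 *              ret = i915_gem_object_get_fence(obj);
 */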
3063
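/*
 * Check whether placing a node with the given cache level (colour) at
 * @gtt_space is safe: on non-LLC hardware its neighbours must either share
 * the colour or be separated from it by a hole.
 */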
3064 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3065                                      struct drm_mm_node *gtt_space,
3066                                      unsigned long cache_level)
3067 {
3068         struct drm_mm_node *other;
3069
3070         /* On non-LLC machines we have to be careful when putting differing
3071          * types of snoopable memory together to avoid the prefetcher
3072          * crossing memory domains and dying.
3073          */
3074         if (HAS_LLC(dev))
3075                 return true;
3076
3077         if (!drm_mm_node_allocated(gtt_space))
3078                 return true;
3079
3080         if (list_empty(&gtt_space->node_list))
3081                 return true;
3082
3083         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3084         if (other->allocated && !other->hole_follows && other->color != cache_level)
3085                 return false;
3086
3087         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3088         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3089                 return false;
3090
3091         return true;
3092 }
3093
3094 static void i915_gem_verify_gtt(struct drm_device *dev)
3095 {
3096 #if WATCH_GTT
3097         struct drm_i915_private *dev_priv = dev->dev_private;
3098         struct drm_i915_gem_object *obj;
3099         int err = 0;
3100
3101         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3102                 if (obj->gtt_space == NULL) {
3103                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3104                         err++;
3105                         continue;
3106                 }
3107
3108                 if (obj->cache_level != obj->gtt_space->color) {
3109                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3110                                i915_gem_obj_ggtt_offset(obj),
3111                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3112                                obj->cache_level,
3113                                obj->gtt_space->color);
3114                         err++;
3115                         continue;
3116                 }
3117
3118                 if (!i915_gem_valid_gtt_space(dev,
3119                                               obj->gtt_space,
3120                                               obj->cache_level)) {
3121                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3122                                i915_gem_obj_ggtt_offset(obj),
3123                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3124                                obj->cache_level);
3125                         err++;
3126                         continue;
3127                 }
3128         }
3129
3130         WARN_ON(err);
3131 #endif
3132 }
3133
3134 /**
3135  * Finds free space in the GTT aperture and binds the object there.
3136  */
3137 static int
3138 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3139                            struct i915_address_space *vm,
3140                            unsigned alignment,
3141                            bool map_and_fenceable,
3142                            bool nonblocking)
3143 {
3144         struct drm_device *dev = obj->base.dev;
3145         drm_i915_private_t *dev_priv = dev->dev_private;
3146         u32 size, fence_size, fence_alignment, unfenced_alignment;
3147         size_t gtt_max =
3148                 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3149         struct i915_vma *vma;
3150         int ret;
3151
3152         fence_size = i915_gem_get_gtt_size(dev,
3153                                            obj->base.size,
3154                                            obj->tiling_mode);
3155         fence_alignment = i915_gem_get_gtt_alignment(dev,
3156                                                      obj->base.size,
3157                                                      obj->tiling_mode, true);
3158         unfenced_alignment =
3159                 i915_gem_get_gtt_alignment(dev,
3160                                            obj->base.size,
3161                                            obj->tiling_mode, false);
3162
3163         if (alignment == 0)
3164                 alignment = map_and_fenceable ? fence_alignment :
3165                                                 unfenced_alignment;
3166         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3167                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3168                 return -EINVAL;
3169         }
3170
3171         size = map_and_fenceable ? fence_size : obj->base.size;
3172
3173         /* If the object is bigger than the entire aperture, reject it early
3174          * before evicting everything in a vain attempt to find space.
3175          */
3176         if (obj->base.size > gtt_max) {
3177                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3178                           obj->base.size,
3179                           map_and_fenceable ? "mappable" : "total",
3180                           gtt_max);
3181                 return -E2BIG;
3182         }
3183
3184         ret = i915_gem_object_get_pages(obj);
3185         if (ret)
3186                 return ret;
3187
3188         i915_gem_object_pin_pages(obj);
3189
3190         BUG_ON(!i915_is_ggtt(vm));
3191
3192         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3193         if (IS_ERR(vma)) {
3194                 ret = PTR_ERR(vma);
3195                 goto err_unpin;
3196         }
3197
3198         /* For now we only ever use 1 vma per object */
3199         WARN_ON(!list_is_singular(&obj->vma_list));
3200
3201 search_free:
3202         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3203                                                   size, alignment,
3204                                                   obj->cache_level, 0, gtt_max,
3205                                                   DRM_MM_SEARCH_DEFAULT);
3206         if (ret) {
3207                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3208                                                obj->cache_level,
3209                                                map_and_fenceable,
3210                                                nonblocking);
3211                 if (ret == 0)
3212                         goto search_free;
3213
3214                 goto err_free_vma;
3215         }
3216         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3217                                               obj->cache_level))) {
3218                 ret = -EINVAL;
3219                 goto err_remove_node;
3220         }
3221
3222         ret = i915_gem_gtt_prepare_object(obj);
3223         if (ret)
3224                 goto err_remove_node;
3225
3226         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3227         list_add_tail(&vma->mm_list, &vm->inactive_list);
3228
3229         if (i915_is_ggtt(vm)) {
3230                 bool mappable, fenceable;
3231
3232                 fenceable = (vma->node.size == fence_size &&
3233                              (vma->node.start & (fence_alignment - 1)) == 0);
3234
3235                 mappable = (vma->node.start + obj->base.size <=
3236                             dev_priv->gtt.mappable_end);
3237
3238                 obj->map_and_fenceable = mappable && fenceable;
3239         }
3240
3241         WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3242
3243         trace_i915_vma_bind(vma, map_and_fenceable);
3244         i915_gem_verify_gtt(dev);
3245         return 0;
3246
3247 err_remove_node:
3248         drm_mm_remove_node(&vma->node);
3249 err_free_vma:
3250         i915_gem_vma_destroy(vma);
3251 err_unpin:
3252         i915_gem_object_unpin_pages(obj);
3253         return ret;
3254 }
3255
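/*
 * Flush the CPU caches for the object's backing pages. Returns true if the
 * cachelines were actually clflushed, in which case the caller should follow
 * up with i915_gem_chipset_flush(); returns false when the flush can be
 * skipped (no pages, stolen memory, or an already-coherent cache level).
 */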
3256 bool
3257 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3258                         bool force)
3259 {
3260         /* If we don't have a page list set up, then we're not pinned
3261          * to the GPU, and we can ignore the cache flush because it'll happen
3262          * again at bind time.
3263          */
3264         if (obj->pages == NULL)
3265                 return false;
3266
3267         /*
3268          * Stolen memory is always coherent with the GPU as it is explicitly
3269          * marked as wc by the system, or the system is cache-coherent.
3270          */
3271         if (obj->stolen)
3272                 return false;
3273
3274         /* If the GPU is snooping the contents of the CPU cache,
3275          * we do not need to manually clear the CPU cache lines.  However,
3276          * the caches are only snooped when the render cache is
3277          * flushed/invalidated.  As we always have to emit invalidations
3278          * and flushes when moving into and out of the RENDER domain, correct
3279          * snooping behaviour occurs naturally as the result of our domain
3280          * tracking.
3281          */
3282         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3283                 return false;
3284
3285         trace_i915_gem_object_clflush(obj);
3286         drm_clflush_sg(obj->pages);
3287
3288         return true;
3289 }
3290
3291 /** Flushes the GTT write domain for the object if it's dirty. */
3292 static void
3293 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3294 {
3295         uint32_t old_write_domain;
3296
3297         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3298                 return;
3299
3300         /* No actual flushing is required for the GTT write domain.  Writes
3301          * to it immediately go to main memory as far as we know, so there's
3302          * no chipset flush.  It also doesn't land in render cache.
3303          *
3304          * However, we do have to enforce the order so that all writes through
3305          * the GTT land before any writes to the device, such as updates to
3306          * the GATT itself.
3307          */
3308         wmb();
3309
3310         old_write_domain = obj->base.write_domain;
3311         obj->base.write_domain = 0;
3312
3313         trace_i915_gem_object_change_domain(obj,
3314                                             obj->base.read_domains,
3315                                             old_write_domain);
3316 }
3317
3318 /** Flushes the CPU write domain for the object if it's dirty. */
3319 static void
3320 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3321                                        bool force)
3322 {
3323         uint32_t old_write_domain;
3324
3325         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3326                 return;
3327
3328         if (i915_gem_clflush_object(obj, force))
3329                 i915_gem_chipset_flush(obj->base.dev);
3330
3331         old_write_domain = obj->base.write_domain;
3332         obj->base.write_domain = 0;
3333
3334         trace_i915_gem_object_change_domain(obj,
3335                                             obj->base.read_domains,
3336                                             old_write_domain);
3337 }
3338
3339 /**
3340  * Moves a single object to the GTT read, and possibly write domain.
3341  *
3342  * This function returns when the move is complete, including waiting on
3343  * flushes to occur.
3344  */
3345 int
3346 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3347 {
3348         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3349         uint32_t old_write_domain, old_read_domains;
3350         int ret;
3351
3352         /* Not valid to be called on unbound objects. */
3353         if (!i915_gem_obj_bound_any(obj))
3354                 return -EINVAL;
3355
3356         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3357                 return 0;
3358
3359         ret = i915_gem_object_wait_rendering(obj, !write);
3360         if (ret)
3361                 return ret;
3362
3363         i915_gem_object_flush_cpu_write_domain(obj, false);
3364
3365         /* Serialise direct access to this object with the barriers for
3366          * coherent writes from the GPU, by effectively invalidating the
3367          * GTT domain upon first access.
3368          */
3369         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3370                 mb();
3371
3372         old_write_domain = obj->base.write_domain;
3373         old_read_domains = obj->base.read_domains;
3374
3375         /* It should now be out of any other write domains, and we can update
3376          * the domain values for our changes.
3377          */
3378         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3379         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3380         if (write) {
3381                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3382                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3383                 obj->dirty = 1;
3384         }
3385
3386         trace_i915_gem_object_change_domain(obj,
3387                                             old_read_domains,
3388                                             old_write_domain);
3389
3390         /* And bump the LRU for this access */
3391         if (i915_gem_object_is_inactive(obj)) {
3392                 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3393                                                            &dev_priv->gtt.base);
3394                 if (vma)
3395                         list_move_tail(&vma->mm_list,
3396                                        &dev_priv->gtt.base.inactive_list);
3397
3398         }
3399
3400         return 0;
3401 }
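
/*
 * Illustrative sketch of a caller that wants coherent CPU writes through the
 * GTT (roughly the shape of the pwrite-through-GTT path earlier in this file):
 *
 *      ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
 *      if (ret == 0)
 *              ret = i915_gem_object_set_to_gtt_domain(obj, true);
 */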
3402
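/*
 * Change the cache level (and hence the GTT colouring) of an object. Any VMA
 * whose placement would become invalid for the new level is unbound, the
 * global GTT and aliasing PPGTT mappings are rewritten with the new PTE
 * attributes, and on pre-gen6 hardware any fence is relinquished since fences
 * cannot be used with snooped memory there. Called with struct_mutex held.
 */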
3403 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3404                                     enum i915_cache_level cache_level)
3405 {
3406         struct drm_device *dev = obj->base.dev;
3407         drm_i915_private_t *dev_priv = dev->dev_private;
3408         struct i915_vma *vma;
3409         int ret;
3410
3411         if (obj->cache_level == cache_level)
3412                 return 0;
3413
3414         if (obj->pin_count) {
3415                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3416                 return -EBUSY;
3417         }
3418
3419         list_for_each_entry(vma, &obj->vma_list, vma_link) {
3420                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3421                         ret = i915_vma_unbind(vma);
3422                         if (ret)
3423                                 return ret;
3424
3425                         break;
3426                 }
3427         }
3428
3429         if (i915_gem_obj_bound_any(obj)) {
3430                 ret = i915_gem_object_finish_gpu(obj);
3431                 if (ret)
3432                         return ret;
3433
3434                 i915_gem_object_finish_gtt(obj);
3435
3436                 /* Before SandyBridge, you could not use tiling or fence
3437                  * registers with snooped memory, so relinquish any fences
3438                  * currently pointing to our region in the aperture.
3439                  */
3440                 if (INTEL_INFO(dev)->gen < 6) {
3441                         ret = i915_gem_object_put_fence(obj);
3442                         if (ret)
3443                                 return ret;
3444                 }
3445
3446                 if (obj->has_global_gtt_mapping)
3447                         i915_gem_gtt_bind_object(obj, cache_level);
3448                 if (obj->has_aliasing_ppgtt_mapping)
3449                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3450                                                obj, cache_level);
3451         }
3452
3453         list_for_each_entry(vma, &obj->vma_list, vma_link)
3454                 vma->node.color = cache_level;
3455         obj->cache_level = cache_level;
3456
3457         if (cpu_write_needs_clflush(obj)) {
3458                 u32 old_read_domains, old_write_domain;
3459
3460                 /* If we're coming from LLC cached, then we haven't
3461                  * actually been tracking whether the data is in the
3462                  * CPU cache or not, since we only allow one bit set
3463                  * in obj->write_domain and have been skipping the clflushes.
3464                  * Just set it to the CPU cache for now.
3465                  */
3466                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3467
3468                 old_read_domains = obj->base.read_domains;
3469                 old_write_domain = obj->base.write_domain;
3470
3471                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3472                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3473
3474                 trace_i915_gem_object_change_domain(obj,
3475                                                     old_read_domains,
3476                                                     old_write_domain);
3477         }
3478
3479         i915_gem_verify_gtt(dev);
3480         return 0;
3481 }
3482
3483 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3484                                struct drm_file *file)
3485 {
3486         struct drm_i915_gem_caching *args = data;
3487         struct drm_i915_gem_object *obj;
3488         int ret;
3489
3490         ret = i915_mutex_lock_interruptible(dev);
3491         if (ret)
3492                 return ret;
3493
3494         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3495         if (&obj->base == NULL) {
3496                 ret = -ENOENT;
3497                 goto unlock;
3498         }
3499
3500         switch (obj->cache_level) {
3501         case I915_CACHE_LLC:
3502         case I915_CACHE_L3_LLC:
3503                 args->caching = I915_CACHING_CACHED;
3504                 break;
3505
3506         case I915_CACHE_WT:
3507                 args->caching = I915_CACHING_DISPLAY;
3508                 break;
3509
3510         default:
3511                 args->caching = I915_CACHING_NONE;
3512                 break;
3513         }
3514
3515         drm_gem_object_unreference(&obj->base);
3516 unlock:
3517         mutex_unlock(&dev->struct_mutex);
3518         return ret;
3519 }
3520
3521 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3522                                struct drm_file *file)
3523 {
3524         struct drm_i915_gem_caching *args = data;
3525         struct drm_i915_gem_object *obj;
3526         enum i915_cache_level level;
3527         int ret;
3528
3529         switch (args->caching) {
3530         case I915_CACHING_NONE:
3531                 level = I915_CACHE_NONE;
3532                 break;
3533         case I915_CACHING_CACHED:
3534                 level = I915_CACHE_LLC;
3535                 break;
3536         case I915_CACHING_DISPLAY:
3537                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3538                 break;
3539         default:
3540                 return -EINVAL;
3541         }
3542
3543         ret = i915_mutex_lock_interruptible(dev);
3544         if (ret)
3545                 return ret;
3546
3547         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3548         if (&obj->base == NULL) {
3549                 ret = -ENOENT;
3550                 goto unlock;
3551         }
3552
3553         ret = i915_gem_object_set_cache_level(obj, level);
3554
3555         drm_gem_object_unreference(&obj->base);
3556 unlock:
3557         mutex_unlock(&dev->struct_mutex);
3558         return ret;
3559 }
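
/*
 * Illustrative sketch of the userspace side of the caching ioctls, assuming
 * libdrm's drmIoctl() wrapper (fd and handle come from the application):
 *
 *      struct drm_i915_gem_caching arg = {
 *              .handle = handle,
 *              .caching = I915_CACHING_CACHED,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */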
3560
3561 static bool is_pin_display(struct drm_i915_gem_object *obj)
3562 {
3563         /* There are 3 sources that pin objects:
3564          *   1. The display engine (scanouts, sprites, cursors);
3565          *   2. Reservations for execbuffer;
3566          *   3. The user.
3567          *
3568          * We can ignore reservations as we hold the struct_mutex and
3569          * are only called outside of the reservation path.  The user
3570          * can only increment pin_count once, and so if after
3571          * subtracting the potential reference by the user, any pin_count
3572          * remains, it must be due to another use by the display engine.
3573          */
3574         return obj->pin_count - !!obj->user_pin_count;
3575 }
3576
3577 /*
3578  * Prepare buffer for display plane (scanout, cursors, etc).
3579  * Can be called from an uninterruptible phase (modesetting) and allows
3580  * any flushes to be pipelined (for pageflips).
3581  */
3582 int
3583 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3584                                      u32 alignment,
3585                                      struct intel_ring_buffer *pipelined)
3586 {
3587         u32 old_read_domains, old_write_domain;
3588         int ret;
3589
3590         if (pipelined != obj->ring) {
3591                 ret = i915_gem_object_sync(obj, pipelined);
3592                 if (ret)
3593                         return ret;
3594         }
3595
3596         /* Mark the pin_display early so that we account for the
3597          * display coherency whilst setting up the cache domains.
3598          */
3599         obj->pin_display = true;
3600
3601         /* The display engine is not coherent with the LLC cache on gen6.  As
3602          * a result, we make sure that the pinning that is about to occur is
3603          * done with uncached PTEs. This is lowest common denominator for all
3604          * chipsets.
3605          *
3606          * However for gen6+, we could do better by using the GFDT bit instead
3607          * of uncaching, which would allow us to flush all the LLC-cached data
3608          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3609          */
3610         ret = i915_gem_object_set_cache_level(obj,
3611                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3612         if (ret)
3613                 goto err_unpin_display;
3614
3615         /* As the user may map the buffer once pinned in the display plane
3616          * (e.g. libkms for the bootup splash), we have to ensure that we
3617          * always use map_and_fenceable for all scanout buffers.
3618          */
3619         ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3620         if (ret)
3621                 goto err_unpin_display;
3622
3623         i915_gem_object_flush_cpu_write_domain(obj, true);
3624
3625         old_write_domain = obj->base.write_domain;
3626         old_read_domains = obj->base.read_domains;
3627
3628         /* It should now be out of any other write domains, and we can update
3629          * the domain values for our changes.
3630          */
3631         obj->base.write_domain = 0;
3632         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3633
3634         trace_i915_gem_object_change_domain(obj,
3635                                             old_read_domains,
3636                                             old_write_domain);
3637
3638         return 0;
3639
3640 err_unpin_display:
3641         obj->pin_display = is_pin_display(obj);
3642         return ret;
3643 }
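
/*
 * Illustrative pairing, roughly what the modesetting code does when pinning
 * a framebuffer for scanout and releasing it again:
 *
 *      ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *      if (ret == 0) {
 *              ... scan out from the object ...
 *              i915_gem_object_unpin_from_display_plane(obj);
 *      }
 */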
3644
3645 void
3646 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3647 {
3648         i915_gem_object_unpin(obj);
3649         obj->pin_display = is_pin_display(obj);
3650 }
3651
3652 int
3653 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3654 {
3655         int ret;
3656
3657         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3658                 return 0;
3659
3660         ret = i915_gem_object_wait_rendering(obj, false);
3661         if (ret)
3662                 return ret;
3663
3664         /* Ensure that we invalidate the GPU's caches and TLBs. */
3665         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3666         return 0;
3667 }
3668
3669 /**
3670  * Moves a single object to the CPU read, and possibly write domain.
3671  *
3672  * This function returns when the move is complete, including waiting on
3673  * flushes to occur.
3674  */
3675 int
3676 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3677 {
3678         uint32_t old_write_domain, old_read_domains;
3679         int ret;
3680
3681         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3682                 return 0;
3683
3684         ret = i915_gem_object_wait_rendering(obj, !write);
3685         if (ret)
3686                 return ret;
3687
3688         i915_gem_object_flush_gtt_write_domain(obj);
3689
3690         old_write_domain = obj->base.write_domain;
3691         old_read_domains = obj->base.read_domains;
3692
3693         /* Flush the CPU cache if it's still invalid. */
3694         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3695                 i915_gem_clflush_object(obj, false);
3696
3697                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3698         }
3699
3700         /* It should now be out of any other write domains, and we can update
3701          * the domain values for our changes.
3702          */
3703         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3704
3705         /* If we're writing through the CPU, then the GPU read domains will
3706          * need to be invalidated at next use.
3707          */
3708         if (write) {
3709                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3710                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3711         }
3712
3713         trace_i915_gem_object_change_domain(obj,
3714                                             old_read_domains,
3715                                             old_write_domain);
3716
3717         return 0;
3718 }
3719
3720 /* Throttle our rendering by waiting until the ring has completed our requests
3721  * emitted over 20 msec ago.
3722  *
3723  * Note that if we were to use the current jiffies each time around the loop,
3724  * we wouldn't escape the function with any frames outstanding if the time to
3725  * render a frame was over 20ms.
3726  *
3727  * This should get us reasonable parallelism between CPU and GPU but also
3728  * relatively low latency when blocking on a particular request to finish.
3729  */
3730 static int
3731 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3732 {
3733         struct drm_i915_private *dev_priv = dev->dev_private;
3734         struct drm_i915_file_private *file_priv = file->driver_priv;
3735         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3736         struct drm_i915_gem_request *request;
3737         struct intel_ring_buffer *ring = NULL;
3738         unsigned reset_counter;
3739         u32 seqno = 0;
3740         int ret;
3741
3742         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3743         if (ret)
3744                 return ret;
3745
3746         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3747         if (ret)
3748                 return ret;
3749
3750         spin_lock(&file_priv->mm.lock);
3751         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3752                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3753                         break;
3754
3755                 ring = request->ring;
3756                 seqno = request->seqno;
3757         }
3758         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3759         spin_unlock(&file_priv->mm.lock);
3760
3761         if (seqno == 0)
3762                 return 0;
3763
3764         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3765         if (ret == 0)
3766                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3767
3768         return ret;
3769 }
3770
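/*
 * Pin an object into @vm, binding it first if necessary. An existing binding
 * with an unsuitable alignment, or one that is not mappable when
 * @map_and_fenceable is requested, is unbound and redone. Every successful
 * call increments obj->pin_count and must be balanced by a call to
 * i915_gem_object_unpin(). Called with struct_mutex held.
 */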
3771 int
3772 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3773                     struct i915_address_space *vm,
3774                     uint32_t alignment,
3775                     bool map_and_fenceable,
3776                     bool nonblocking)
3777 {
3778         struct i915_vma *vma;
3779         int ret;
3780
3781         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3782                 return -EBUSY;
3783
3784         WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3785
3786         vma = i915_gem_obj_to_vma(obj, vm);
3787
3788         if (vma) {
3789                 if ((alignment &&
3790                      vma->node.start & (alignment - 1)) ||
3791                     (map_and_fenceable && !obj->map_and_fenceable)) {
3792                         WARN(obj->pin_count,
3793                              "bo is already pinned with incorrect alignment:"
3794                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3795                              " obj->map_and_fenceable=%d\n",
3796                              i915_gem_obj_offset(obj, vm), alignment,
3797                              map_and_fenceable,
3798                              obj->map_and_fenceable);
3799                         ret = i915_vma_unbind(vma);
3800                         if (ret)
3801                                 return ret;
3802                 }
3803         }
3804
3805         if (!i915_gem_obj_bound(obj, vm)) {
3806                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3807
3808                 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3809                                                  map_and_fenceable,
3810                                                  nonblocking);
3811                 if (ret)
3812                         return ret;
3813
3814                 if (!dev_priv->mm.aliasing_ppgtt)
3815                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3816         }
3817
3818         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3819                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3820
3821         obj->pin_count++;
3822         obj->pin_mappable |= map_and_fenceable;
3823
3824         return 0;
3825 }
3826
3827 void
3828 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3829 {
3830         BUG_ON(obj->pin_count == 0);
3831         BUG_ON(!i915_gem_obj_bound_any(obj));
3832
3833         if (--obj->pin_count == 0)
3834                 obj->pin_mappable = false;
3835 }
3836
3837 int
3838 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3839                    struct drm_file *file)
3840 {
3841         struct drm_i915_gem_pin *args = data;
3842         struct drm_i915_gem_object *obj;
3843         int ret;
3844
3845         ret = i915_mutex_lock_interruptible(dev);
3846         if (ret)
3847                 return ret;
3848
3849         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3850         if (&obj->base == NULL) {
3851                 ret = -ENOENT;
3852                 goto unlock;
3853         }
3854
3855         if (obj->madv != I915_MADV_WILLNEED) {
3856                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3857                 ret = -EINVAL;
3858                 goto out;
3859         }
3860
3861         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3862                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3863                           args->handle);
3864                 ret = -EINVAL;
3865                 goto out;
3866         }
3867
3868         if (obj->user_pin_count == 0) {
3869                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3870                 if (ret)
3871                         goto out;
3872         }
3873
3874         obj->user_pin_count++;
3875         obj->pin_filp = file;
3876
3877         args->offset = i915_gem_obj_ggtt_offset(obj);
3878 out:
3879         drm_gem_object_unreference(&obj->base);
3880 unlock:
3881         mutex_unlock(&dev->struct_mutex);
3882         return ret;
3883 }
3884
3885 int
3886 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3887                      struct drm_file *file)
3888 {
3889         struct drm_i915_gem_pin *args = data;
3890         struct drm_i915_gem_object *obj;
3891         int ret;
3892
3893         ret = i915_mutex_lock_interruptible(dev);
3894         if (ret)
3895                 return ret;
3896
3897         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3898         if (&obj->base == NULL) {
3899                 ret = -ENOENT;
3900                 goto unlock;
3901         }
3902
3903         if (obj->pin_filp != file) {
3904                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3905                           args->handle);
3906                 ret = -EINVAL;
3907                 goto out;
3908         }
3909         obj->user_pin_count--;
3910         if (obj->user_pin_count == 0) {
3911                 obj->pin_filp = NULL;
3912                 i915_gem_object_unpin(obj);
3913         }
3914
3915 out:
3916         drm_gem_object_unreference(&obj->base);
3917 unlock:
3918         mutex_unlock(&dev->struct_mutex);
3919         return ret;
3920 }
3921
3922 int
3923 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3924                     struct drm_file *file)
3925 {
3926         struct drm_i915_gem_busy *args = data;
3927         struct drm_i915_gem_object *obj;
3928         int ret;
3929
3930         ret = i915_mutex_lock_interruptible(dev);
3931         if (ret)
3932                 return ret;
3933
3934         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3935         if (&obj->base == NULL) {
3936                 ret = -ENOENT;
3937                 goto unlock;
3938         }
3939
3940         /* Count all active objects as busy, even if they are currently not used
3941          * by the gpu. Users of this interface expect objects to eventually
3942          * become non-busy without any further actions, therefore emit any
3943          * necessary flushes here.
3944          */
3945         ret = i915_gem_object_flush_active(obj);
3946
3947         args->busy = obj->active;
3948         if (obj->ring) {
3949                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3950                 args->busy |= intel_ring_flag(obj->ring) << 16;
3951         }
3952
3953         drm_gem_object_unreference(&obj->base);
3954 unlock:
3955         mutex_unlock(&dev->struct_mutex);
3956         return ret;
3957 }
3958
3959 int
3960 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3961                         struct drm_file *file_priv)
3962 {
3963         return i915_gem_ring_throttle(dev, file_priv);
3964 }
3965
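/*
 * Illustrative sketch of the userspace side of the madvise ioctl, assuming
 * libdrm's drmIoctl() wrapper:
 *
 *      struct drm_i915_gem_madvise arg = {
 *              .handle = handle,
 *              .madv = I915_MADV_DONTNEED,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *      if (!arg.retained)
 *              ... backing storage was discarded, re-upload before reuse ...
 */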
3966 int
3967 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3968                        struct drm_file *file_priv)
3969 {
3970         struct drm_i915_gem_madvise *args = data;
3971         struct drm_i915_gem_object *obj;
3972         int ret;
3973
3974         switch (args->madv) {
3975         case I915_MADV_DONTNEED:
3976         case I915_MADV_WILLNEED:
3977                 break;
3978         default:
3979                 return -EINVAL;
3980         }
3981
3982         ret = i915_mutex_lock_interruptible(dev);
3983         if (ret)
3984                 return ret;
3985
3986         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3987         if (&obj->base == NULL) {
3988                 ret = -ENOENT;
3989                 goto unlock;
3990         }
3991
3992         if (obj->pin_count) {
3993                 ret = -EINVAL;
3994                 goto out;
3995         }
3996
3997         if (obj->madv != __I915_MADV_PURGED)
3998                 obj->madv = args->madv;
3999
4000         /* if the object is no longer attached, discard its backing storage */
4001         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4002                 i915_gem_object_truncate(obj);
4003
4004         args->retained = obj->madv != __I915_MADV_PURGED;
4005
4006 out:
4007         drm_gem_object_unreference(&obj->base);
4008 unlock:
4009         mutex_unlock(&dev->struct_mutex);
4010         return ret;
4011 }
4012
4013 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4014                           const struct drm_i915_gem_object_ops *ops)
4015 {
4016         INIT_LIST_HEAD(&obj->global_list);
4017         INIT_LIST_HEAD(&obj->ring_list);
4018         INIT_LIST_HEAD(&obj->exec_list);
4019         INIT_LIST_HEAD(&obj->obj_exec_link);
4020         INIT_LIST_HEAD(&obj->vma_list);
4021
4022         obj->ops = ops;
4023
4024         obj->fence_reg = I915_FENCE_REG_NONE;
4025         obj->madv = I915_MADV_WILLNEED;
4026         /* Avoid an unnecessary call to unbind on the first bind. */
4027         obj->map_and_fenceable = true;
4028
4029         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4030 }
4031
4032 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4033         .get_pages = i915_gem_object_get_pages_gtt,
4034         .put_pages = i915_gem_object_put_pages_gtt,
4035 };
4036
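/*
 * Allocate a new GEM object of @size bytes backed by shmemfs, starting life
 * in the CPU read/write domains with a default cache level of LLC on LLC
 * platforms and uncached otherwise. Returns NULL on failure.
 */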
4037 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4038                                                   size_t size)
4039 {
4040         struct drm_i915_gem_object *obj;
4041         struct address_space *mapping;
4042         gfp_t mask;
4043
4044         obj = i915_gem_object_alloc(dev);
4045         if (obj == NULL)
4046                 return NULL;
4047
4048         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4049                 i915_gem_object_free(obj);
4050                 return NULL;
4051         }
4052
4053         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4054         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4055                 /* 965gm cannot relocate objects above 4GiB. */
4056                 mask &= ~__GFP_HIGHMEM;
4057                 mask |= __GFP_DMA32;
4058         }
4059
4060         mapping = file_inode(obj->base.filp)->i_mapping;
4061         mapping_set_gfp_mask(mapping, mask);
4062
4063         i915_gem_object_init(obj, &i915_gem_object_ops);
4064
4065         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4066         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4067
4068         if (HAS_LLC(dev)) {
4069                 /* On some devices, we can have the GPU use the LLC (the CPU
4070                  * cache) for about a 10% performance improvement
4071                  * compared to uncached.  Graphics requests other than
4072                  * display scanout are coherent with the CPU in
4073                  * accessing this cache.  This means in this mode we
4074                  * don't need to clflush on the CPU side, and on the
4075                  * GPU side we only need to flush internal caches to
4076                  * get data visible to the CPU.
4077                  *
4078                  * However, we maintain the display planes as UC, and so
4079                  * need to rebind when first used as such.
4080                  */
4081                 obj->cache_level = I915_CACHE_LLC;
4082         } else
4083                 obj->cache_level = I915_CACHE_NONE;
4084
4085         trace_i915_gem_object_create(obj);
4086
4087         return obj;
4088 }
4089
4090 int i915_gem_init_object(struct drm_gem_object *obj)
4091 {
4092         BUG();
4093
4094         return 0;
4095 }
4096
4097 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4098 {
4099         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4100         struct drm_device *dev = obj->base.dev;
4101         drm_i915_private_t *dev_priv = dev->dev_private;
4102         struct i915_vma *vma, *next;
4103
4104         trace_i915_gem_object_destroy(obj);
4105
4106         if (obj->phys_obj)
4107                 i915_gem_detach_phys_object(dev, obj);
4108
4109         obj->pin_count = 0;
4110         /* NB: 0 or 1 elements */
4111         WARN_ON(!list_empty(&obj->vma_list) &&
4112                 !list_is_singular(&obj->vma_list));
4113         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4114                 int ret = i915_vma_unbind(vma);
4115                 if (WARN_ON(ret == -ERESTARTSYS)) {
4116                         bool was_interruptible;
4117
4118                         was_interruptible = dev_priv->mm.interruptible;
4119                         dev_priv->mm.interruptible = false;
4120
4121                         WARN_ON(i915_vma_unbind(vma));
4122
4123                         dev_priv->mm.interruptible = was_interruptible;
4124                 }
4125         }
4126
4127         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4128          * before progressing. */
4129         if (obj->stolen)
4130                 i915_gem_object_unpin_pages(obj);
4131
4132         if (WARN_ON(obj->pages_pin_count))
4133                 obj->pages_pin_count = 0;
4134         i915_gem_object_put_pages(obj);
4135         i915_gem_object_free_mmap_offset(obj);
4136         i915_gem_object_release_stolen(obj);
4137
4138         BUG_ON(obj->pages);
4139
4140         if (obj->base.import_attach)
4141                 drm_prime_gem_destroy(&obj->base, NULL);
4142
4143         drm_gem_object_release(&obj->base);
4144         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4145
4146         kfree(obj->bit_17);
4147         i915_gem_object_free(obj);
4148 }
4149
4150 struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4151                                      struct i915_address_space *vm)
4152 {
4153         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4154         if (vma == NULL)
4155                 return ERR_PTR(-ENOMEM);
4156
4157         INIT_LIST_HEAD(&vma->vma_link);
4158         INIT_LIST_HEAD(&vma->mm_list);
4159         INIT_LIST_HEAD(&vma->exec_list);
4160         vma->vm = vm;
4161         vma->obj = obj;
4162
4163         /* Keep GGTT vmas first to make debug easier */
4164         if (i915_is_ggtt(vm))
4165                 list_add(&vma->vma_link, &obj->vma_list);
4166         else
4167                 list_add_tail(&vma->vma_link, &obj->vma_list);
4168
4169         return vma;
4170 }
4171
4172 void i915_gem_vma_destroy(struct i915_vma *vma)
4173 {
4174         WARN_ON(vma->node.allocated);
4175         list_del(&vma->vma_link);
4176         kfree(vma);
4177 }
4178
4179 int
4180 i915_gem_idle(struct drm_device *dev)
4181 {
4182         drm_i915_private_t *dev_priv = dev->dev_private;
4183         int ret;
4184
4185         if (dev_priv->ums.mm_suspended) {
4186                 mutex_unlock(&dev->struct_mutex);
4187                 return 0;
4188         }
4189
4190         ret = i915_gpu_idle(dev);
4191         if (ret) {
4192                 mutex_unlock(&dev->struct_mutex);
4193                 return ret;
4194         }
4195         i915_gem_retire_requests(dev);
4196
4197         /* Under UMS, be paranoid and evict. */
4198         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4199                 i915_gem_evict_everything(dev);
4200
4201         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4202
4203         i915_kernel_lost_context(dev);
4204         i915_gem_cleanup_ringbuffer(dev);
4205
4206         /* Cancel the retire work handler, which should be idle now. */
4207         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4208
4209         return 0;
4210 }
4211
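/*
 * Rewrite the L3 remapping registers from the saved l3_parity.remap_info,
 * with DOP clock gating temporarily disabled around the register writes.
 * Called from i915_gem_init_hw().
 */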
4212 void i915_gem_l3_remap(struct drm_device *dev)
4213 {
4214         drm_i915_private_t *dev_priv = dev->dev_private;
4215         u32 misccpctl;
4216         int i;
4217
4218         if (!HAS_L3_GPU_CACHE(dev))
4219                 return;
4220
4221         if (!dev_priv->l3_parity.remap_info)
4222                 return;
4223
4224         misccpctl = I915_READ(GEN7_MISCCPCTL);
4225         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4226         POSTING_READ(GEN7_MISCCPCTL);
4227
4228         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4229                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4230                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4231                         DRM_DEBUG("0x%x was already programmed to %x\n",
4232                                   GEN7_L3LOG_BASE + i, remap);
4233                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4234                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4235                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4236         }
4237
4238         /* Make sure all the writes land before disabling dop clock gating */
4239         POSTING_READ(GEN7_L3LOG_BASE);
4240
4241         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4242 }
4243
4244 void i915_gem_init_swizzling(struct drm_device *dev)
4245 {
4246         drm_i915_private_t *dev_priv = dev->dev_private;
4247
4248         if (INTEL_INFO(dev)->gen < 5 ||
4249             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4250                 return;
4251
4252         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4253                                  DISP_TILE_SURFACE_SWIZZLING);
4254
4255         if (IS_GEN5(dev))
4256                 return;
4257
4258         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4259         if (IS_GEN6(dev))
4260                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4261         else if (IS_GEN7(dev))
4262                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4263         else
4264                 BUG();
4265 }
4266
4267 static bool
4268 intel_enable_blt(struct drm_device *dev)
4269 {
4270         if (!HAS_BLT(dev))
4271                 return false;
4272
4273         /* The blitter was dysfunctional on early prototypes */
4274         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4275                 DRM_INFO("BLT not supported on this pre-production hardware;"
4276                          " graphics performance will be degraded.\n");
4277                 return false;
4278         }
4279
4280         return true;
4281 }
4282
4283 static int i915_gem_init_rings(struct drm_device *dev)
4284 {
4285         struct drm_i915_private *dev_priv = dev->dev_private;
4286         int ret;
4287
4288         ret = intel_init_render_ring_buffer(dev);
4289         if (ret)
4290                 return ret;
4291
4292         if (HAS_BSD(dev)) {
4293                 ret = intel_init_bsd_ring_buffer(dev);
4294                 if (ret)
4295                         goto cleanup_render_ring;
4296         }
4297
4298         if (intel_enable_blt(dev)) {
4299                 ret = intel_init_blt_ring_buffer(dev);
4300                 if (ret)
4301                         goto cleanup_bsd_ring;
4302         }
4303
4304         if (HAS_VEBOX(dev)) {
4305                 ret = intel_init_vebox_ring_buffer(dev);
4306                 if (ret)
4307                         goto cleanup_blt_ring;
4308         }
4309
4310
4311         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4312         if (ret)
4313                 goto cleanup_vebox_ring;
4314
4315         return 0;
4316
4317 cleanup_vebox_ring:
4318         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4319 cleanup_blt_ring:
4320         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4321 cleanup_bsd_ring:
4322         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4323 cleanup_render_ring:
4324         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4325
4326         return ret;
4327 }
4328
4329 int
4330 i915_gem_init_hw(struct drm_device *dev)
4331 {
4332         drm_i915_private_t *dev_priv = dev->dev_private;
4333         int ret;
4334
4335         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4336                 return -EIO;
4337
4338         if (dev_priv->ellc_size)
4339                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4340
4341         if (HAS_PCH_NOP(dev)) {
4342                 u32 temp = I915_READ(GEN7_MSG_CTL);
4343                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4344                 I915_WRITE(GEN7_MSG_CTL, temp);
4345         }
4346
4347         i915_gem_l3_remap(dev);
4348
4349         i915_gem_init_swizzling(dev);
4350
4351         ret = i915_gem_init_rings(dev);
4352         if (ret)
4353                 return ret;
4354
4355         /*
4356          * XXX: There was some w/a described somewhere suggesting loading
4357          * contexts before PPGTT.
4358          */
4359         i915_gem_context_init(dev);
4360         if (dev_priv->mm.aliasing_ppgtt) {
4361                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4362                 if (ret) {
4363                         i915_gem_cleanup_aliasing_ppgtt(dev);
4364                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4365                 }
4366         }
4367
4368         return 0;
4369 }
4370
4371 int i915_gem_init(struct drm_device *dev)
4372 {
4373         struct drm_i915_private *dev_priv = dev->dev_private;
4374         int ret;
4375
4376         mutex_lock(&dev->struct_mutex);
4377
4378         if (IS_VALLEYVIEW(dev)) {
4379                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4380                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4381                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4382                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4383         }
4384
4385         i915_gem_init_global_gtt(dev);
4386
4387         ret = i915_gem_init_hw(dev);
4388         mutex_unlock(&dev->struct_mutex);
4389         if (ret) {
4390                 i915_gem_cleanup_aliasing_ppgtt(dev);
4391                 return ret;
4392         }
4393
4394         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4395         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4396                 dev_priv->dri1.allow_batchbuffer = 1;
4397         return 0;
4398 }
4399
4400 void
4401 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4402 {
4403         drm_i915_private_t *dev_priv = dev->dev_private;
4404         struct intel_ring_buffer *ring;
4405         int i;
4406
4407         for_each_ring(ring, dev_priv, i)
4408                 intel_cleanup_ring_buffer(ring);
4409 }
4410
4411 int
4412 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4413                        struct drm_file *file_priv)
4414 {
4415         struct drm_i915_private *dev_priv = dev->dev_private;
4416         int ret;
4417
4418         if (drm_core_check_feature(dev, DRIVER_MODESET))
4419                 return 0;
4420
4421         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4422                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4423                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4424         }
4425
4426         mutex_lock(&dev->struct_mutex);
4427         dev_priv->ums.mm_suspended = 0;
4428
4429         ret = i915_gem_init_hw(dev);
4430         if (ret != 0) {
4431                 mutex_unlock(&dev->struct_mutex);
4432                 return ret;
4433         }
4434
4435         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4436         mutex_unlock(&dev->struct_mutex);
4437
4438         ret = drm_irq_install(dev);
4439         if (ret)
4440                 goto cleanup_ringbuffer;
4441
4442         return 0;
4443
4444 cleanup_ringbuffer:
4445         mutex_lock(&dev->struct_mutex);
4446         i915_gem_cleanup_ringbuffer(dev);
4447         dev_priv->ums.mm_suspended = 1;
4448         mutex_unlock(&dev->struct_mutex);
4449
4450         return ret;
4451 }
4452
4453 int
4454 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4455                        struct drm_file *file_priv)
4456 {
4457         struct drm_i915_private *dev_priv = dev->dev_private;
4458         int ret;
4459
4460         if (drm_core_check_feature(dev, DRIVER_MODESET))
4461                 return 0;
4462
4463         drm_irq_uninstall(dev);
4464
4465         mutex_lock(&dev->struct_mutex);
4466         ret = i915_gem_idle(dev);
4467
4468         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4469          * We need to replace this with a semaphore, or something.
4470          * And not confound ums.mm_suspended!
4471          */
4472         if (ret != 0)
4473                 dev_priv->ums.mm_suspended = 1;
4474         mutex_unlock(&dev->struct_mutex);
4475
4476         return ret;
4477 }
4478
4479 void
4480 i915_gem_lastclose(struct drm_device *dev)
4481 {
4482         int ret;
4483
4484         if (drm_core_check_feature(dev, DRIVER_MODESET))
4485                 return;
4486
4487         mutex_lock(&dev->struct_mutex);
4488         ret = i915_gem_idle(dev);
4489         if (ret)
4490                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4491         mutex_unlock(&dev->struct_mutex);
4492 }
4493
4494 static void
4495 init_ring_lists(struct intel_ring_buffer *ring)
4496 {
4497         INIT_LIST_HEAD(&ring->active_list);
4498         INIT_LIST_HEAD(&ring->request_list);
4499 }
4500
4501 static void i915_init_vm(struct drm_i915_private *dev_priv,
4502                          struct i915_address_space *vm)
4503 {
4504         vm->dev = dev_priv->dev;
4505         INIT_LIST_HEAD(&vm->active_list);
4506         INIT_LIST_HEAD(&vm->inactive_list);
4507         INIT_LIST_HEAD(&vm->global_link);
4508         list_add(&vm->global_link, &dev_priv->vm_list);
4509 }
4510
4511 void
4512 i915_gem_load(struct drm_device *dev)
4513 {
4514         drm_i915_private_t *dev_priv = dev->dev_private;
4515         int i;
4516
4517         dev_priv->slab =
4518                 kmem_cache_create("i915_gem_object",
4519                                   sizeof(struct drm_i915_gem_object), 0,
4520                                   SLAB_HWCACHE_ALIGN,
4521                                   NULL);
4522
4523         INIT_LIST_HEAD(&dev_priv->vm_list);
4524         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4525
4526         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4527         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4528         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4529         for (i = 0; i < I915_NUM_RINGS; i++)
4530                 init_ring_lists(&dev_priv->ring[i]);
4531         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4532                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4533         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4534                           i915_gem_retire_work_handler);
4535         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4536
4537         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4538         if (IS_GEN3(dev)) {
4539                 I915_WRITE(MI_ARB_STATE,
4540                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4541         }
4542
4543         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4544
4545         /* Old X drivers will take 0-2 for front, back, depth buffers */
4546         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4547                 dev_priv->fence_reg_start = 3;
4548
4549         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4550                 dev_priv->num_fence_regs = 32;
4551         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4552                 dev_priv->num_fence_regs = 16;
4553         else
4554                 dev_priv->num_fence_regs = 8;
4555
4556         /* Initialize fence registers to zero */
4557         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4558         i915_gem_restore_fences(dev);
4559
4560         i915_gem_detect_bit_6_swizzle(dev);
4561         init_waitqueue_head(&dev_priv->pending_flip_queue);
4562
4563         dev_priv->mm.interruptible = true;
4564
4565         dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4566         dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4567         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4568         register_shrinker(&dev_priv->mm.inactive_shrinker);
4569 }
4570
4571 /*
4572  * Create a physically contiguous memory object for this object
4573  * e.g. for cursor + overlay regs
4574  */
4575 static int i915_gem_init_phys_object(struct drm_device *dev,
4576                                      int id, int size, int align)
4577 {
4578         drm_i915_private_t *dev_priv = dev->dev_private;
4579         struct drm_i915_gem_phys_object *phys_obj;
4580         int ret;
4581
4582         if (dev_priv->mm.phys_objs[id - 1] || !size)
4583                 return 0;
4584
4585         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4586         if (!phys_obj)
4587                 return -ENOMEM;
4588
4589         phys_obj->id = id;
4590
4591         phys_obj->handle = drm_pci_alloc(dev, size, align);
4592         if (!phys_obj->handle) {
4593                 ret = -ENOMEM;
4594                 goto kfree_obj;
4595         }
4596 #ifdef CONFIG_X86
4597         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4598 #endif
4599
4600         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4601
4602         return 0;
4603 kfree_obj:
4604         kfree(phys_obj);
4605         return ret;
4606 }
4607
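/* Free a physical object slot, detaching any GEM object still bound to it. */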
4608 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4609 {
4610         drm_i915_private_t *dev_priv = dev->dev_private;
4611         struct drm_i915_gem_phys_object *phys_obj;
4612
4613         if (!dev_priv->mm.phys_objs[id - 1])
4614                 return;
4615
4616         phys_obj = dev_priv->mm.phys_objs[id - 1];
4617         if (phys_obj->cur_obj)
4618                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4620
4621 #ifdef CONFIG_X86
4622         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4623 #endif
4624         drm_pci_free(dev, phys_obj->handle);
4625         kfree(phys_obj);
4626         dev_priv->mm.phys_objs[id - 1] = NULL;
4627 }
4628
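/* Free every allocated physical object slot. */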
4629 void i915_gem_free_all_phys_object(struct drm_device *dev)
4630 {
4631         int i;
4632
4633         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4634                 i915_gem_free_phys_object(dev, i);
4635 }
4636
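/*
 * Copy the contents of the physical backing store back into the object's
 * shmem pages and drop the association with the physical object.
 */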
4637 void i915_gem_detach_phys_object(struct drm_device *dev,
4638                                  struct drm_i915_gem_object *obj)
4639 {
4640         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4641         char *vaddr;
4642         int i;
4643         int page_count;
4644
4645         if (!obj->phys_obj)
4646                 return;
4647         vaddr = obj->phys_obj->handle->vaddr;
4648
4649         page_count = obj->base.size / PAGE_SIZE;
4650         for (i = 0; i < page_count; i++) {
4651                 struct page *page = shmem_read_mapping_page(mapping, i);
4652                 if (!IS_ERR(page)) {
4653                         char *dst = kmap_atomic(page);
4654                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4655                         kunmap_atomic(dst);
4656
4657                         drm_clflush_pages(&page, 1);
4658
4659                         set_page_dirty(page);
4660                         mark_page_accessed(page);
4661                         page_cache_release(page);
4662                 }
4663         }
4664         i915_gem_chipset_flush(dev);
4665
4666         obj->phys_obj->cur_obj = NULL;
4667         obj->phys_obj = NULL;
4668 }
4669
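/*
 * Bind an object to the physical object slot @id, allocating the slot on
 * first use and copying the object's current shmem contents into it.
 */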
4670 int
4671 i915_gem_attach_phys_object(struct drm_device *dev,
4672                             struct drm_i915_gem_object *obj,
4673                             int id,
4674                             int align)
4675 {
4676         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4677         drm_i915_private_t *dev_priv = dev->dev_private;
4678         int ret = 0;
4679         int page_count;
4680         int i;
4681
4682         if (id > I915_MAX_PHYS_OBJECT)
4683                 return -EINVAL;
4684
4685         if (obj->phys_obj) {
4686                 if (obj->phys_obj->id == id)
4687                         return 0;
4688                 i915_gem_detach_phys_object(dev, obj);
4689         }
4690
4691         /* create a new object */
4692         if (!dev_priv->mm.phys_objs[id - 1]) {
4693                 ret = i915_gem_init_phys_object(dev, id,
4694                                                 obj->base.size, align);
4695                 if (ret) {
4696                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4697                                   id, obj->base.size);
4698                         return ret;
4699                 }
4700         }
4701
4702         /* bind to the object */
4703         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4704         obj->phys_obj->cur_obj = obj;
4705
4706         page_count = obj->base.size / PAGE_SIZE;
4707
4708         for (i = 0; i < page_count; i++) {
4709                 struct page *page;
4710                 char *dst, *src;
4711
4712                 page = shmem_read_mapping_page(mapping, i);
4713                 if (IS_ERR(page))
4714                         return PTR_ERR(page);
4715
4716                 src = kmap_atomic(page);
4717                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4718                 memcpy(dst, src, PAGE_SIZE);
4719                 kunmap_atomic(src);
4720
4721                 mark_page_accessed(page);
4722                 page_cache_release(page);
4723         }
4724
4725         return 0;
4726 }
4727
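/*
 * pwrite fast path for objects with a physical backing store: copy the
 * user data straight into the backing store's vaddr (dropping the mutex
 * for the non-atomic fallback) and flush the chipset cache.
 */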
4728 static int
4729 i915_gem_phys_pwrite(struct drm_device *dev,
4730                      struct drm_i915_gem_object *obj,
4731                      struct drm_i915_gem_pwrite *args,
4732                      struct drm_file *file_priv)
4733 {
4734         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4735         char __user *user_data = to_user_ptr(args->data_ptr);
4736
4737         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4738                 unsigned long unwritten;
4739
4740                 /* The physical object once assigned is fixed for the lifetime
4741                  * of the obj, so we can safely drop the lock and continue
4742                  * to access vaddr.
4743                  */
4744                 mutex_unlock(&dev->struct_mutex);
4745                 unwritten = copy_from_user(vaddr, user_data, args->size);
4746                 mutex_lock(&dev->struct_mutex);
4747                 if (unwritten)
4748                         return -EFAULT;
4749         }
4750
4751         i915_gem_chipset_flush(dev);
4752         return 0;
4753 }
4754
4755 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4756 {
4757         struct drm_i915_file_private *file_priv = file->driver_priv;
4758
4759         /* Clean up our request list when the client is going away, so that
4760          * later retire_requests won't dereference our soon-to-be-gone
4761          * file_priv.
4762          */
4763         spin_lock(&file_priv->mm.lock);
4764         while (!list_empty(&file_priv->mm.request_list)) {
4765                 struct drm_i915_gem_request *request;
4766
4767                 request = list_first_entry(&file_priv->mm.request_list,
4768                                            struct drm_i915_gem_request,
4769                                            client_list);
4770                 list_del(&request->client_list);
4771                 request->file_priv = NULL;
4772         }
4773         spin_unlock(&file_priv->mm.lock);
4774 }
4775
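/*
 * Best-effort check whether @task currently holds @mutex; without owner
 * tracking (UP, no mutex debugging) we conservatively report false.
 */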
4776 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4777 {
4778         if (!mutex_is_locked(mutex))
4779                 return false;
4780
4781 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4782         return mutex->owner == task;
4783 #else
4784         /* Since UP may be pre-empted, we cannot assume that we own the lock */
4785         return false;
4786 #endif
4787 }
4788
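/*
 * Shrinker "count" callback: report how many pages could be reclaimed
 * from unpinned (and, for bound objects, inactive) GEM objects.
 */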
4789 static unsigned long
4790 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4791 {
4792         struct drm_i915_private *dev_priv =
4793                 container_of(shrinker,
4794                              struct drm_i915_private,
4795                              mm.inactive_shrinker);
4796         struct drm_device *dev = dev_priv->dev;
4797         struct drm_i915_gem_object *obj;
4798         bool unlock = true;
4799         unsigned long count;
4800
4801         if (!mutex_trylock(&dev->struct_mutex)) {
4802                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4803                         return SHRINK_STOP;
4804
4805                 if (dev_priv->mm.shrinker_no_lock_stealing)
4806                         return SHRINK_STOP;
4807
4808                 unlock = false;
4809         }
4810
4811         count = 0;
4812         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4813                 if (obj->pages_pin_count == 0)
4814                         count += obj->base.size >> PAGE_SHIFT;
4815
4816         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4817                 if (obj->active)
4818                         continue;
4819
4820                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4821                         count += obj->base.size >> PAGE_SHIFT;
4822         }
4823
4824         if (unlock)
4825                 mutex_unlock(&dev->struct_mutex);
4826         return count;
4827 }
4828
4829 /* Helpers for the new multiple-address-space (VM) support: per-VM object binding queries */
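/*
 * Return the offset at which @o is bound into @vm, or -1 if it has no
 * VMA in that address space.
 */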
4830 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4831                                   struct i915_address_space *vm)
4832 {
4833         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4834         struct i915_vma *vma;
4835
4836         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4837                 vm = &dev_priv->gtt.base;
4838
4839         BUG_ON(list_empty(&o->vma_list));
4840         list_for_each_entry(vma, &o->vma_list, vma_link) {
4841                 if (vma->vm == vm)
4842                         return vma->node.start;
4843         }
4845         return -1;
4846 }
4847
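/* Return true if @o has an allocated VMA node in @vm. */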
4848 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4849                         struct i915_address_space *vm)
4850 {
4851         struct i915_vma *vma;
4852
4853         list_for_each_entry(vma, &o->vma_list, vma_link)
4854                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4855                         return true;
4856
4857         return false;
4858 }
4859
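/* Return true if @o is bound into any address space. */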
4860 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4861 {
4862         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4863         struct i915_address_space *vm;
4864
4865         list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4866                 if (i915_gem_obj_bound(o, vm))
4867                         return true;
4868
4869         return false;
4870 }
4871
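/* Return the size of @o's binding in @vm, or 0 if it is not bound there. */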
4872 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4873                                 struct i915_address_space *vm)
4874 {
4875         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4876         struct i915_vma *vma;
4877
4878         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4879                 vm = &dev_priv->gtt.base;
4880
4881         BUG_ON(list_empty(&o->vma_list));
4882
4883         list_for_each_entry(vma, &o->vma_list, vma_link)
4884                 if (vma->vm == vm)
4885                         return vma->node.size;
4886
4887         return 0;
4888 }
4889
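/*
 * Shrinker "scan" callback: purge the easily freed objects first, then
 * fall back to progressively more aggressive reclaim until the requested
 * number of pages has been freed.
 */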
4890 static unsigned long
4891 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4892 {
4893         struct drm_i915_private *dev_priv =
4894                 container_of(shrinker,
4895                              struct drm_i915_private,
4896                              mm.inactive_shrinker);
4897         struct drm_device *dev = dev_priv->dev;
4898         int nr_to_scan = sc->nr_to_scan;
4899         unsigned long freed;
4900         bool unlock = true;
4901
4902         if (!mutex_trylock(&dev->struct_mutex)) {
4903                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4904                         return 0;
4905
4906                 if (dev_priv->mm.shrinker_no_lock_stealing)
4907                         return 0;
4908
4909                 unlock = false;
4910         }
4911
4912         freed = i915_gem_purge(dev_priv, nr_to_scan);
4913         if (freed < nr_to_scan)
4914                 freed += __i915_gem_shrink(dev_priv, nr_to_scan,
4915                                            false);
4916         if (freed < nr_to_scan)
4917                 freed += i915_gem_shrink_all(dev_priv);
4918
4919         if (unlock)
4920                 mutex_unlock(&dev->struct_mutex);
4921         return freed;
4922 }
4923
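/* Find the VMA binding @obj into @vm, or NULL if none exists. */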
4924 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4925                                      struct i915_address_space *vm)
4926 {
4927         struct i915_vma *vma;
4928         list_for_each_entry(vma, &obj->vma_list, vma_link)
4929                 if (vma->vm == vm)
4930                         return vma;
4931
4932         return NULL;
4933 }
4934
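/* Return the VMA for (@obj, @vm), creating one if it does not yet exist. */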
4935 struct i915_vma *
4936 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4937                                   struct i915_address_space *vm)
4938 {
4939         struct i915_vma *vma;
4940
4941         vma = i915_gem_obj_to_vma(obj, vm);
4942         if (!vma)
4943                 vma = i915_gem_vma_create(obj, vm);
4944
4945         return vma;
4946 }