/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                                    unsigned alignment,
                                                    bool map_and_fenceable,
                                                    bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);

        /* As we do not have an associated fence register, we will force
         * a tiling change if we ever need to acquire one.
         */
        obj->fence_dirty = false;
        obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct completion *x = &dev_priv->error_completion;
        unsigned long flags;
        int ret;

        if (!atomic_read(&dev_priv->mm.wedged))
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        }

        if (atomic_read(&dev_priv->mm.wedged)) {
                /* GPU is hung, bump the completion count to account for
                 * the token we just consumed so that we never hit zero and
                 * end up waiting upon a subsequent completion event that
                 * will never happen.
                 */
                spin_lock_irqsave(&x->wait.lock, flags);
                x->done++;
                spin_unlock_irqrestore(&x->wait.lock, flags);
        }
        return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        int ret;

        ret = i915_gem_wait_for_error(dev);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        WARN_ON(i915_verify_lists(dev));
        return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
        return obj->gtt_space && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_init *args = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
                return -EINVAL;

        /* GEM with user mode setting was never supported on ilk and later. */
        if (INTEL_INFO(dev)->gen >= 5)
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        i915_gem_init_global_gtt(dev, args->gtt_start,
                                 args->gtt_end, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
                if (obj->pin_count)
                        pinned += obj->gtt_space->size;
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = dev_priv->mm.gtt_total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
                drm_gem_object_release(&obj->base);
                i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
                kfree(obj);
                return ret;
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference(&obj->base);
        trace_i915_gem_object_create(obj);

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}
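
/*
 * Worked example of the pitch/size computation above: a 1920x1080 dumb
 * buffer with bpp=32 gives cpp = (32 + 7) / 8 = 4, so
 * pitch = ALIGN(1920 * 4, 64) = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() rounds up
 * to whole pages (here it is already an exact multiple of 4096).
 */
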
241
242 int i915_gem_dumb_destroy(struct drm_file *file,
243                           struct drm_device *dev,
244                           uint32_t handle)
245 {
246         return drm_gem_handle_delete(file, handle);
247 }
248
249 /**
250  * Creates a new mm object and returns a handle to it.
251  */
252 int
253 i915_gem_create_ioctl(struct drm_device *dev, void *data,
254                       struct drm_file *file)
255 {
256         struct drm_i915_gem_create *args = data;
257
258         return i915_gem_create(file, dev,
259                                args->size, &args->handle);
260 }
261
262 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
263 {
264         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
265
266         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
267                 obj->tiling_mode != I915_TILING_NONE;
268 }
269
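/*
 * Background note (a simplified sketch of the hardware behaviour): with
 * I915_BIT_6_SWIZZLE_9_10_17 the memory controller folds bit 17 of the
 * physical page address into bit 6 of the tiled address, so pages whose
 * physical bit 17 is set have each pair of adjacent 64-byte cachelines
 * swapped relative to the CPU's view. The two helpers below compensate
 * by walking the copy one cacheline at a time and flipping bit 6 of the
 * GPU offset (gpu_offset ^ 64); the caller only uses them on pages where
 * page_to_phys(page) has bit 17 set.
 */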
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                                              page_length);
        else
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,
                                     page_length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct scatterlist *sg;
        int i;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush = 1;
                if (obj->gtt_space) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                        if (ret)
                                return ret;
                }
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        offset = args->offset;

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
                struct page *page;

                if (i < offset >> PAGE_SHIFT)
                        continue;

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                page = sg_page(sg);
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);

                if (!prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */
                        (void)ret;
                        prefaulted = 1;
                }

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);

                mutex_lock(&dev->struct_mutex);

next_page:
                mark_page_accessed(page);

                if (ret)
                        goto out;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        if (hit_slowpath) {
                /* Fixup: Kill any reinstated backing storage pages */
                if (obj->madv == __I915_MADV_PURGED)
                        i915_gem_object_truncate(obj);
        }

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size))
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check source.  */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
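
/*
 * A minimal userspace sketch of this path, assuming libdrm's drmIoctl()
 * plus an open DRM fd and a valid GEM handle (names and error handling
 * are illustrative only):
 *
 *        char buf[4096];
 *        struct drm_i915_gem_pread pread = {
 *                .handle   = handle,
 *                .offset   = 0,
 *                .size     = sizeof(buf),
 *                .data_ptr = (uintptr_t)buf,
 *        };
 *        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *                ...on error the contents of buf are undefined...
 */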

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force*)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;

        ret = i915_gem_object_pin(obj, 0, true, true);
        if (ret)
                goto out;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                goto out_unpin;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        offset = obj->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = offset & PAGE_MASK;
                page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_unpin;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out_unpin:
        i915_gem_object_unpin(obj);
out:
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
                                                user_data,
                                                page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
        else
                ret = __copy_from_user(vaddr + shmem_page_offset,
                                       user_data,
                                       page_length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
                      struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
{
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
        int i;
        struct scatterlist *sg;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourself into the gtt
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush_after = 1;
                if (obj->gtt_space) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, true);
                        if (ret)
                                return ret;
                }
        }
        /* The same trick applies to invalidating partially written
         * cachelines before writing. */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
            && obj->cache_level == I915_CACHE_NONE)
                needs_clflush_before = 1;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        offset = args->offset;
        obj->dirty = 1;

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
                struct page *page;
                int partial_cacheline_write;

                if (i < offset >> PAGE_SHIFT)
                        continue;

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the entire range written in
                 * this page. */
                partial_cacheline_write = needs_clflush_before &&
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));

                page = sg_page(sg);
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_lock(&dev->struct_mutex);

next_page:
                set_page_dirty(page);
                mark_page_accessed(page);

                if (ret)
                        goto out;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        if (hit_slowpath) {
                /* Fixup: Kill any reinstated backing storage pages */
                if (obj->madv == __I915_MADV_PURGED)
                        i915_gem_object_truncate(obj);
                /* and flush dirty cachelines in case the object isn't in the cpu write
                 * domain anymore. */
                if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        i915_gem_clflush_object(obj);
                        intel_gtt_chipset_flush();
                }
        }

        if (needs_clflush_after)
                intel_gtt_chipset_flush();

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_READ,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size))
                return -EFAULT;

        ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
                                           args->size);
        if (ret)
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj->phys_obj) {
                ret = i915_gem_phys_pwrite(dev, obj, args, file);
                goto out;
        }

        if (obj->cache_level == I915_CACHE_NONE &&
            obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case. */
        }

        if (ret == -EFAULT || ret == -ENOSPC)
                ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
                     bool interruptible)
{
        if (atomic_read(&dev_priv->mm.wedged)) {
                struct completion *x = &dev_priv->error_completion;
                bool recovery_complete;
                unsigned long flags;

                /* Give the error handler a chance to run. */
                spin_lock_irqsave(&x->wait.lock, flags);
                recovery_complete = x->done > 0;
                spin_unlock_irqrestore(&x->wait.lock, flags);

                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;

                /* Recovery complete, but still wedged means reset failure. */
                if (recovery_complete)
                        return -EIO;

                return -EAGAIN;
        }

        return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
        int ret;

        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

        ret = 0;
        if (seqno == ring->outstanding_lazy_request)
                ret = i915_add_request(ring, NULL, NULL);

        return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        bool interruptible, struct timespec *timeout)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct timespec before, now, wait_time={1,0};
        unsigned long timeout_jiffies;
        long end;
        bool wait_forever = true;
        int ret;

        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;

        trace_i915_gem_request_wait_begin(ring, seqno);

        if (timeout != NULL) {
                wait_time = *timeout;
                wait_forever = false;
        }

        timeout_jiffies = timespec_to_jiffies(&wait_time);

        if (WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;

        /* Record current time in case interrupted by signal, or wedged */
        getrawmonotonic(&before);

#define EXIT_COND \
        (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
        atomic_read(&dev_priv->mm.wedged))
        do {
                if (interruptible)
                        end = wait_event_interruptible_timeout(ring->irq_queue,
                                                               EXIT_COND,
                                                               timeout_jiffies);
                else
                        end = wait_event_timeout(ring->irq_queue, EXIT_COND,
                                                 timeout_jiffies);

                ret = i915_gem_check_wedge(dev_priv, interruptible);
                if (ret)
                        end = ret;
        } while (end == 0 && wait_forever);

        getrawmonotonic(&now);

        ring->irq_put(ring);
        trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
                *timeout = timespec_sub(*timeout, sleep_time);
        }

        switch (end) {
        case -EIO:
        case -EAGAIN: /* Wedged */
        case -ERESTARTSYS: /* Signal */
                return (int)end;
        case 0: /* Timeout */
                if (timeout)
                        set_normalized_timespec(timeout, 0, 0);
                return -ETIME;
        default: /* Completed */
                WARN_ON(end < 0); /* We're not aware of other errors */
                return 0;
        }
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible = dev_priv->mm.interruptible;
        int ret;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(seqno == 0);

        ret = i915_gem_check_wedge(dev_priv, interruptible);
        if (ret)
                return ret;

        ret = i915_gem_check_olr(ring, seqno);
        if (ret)
                return ret;

        return __wait_seqno(ring, seqno, interruptible, NULL);
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
{
        struct intel_ring_buffer *ring = obj->ring;
        u32 seqno;
        int ret;

        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
        if (seqno == 0)
                return 0;

        ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        i915_gem_retire_requests_ring(ring);

        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         */
        if (obj->last_write_seqno &&
            i915_seqno_passed(seqno, obj->last_write_seqno)) {
                obj->last_write_seqno = 0;
                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
        }

        return 0;
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            bool readonly)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = obj->ring;
        u32 seqno;
        int ret;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);

        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
        if (seqno == 0)
                return 0;

        ret = i915_gem_check_wedge(dev_priv, true);
        if (ret)
                return ret;

        ret = i915_gem_check_olr(ring, seqno);
        if (ret)
                return ret;

        mutex_unlock(&dev->struct_mutex);
        ret = __wait_seqno(ring, seqno, true, NULL);
        mutex_lock(&dev->struct_mutex);

        i915_gem_retire_requests_ring(ring);

        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         */
        if (obj->last_write_seqno &&
            i915_seqno_passed(seqno, obj->last_write_seqno)) {
                obj->last_write_seqno = 0;
                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
        }

        return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_i915_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
        ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
        if (ret)
                goto unref;

        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                if (ret == -EINVAL)
                        ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

unref:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (obj == NULL)
                return -ENOENT;

        /* prime objects have no backing filp to GEM mmap
         * pages from.
         */
        if (!obj->filp) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        addr = vm_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}
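
/*
 * A minimal userspace sketch of the CPU mmap path, assuming libdrm's
 * drmIoctl(), an open DRM fd and a valid GEM handle of at least "size"
 * bytes (names and error handling are illustrative only):
 *
 *        struct drm_i915_gem_mmap mmap_arg = {
 *                .handle = handle,
 *                .offset = 0,
 *                .size   = size,
 *        };
 *        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0) {
 *                void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 *                ...CPU-domain access through ptr; move the object to the
 *                   CPU domain first via DRM_IOCTL_I915_GEM_SET_DOMAIN...
 *        }
 */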

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto out;

        trace_i915_gem_object_fault(obj, page_offset, true, write);

        /* Now bind it into the GTT if needed */
        if (!obj->map_and_fenceable) {
                ret = i915_gem_object_unbind(obj);
                if (ret)
                        goto unlock;
        }
        if (!obj->gtt_space) {
                ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
                if (ret)
                        goto unlock;

                ret = i915_gem_object_set_to_gtt_domain(obj, write);
                if (ret)
                        goto unlock;
        }

        if (!obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(obj, obj->cache_level);

        ret = i915_gem_object_get_fence(obj);
        if (ret)
                goto unlock;

        if (i915_gem_object_is_inactive(obj))
                list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

        obj->fault_mappable = true;

        pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
                page_offset;

        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EIO:
                /* If this -EIO is due to a gpu hang, give the reset code a
                 * chance to clean up the mess. Otherwise return the proper
                 * SIGBUS. */
                if (!atomic_read(&dev_priv->mm.wedged))
                        return VM_FAULT_SIGBUS;
        case -EAGAIN:
                /* Give the error handler a chance to run and move the
                 * objects off the GPU active list. Next time we service the
                 * fault, we should be able to transition the page into the
                 * GTT without touching the GPU (and so avoid further
                 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
                 * with coherency, just lost writes.
                 */
                set_need_resched();
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                WARN_ON_ONCE(ret);
                return VM_FAULT_SIGBUS;
        }
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
        if (!obj->fault_mappable)
                return;

        if (obj->base.dev->dev_mapping)
                unmap_mapping_range(obj->base.dev->dev_mapping,
                                    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
                                    obj->base.size, 1);

        obj->fault_mappable = false;
}

static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
        uint32_t gtt_size;

        if (INTEL_INFO(dev)->gen >= 4 ||
            tiling_mode == I915_TILING_NONE)
                return size;

        /* Previous chips need a power-of-two fence region when tiling */
        if (INTEL_INFO(dev)->gen == 3)
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;

        while (gtt_size < size)
                gtt_size <<= 1;

        return gtt_size;
}
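
/*
 * Worked example for the function above: a 700KiB tiled object on gen3
 * starts from the 1MiB minimum, which already covers it, so
 * gtt_size = 1MiB; on gen2 it starts at 512KiB and doubles once to 1MiB.
 * On gen4+, or for untiled objects, the size is returned unchanged.
 */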
1464
1465 /**
1466  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1467  * @obj: object to check
1468  *
1469  * Return the required GTT alignment for an object, taking into account
1470  * potential fence register mapping.
1471  */
1472 static uint32_t
1473 i915_gem_get_gtt_alignment(struct drm_device *dev,
1474                            uint32_t size,
1475                            int tiling_mode)
1476 {
1477         /*
1478          * Minimum alignment is 4k (GTT page size), but might be greater
1479          * if a fence register is needed for the object.
1480          */
1481         if (INTEL_INFO(dev)->gen >= 4 ||
1482             tiling_mode == I915_TILING_NONE)
1483                 return 4096;
1484
1485         /*
1486          * Previous chips need to be aligned to the size of the smallest
1487          * fence register that can contain the object.
1488          */
1489         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1490 }
1491
1492 /**
1493  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1494  *                                       unfenced object
1495  * @dev: the device
1496  * @size: size of the object
1497  * @tiling_mode: tiling mode of the object
1498  *
1499  * Return the required GTT alignment for an object, only taking into account
1500  * unfenced tiled surface requirements.
1501  */
1502 uint32_t
1503 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1504                                     uint32_t size,
1505                                     int tiling_mode)
1506 {
1507         /*
1508          * Minimum alignment is 4k (GTT page size) for sane hw.
1509          */
1510         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1511             tiling_mode == I915_TILING_NONE)
1512                 return 4096;
1513
1514         /* Previous hardware, however, needs to be aligned to a power-of-two
1515          * tile height. The simplest method for determining this is to reuse
1516          * the power-of-two object size.
1517          */
1518         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1519 }
1520
1521 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1522 {
1523         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1524         int ret;
1525
1526         if (obj->base.map_list.map)
1527                 return 0;
1528
1529         ret = drm_gem_create_mmap_offset(&obj->base);
1530         if (ret != -ENOSPC)
1531                 return ret;
1532
1533         /* Badly fragmented mmap space? The only way we can recover
1534          * space is by destroying unwanted objects. We can't randomly release
1535          * mmap_offsets as userspace expects them to be persistent for the
1536          * lifetime of the objects. The closest we can do is to release the
1537          * offsets on purgeable objects by truncating them and marking them purged,
1538          * which prevents userspace from ever using that object again.
1539          */
1540         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1541         ret = drm_gem_create_mmap_offset(&obj->base);
1542         if (ret != -ENOSPC)
1543                 return ret;
1544
1545         i915_gem_shrink_all(dev_priv);
1546         return drm_gem_create_mmap_offset(&obj->base);
1547 }
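
/* Editorial note: the fallback ladder above is, in order, an optimistic
 * allocation, a purge targeting roughly this object's worth of pages from
 * purgeable objects, and finally a full shrink of everything evictable
 * before the last attempt at drm_gem_create_mmap_offset().
 */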
1548
1549 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1550 {
1551         if (!obj->base.map_list.map)
1552                 return;
1553
1554         drm_gem_free_mmap_offset(&obj->base);
1555 }
1556
1557 int
1558 i915_gem_mmap_gtt(struct drm_file *file,
1559                   struct drm_device *dev,
1560                   uint32_t handle,
1561                   uint64_t *offset)
1562 {
1563         struct drm_i915_private *dev_priv = dev->dev_private;
1564         struct drm_i915_gem_object *obj;
1565         int ret;
1566
1567         ret = i915_mutex_lock_interruptible(dev);
1568         if (ret)
1569                 return ret;
1570
1571         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1572         if (&obj->base == NULL) {
1573                 ret = -ENOENT;
1574                 goto unlock;
1575         }
1576
1577         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1578                 ret = -E2BIG;
1579                 goto out;
1580         }
1581
1582         if (obj->madv != I915_MADV_WILLNEED) {
1583                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1584                 ret = -EINVAL;
1585                 goto out;
1586         }
1587
1588         ret = i915_gem_object_create_mmap_offset(obj);
1589         if (ret)
1590                 goto out;
1591
1592         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1593
1594 out:
1595         drm_gem_object_unreference(&obj->base);
1596 unlock:
1597         mutex_unlock(&dev->struct_mutex);
1598         return ret;
1599 }
1600
1601 /**
1602  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1603  * @dev: DRM device
1604  * @data: GTT mapping ioctl data
1605  * @file: GEM object info
1606  *
1607  * Simply returns the fake offset to userspace so it can mmap it.
1608  * The mmap call will end up in drm_gem_mmap(), which will set things
1609  * up so we can get faults in the handler above.
1610  *
1611  * The fault handler will take care of binding the object into the GTT
1612  * (since it may have been evicted to make room for something), allocating
1613  * a fence register, and mapping the appropriate aperture address into
1614  * userspace.
1615  */
1616 int
1617 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1618                         struct drm_file *file)
1619 {
1620         struct drm_i915_gem_mmap_gtt *args = data;
1621
1622         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1623 }
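
/* Illustrative userspace sketch (editorial; not part of this file). The
 * names `fd', `handle' and `size' are assumed to come from earlier DRM
 * calls, and error handling is omitted:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 *
 * The fake offset is only meaningful as an mmap(2) offset on the DRM fd;
 * it is not a CPU or GTT address.
 */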
1624
1625 /* Immediately discard the backing storage */
1626 static void
1627 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1628 {
1629         struct inode *inode;
1630
1631         i915_gem_object_free_mmap_offset(obj);
1632
1633         if (obj->base.filp == NULL)
1634                 return;
1635
1636         /* Our goal here is to return as much of the memory as
1637          * is possible back to the system as we are called from OOM.
1638          * To do this we must instruct the shmfs to drop all of its
1639          * backing pages, *now*.
1640          */
1641         inode = obj->base.filp->f_path.dentry->d_inode;
1642         shmem_truncate_range(inode, 0, (loff_t)-1);
1643
1644         obj->madv = __I915_MADV_PURGED;
1645 }
1646
1647 static inline int
1648 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1649 {
1650         return obj->madv == I915_MADV_DONTNEED;
1651 }
1652
1653 static void
1654 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1655 {
1656         int page_count = obj->base.size / PAGE_SIZE;
1657         struct scatterlist *sg;
1658         int ret, i;
1659
1660         BUG_ON(obj->madv == __I915_MADV_PURGED);
1661
1662         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1663         if (ret) {
1664                 /* In the event of a disaster, abandon all caches and
1665                  * hope for the best.
1666                  */
1667                 WARN_ON(ret != -EIO);
1668                 i915_gem_clflush_object(obj);
1669                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1670         }
1671
1672         if (i915_gem_object_needs_bit17_swizzle(obj))
1673                 i915_gem_object_save_bit_17_swizzle(obj);
1674
1675         if (obj->madv == I915_MADV_DONTNEED)
1676                 obj->dirty = 0;
1677
1678         for_each_sg(obj->pages->sgl, sg, page_count, i) {
1679                 struct page *page = sg_page(sg);
1680
1681                 if (obj->dirty)
1682                         set_page_dirty(page);
1683
1684                 if (obj->madv == I915_MADV_WILLNEED)
1685                         mark_page_accessed(page);
1686
1687                 page_cache_release(page);
1688         }
1689         obj->dirty = 0;
1690
1691         sg_free_table(obj->pages);
1692         kfree(obj->pages);
1693 }
1694
1695 static int
1696 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1697 {
1698         const struct drm_i915_gem_object_ops *ops = obj->ops;
1699
1700         if (obj->pages == NULL)
1701                 return 0;
1702
1703         BUG_ON(obj->gtt_space);
1704
1705         if (obj->pages_pin_count)
1706                 return -EBUSY;
1707
1708         ops->put_pages(obj);
1709         obj->pages = NULL;
1710
1711         list_del(&obj->gtt_list);
1712         if (i915_gem_object_is_purgeable(obj))
1713                 i915_gem_object_truncate(obj);
1714
1715         return 0;
1716 }
1717
1718 static long
1719 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1720 {
1721         struct drm_i915_gem_object *obj, *next;
1722         long count = 0;
1723
1724         list_for_each_entry_safe(obj, next,
1725                                  &dev_priv->mm.unbound_list,
1726                                  gtt_list) {
1727                 if (i915_gem_object_is_purgeable(obj) &&
1728                     i915_gem_object_put_pages(obj) == 0) {
1729                         count += obj->base.size >> PAGE_SHIFT;
1730                         if (count >= target)
1731                                 return count;
1732                 }
1733         }
1734
1735         list_for_each_entry_safe(obj, next,
1736                                  &dev_priv->mm.inactive_list,
1737                                  mm_list) {
1738                 if (i915_gem_object_is_purgeable(obj) &&
1739                     i915_gem_object_unbind(obj) == 0 &&
1740                     i915_gem_object_put_pages(obj) == 0) {
1741                         count += obj->base.size >> PAGE_SHIFT;
1742                         if (count >= target)
1743                                 return count;
1744                 }
1745         }
1746
1747         return count;
1748 }
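
/* Editorial note: unbound objects are reaped first above because their
 * pages can be freed without touching the GTT; only if that falls short
 * of the target do we pay the extra cost of unbinding inactive objects.
 */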
1749
1750 static void
1751 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1752 {
1753         struct drm_i915_gem_object *obj, *next;
1754
1755         i915_gem_evict_everything(dev_priv->dev);
1756
1757         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1758                 i915_gem_object_put_pages(obj);
1759 }
1760
1761 static int
1762 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1763 {
1764         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1765         int page_count, i;
1766         struct address_space *mapping;
1767         struct sg_table *st;
1768         struct scatterlist *sg;
1769         struct page *page;
1770         gfp_t gfp;
1771
1772         /* Assert that the object is not currently in any GPU domain. As it
1773          * wasn't in the GTT, there shouldn't be any way it could have been in
1774          * a GPU cache.
1775          */
1776         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1777         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1778
1779         st = kmalloc(sizeof(*st), GFP_KERNEL);
1780         if (st == NULL)
1781                 return -ENOMEM;
1782
1783         page_count = obj->base.size / PAGE_SIZE;
1784         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1785                 sg_free_table(st);
1786                 kfree(st);
1787                 return -ENOMEM;
1788         }
1789
1790         /* Get the list of pages out of our struct file.  They'll be pinned
1791          * at this point until we release them.
1792          *
1793          * Fail silently without starting the shrinker.
1794          */
1795         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1796         gfp = mapping_gfp_mask(mapping);
1797         gfp |= __GFP_NORETRY | __GFP_NOWARN;
1798         gfp &= ~(__GFP_IO | __GFP_WAIT);
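        /* i.e. quiet, opportunistic allocations that neither retry nor
         * enter direct reclaim/IO; failures are handled below instead.
         */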
1799         for_each_sg(st->sgl, sg, page_count, i) {
1800                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1801                 if (IS_ERR(page)) {
1802                         i915_gem_purge(dev_priv, page_count);
1803                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1804                 }
1805                 if (IS_ERR(page)) {
1806                         /* We've tried hard to allocate the memory by reaping
1807                          * our own buffer, now let the real VM do its job and
1808                          * go down in flames if truly OOM.
1809                          */
1810                         gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
1811                         gfp |= __GFP_IO | __GFP_WAIT;
1812
1813                         i915_gem_shrink_all(dev_priv);
1814                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1815                         if (IS_ERR(page))
1816                                 goto err_pages;
1817
1818                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
1819                         gfp &= ~(__GFP_IO | __GFP_WAIT);
1820                 }
1821
1822                 sg_set_page(sg, page, PAGE_SIZE, 0);
1823         }
1824
1825         if (i915_gem_object_needs_bit17_swizzle(obj))
1826                 i915_gem_object_do_bit_17_swizzle(obj);
1827
1828         obj->pages = st;
1829         return 0;
1830
1831 err_pages:
1832         for_each_sg(st->sgl, sg, i, page_count)
1833                 page_cache_release(sg_page(sg));
1834         sg_free_table(st);
1835         kfree(st);
1836         return PTR_ERR(page);
1837 }
1838
1839 /* Ensure that the associated pages are gathered from the backing storage
1840  * and pinned into our object. i915_gem_object_get_pages() may be called
1841  * multiple times before they are released by a single call to
1842  * i915_gem_object_put_pages() - once the pages are no longer referenced
1843  * either as a result of memory pressure (reaping pages under the shrinker)
1844  * or as the object is itself released.
1845  */
1846 int
1847 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1848 {
1849         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1850         const struct drm_i915_gem_object_ops *ops = obj->ops;
1851         int ret;
1852
1853         if (obj->pages)
1854                 return 0;
1855
1856         BUG_ON(obj->pages_pin_count);
1857
1858         ret = ops->get_pages(obj);
1859         if (ret)
1860                 return ret;
1861
1862         list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1863         return 0;
1864 }
1865
1866 void
1867 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1868                                struct intel_ring_buffer *ring,
1869                                u32 seqno)
1870 {
1871         struct drm_device *dev = obj->base.dev;
1872         struct drm_i915_private *dev_priv = dev->dev_private;
1873
1874         BUG_ON(ring == NULL);
1875         obj->ring = ring;
1876
1877         /* Add a reference if we're newly entering the active list. */
1878         if (!obj->active) {
1879                 drm_gem_object_reference(&obj->base);
1880                 obj->active = 1;
1881         }
1882
1883         /* Move from whatever list we were on to the tail of execution. */
1884         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1885         list_move_tail(&obj->ring_list, &ring->active_list);
1886
1887         obj->last_read_seqno = seqno;
1888
1889         if (obj->fenced_gpu_access) {
1890                 obj->last_fenced_seqno = seqno;
1891
1892                 /* Bump MRU to take account of the delayed flush */
1893                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1894                         struct drm_i915_fence_reg *reg;
1895
1896                         reg = &dev_priv->fence_regs[obj->fence_reg];
1897                         list_move_tail(&reg->lru_list,
1898                                        &dev_priv->mm.fence_list);
1899                 }
1900         }
1901 }
1902
1903 static void
1904 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1905 {
1906         struct drm_device *dev = obj->base.dev;
1907         struct drm_i915_private *dev_priv = dev->dev_private;
1908
1909         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1910         BUG_ON(!obj->active);
1911
1912         if (obj->pin_count) /* are we a framebuffer? */
1913                 intel_mark_fb_idle(obj);
1914
1915         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1916
1917         list_del_init(&obj->ring_list);
1918         obj->ring = NULL;
1919
1920         obj->last_read_seqno = 0;
1921         obj->last_write_seqno = 0;
1922         obj->base.write_domain = 0;
1923
1924         obj->last_fenced_seqno = 0;
1925         obj->fenced_gpu_access = false;
1926
1927         obj->active = 0;
1928         drm_gem_object_unreference(&obj->base);
1929
1930         WARN_ON(i915_verify_lists(dev));
1931 }
1932
1933 static u32
1934 i915_gem_get_seqno(struct drm_device *dev)
1935 {
1936         drm_i915_private_t *dev_priv = dev->dev_private;
1937         u32 seqno = dev_priv->next_seqno;
1938
1939         /* reserve 0 for non-seqno */
1940         if (++dev_priv->next_seqno == 0)
1941                 dev_priv->next_seqno = 1;
1942
1943         return seqno;
1944 }
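
/* Editorial note: callers compare seqnos with wrap-safe signed arithmetic
 * (see i915_seqno_passed()), so reserving 0 for "no seqno" is the only
 * special case the allocator itself needs to handle.
 */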
1945
1946 u32
1947 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1948 {
1949         if (ring->outstanding_lazy_request == 0)
1950                 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1951
1952         return ring->outstanding_lazy_request;
1953 }
1954
1955 int
1956 i915_add_request(struct intel_ring_buffer *ring,
1957                  struct drm_file *file,
1958                  struct drm_i915_gem_request *request)
1959 {
1960         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1961         uint32_t seqno;
1962         u32 request_ring_position;
1963         int was_empty;
1964         int ret;
1965
1966         /*
1967          * Emit any outstanding flushes - execbuf can fail to emit the flush
1968          * after having emitted the batchbuffer command. Hence we need to fix
1969          * things up similar to emitting the lazy request. The difference here
1970          * is that the flush _must_ happen before the next request, no matter
1971          * what.
1972          */
1973         ret = intel_ring_flush_all_caches(ring);
1974         if (ret)
1975                 return ret;
1976
1977         if (request == NULL) {
1978                 request = kmalloc(sizeof(*request), GFP_KERNEL);
1979                 if (request == NULL)
1980                         return -ENOMEM;
1981         }
1982
1983         seqno = i915_gem_next_request_seqno(ring);
1984
1985         /* Record the position of the start of the request so that
1986          * should we detect the updated seqno part-way through the
1987          * GPU processing the request, we never over-estimate the
1988          * position of the head.
1989          */
1990         request_ring_position = intel_ring_get_tail(ring);
1991
1992         ret = ring->add_request(ring, &seqno);
1993         if (ret) {
1994                 kfree(request);
1995                 return ret;
1996         }
1997
1998         trace_i915_gem_request_add(ring, seqno);
1999
2000         request->seqno = seqno;
2001         request->ring = ring;
2002         request->tail = request_ring_position;
2003         request->emitted_jiffies = jiffies;
2004         was_empty = list_empty(&ring->request_list);
2005         list_add_tail(&request->list, &ring->request_list);
2006         request->file_priv = NULL;
2007
2008         if (file) {
2009                 struct drm_i915_file_private *file_priv = file->driver_priv;
2010
2011                 spin_lock(&file_priv->mm.lock);
2012                 request->file_priv = file_priv;
2013                 list_add_tail(&request->client_list,
2014                               &file_priv->mm.request_list);
2015                 spin_unlock(&file_priv->mm.lock);
2016         }
2017
2018         ring->outstanding_lazy_request = 0;
2019
2020         if (!dev_priv->mm.suspended) {
2021                 if (i915_enable_hangcheck) {
2022                         mod_timer(&dev_priv->hangcheck_timer,
2023                                   jiffies +
2024                                   msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
2025                 }
2026                 if (was_empty) {
2027                         queue_delayed_work(dev_priv->wq,
2028                                            &dev_priv->mm.retire_work, HZ);
2029                         intel_mark_busy(dev_priv->dev);
2030                 }
2031         }
2032
2033         return 0;
2034 }
2035
2036 static inline void
2037 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2038 {
2039         struct drm_i915_file_private *file_priv = request->file_priv;
2040
2041         if (!file_priv)
2042                 return;
2043
2044         spin_lock(&file_priv->mm.lock);
2045         if (request->file_priv) {
2046                 list_del(&request->client_list);
2047                 request->file_priv = NULL;
2048         }
2049         spin_unlock(&file_priv->mm.lock);
2050 }
2051
2052 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2053                                       struct intel_ring_buffer *ring)
2054 {
2055         while (!list_empty(&ring->request_list)) {
2056                 struct drm_i915_gem_request *request;
2057
2058                 request = list_first_entry(&ring->request_list,
2059                                            struct drm_i915_gem_request,
2060                                            list);
2061
2062                 list_del(&request->list);
2063                 i915_gem_request_remove_from_client(request);
2064                 kfree(request);
2065         }
2066
2067         while (!list_empty(&ring->active_list)) {
2068                 struct drm_i915_gem_object *obj;
2069
2070                 obj = list_first_entry(&ring->active_list,
2071                                        struct drm_i915_gem_object,
2072                                        ring_list);
2073
2074                 i915_gem_object_move_to_inactive(obj);
2075         }
2076 }
2077
2078 static void i915_gem_reset_fences(struct drm_device *dev)
2079 {
2080         struct drm_i915_private *dev_priv = dev->dev_private;
2081         int i;
2082
2083         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2084                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2085
2086                 i915_gem_write_fence(dev, i, NULL);
2087
2088                 if (reg->obj)
2089                         i915_gem_object_fence_lost(reg->obj);
2090
2091                 reg->pin_count = 0;
2092                 reg->obj = NULL;
2093                 INIT_LIST_HEAD(&reg->lru_list);
2094         }
2095
2096         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2097 }
2098
2099 void i915_gem_reset(struct drm_device *dev)
2100 {
2101         struct drm_i915_private *dev_priv = dev->dev_private;
2102         struct drm_i915_gem_object *obj;
2103         struct intel_ring_buffer *ring;
2104         int i;
2105
2106         for_each_ring(ring, dev_priv, i)
2107                 i915_gem_reset_ring_lists(dev_priv, ring);
2108
2109         /* Move everything out of the GPU domains to ensure we do any
2110          * necessary invalidation upon reuse.
2111          */
2112         list_for_each_entry(obj,
2113                             &dev_priv->mm.inactive_list,
2114                             mm_list) {
2116                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2117         }
2118
2119         /* The fence registers are invalidated so clear them out */
2120         i915_gem_reset_fences(dev);
2121 }
2122
2123 /**
2124  * This function clears the request list as sequence numbers are passed.
2125  */
2126 void
2127 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2128 {
2129         uint32_t seqno;
2130         int i;
2131
2132         if (list_empty(&ring->request_list))
2133                 return;
2134
2135         WARN_ON(i915_verify_lists(ring->dev));
2136
2137         seqno = ring->get_seqno(ring, true);
2138
2139         for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
2140                 if (seqno >= ring->sync_seqno[i])
2141                         ring->sync_seqno[i] = 0;
2142
2143         while (!list_empty(&ring->request_list)) {
2144                 struct drm_i915_gem_request *request;
2145
2146                 request = list_first_entry(&ring->request_list,
2147                                            struct drm_i915_gem_request,
2148                                            list);
2149
2150                 if (!i915_seqno_passed(seqno, request->seqno))
2151                         break;
2152
2153                 trace_i915_gem_request_retire(ring, request->seqno);
2154                 /* We know the GPU must have read the request to have
2155                  * sent us the seqno + interrupt, so use the position
2156                  * of tail of the request to update the last known position
2157                  * of the GPU head.
2158                  */
2159                 ring->last_retired_head = request->tail;
2160
2161                 list_del(&request->list);
2162                 i915_gem_request_remove_from_client(request);
2163                 kfree(request);
2164         }
2165
2166         /* Move any buffers on the active list that are no longer referenced
2167          * by the ringbuffer to the inactive list.
2168          */
2169         while (!list_empty(&ring->active_list)) {
2170                 struct drm_i915_gem_object *obj;
2171
2172                 obj = list_first_entry(&ring->active_list,
2173                                        struct drm_i915_gem_object,
2174                                        ring_list);
2175
2176                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2177                         break;
2178
2179                 i915_gem_object_move_to_inactive(obj);
2180         }
2181
2182         if (unlikely(ring->trace_irq_seqno &&
2183                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2184                 ring->irq_put(ring);
2185                 ring->trace_irq_seqno = 0;
2186         }
2187
2188         WARN_ON(i915_verify_lists(ring->dev));
2189 }
2190
2191 void
2192 i915_gem_retire_requests(struct drm_device *dev)
2193 {
2194         drm_i915_private_t *dev_priv = dev->dev_private;
2195         struct intel_ring_buffer *ring;
2196         int i;
2197
2198         for_each_ring(ring, dev_priv, i)
2199                 i915_gem_retire_requests_ring(ring);
2200 }
2201
2202 static void
2203 i915_gem_retire_work_handler(struct work_struct *work)
2204 {
2205         drm_i915_private_t *dev_priv;
2206         struct drm_device *dev;
2207         struct intel_ring_buffer *ring;
2208         bool idle;
2209         int i;
2210
2211         dev_priv = container_of(work, drm_i915_private_t,
2212                                 mm.retire_work.work);
2213         dev = dev_priv->dev;
2214
2215         /* Come back later if the device is busy... */
2216         if (!mutex_trylock(&dev->struct_mutex)) {
2217                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2218                 return;
2219         }
2220
2221         i915_gem_retire_requests(dev);
2222
2223         /* Send a periodic flush down the ring so we don't hold onto GEM
2224          * objects indefinitely.
2225          */
2226         idle = true;
2227         for_each_ring(ring, dev_priv, i) {
2228                 if (ring->gpu_caches_dirty)
2229                         i915_add_request(ring, NULL, NULL);
2230
2231                 idle &= list_empty(&ring->request_list);
2232         }
2233
2234         if (!dev_priv->mm.suspended && !idle)
2235                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2236         if (idle)
2237                 intel_mark_idle(dev);
2238
2239         mutex_unlock(&dev->struct_mutex);
2240 }
2241
2242 /**
2243  * Ensures that an object will eventually get non-busy by flushing any required
2244  * write domains, emitting any outstanding lazy request and retiring any
2245  * completed requests.
2246  */
2247 static int
2248 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2249 {
2250         int ret;
2251
2252         if (obj->active) {
2253                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2254                 if (ret)
2255                         return ret;
2256
2257                 i915_gem_retire_requests_ring(obj->ring);
2258         }
2259
2260         return 0;
2261 }
2262
2263 /**
2264  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2265  * @DRM_IOCTL_ARGS: standard ioctl arguments
2266  *
2267  * Returns 0 if successful, else an error is returned with the remaining time in
2268  * the timeout parameter.
2269  *  -ETIME: object is still busy after timeout
2270  *  -ERESTARTSYS: signal interrupted the wait
2271  *  -ENOENT: object doesn't exist
2272  * Also possible, but rare:
2273  *  -EAGAIN: GPU wedged
2274  *  -ENOMEM: damn
2275  *  -ENODEV: Internal IRQ fail
2276  *  -E?: The add request failed
2277  *
2278  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2279  * non-zero timeout parameter the wait ioctl will wait for the given number of
2280  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2281  * without holding struct_mutex, the object may become re-busied before this
2282  * function completes. A similar but shorter race condition exists in the busy
2283  * ioctl.
2284  */
2285 int
2286 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2287 {
2288         struct drm_i915_gem_wait *args = data;
2289         struct drm_i915_gem_object *obj;
2290         struct intel_ring_buffer *ring = NULL;
2291         struct timespec timeout_stack, *timeout = NULL;
2292         u32 seqno = 0;
2293         int ret = 0;
2294
2295         if (args->timeout_ns >= 0) {
2296                 timeout_stack = ns_to_timespec(args->timeout_ns);
2297                 timeout = &timeout_stack;
2298         }
2299
2300         ret = i915_mutex_lock_interruptible(dev);
2301         if (ret)
2302                 return ret;
2303
2304         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2305         if (&obj->base == NULL) {
2306                 mutex_unlock(&dev->struct_mutex);
2307                 return -ENOENT;
2308         }
2309
2310         /* Need to make sure the object gets inactive eventually. */
2311         ret = i915_gem_object_flush_active(obj);
2312         if (ret)
2313                 goto out;
2314
2315         if (obj->active) {
2316                 seqno = obj->last_read_seqno;
2317                 ring = obj->ring;
2318         }
2319
2320         if (seqno == 0)
2321                 goto out;
2322
2323         /* Do this after OLR check to make sure we make forward progress polling
2324          * on this IOCTL with a 0 timeout (like busy ioctl)
2325          */
2326         if (!args->timeout_ns) {
2327                 ret = -ETIME;
2328                 goto out;
2329         }
2330
2331         drm_gem_object_unreference(&obj->base);
2332         mutex_unlock(&dev->struct_mutex);
2333
2334         ret = __wait_seqno(ring, seqno, true, timeout);
2335         if (timeout) {
2336                 WARN_ON(!timespec_valid(timeout));
2337                 args->timeout_ns = timespec_to_ns(timeout);
2338         }
2339         return ret;
2340
2341 out:
2342         drm_gem_object_unreference(&obj->base);
2343         mutex_unlock(&dev->struct_mutex);
2344         return ret;
2345 }
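
/* Illustrative userspace sketch (editorial; `fd' and `handle' are assumed
 * to exist, error handling omitted). Waiting up to one second:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * When a timeout was supplied, the remaining time is written back to
 * wait.timeout_ns on return; ret is -1 with errno set to ETIME if the
 * object was still busy.
 */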
2346
2347 /**
2348  * i915_gem_object_sync - sync an object to a ring.
2349  *
2350  * @obj: object which may be in use on another ring.
2351  * @to: ring we wish to use the object on. May be NULL.
2352  *
2353  * This code is meant to abstract object synchronization with the GPU.
2354  * Calling with NULL implies synchronizing the object with the CPU
2355  * rather than a particular GPU ring.
2356  *
2357  * Returns 0 if successful, else propagates up the lower layer error.
2358  */
2359 int
2360 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2361                      struct intel_ring_buffer *to)
2362 {
2363         struct intel_ring_buffer *from = obj->ring;
2364         u32 seqno;
2365         int ret, idx;
2366
2367         if (from == NULL || to == from)
2368                 return 0;
2369
2370         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2371                 return i915_gem_object_wait_rendering(obj, false);
2372
2373         idx = intel_ring_sync_index(from, to);
2374
2375         seqno = obj->last_read_seqno;
2376         if (seqno <= from->sync_seqno[idx])
2377                 return 0;
2378
2379         ret = i915_gem_check_olr(obj->ring, seqno);
2380         if (ret)
2381                 return ret;
2382
2383         ret = to->sync_to(to, from, seqno);
2384         if (!ret)
2385                 from->sync_seqno[idx] = seqno;
2386
2387         return ret;
2388 }
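
/* Editorial note: from->sync_seqno[idx] caches the most recent seqno of
 * `from' that `to' has already waited upon, so repeated syncs against the
 * same or an older seqno return immediately without emitting a semaphore.
 */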
2389
2390 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2391 {
2392         u32 old_write_domain, old_read_domains;
2393
2394         /* Act as a barrier for all accesses through the GTT */
2395         mb();
2396
2397         /* Force a pagefault for domain tracking on next user access */
2398         i915_gem_release_mmap(obj);
2399
2400         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2401                 return;
2402
2403         old_read_domains = obj->base.read_domains;
2404         old_write_domain = obj->base.write_domain;
2405
2406         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2407         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2408
2409         trace_i915_gem_object_change_domain(obj,
2410                                             old_read_domains,
2411                                             old_write_domain);
2412 }
2413
2414 /**
2415  * Unbinds an object from the GTT aperture.
2416  */
2417 int
2418 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2419 {
2420         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2421         int ret = 0;
2422
2423         if (obj->gtt_space == NULL)
2424                 return 0;
2425
2426         if (obj->pin_count)
2427                 return -EBUSY;
2428
2429         BUG_ON(obj->pages == NULL);
2430
2431         ret = i915_gem_object_finish_gpu(obj);
2432         if (ret)
2433                 return ret;
2434         /* Continue on if we fail due to EIO: the GPU is hung, so we
2435          * should be safe, and we need to clean up or else we might
2436          * cause memory corruption through use-after-free.
2437          */
2438
2439         i915_gem_object_finish_gtt(obj);
2440
2441         /* release the fence reg _after_ flushing */
2442         ret = i915_gem_object_put_fence(obj);
2443         if (ret)
2444                 return ret;
2445
2446         trace_i915_gem_object_unbind(obj);
2447
2448         if (obj->has_global_gtt_mapping)
2449                 i915_gem_gtt_unbind_object(obj);
2450         if (obj->has_aliasing_ppgtt_mapping) {
2451                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2452                 obj->has_aliasing_ppgtt_mapping = 0;
2453         }
2454         i915_gem_gtt_finish_object(obj);
2455
2456         list_del(&obj->mm_list);
2457         list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2458         /* Avoid an unnecessary call to unbind on rebind. */
2459         obj->map_and_fenceable = true;
2460
2461         drm_mm_put_block(obj->gtt_space);
2462         obj->gtt_space = NULL;
2463         obj->gtt_offset = 0;
2464
2465         return 0;
2466 }
2467
2468 static int i915_ring_idle(struct intel_ring_buffer *ring)
2469 {
2470         if (list_empty(&ring->active_list))
2471                 return 0;
2472
2473         return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
2474 }
2475
2476 int i915_gpu_idle(struct drm_device *dev)
2477 {
2478         drm_i915_private_t *dev_priv = dev->dev_private;
2479         struct intel_ring_buffer *ring;
2480         int ret, i;
2481
2482         /* Flush everything onto the inactive list. */
2483         for_each_ring(ring, dev_priv, i) {
2484                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2485                 if (ret)
2486                         return ret;
2487
2488                 ret = i915_ring_idle(ring);
2489                 if (ret)
2490                         return ret;
2491         }
2492
2493         return 0;
2494 }
2495
2496 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2497                                         struct drm_i915_gem_object *obj)
2498 {
2499         drm_i915_private_t *dev_priv = dev->dev_private;
2500         uint64_t val;
2501
2502         if (obj) {
2503                 u32 size = obj->gtt_space->size;
2504
2505                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2506                                  0xfffff000) << 32;
2507                 val |= obj->gtt_offset & 0xfffff000;
2508                 val |= (uint64_t)((obj->stride / 128) - 1) <<
2509                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
2510
2511                 if (obj->tiling_mode == I915_TILING_Y)
2512                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2513                 val |= I965_FENCE_REG_VALID;
2514         } else
2515                 val = 0;
2516
2517         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2518         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2519 }
2520
2521 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2522                                  struct drm_i915_gem_object *obj)
2523 {
2524         drm_i915_private_t *dev_priv = dev->dev_private;
2525         uint64_t val;
2526
2527         if (obj) {
2528                 u32 size = obj->gtt_space->size;
2529
2530                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2531                                  0xfffff000) << 32;
2532                 val |= obj->gtt_offset & 0xfffff000;
2533                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2534                 if (obj->tiling_mode == I915_TILING_Y)
2535                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2536                 val |= I965_FENCE_REG_VALID;
2537         } else
2538                 val = 0;
2539
2540         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2541         POSTING_READ(FENCE_REG_965_0 + reg * 8);
2542 }
2543
2544 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2545                                  struct drm_i915_gem_object *obj)
2546 {
2547         drm_i915_private_t *dev_priv = dev->dev_private;
2548         u32 val;
2549
2550         if (obj) {
2551                 u32 size = obj->gtt_space->size;
2552                 int pitch_val;
2553                 int tile_width;
2554
2555                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2556                      (size & -size) != size ||
2557                      (obj->gtt_offset & (size - 1)),
2558                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2559                      obj->gtt_offset, obj->map_and_fenceable, size);
2560
2561                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2562                         tile_width = 128;
2563                 else
2564                         tile_width = 512;
2565
2566                 /* Note: pitch better be a power of two tile widths */
2567                 pitch_val = obj->stride / tile_width;
2568                 pitch_val = ffs(pitch_val) - 1;
2569
2570                 val = obj->gtt_offset;
2571                 if (obj->tiling_mode == I915_TILING_Y)
2572                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2573                 val |= I915_FENCE_SIZE_BITS(size);
2574                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2575                 val |= I830_FENCE_REG_VALID;
2576         } else
2577                 val = 0;
2578
2579         if (reg < 8)
2580                 reg = FENCE_REG_830_0 + reg * 4;
2581         else
2582                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2583
2584         I915_WRITE(reg, val);
2585         POSTING_READ(reg);
2586 }
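
/* Worked example (editorial, illustrative only): a Y-tiled object with a
 * 1024-byte stride on a chipset with 128-byte Y-tiles gives
 *
 *	pitch_val = ffs(1024 / 128) - 1 = ffs(8) - 1 = 3
 *
 * i.e. the register field encodes log2 of the pitch in tile widths.
 */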
2587
2588 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2589                                 struct drm_i915_gem_object *obj)
2590 {
2591         drm_i915_private_t *dev_priv = dev->dev_private;
2592         uint32_t val;
2593
2594         if (obj) {
2595                 u32 size = obj->gtt_space->size;
2596                 uint32_t pitch_val;
2597
2598                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2599                      (size & -size) != size ||
2600                      (obj->gtt_offset & (size - 1)),
2601                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2602                      obj->gtt_offset, size);
2603
2604                 pitch_val = obj->stride / 128;
2605                 pitch_val = ffs(pitch_val) - 1;
2606
2607                 val = obj->gtt_offset;
2608                 if (obj->tiling_mode == I915_TILING_Y)
2609                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2610                 val |= I830_FENCE_SIZE_BITS(size);
2611                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2612                 val |= I830_FENCE_REG_VALID;
2613         } else
2614                 val = 0;
2615
2616         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2617         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2618 }
2619
2620 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2621                                  struct drm_i915_gem_object *obj)
2622 {
2623         switch (INTEL_INFO(dev)->gen) {
2624         case 7:
2625         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2626         case 5:
2627         case 4: i965_write_fence_reg(dev, reg, obj); break;
2628         case 3: i915_write_fence_reg(dev, reg, obj); break;
2629         case 2: i830_write_fence_reg(dev, reg, obj); break;
2630         default: break;
2631         }
2632 }
2633
2634 static inline int fence_number(struct drm_i915_private *dev_priv,
2635                                struct drm_i915_fence_reg *fence)
2636 {
2637         return fence - dev_priv->fence_regs;
2638 }
2639
2640 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2641                                          struct drm_i915_fence_reg *fence,
2642                                          bool enable)
2643 {
2644         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2645         int reg = fence_number(dev_priv, fence);
2646
2647         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2648
2649         if (enable) {
2650                 obj->fence_reg = reg;
2651                 fence->obj = obj;
2652                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2653         } else {
2654                 obj->fence_reg = I915_FENCE_REG_NONE;
2655                 fence->obj = NULL;
2656                 list_del_init(&fence->lru_list);
2657         }
2658 }
2659
2660 static int
2661 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2662 {
2663         if (obj->last_fenced_seqno) {
2664                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2665                 if (ret)
2666                         return ret;
2667
2668                 obj->last_fenced_seqno = 0;
2669         }
2670
2671         /* Ensure that all CPU reads are completed before installing a fence
2672          * and all writes before removing the fence.
2673          */
2674         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2675                 mb();
2676
2677         obj->fenced_gpu_access = false;
2678         return 0;
2679 }
2680
2681 int
2682 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2683 {
2684         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2685         int ret;
2686
2687         ret = i915_gem_object_flush_fence(obj);
2688         if (ret)
2689                 return ret;
2690
2691         if (obj->fence_reg == I915_FENCE_REG_NONE)
2692                 return 0;
2693
2694         i915_gem_object_update_fence(obj,
2695                                      &dev_priv->fence_regs[obj->fence_reg],
2696                                      false);
2697         i915_gem_object_fence_lost(obj);
2698
2699         return 0;
2700 }
2701
2702 static struct drm_i915_fence_reg *
2703 i915_find_fence_reg(struct drm_device *dev)
2704 {
2705         struct drm_i915_private *dev_priv = dev->dev_private;
2706         struct drm_i915_fence_reg *reg, *avail;
2707         int i;
2708
2709         /* First try to find a free reg */
2710         avail = NULL;
2711         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2712                 reg = &dev_priv->fence_regs[i];
2713                 if (!reg->obj)
2714                         return reg;
2715
2716                 if (!reg->pin_count)
2717                         avail = reg;
2718         }
2719
2720         if (avail == NULL)
2721                 return NULL;
2722
2723         /* None available, try to steal one or wait for a user to finish */
2724         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2725                 if (reg->pin_count)
2726                         continue;
2727
2728                 return reg;
2729         }
2730
2731         return NULL;
2732 }
2733
2734 /**
2735  * i915_gem_object_get_fence - set up fencing for an object
2736  * @obj: object to map through a fence reg
2737  *
2738  * When mapping objects through the GTT, userspace wants to be able to write
2739  * to them without having to worry about swizzling if the object is tiled.
2740  * This function walks the fence regs looking for a free one for @obj,
2741  * stealing one if it can't find any.
2742  *
2743  * It then sets up the reg based on the object's properties: address, pitch
2744  * and tiling format.
2745  *
2746  * For an untiled surface, this removes any existing fence.
2747  */
2748 int
2749 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2750 {
2751         struct drm_device *dev = obj->base.dev;
2752         struct drm_i915_private *dev_priv = dev->dev_private;
2753         bool enable = obj->tiling_mode != I915_TILING_NONE;
2754         struct drm_i915_fence_reg *reg;
2755         int ret;
2756
2757         /* Have we updated the tiling parameters upon the object and so
2758          * will need to serialise the write to the associated fence register?
2759          */
2760         if (obj->fence_dirty) {
2761                 ret = i915_gem_object_flush_fence(obj);
2762                 if (ret)
2763                         return ret;
2764         }
2765
2766         /* Just update our place in the LRU if our fence is getting reused. */
2767         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2768                 reg = &dev_priv->fence_regs[obj->fence_reg];
2769                 if (!obj->fence_dirty) {
2770                         list_move_tail(&reg->lru_list,
2771                                        &dev_priv->mm.fence_list);
2772                         return 0;
2773                 }
2774         } else if (enable) {
2775                 reg = i915_find_fence_reg(dev);
2776                 if (reg == NULL)
2777                         return -EDEADLK;
2778
2779                 if (reg->obj) {
2780                         struct drm_i915_gem_object *old = reg->obj;
2781
2782                         ret = i915_gem_object_flush_fence(old);
2783                         if (ret)
2784                                 return ret;
2785
2786                         i915_gem_object_fence_lost(old);
2787                 }
2788         } else
2789                 return 0;
2790
2791         i915_gem_object_update_fence(obj, reg, enable);
2792         obj->fence_dirty = false;
2793
2794         return 0;
2795 }
2796
2797 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2798                                      struct drm_mm_node *gtt_space,
2799                                      unsigned long cache_level)
2800 {
2801         struct drm_mm_node *other;
2802
2803         /* On non-LLC machines we have to be careful when putting differing
2804          * types of snoopable memory together to avoid the prefetcher
2805          * crossing memory domains and dying.
2806          */
2807         if (HAS_LLC(dev))
2808                 return true;
2809
2810         if (gtt_space == NULL)
2811                 return true;
2812
2813         if (list_empty(&gtt_space->node_list))
2814                 return true;
2815
2816         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2817         if (other->allocated && !other->hole_follows && other->color != cache_level)
2818                 return false;
2819
2820         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2821         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2822                 return false;
2823
2824         return true;
2825 }
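
/* Editorial note: a placement is therefore rejected only when a directly
 * adjacent, allocated neighbour of a different cache level abuts it with
 * no hole in between -- exactly the layout the prefetcher can trip over.
 */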
2826
2827 static void i915_gem_verify_gtt(struct drm_device *dev)
2828 {
2829 #if WATCH_GTT
2830         struct drm_i915_private *dev_priv = dev->dev_private;
2831         struct drm_i915_gem_object *obj;
2832         int err = 0;
2833
2834         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2835                 if (obj->gtt_space == NULL) {
2836                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
2837                         err++;
2838                         continue;
2839                 }
2840
2841                 if (obj->cache_level != obj->gtt_space->color) {
2842                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2843                                obj->gtt_space->start,
2844                                obj->gtt_space->start + obj->gtt_space->size,
2845                                obj->cache_level,
2846                                obj->gtt_space->color);
2847                         err++;
2848                         continue;
2849                 }
2850
2851                 if (!i915_gem_valid_gtt_space(dev,
2852                                               obj->gtt_space,
2853                                               obj->cache_level)) {
2854                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2855                                obj->gtt_space->start,
2856                                obj->gtt_space->start + obj->gtt_space->size,
2857                                obj->cache_level);
2858                         err++;
2859                         continue;
2860                 }
2861         }
2862
2863         WARN_ON(err);
2864 #endif
2865 }
2866
2867 /**
2868  * Finds free space in the GTT aperture and binds the object there.
2869  */
2870 static int
2871 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2872                             unsigned alignment,
2873                             bool map_and_fenceable,
2874                             bool nonblocking)
2875 {
2876         struct drm_device *dev = obj->base.dev;
2877         drm_i915_private_t *dev_priv = dev->dev_private;
2878         struct drm_mm_node *free_space;
2879         u32 size, fence_size, fence_alignment, unfenced_alignment;
2880         bool mappable, fenceable;
2881         int ret;
2882
2883         if (obj->madv != I915_MADV_WILLNEED) {
2884                 DRM_ERROR("Attempting to bind a purgeable object\n");
2885                 return -EINVAL;
2886         }
2887
2888         fence_size = i915_gem_get_gtt_size(dev,
2889                                            obj->base.size,
2890                                            obj->tiling_mode);
2891         fence_alignment = i915_gem_get_gtt_alignment(dev,
2892                                                      obj->base.size,
2893                                                      obj->tiling_mode);
2894         unfenced_alignment =
2895                 i915_gem_get_unfenced_gtt_alignment(dev,
2896                                                     obj->base.size,
2897                                                     obj->tiling_mode);
2898
2899         if (alignment == 0)
2900                 alignment = map_and_fenceable ? fence_alignment :
2901                                                 unfenced_alignment;
2902         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2903                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2904                 return -EINVAL;
2905         }
2906
2907         size = map_and_fenceable ? fence_size : obj->base.size;
2908
2909         /* If the object is bigger than the entire aperture, reject it early
2910          * before evicting everything in a vain attempt to find space.
2911          */
2912         if (obj->base.size >
2913             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2914                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2915                 return -E2BIG;
2916         }
2917
2918         ret = i915_gem_object_get_pages(obj);
2919         if (ret)
2920                 return ret;
2921
2922  search_free:
2923         if (map_and_fenceable)
2924                 free_space =
2925                         drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2926                                                           size, alignment, obj->cache_level,
2927                                                           0, dev_priv->mm.gtt_mappable_end,
2928                                                           false);
2929         else
2930                 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2931                                                       size, alignment, obj->cache_level,
2932                                                       false);
2933
2934         if (free_space != NULL) {
2935                 if (map_and_fenceable)
2936                         obj->gtt_space =
2937                                 drm_mm_get_block_range_generic(free_space,
2938                                                                size, alignment, obj->cache_level,
2939                                                                0, dev_priv->mm.gtt_mappable_end,
2940                                                                false);
2941                 else
2942                         obj->gtt_space =
2943                                 drm_mm_get_block_generic(free_space,
2944                                                          size, alignment, obj->cache_level,
2945                                                          false);
2946         }
2947         if (obj->gtt_space == NULL) {
2948                 ret = i915_gem_evict_something(dev, size, alignment,
2949                                                obj->cache_level,
2950                                                map_and_fenceable,
2951                                                nonblocking);
2952                 if (ret)
2953                         return ret;
2954
2955                 goto search_free;
2956         }
2957         if (WARN_ON(!i915_gem_valid_gtt_space(dev,
2958                                               obj->gtt_space,
2959                                               obj->cache_level))) {
2960                 drm_mm_put_block(obj->gtt_space);
2961                 obj->gtt_space = NULL;
2962                 return -EINVAL;
2963         }
2964
2966         ret = i915_gem_gtt_prepare_object(obj);
2967         if (ret) {
2968                 drm_mm_put_block(obj->gtt_space);
2969                 obj->gtt_space = NULL;
2970                 return ret;
2971         }
2972
2973         if (!dev_priv->mm.aliasing_ppgtt)
2974                 i915_gem_gtt_bind_object(obj, obj->cache_level);
2975
2976         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2977         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2978
2979         obj->gtt_offset = obj->gtt_space->start;
2980
2981         fenceable =
2982                 obj->gtt_space->size == fence_size &&
2983                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2984
2985         mappable =
2986                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2987
2988         obj->map_and_fenceable = mappable && fenceable;
2989
2990         trace_i915_gem_object_bind(obj, map_and_fenceable);
2991         i915_gem_verify_gtt(dev);
2992         return 0;
2993 }
2994
2995 void
2996 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2997 {
2998         /* If we don't have a page list set up, then we're not pinned
2999          * to GPU, and we can ignore the cache flush because it'll happen
3000          * again at bind time.
3001          */
3002         if (obj->pages == NULL)
3003                 return;
3004
3005         /* If the GPU is snooping the contents of the CPU cache,
3006          * we do not need to manually clear the CPU cache lines.  However,
3007          * the caches are only snooped when the render cache is
3008          * flushed/invalidated.  As we always have to emit invalidations
3009          * and flushes when moving into and out of the RENDER domain, correct
3010          * snooping behaviour occurs naturally as the result of our domain
3011          * tracking.
3012          */
3013         if (obj->cache_level != I915_CACHE_NONE)
3014                 return;
3015
3016         trace_i915_gem_object_clflush(obj);
3017
3018         drm_clflush_sg(obj->pages);
3019 }
3020
3021 /** Flushes the GTT write domain for the object if it's dirty. */
3022 static void
3023 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3024 {
3025         uint32_t old_write_domain;
3026
3027         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3028                 return;
3029
3030         /* No actual flushing is required for the GTT write domain.  Writes
3031          * through it reach main memory directly as far as we know, so no
3032          * chipset flush is needed, and nothing lands in the render cache.
3033          *
3034          * However, we do have to enforce the order so that all writes through
3035          * the GTT land before any writes to the device, such as updates to
3036          * the GATT itself.
3037          */
3038         wmb();
3039
3040         old_write_domain = obj->base.write_domain;
3041         obj->base.write_domain = 0;
3042
3043         trace_i915_gem_object_change_domain(obj,
3044                                             obj->base.read_domains,
3045                                             old_write_domain);
3046 }
3047
3048 /** Flushes the CPU write domain for the object if it's dirty. */
3049 static void
3050 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3051 {
3052         uint32_t old_write_domain;
3053
3054         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3055                 return;
3056
3057         i915_gem_clflush_object(obj);
3058         intel_gtt_chipset_flush();
3059         old_write_domain = obj->base.write_domain;
3060         obj->base.write_domain = 0;
3061
3062         trace_i915_gem_object_change_domain(obj,
3063                                             obj->base.read_domains,
3064                                             old_write_domain);
3065 }
3066
3067 /**
3068  * Moves a single object to the GTT read, and possibly write domain.
3069  *
3070  * This function returns when the move is complete, including waiting on
3071  * flushes to occur.
3072  */
3073 int
3074 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3075 {
3076         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3077         uint32_t old_write_domain, old_read_domains;
3078         int ret;
3079
3080         /* Not valid to be called on unbound objects. */
3081         if (obj->gtt_space == NULL)
3082                 return -EINVAL;
3083
3084         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3085                 return 0;
3086
3087         ret = i915_gem_object_wait_rendering(obj, !write);
3088         if (ret)
3089                 return ret;
3090
3091         i915_gem_object_flush_cpu_write_domain(obj);
3092
3093         old_write_domain = obj->base.write_domain;
3094         old_read_domains = obj->base.read_domains;
3095
3096         /* It should now be out of any other write domains, and we can update
3097          * the domain values for our changes.
3098          */
3099         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3100         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3101         if (write) {
3102                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3103                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3104                 obj->dirty = 1;
3105         }
3106
3107         trace_i915_gem_object_change_domain(obj,
3108                                             old_read_domains,
3109                                             old_write_domain);
3110
3111         /* And bump the LRU for this access */
3112         if (i915_gem_object_is_inactive(obj))
3113                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3114
3115         return 0;
3116 }
3117
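/**
 * Changes the caching attributes used by the object's PTEs. Pinned objects
 * cannot be switched. If the object is bound, the GTT (and any aliasing
 * PPGTT) mapping is rewritten with the new cache level; on pre-SandyBridge
 * hardware the fence is released first, as snooped memory cannot be fenced.
 * Moving to uncached also transitions the object to the CPU domain so that
 * subsequent clflushes are tracked correctly.
 */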
3118 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3119                                     enum i915_cache_level cache_level)
3120 {
3121         struct drm_device *dev = obj->base.dev;
3122         drm_i915_private_t *dev_priv = dev->dev_private;
3123         int ret;
3124
3125         if (obj->cache_level == cache_level)
3126                 return 0;
3127
3128         if (obj->pin_count) {
3129                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3130                 return -EBUSY;
3131         }
3132
3133         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3134                 ret = i915_gem_object_unbind(obj);
3135                 if (ret)
3136                         return ret;
3137         }
3138
3139         if (obj->gtt_space) {
3140                 ret = i915_gem_object_finish_gpu(obj);
3141                 if (ret)
3142                         return ret;
3143
3144                 i915_gem_object_finish_gtt(obj);
3145
3146                 /* Before SandyBridge, you could not use tiling or fence
3147                  * registers with snooped memory, so relinquish any fences
3148                  * currently pointing to our region in the aperture.
3149                  */
3150                 if (INTEL_INFO(dev)->gen < 6) {
3151                         ret = i915_gem_object_put_fence(obj);
3152                         if (ret)
3153                                 return ret;
3154                 }
3155
3156                 if (obj->has_global_gtt_mapping)
3157                         i915_gem_gtt_bind_object(obj, cache_level);
3158                 if (obj->has_aliasing_ppgtt_mapping)
3159                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3160                                                obj, cache_level);
3161
3162                 obj->gtt_space->color = cache_level;
3163         }
3164
3165         if (cache_level == I915_CACHE_NONE) {
3166                 u32 old_read_domains, old_write_domain;
3167
3168                 /* If we're coming from an LLC-cached level, then we
3169                  * haven't actually been tracking whether the data is in
3170                  * the CPU cache or not, since we only allow one bit to be
3171                  * set in obj->write_domain and have been skipping the
3172                  * clflushes. Just set it to the CPU cache for now.
3173                  */
3174                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3175                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3176
3177                 old_read_domains = obj->base.read_domains;
3178                 old_write_domain = obj->base.write_domain;
3179
3180                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3181                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3182
3183                 trace_i915_gem_object_change_domain(obj,
3184                                                     old_read_domains,
3185                                                     old_write_domain);
3186         }
3187
3188         obj->cache_level = cache_level;
3189         i915_gem_verify_gtt(dev);
3190         return 0;
3191 }
3192
3193 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3194                                struct drm_file *file)
3195 {
3196         struct drm_i915_gem_caching *args = data;
3197         struct drm_i915_gem_object *obj;
3198         int ret;
3199
3200         ret = i915_mutex_lock_interruptible(dev);
3201         if (ret)
3202                 return ret;
3203
3204         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3205         if (&obj->base == NULL) {
3206                 ret = -ENOENT;
3207                 goto unlock;
3208         }
3209
3210         args->caching = obj->cache_level != I915_CACHE_NONE;
3211
3212         drm_gem_object_unreference(&obj->base);
3213 unlock:
3214         mutex_unlock(&dev->struct_mutex);
3215         return ret;
3216 }
3217
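/**
 * Userspace entry point for changing an object's caching mode: translates
 * I915_CACHING_NONE/I915_CACHING_CACHED into the internal cache levels and
 * applies the change via i915_gem_object_set_cache_level().
 */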
3218 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3219                                struct drm_file *file)
3220 {
3221         struct drm_i915_gem_caching *args = data;
3222         struct drm_i915_gem_object *obj;
3223         enum i915_cache_level level;
3224         int ret;
3225
3226         switch (args->caching) {
3227         case I915_CACHING_NONE:
3228                 level = I915_CACHE_NONE;
3229                 break;
3230         case I915_CACHING_CACHED:
3231                 level = I915_CACHE_LLC;
3232                 break;
3233         default:
3234                 return -EINVAL;
3235         }
3236
3237         ret = i915_mutex_lock_interruptible(dev);
3238         if (ret)
3239                 return ret;
3240
3241         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3242         if (&obj->base == NULL) {
3243                 ret = -ENOENT;
3244                 goto unlock;
3245         }
3246
3247         ret = i915_gem_object_set_cache_level(obj, level);
3248
3249         drm_gem_object_unreference(&obj->base);
3250 unlock:
3251         mutex_unlock(&dev->struct_mutex);
3252         return ret;
3253 }
3254
3255 /*
3256  * Prepare buffer for display plane (scanout, cursors, etc).
3257  * Can be called from an uninterruptible phase (modesetting) and allows
3258  * any flushes to be pipelined (for pageflips).
3259  */
3260 int
3261 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3262                                      u32 alignment,
3263                                      struct intel_ring_buffer *pipelined)
3264 {
3265         u32 old_read_domains, old_write_domain;
3266         int ret;
3267
3268         if (pipelined != obj->ring) {
3269                 ret = i915_gem_object_sync(obj, pipelined);
3270                 if (ret)
3271                         return ret;
3272         }
3273
3274         /* The display engine is not coherent with the LLC cache on gen6.  As
3275          * a result, we make sure that the pinning that is about to occur is
3276          * done with uncached PTEs. This is the lowest common denominator
3277          * for all chipsets.
3278          *
3279          * However for gen6+, we could do better by using the GFDT bit instead
3280          * of uncaching, which would allow us to flush all the LLC-cached data
3281          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3282          */
3283         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3284         if (ret)
3285                 return ret;
3286
3287         /* As the user may map the buffer once pinned in the display plane
3288          * (e.g. libkms for the bootup splash), we have to ensure that we
3289          * always use map_and_fenceable for all scanout buffers.
3290          */
3291         ret = i915_gem_object_pin(obj, alignment, true, false);
3292         if (ret)
3293                 return ret;
3294
3295         i915_gem_object_flush_cpu_write_domain(obj);
3296
3297         old_write_domain = obj->base.write_domain;
3298         old_read_domains = obj->base.read_domains;
3299
3300         /* It should now be out of any other write domains, and we can update
3301          * the domain values for our changes.
3302          */
3303         obj->base.write_domain = 0;
3304         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3305
3306         trace_i915_gem_object_change_domain(obj,
3307                                             old_read_domains,
3308                                             old_write_domain);
3309
3310         return 0;
3311 }
3312
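/**
 * Waits for any outstanding rendering on the object and drops its GPU read
 * domains, so that the GPU's caches and TLBs are invalidated before the
 * object is next used.
 */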
3313 int
3314 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3315 {
3316         int ret;
3317
3318         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3319                 return 0;
3320
3321         ret = i915_gem_object_wait_rendering(obj, false);
3322         if (ret)
3323                 return ret;
3324
3325         /* Ensure that we invalidate the GPU's caches and TLBs. */
3326         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3327         return 0;
3328 }
3329
3330 /**
3331  * Moves a single object to the CPU read, and possibly write domain.
3332  *
3333  * This function returns when the move is complete, including waiting on
3334  * flushes to occur.
3335  */
3336 int
3337 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3338 {
3339         uint32_t old_write_domain, old_read_domains;
3340         int ret;
3341
3342         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3343                 return 0;
3344
3345         ret = i915_gem_object_wait_rendering(obj, !write);
3346         if (ret)
3347                 return ret;
3348
3349         i915_gem_object_flush_gtt_write_domain(obj);
3350
3351         old_write_domain = obj->base.write_domain;
3352         old_read_domains = obj->base.read_domains;
3353
3354         /* Flush the CPU cache if it's still invalid. */
3355         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3356                 i915_gem_clflush_object(obj);
3357
3358                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3359         }
3360
3361         /* It should now be out of any other write domains, and we can update
3362          * the domain values for our changes.
3363          */
3364         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3365
3366         /* If we're writing through the CPU, then the GPU read domains will
3367          * need to be invalidated at next use.
3368          */
3369         if (write) {
3370                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3371                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3372         }
3373
3374         trace_i915_gem_object_change_domain(obj,
3375                                             old_read_domains,
3376                                             old_write_domain);
3377
3378         return 0;
3379 }
3380
3381 /* Throttle our rendering by waiting until the ring has completed our requests
3382  * emitted over 20 msec ago.
3383  *
3384  * Note that if we were to use the current jiffies each time around the loop,
3385  * we wouldn't escape the function with any frames outstanding if the time to
3386  * render a frame was over 20ms.
3387  *
3388  * This should get us reasonable parallelism between CPU and GPU but also
3389  * relatively low latency when blocking on a particular request to finish.
3390  */
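/* Illustrative timeline: with requests emitted at t-30ms, t-25ms and
 * t-5ms, the loop below stops at the t-5ms request and waits on the
 * t-25ms one, i.e. on everything that is at least 20ms old.
 */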
3391 static int
3392 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3393 {
3394         struct drm_i915_private *dev_priv = dev->dev_private;
3395         struct drm_i915_file_private *file_priv = file->driver_priv;
3396         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3397         struct drm_i915_gem_request *request;
3398         struct intel_ring_buffer *ring = NULL;
3399         u32 seqno = 0;
3400         int ret;
3401
3402         if (atomic_read(&dev_priv->mm.wedged))
3403                 return -EIO;
3404
3405         spin_lock(&file_priv->mm.lock);
3406         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3407                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3408                         break;
3409
3410                 ring = request->ring;
3411                 seqno = request->seqno;
3412         }
3413         spin_unlock(&file_priv->mm.lock);
3414
3415         if (seqno == 0)
3416                 return 0;
3417
3418         ret = __wait_seqno(ring, seqno, true, NULL);
3419         if (ret == 0)
3420                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3421
3422         return ret;
3423 }
3424
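/**
 * Pins the object into the GTT, binding it first if necessary and
 * rebinding it if the current placement violates the requested alignment
 * or map_and_fenceable constraint. Each successful call takes one pin
 * reference and must be paired with i915_gem_object_unpin().
 *
 * Illustrative call sequence (caller holds dev->struct_mutex):
 *
 *	ret = i915_gem_object_pin(obj, 4096, true, false);
 *	if (ret)
 *		return ret;
 *	... access the object through obj->gtt_offset ...
 *	i915_gem_object_unpin(obj);
 */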
3425 int
3426 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3427                     uint32_t alignment,
3428                     bool map_and_fenceable,
3429                     bool nonblocking)
3430 {
3431         int ret;
3432
3433         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3434                 return -EBUSY;
3435
3436         if (obj->gtt_space != NULL) {
3437                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3438                     (map_and_fenceable && !obj->map_and_fenceable)) {
3439                         WARN(obj->pin_count,
3440                              "bo is already pinned with incorrect alignment:"
3441                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3442                              " obj->map_and_fenceable=%d\n",
3443                              obj->gtt_offset, alignment,
3444                              map_and_fenceable,
3445                              obj->map_and_fenceable);
3446                         ret = i915_gem_object_unbind(obj);
3447                         if (ret)
3448                                 return ret;
3449                 }
3450         }
3451
3452         if (obj->gtt_space == NULL) {
3453                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3454                                                   map_and_fenceable,
3455                                                   nonblocking);
3456                 if (ret)
3457                         return ret;
3458         }
3459
3460         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3461                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3462
3463         obj->pin_count++;
3464         obj->pin_mappable |= map_and_fenceable;
3465
3466         return 0;
3467 }
3468
3469 void
3470 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3471 {
3472         BUG_ON(obj->pin_count == 0);
3473         BUG_ON(obj->gtt_space == NULL);
3474
3475         if (--obj->pin_count == 0)
3476                 obj->pin_mappable = false;
3477 }
3478
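/**
 * Legacy userspace pin interface (pre-KMS X servers): pins the object
 * mappable and fenceable, records the owning file and the user pin count,
 * and returns the resulting GTT offset.
 */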
3479 int
3480 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3481                    struct drm_file *file)
3482 {
3483         struct drm_i915_gem_pin *args = data;
3484         struct drm_i915_gem_object *obj;
3485         int ret;
3486
3487         ret = i915_mutex_lock_interruptible(dev);
3488         if (ret)
3489                 return ret;
3490
3491         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3492         if (&obj->base == NULL) {
3493                 ret = -ENOENT;
3494                 goto unlock;
3495         }
3496
3497         if (obj->madv != I915_MADV_WILLNEED) {
3498                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3499                 ret = -EINVAL;
3500                 goto out;
3501         }
3502
3503         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3504                 DRM_ERROR("Object %d already pinned by another client\n",
3505                           args->handle);
3506                 ret = -EINVAL;
3507                 goto out;
3508         }
3509
3510         obj->user_pin_count++;
3511         obj->pin_filp = file;
3512         if (obj->user_pin_count == 1) {
3513                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3514                 if (ret)
3515                         goto out;
3516         }
3517
3518         /* XXX - flush the CPU caches for pinned objects
3519          * as the X server doesn't manage domains yet
3520          */
3521         i915_gem_object_flush_cpu_write_domain(obj);
3522         args->offset = obj->gtt_offset;
3523 out:
3524         drm_gem_object_unreference(&obj->base);
3525 unlock:
3526         mutex_unlock(&dev->struct_mutex);
3527         return ret;
3528 }
3529
3530 int
3531 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3532                      struct drm_file *file)
3533 {
3534         struct drm_i915_gem_pin *args = data;
3535         struct drm_i915_gem_object *obj;
3536         int ret;
3537
3538         ret = i915_mutex_lock_interruptible(dev);
3539         if (ret)
3540                 return ret;
3541
3542         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3543         if (&obj->base == NULL) {
3544                 ret = -ENOENT;
3545                 goto unlock;
3546         }
3547
3548         if (obj->pin_filp != file) {
3549                 DRM_ERROR("Object %d not pinned by this client via i915_gem_pin_ioctl()\n",
3550                           args->handle);
3551                 ret = -EINVAL;
3552                 goto out;
3553         }
3554         obj->user_pin_count--;
3555         if (obj->user_pin_count == 0) {
3556                 obj->pin_filp = NULL;
3557                 i915_gem_object_unpin(obj);
3558         }
3559
3560 out:
3561         drm_gem_object_unreference(&obj->base);
3562 unlock:
3563         mutex_unlock(&dev->struct_mutex);
3564         return ret;
3565 }
3566
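/**
 * Reports whether the object is still in use by the GPU. Bit 0 of
 * args->busy mirrors obj->active; bits 16 and up encode the ring the
 * object was last used on.
 */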
3567 int
3568 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3569                     struct drm_file *file)
3570 {
3571         struct drm_i915_gem_busy *args = data;
3572         struct drm_i915_gem_object *obj;
3573         int ret;
3574
3575         ret = i915_mutex_lock_interruptible(dev);
3576         if (ret)
3577                 return ret;
3578
3579         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3580         if (&obj->base == NULL) {
3581                 ret = -ENOENT;
3582                 goto unlock;
3583         }
3584
3585         /* Count all active objects as busy, even if they are not currently
3586          * used by the GPU. Users of this interface expect objects to eventually
3587          * become non-busy without any further actions, therefore emit any
3588          * necessary flushes here.
3589          */
3590         ret = i915_gem_object_flush_active(obj);
3591
3592         args->busy = obj->active;
3593         if (obj->ring) {
3594                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3595                 args->busy |= intel_ring_flag(obj->ring) << 16;
3596         }
3597
3598         drm_gem_object_unreference(&obj->base);
3599 unlock:
3600         mutex_unlock(&dev->struct_mutex);
3601         return ret;
3602 }
3603
3604 int
3605 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3606                         struct drm_file *file_priv)
3607 {
3608         return i915_gem_ring_throttle(dev, file_priv);
3609 }
3610
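/**
 * Userspace hint about the importance of the object's backing storage:
 * I915_MADV_DONTNEED marks it purgeable, so the shrinker may discard the
 * pages under memory pressure; I915_MADV_WILLNEED clears the hint.
 * args->retained reports whether the storage has survived so far.
 */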
3611 int
3612 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3613                        struct drm_file *file_priv)
3614 {
3615         struct drm_i915_gem_madvise *args = data;
3616         struct drm_i915_gem_object *obj;
3617         int ret;
3618
3619         switch (args->madv) {
3620         case I915_MADV_DONTNEED:
3621         case I915_MADV_WILLNEED:
3622                 break;
3623         default:
3624                 return -EINVAL;
3625         }
3626
3627         ret = i915_mutex_lock_interruptible(dev);
3628         if (ret)
3629                 return ret;
3630
3631         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3632         if (&obj->base == NULL) {
3633                 ret = -ENOENT;
3634                 goto unlock;
3635         }
3636
3637         if (obj->pin_count) {
3638                 ret = -EINVAL;
3639                 goto out;
3640         }
3641
3642         if (obj->madv != __I915_MADV_PURGED)
3643                 obj->madv = args->madv;
3644
3645         /* if the object is no longer attached, discard its backing storage */
3646         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3647                 i915_gem_object_truncate(obj);
3648
3649         args->retained = obj->madv != __I915_MADV_PURGED;
3650
3651 out:
3652         drm_gem_object_unreference(&obj->base);
3653 unlock:
3654         mutex_unlock(&dev->struct_mutex);
3655         return ret;
3656 }
3657
3658 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3659                           const struct drm_i915_gem_object_ops *ops)
3660 {
3661         INIT_LIST_HEAD(&obj->mm_list);
3662         INIT_LIST_HEAD(&obj->gtt_list);
3663         INIT_LIST_HEAD(&obj->ring_list);
3664         INIT_LIST_HEAD(&obj->exec_list);
3665
3666         obj->ops = ops;
3667
3668         obj->fence_reg = I915_FENCE_REG_NONE;
3669         obj->madv = I915_MADV_WILLNEED;
3670         /* Avoid an unnecessary call to unbind on the first bind. */
3671         obj->map_and_fenceable = true;
3672
3673         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3674 }
3675
3676 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3677         .get_pages = i915_gem_object_get_pages_gtt,
3678         .put_pages = i915_gem_object_put_pages_gtt,
3679 };
3680
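/**
 * Allocates a GEM object of the requested size backed by shmemfs,
 * restricting the allocations to the low 4GiB on 965G/965GM (which cannot
 * relocate objects above that boundary) and starting the object out in
 * the CPU domain with an LLC cache level where the hardware supports one.
 */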
3681 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3682                                                   size_t size)
3683 {
3684         struct drm_i915_gem_object *obj;
3685         struct address_space *mapping;
3686         u32 mask;
3687
3688         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3689         if (obj == NULL)
3690                 return NULL;
3691
3692         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3693                 kfree(obj);
3694                 return NULL;
3695         }
3696
3697         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3698         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3699                 /* 965gm cannot relocate objects above 4GiB. */
3700                 mask &= ~__GFP_HIGHMEM;
3701                 mask |= __GFP_DMA32;
3702         }
3703
3704         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3705         mapping_set_gfp_mask(mapping, mask);
3706
3707         i915_gem_object_init(obj, &i915_gem_object_ops);
3708
3709         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3710         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3711
3712         if (HAS_LLC(dev)) {
3713                 /* On some devices, we can have the GPU use the LLC (the CPU
3714                  * cache) for about a 10% performance improvement
3715                  * compared to uncached.  Graphics requests other than
3716                  * display scanout are coherent with the CPU in
3717                  * accessing this cache.  This means in this mode we
3718                  * don't need to clflush on the CPU side, and on the
3719                  * GPU side we only need to flush internal caches to
3720                  * get data visible to the CPU.
3721                  *
3722                  * However, we maintain the display planes as UC, and so
3723                  * need to rebind when first used as such.
3724                  */
3725                 obj->cache_level = I915_CACHE_LLC;
3726         } else
3727                 obj->cache_level = I915_CACHE_NONE;
3728
3729         return obj;
3730 }
3731
3732 int i915_gem_init_object(struct drm_gem_object *obj)
3733 {
3734         BUG();
3735
3736         return 0;
3737 }
3738
3739 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3740 {
3741         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3742         struct drm_device *dev = obj->base.dev;
3743         drm_i915_private_t *dev_priv = dev->dev_private;
3744
3745         trace_i915_gem_object_destroy(obj);
3746
3747         if (obj->phys_obj)
3748                 i915_gem_detach_phys_object(dev, obj);
3749
3750         obj->pin_count = 0;
3751         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3752                 bool was_interruptible;
3753
3754                 was_interruptible = dev_priv->mm.interruptible;
3755                 dev_priv->mm.interruptible = false;
3756
3757                 WARN_ON(i915_gem_object_unbind(obj));
3758
3759                 dev_priv->mm.interruptible = was_interruptible;
3760         }
3761
3762         obj->pages_pin_count = 0;
3763         i915_gem_object_put_pages(obj);
3764         i915_gem_object_free_mmap_offset(obj);
3765
3766         BUG_ON(obj->pages);
3767
3768         if (obj->base.import_attach)
3769                 drm_prime_gem_destroy(&obj->base, NULL);
3770
3771         drm_gem_object_release(&obj->base);
3772         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3773
3774         kfree(obj->bit_17);
3775         kfree(obj);
3776 }
3777
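/**
 * Quiesces the GPU: waits for outstanding rendering, retires requests,
 * drops all fences and tears down the rings. Used on suspend and when
 * leaving the VT.
 */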
3778 int
3779 i915_gem_idle(struct drm_device *dev)
3780 {
3781         drm_i915_private_t *dev_priv = dev->dev_private;
3782         int ret;
3783
3784         mutex_lock(&dev->struct_mutex);
3785
3786         if (dev_priv->mm.suspended) {
3787                 mutex_unlock(&dev->struct_mutex);
3788                 return 0;
3789         }
3790
3791         ret = i915_gpu_idle(dev);
3792         if (ret) {
3793                 mutex_unlock(&dev->struct_mutex);
3794                 return ret;
3795         }
3796         i915_gem_retire_requests(dev);
3797
3798         /* Under UMS, be paranoid and evict. */
3799         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3800                 i915_gem_evict_everything(dev);
3801
3802         i915_gem_reset_fences(dev);
3803
3804         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3805          * We need to replace this with a semaphore, or something.
3806          * And not confound mm.suspended!
3807          */
3808         dev_priv->mm.suspended = 1;
3809         del_timer_sync(&dev_priv->hangcheck_timer);
3810
3811         i915_kernel_lost_context(dev);
3812         i915_gem_cleanup_ringbuffer(dev);
3813
3814         mutex_unlock(&dev->struct_mutex);
3815
3816         /* Cancel the retire work handler, which should be idle now. */
3817         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3818
3819         return 0;
3820 }
3821
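/**
 * Rewrites the GEN7_L3LOG row-remapping registers on Ivybridge from the
 * saved l3_remap_info, temporarily disabling DOP clock gating while the
 * registers are reprogrammed.
 */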
3822 void i915_gem_l3_remap(struct drm_device *dev)
3823 {
3824         drm_i915_private_t *dev_priv = dev->dev_private;
3825         u32 misccpctl;
3826         int i;
3827
3828         if (!IS_IVYBRIDGE(dev))
3829                 return;
3830
3831         if (!dev_priv->mm.l3_remap_info)
3832                 return;
3833
3834         misccpctl = I915_READ(GEN7_MISCCPCTL);
3835         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3836         POSTING_READ(GEN7_MISCCPCTL);
3837
3838         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3839                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3840                 if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
3841                         DRM_DEBUG("0x%x was already programmed to %x\n",
3842                                   GEN7_L3LOG_BASE + i, remap);
3843                 if (remap && !dev_priv->mm.l3_remap_info[i/4])
3844                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
3845                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
3846         }
3847
3848         /* Make sure all the writes land before re-enabling DOP clock gating */
3849         POSTING_READ(GEN7_L3LOG_BASE);
3850
3851         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3852 }
3853
3854 void i915_gem_init_swizzling(struct drm_device *dev)
3855 {
3856         drm_i915_private_t *dev_priv = dev->dev_private;
3857
3858         if (INTEL_INFO(dev)->gen < 5 ||
3859             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3860                 return;
3861
3862         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3863                                  DISP_TILE_SURFACE_SWIZZLING);
3864
3865         if (IS_GEN5(dev))
3866                 return;
3867
3868         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3869         if (IS_GEN6(dev))
3870                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3871         else
3872                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3873 }
3874
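/**
 * Programs the aliasing PPGTT: writes each page-directory entry into the
 * global GTT and enables PPGTT mode on every ring. Note the PP_DIR_BASE
 * encoding below: the directory offset is expressed in 64-byte cachelines
 * and stored in bits 31:16 of the register.
 */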
3875 void i915_gem_init_ppgtt(struct drm_device *dev)
3876 {
3877         drm_i915_private_t *dev_priv = dev->dev_private;
3878         uint32_t pd_offset;
3879         struct intel_ring_buffer *ring;
3880         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3881         uint32_t __iomem *pd_addr;
3882         uint32_t pd_entry;
3883         int i;
3884
3885         if (!dev_priv->mm.aliasing_ppgtt)
3886                 return;
3887
3888
3889         pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3890         for (i = 0; i < ppgtt->num_pd_entries; i++) {
3891                 dma_addr_t pt_addr;
3892
3893                 if (dev_priv->mm.gtt->needs_dmar)
3894                         pt_addr = ppgtt->pt_dma_addr[i];
3895                 else
3896                         pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3897
3898                 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3899                 pd_entry |= GEN6_PDE_VALID;
3900
3901                 writel(pd_entry, pd_addr + i);
3902         }
3903         readl(pd_addr);
3904
3905         pd_offset = ppgtt->pd_offset;
3906         pd_offset /= 64; /* in cachelines */
3907         pd_offset <<= 16;
3908
3909         if (INTEL_INFO(dev)->gen == 6) {
3910                 uint32_t ecochk, gab_ctl, ecobits;
3911
3912                 ecobits = I915_READ(GAC_ECO_BITS);
3913                 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
3914
3915                 gab_ctl = I915_READ(GAB_CTL);
3916                 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3917
3918                 ecochk = I915_READ(GAM_ECOCHK);
3919                 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3920                                        ECOCHK_PPGTT_CACHE64B);
3921                 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3922         } else if (INTEL_INFO(dev)->gen >= 7) {
3923                 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3924                 /* GFX_MODE is per-ring on gen7+ */
3925         }
3926
3927         for_each_ring(ring, dev_priv, i) {
3928                 if (INTEL_INFO(dev)->gen >= 7)
3929                         I915_WRITE(RING_MODE_GEN7(ring),
3930                                    _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3931
3932                 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3933                 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3934         }
3935 }
3936
3937 static bool
3938 intel_enable_blt(struct drm_device *dev)
3939 {
3940         if (!HAS_BLT(dev))
3941                 return false;
3942
3943         /* The blitter was dysfunctional on early prototypes */
3944         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3945                 DRM_INFO("BLT not supported on this pre-production hardware;"
3946                          " graphics performance will be degraded.\n");
3947                 return false;
3948         }
3949
3950         return true;
3951 }
3952
3953 int
3954 i915_gem_init_hw(struct drm_device *dev)
3955 {
3956         drm_i915_private_t *dev_priv = dev->dev_private;
3957         int ret;
3958
3959         if (!intel_enable_gtt())
3960                 return -EIO;
3961
3962         i915_gem_l3_remap(dev);
3963
3964         i915_gem_init_swizzling(dev);
3965
3966         ret = intel_init_render_ring_buffer(dev);
3967         if (ret)
3968                 return ret;
3969
3970         if (HAS_BSD(dev)) {
3971                 ret = intel_init_bsd_ring_buffer(dev);
3972                 if (ret)
3973                         goto cleanup_render_ring;
3974         }
3975
3976         if (intel_enable_blt(dev)) {
3977                 ret = intel_init_blt_ring_buffer(dev);
3978                 if (ret)
3979                         goto cleanup_bsd_ring;
3980         }
3981
3982         dev_priv->next_seqno = 1;
3983
3984         /*
3985          * XXX: There was some w/a described somewhere suggesting loading
3986          * contexts before PPGTT.
3987          */
3988         i915_gem_context_init(dev);
3989         i915_gem_init_ppgtt(dev);
3990
3991         return 0;
3992
3993 cleanup_bsd_ring:
3994         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3995 cleanup_render_ring:
3996         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3997         return ret;
3998 }
3999
4000 static bool
4001 intel_enable_ppgtt(struct drm_device *dev)
4002 {
4003         if (i915_enable_ppgtt >= 0)
4004                 return i915_enable_ppgtt;
4005
4006 #ifdef CONFIG_INTEL_IOMMU
4007         /* Disable ppgtt on SNB if VT-d is on. */
4008         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
4009                 return false;
4010 #endif
4011
4012         return true;
4013 }
4014
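/**
 * One-time GEM initialization: carves up the global GTT (reserving space
 * for the PPGTT page directories when aliasing PPGTT is in use) and brings
 * up the hardware rings via i915_gem_init_hw().
 */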
4015 int i915_gem_init(struct drm_device *dev)
4016 {
4017         struct drm_i915_private *dev_priv = dev->dev_private;
4018         unsigned long gtt_size, mappable_size;
4019         int ret;
4020
4021         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
4022         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
4023
4024         mutex_lock(&dev->struct_mutex);
4025         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
4026                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
4027                  * aperture accordingly when using aliasing ppgtt. */
4028                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
4029
4030                 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
4031
4032                 ret = i915_gem_init_aliasing_ppgtt(dev);
4033                 if (ret) {
4034                         mutex_unlock(&dev->struct_mutex);
4035                         return ret;
4036                 }
4037         } else {
4038                 /* Let GEM Manage all of the aperture.
4039                  *
4040                  * However, leave one page at the end still bound to the scratch
4041                  * page.  There are a number of places where the hardware
4042                  * apparently prefetches past the end of the object, and we've
4043                  * seen multiple hangs with the GPU head pointer stuck in a
4044                  * batchbuffer bound at the last page of the aperture.  One page
4045                  * should be enough to keep any prefetching inside of the
4046                  * aperture.
4047                  */
4048                 i915_gem_init_global_gtt(dev, 0, mappable_size,
4049                                          gtt_size);
4050         }
4051
4052         ret = i915_gem_init_hw(dev);
4053         mutex_unlock(&dev->struct_mutex);
4054         if (ret) {
4055                 i915_gem_cleanup_aliasing_ppgtt(dev);
4056                 return ret;
4057         }
4058
4059         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4060         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4061                 dev_priv->dri1.allow_batchbuffer = 1;
4062         return 0;
4063 }
4064
4065 void
4066 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4067 {
4068         drm_i915_private_t *dev_priv = dev->dev_private;
4069         struct intel_ring_buffer *ring;
4070         int i;
4071
4072         for_each_ring(ring, dev_priv, i)
4073                 intel_cleanup_ring_buffer(ring);
4074 }
4075
4076 int
4077 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4078                        struct drm_file *file_priv)
4079 {
4080         drm_i915_private_t *dev_priv = dev->dev_private;
4081         int ret;
4082
4083         if (drm_core_check_feature(dev, DRIVER_MODESET))
4084                 return 0;
4085
4086         if (atomic_read(&dev_priv->mm.wedged)) {
4087                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4088                 atomic_set(&dev_priv->mm.wedged, 0);
4089         }
4090
4091         mutex_lock(&dev->struct_mutex);
4092         dev_priv->mm.suspended = 0;
4093
4094         ret = i915_gem_init_hw(dev);
4095         if (ret != 0) {
4096                 mutex_unlock(&dev->struct_mutex);
4097                 return ret;
4098         }
4099
4100         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4101         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4102         mutex_unlock(&dev->struct_mutex);
4103
4104         ret = drm_irq_install(dev);
4105         if (ret)
4106                 goto cleanup_ringbuffer;
4107
4108         return 0;
4109
4110 cleanup_ringbuffer:
4111         mutex_lock(&dev->struct_mutex);
4112         i915_gem_cleanup_ringbuffer(dev);
4113         dev_priv->mm.suspended = 1;
4114         mutex_unlock(&dev->struct_mutex);
4115
4116         return ret;
4117 }
4118
4119 int
4120 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4121                        struct drm_file *file_priv)
4122 {
4123         if (drm_core_check_feature(dev, DRIVER_MODESET))
4124                 return 0;
4125
4126         drm_irq_uninstall(dev);
4127         return i915_gem_idle(dev);
4128 }
4129
4130 void
4131 i915_gem_lastclose(struct drm_device *dev)
4132 {
4133         int ret;
4134
4135         if (drm_core_check_feature(dev, DRIVER_MODESET))
4136                 return;
4137
4138         ret = i915_gem_idle(dev);
4139         if (ret)
4140                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4141 }
4142
4143 static void
4144 init_ring_lists(struct intel_ring_buffer *ring)
4145 {
4146         INIT_LIST_HEAD(&ring->active_list);
4147         INIT_LIST_HEAD(&ring->request_list);
4148 }
4149
4150 void
4151 i915_gem_load(struct drm_device *dev)
4152 {
4153         int i;
4154         drm_i915_private_t *dev_priv = dev->dev_private;
4155
4156         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4157         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4158         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4159         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4160         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4161         for (i = 0; i < I915_NUM_RINGS; i++)
4162                 init_ring_lists(&dev_priv->ring[i]);
4163         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4164                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4165         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4166                           i915_gem_retire_work_handler);
4167         init_completion(&dev_priv->error_completion);
4168
4169         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4170         if (IS_GEN3(dev)) {
4171                 I915_WRITE(MI_ARB_STATE,
4172                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4173         }
4174
4175         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4176
4177         /* Old X drivers will take 0-2 for front, back, depth buffers */
4178         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4179                 dev_priv->fence_reg_start = 3;
4180
4181         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4182                 dev_priv->num_fence_regs = 16;
4183         else
4184                 dev_priv->num_fence_regs = 8;
4185
4186         /* Initialize fence registers to zero */
4187         i915_gem_reset_fences(dev);
4188
4189         i915_gem_detect_bit_6_swizzle(dev);
4190         init_waitqueue_head(&dev_priv->pending_flip_queue);
4191
4192         dev_priv->mm.interruptible = true;
4193
4194         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4195         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4196         register_shrinker(&dev_priv->mm.inactive_shrinker);
4197 }
4198
4199 /*
4200  * Create a physically contiguous memory object for this object
4201  * e.g. for cursor and overlay registers
4202  */
4203 static int i915_gem_init_phys_object(struct drm_device *dev,
4204                                      int id, int size, int align)
4205 {
4206         drm_i915_private_t *dev_priv = dev->dev_private;
4207         struct drm_i915_gem_phys_object *phys_obj;
4208         int ret;
4209
4210         if (dev_priv->mm.phys_objs[id - 1] || !size)
4211                 return 0;
4212
4213         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4214         if (!phys_obj)
4215                 return -ENOMEM;
4216
4217         phys_obj->id = id;
4218
4219         phys_obj->handle = drm_pci_alloc(dev, size, align);
4220         if (!phys_obj->handle) {
4221                 ret = -ENOMEM;
4222                 goto kfree_obj;
4223         }
4224 #ifdef CONFIG_X86
4225         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4226 #endif
4227
4228         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4229
4230         return 0;
4231 kfree_obj:
4232         kfree(phys_obj);
4233         return ret;
4234 }
4235
4236 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4237 {
4238         drm_i915_private_t *dev_priv = dev->dev_private;
4239         struct drm_i915_gem_phys_object *phys_obj;
4240
4241         if (!dev_priv->mm.phys_objs[id - 1])
4242                 return;
4243
4244         phys_obj = dev_priv->mm.phys_objs[id - 1];
4245         if (phys_obj->cur_obj) {
4246                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4247         }
4248
4249 #ifdef CONFIG_X86
4250         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4251 #endif
4252         drm_pci_free(dev, phys_obj->handle);
4253         kfree(phys_obj);
4254         dev_priv->mm.phys_objs[id - 1] = NULL;
4255 }
4256
4257 void i915_gem_free_all_phys_object(struct drm_device *dev)
4258 {
4259         int i;
4260
4261         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4262                 i915_gem_free_phys_object(dev, i);
4263 }
4264
4265 void i915_gem_detach_phys_object(struct drm_device *dev,
4266                                  struct drm_i915_gem_object *obj)
4267 {
4268         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4269         char *vaddr;
4270         int i;
4271         int page_count;
4272
4273         if (!obj->phys_obj)
4274                 return;
4275         vaddr = obj->phys_obj->handle->vaddr;
4276
4277         page_count = obj->base.size / PAGE_SIZE;
4278         for (i = 0; i < page_count; i++) {
4279                 struct page *page = shmem_read_mapping_page(mapping, i);
4280                 if (!IS_ERR(page)) {
4281                         char *dst = kmap_atomic(page);
4282                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4283                         kunmap_atomic(dst);
4284
4285                         drm_clflush_pages(&page, 1);
4286
4287                         set_page_dirty(page);
4288                         mark_page_accessed(page);
4289                         page_cache_release(page);
4290                 }
4291         }
4292         intel_gtt_chipset_flush();
4293
4294         obj->phys_obj->cur_obj = NULL;
4295         obj->phys_obj = NULL;
4296 }
4297
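/**
 * Attaches the object to a physically contiguous buffer, allocating the
 * phys object for the requested id on first use and copying the current
 * shmem contents into it. Required by hardware, such as cursors and
 * overlay registers, that consumes physical addresses rather than GTT
 * offsets.
 */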
4298 int
4299 i915_gem_attach_phys_object(struct drm_device *dev,
4300                             struct drm_i915_gem_object *obj,
4301                             int id,
4302                             int align)
4303 {
4304         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4305         drm_i915_private_t *dev_priv = dev->dev_private;
4306         int ret = 0;
4307         int page_count;
4308         int i;
4309
4310         if (id > I915_MAX_PHYS_OBJECT)
4311                 return -EINVAL;
4312
4313         if (obj->phys_obj) {
4314                 if (obj->phys_obj->id == id)
4315                         return 0;
4316                 i915_gem_detach_phys_object(dev, obj);
4317         }
4318
4319         /* create a new object */
4320         if (!dev_priv->mm.phys_objs[id - 1]) {
4321                 ret = i915_gem_init_phys_object(dev, id,
4322                                                 obj->base.size, align);
4323                 if (ret) {
4324                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4325                                   id, obj->base.size);
4326                         return ret;
4327                 }
4328         }
4329
4330         /* bind to the object */
4331         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4332         obj->phys_obj->cur_obj = obj;
4333
4334         page_count = obj->base.size / PAGE_SIZE;
4335
4336         for (i = 0; i < page_count; i++) {
4337                 struct page *page;
4338                 char *dst, *src;
4339
4340                 page = shmem_read_mapping_page(mapping, i);
4341                 if (IS_ERR(page))
4342                         return PTR_ERR(page);
4343
4344                 src = kmap_atomic(page);
4345                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4346                 memcpy(dst, src, PAGE_SIZE);
4347                 kunmap_atomic(src);
4348
4349                 mark_page_accessed(page);
4350                 page_cache_release(page);
4351         }
4352
4353         return 0;
4354 }
4355
4356 static int
4357 i915_gem_phys_pwrite(struct drm_device *dev,
4358                      struct drm_i915_gem_object *obj,
4359                      struct drm_i915_gem_pwrite *args,
4360                      struct drm_file *file_priv)
4361 {
4362         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4363         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4364
4365         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4366                 unsigned long unwritten;
4367
4368                 /* The physical object once assigned is fixed for the lifetime
4369                  * of the obj, so we can safely drop the lock and continue
4370                  * to access vaddr.
4371                  */
4372                 mutex_unlock(&dev->struct_mutex);
4373                 unwritten = copy_from_user(vaddr, user_data, args->size);
4374                 mutex_lock(&dev->struct_mutex);
4375                 if (unwritten)
4376                         return -EFAULT;
4377         }
4378
4379         intel_gtt_chipset_flush();
4380         return 0;
4381 }
4382
4383 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4384 {
4385         struct drm_i915_file_private *file_priv = file->driver_priv;
4386
4387         /* Clean up our request list when the client is going away, so that
4388          * later retire_requests won't dereference our soon-to-be-gone
4389          * file_priv.
4390          */
4391         spin_lock(&file_priv->mm.lock);
4392         while (!list_empty(&file_priv->mm.request_list)) {
4393                 struct drm_i915_gem_request *request;
4394
4395                 request = list_first_entry(&file_priv->mm.request_list,
4396                                            struct drm_i915_gem_request,
4397                                            client_list);
4398                 list_del(&request->client_list);
4399                 request->file_priv = NULL;
4400         }
4401         spin_unlock(&file_priv->mm.lock);
4402 }
4403
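/*
 * Core memory-shrinker callback: when asked to scan, first purges objects
 * already marked purgeable and, if that is not enough, tries to release
 * all remaining unpinned backing pages. It returns the number of pages
 * still reclaimable on the bound and unbound lists.
 */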
4404 static int
4405 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4406 {
4407         struct drm_i915_private *dev_priv =
4408                 container_of(shrinker,
4409                              struct drm_i915_private,
4410                              mm.inactive_shrinker);
4411         struct drm_device *dev = dev_priv->dev;
4412         struct drm_i915_gem_object *obj;
4413         int nr_to_scan = sc->nr_to_scan;
4414         int cnt;
4415
4416         if (!mutex_trylock(&dev->struct_mutex))
4417                 return 0;
4418
4419         if (nr_to_scan) {
4420                 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4421                 if (nr_to_scan > 0)
4422                         i915_gem_shrink_all(dev_priv);
4423         }
4424
4425         cnt = 0;
4426         list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4427                 if (obj->pages_pin_count == 0)
4428                         cnt += obj->base.size >> PAGE_SHIFT;
4429         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
4430                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4431                         cnt += obj->base.size >> PAGE_SHIFT;
4432
4433         mutex_unlock(&dev->struct_mutex);
4434         return cnt;
4435 }