1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38
39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43                                                           bool write);
44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45                                                                   uint64_t offset,
46                                                                   uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49                                                     unsigned alignment,
50                                                     bool map_and_fenceable);
51 static void i915_gem_clear_fence_reg(struct drm_device *dev,
52                                      struct drm_i915_fence_reg *reg);
53 static int i915_gem_phys_pwrite(struct drm_device *dev,
54                                 struct drm_i915_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file);
57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58
59 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
60                                     struct shrink_control *sc);
61
62 /* some bookkeeping */
63 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
64                                   size_t size)
65 {
66         dev_priv->mm.object_count++;
67         dev_priv->mm.object_memory += size;
68 }
69
70 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
71                                      size_t size)
72 {
73         dev_priv->mm.object_count--;
74         dev_priv->mm.object_memory -= size;
75 }
76
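/* If a GPU hang has been detected, block until the reset handler has
 * signalled error_completion (bumping the completion count ourselves if
 * the GPU stayed wedged), so callers do not queue new work while the
 * error state is still being processed.
 */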
77 static int
78 i915_gem_wait_for_error(struct drm_device *dev)
79 {
80         struct drm_i915_private *dev_priv = dev->dev_private;
81         struct completion *x = &dev_priv->error_completion;
82         unsigned long flags;
83         int ret;
84
85         if (!atomic_read(&dev_priv->mm.wedged))
86                 return 0;
87
88         ret = wait_for_completion_interruptible(x);
89         if (ret)
90                 return ret;
91
92         if (atomic_read(&dev_priv->mm.wedged)) {
93                 /* GPU is hung, bump the completion count to account for
94                  * the token we just consumed so that we never hit zero and
95                  * end up waiting upon a subsequent completion event that
96                  * will never happen.
97                  */
98                 spin_lock_irqsave(&x->wait.lock, flags);
99                 x->done++;
100                 spin_unlock_irqrestore(&x->wait.lock, flags);
101         }
102         return 0;
103 }
104
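/* Interruptible variant of taking struct_mutex: first wait for any
 * pending GPU error handling to finish, then take the lock.
 */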
105 int i915_mutex_lock_interruptible(struct drm_device *dev)
106 {
107         int ret;
108
109         ret = i915_gem_wait_for_error(dev);
110         if (ret)
111                 return ret;
112
113         ret = mutex_lock_interruptible(&dev->struct_mutex);
114         if (ret)
115                 return ret;
116
117         WARN_ON(i915_verify_lists(dev));
118         return 0;
119 }
120
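/* An object is "inactive" when it is bound into the GTT but neither
 * pinned nor still being read or written by the GPU.
 */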
121 static inline bool
122 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
123 {
124         return obj->gtt_space && !obj->active && obj->pin_count == 0;
125 }
126
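/* Set up the GTT range manager: record the managed aperture range in
 * dev_priv->mm and clear any stale PTEs covering it.
 */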
127 void i915_gem_do_init(struct drm_device *dev,
128                       unsigned long start,
129                       unsigned long mappable_end,
130                       unsigned long end)
131 {
132         drm_i915_private_t *dev_priv = dev->dev_private;
133
134         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135
136         dev_priv->mm.gtt_start = start;
137         dev_priv->mm.gtt_mappable_end = mappable_end;
138         dev_priv->mm.gtt_end = end;
139         dev_priv->mm.gtt_total = end - start;
140         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141
142         /* Take over this portion of the GTT */
143         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
144 }
145
146 int
147 i915_gem_init_ioctl(struct drm_device *dev, void *data,
148                     struct drm_file *file)
149 {
150         struct drm_i915_gem_init *args = data;
151
152         if (args->gtt_start >= args->gtt_end ||
153             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
154                 return -EINVAL;
155
156         mutex_lock(&dev->struct_mutex);
157         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
158         mutex_unlock(&dev->struct_mutex);
159
160         return 0;
161 }
162
163 int
164 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
165                             struct drm_file *file)
166 {
167         struct drm_i915_private *dev_priv = dev->dev_private;
168         struct drm_i915_gem_get_aperture *args = data;
169         struct drm_i915_gem_object *obj;
170         size_t pinned;
171
172         if (!(dev->driver->driver_features & DRIVER_GEM))
173                 return -ENODEV;
174
175         pinned = 0;
176         mutex_lock(&dev->struct_mutex);
177         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
178                 pinned += obj->gtt_space->size;
179         mutex_unlock(&dev->struct_mutex);
180
181         args->aper_size = dev_priv->mm.gtt_total;
182         args->aper_available_size = args->aper_size - pinned;
183
184         return 0;
185 }
186
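/* Common helper for the create and dumb_create ioctls: allocate a new
 * GEM object of the given (page-aligned) size and return a handle to
 * it. On success the handle holds the sole reference.
 */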
187 static int
188 i915_gem_create(struct drm_file *file,
189                 struct drm_device *dev,
190                 uint64_t size,
191                 uint32_t *handle_p)
192 {
193         struct drm_i915_gem_object *obj;
194         int ret;
195         u32 handle;
196
197         size = roundup(size, PAGE_SIZE);
198         if (size == 0)
199                 return -EINVAL;
200
201         /* Allocate the new object */
202         obj = i915_gem_alloc_object(dev, size);
203         if (obj == NULL)
204                 return -ENOMEM;
205
206         ret = drm_gem_handle_create(file, &obj->base, &handle);
207         if (ret) {
208                 drm_gem_object_release(&obj->base);
209                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
210                 kfree(obj);
211                 return ret;
212         }
213
214         /* drop reference from allocate - handle holds it now */
215         drm_gem_object_unreference(&obj->base);
216         trace_i915_gem_object_create(obj);
217
218         *handle_p = handle;
219         return 0;
220 }
221
222 int
223 i915_gem_dumb_create(struct drm_file *file,
224                      struct drm_device *dev,
225                      struct drm_mode_create_dumb *args)
226 {
227         /* have to work out size/pitch and return them */
228         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
229         args->size = args->pitch * args->height;
230         return i915_gem_create(file, dev,
231                                args->size, &args->handle);
232 }
233
234 int i915_gem_dumb_destroy(struct drm_file *file,
235                           struct drm_device *dev,
236                           uint32_t handle)
237 {
238         return drm_gem_handle_delete(file, handle);
239 }
240
241 /**
242  * Creates a new mm object and returns a handle to it.
243  */
244 int
245 i915_gem_create_ioctl(struct drm_device *dev, void *data,
246                       struct drm_file *file)
247 {
248         struct drm_i915_gem_create *args = data;
249         return i915_gem_create(file, dev,
250                                args->size, &args->handle);
251 }
252
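/* Tiled objects on machines that swizzle based on physical address bit
 * 17 need the manual bit-17 copy paths for CPU access, since the
 * backing pages may move and change that bit underneath us.
 */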
253 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
254 {
255         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
256
257         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
258                 obj->tiling_mode != I915_TILING_NONE;
259 }
260
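/* Copy length bytes from one page to another through temporary kernel
 * mappings (kmap), with no swizzle compensation.
 */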
261 static inline void
262 slow_shmem_copy(struct page *dst_page,
263                 int dst_offset,
264                 struct page *src_page,
265                 int src_offset,
266                 int length)
267 {
268         char *dst_vaddr, *src_vaddr;
269
270         dst_vaddr = kmap(dst_page);
271         src_vaddr = kmap(src_page);
272
273         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
274
275         kunmap(src_page);
276         kunmap(dst_page);
277 }
278
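/* Copy between a GPU (shmem) page and a CPU (user) page while undoing
 * the bit-17 part of the swizzle: within an affected page, data is
 * copied to/from the cacheline whose bit 6 has been flipped.
 */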
279 static inline void
280 slow_shmem_bit17_copy(struct page *gpu_page,
281                       int gpu_offset,
282                       struct page *cpu_page,
283                       int cpu_offset,
284                       int length,
285                       int is_read)
286 {
287         char *gpu_vaddr, *cpu_vaddr;
288
289         /* Use the unswizzled path if this page isn't affected. */
290         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
291                 if (is_read)
292                         return slow_shmem_copy(cpu_page, cpu_offset,
293                                                gpu_page, gpu_offset, length);
294                 else
295                         return slow_shmem_copy(gpu_page, gpu_offset,
296                                                cpu_page, cpu_offset, length);
297         }
298
299         gpu_vaddr = kmap(gpu_page);
300         cpu_vaddr = kmap(cpu_page);
301
302         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
303          * XORing with the other bits (A9 for Y, A9 and A10 for X)
304          */
305         while (length > 0) {
306                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307                 int this_length = min(cacheline_end - gpu_offset, length);
308                 int swizzled_gpu_offset = gpu_offset ^ 64;
309
310                 if (is_read) {
311                         memcpy(cpu_vaddr + cpu_offset,
312                                gpu_vaddr + swizzled_gpu_offset,
313                                this_length);
314                 } else {
315                         memcpy(gpu_vaddr + swizzled_gpu_offset,
316                                cpu_vaddr + cpu_offset,
317                                this_length);
318                 }
319                 cpu_offset += this_length;
320                 gpu_offset += this_length;
321                 length -= this_length;
322         }
323
324         kunmap(cpu_page);
325         kunmap(gpu_page);
326 }
327
328 /**
329  * This is the fast shmem pread path, which attempts to copy_to_user directly
330  * from the backing pages of the object to the user's address space.  On a
331  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
332  */
333 static int
334 i915_gem_shmem_pread_fast(struct drm_device *dev,
335                           struct drm_i915_gem_object *obj,
336                           struct drm_i915_gem_pread *args,
337                           struct drm_file *file)
338 {
339         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
340         ssize_t remain;
341         loff_t offset;
342         char __user *user_data;
343         int page_offset, page_length;
344
345         user_data = (char __user *) (uintptr_t) args->data_ptr;
346         remain = args->size;
347
348         offset = args->offset;
349
350         while (remain > 0) {
351                 struct page *page;
352                 char *vaddr;
353                 int ret;
354
355                 /* Operation in this page
356                  *
357                  * page_offset = offset within page
358                  * page_length = bytes to copy for this page
359                  */
360                 page_offset = offset_in_page(offset);
361                 page_length = remain;
362                 if ((page_offset + remain) > PAGE_SIZE)
363                         page_length = PAGE_SIZE - page_offset;
364
365                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
366                 if (IS_ERR(page))
367                         return PTR_ERR(page);
368
369                 vaddr = kmap_atomic(page);
370                 ret = __copy_to_user_inatomic(user_data,
371                                               vaddr + page_offset,
372                                               page_length);
373                 kunmap_atomic(vaddr);
374
375                 mark_page_accessed(page);
376                 page_cache_release(page);
377                 if (ret)
378                         return -EFAULT;
379
380                 remain -= page_length;
381                 user_data += page_length;
382                 offset += page_length;
383         }
384
385         return 0;
386 }
387
388 /**
389  * This is the fallback shmem pread path, which drops the struct_mutex to
390  * pin the user pages with get_user_pages(), so we can then copy out of
391  * the object's backing pages while holding the struct mutex without
392  * taking page faults.
393  */
394 static int
395 i915_gem_shmem_pread_slow(struct drm_device *dev,
396                           struct drm_i915_gem_object *obj,
397                           struct drm_i915_gem_pread *args,
398                           struct drm_file *file)
399 {
400         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
401         struct mm_struct *mm = current->mm;
402         struct page **user_pages;
403         ssize_t remain;
404         loff_t offset, pinned_pages, i;
405         loff_t first_data_page, last_data_page, num_pages;
406         int shmem_page_offset;
407         int data_page_index, data_page_offset;
408         int page_length;
409         int ret;
410         uint64_t data_ptr = args->data_ptr;
411         int do_bit17_swizzling;
412
413         remain = args->size;
414
415         /* Pin the user pages containing the data.  We can't fault while
416          * holding the struct mutex, yet we want to hold it while
417          * dereferencing the user data.
418          */
419         first_data_page = data_ptr / PAGE_SIZE;
420         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
421         num_pages = last_data_page - first_data_page + 1;
422
423         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
424         if (user_pages == NULL)
425                 return -ENOMEM;
426
427         mutex_unlock(&dev->struct_mutex);
428         down_read(&mm->mmap_sem);
429         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
430                                       num_pages, 1, 0, user_pages, NULL);
431         up_read(&mm->mmap_sem);
432         mutex_lock(&dev->struct_mutex);
433         if (pinned_pages < num_pages) {
434                 ret = -EFAULT;
435                 goto out;
436         }
437
438         ret = i915_gem_object_set_cpu_read_domain_range(obj,
439                                                         args->offset,
440                                                         args->size);
441         if (ret)
442                 goto out;
443
444         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
445
446         offset = args->offset;
447
448         while (remain > 0) {
449                 struct page *page;
450
451                 /* Operation in this page
452                  *
453                  * shmem_page_offset = offset within page in shmem file
454                  * data_page_index = page number in get_user_pages return
455                  * data_page_offset = offset within the data_page_index page.
456                  * page_length = bytes to copy for this page
457                  */
458                 shmem_page_offset = offset_in_page(offset);
459                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
460                 data_page_offset = offset_in_page(data_ptr);
461
462                 page_length = remain;
463                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
464                         page_length = PAGE_SIZE - shmem_page_offset;
465                 if ((data_page_offset + page_length) > PAGE_SIZE)
466                         page_length = PAGE_SIZE - data_page_offset;
467
468                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
469                 if (IS_ERR(page)) {
470                         ret = PTR_ERR(page);
471                         goto out;
472                 }
473
474                 if (do_bit17_swizzling) {
475                         slow_shmem_bit17_copy(page,
476                                               shmem_page_offset,
477                                               user_pages[data_page_index],
478                                               data_page_offset,
479                                               page_length,
480                                               1);
481                 } else {
482                         slow_shmem_copy(user_pages[data_page_index],
483                                         data_page_offset,
484                                         page,
485                                         shmem_page_offset,
486                                         page_length);
487                 }
488
489                 mark_page_accessed(page);
490                 page_cache_release(page);
491
492                 remain -= page_length;
493                 data_ptr += page_length;
494                 offset += page_length;
495         }
496
497 out:
498         for (i = 0; i < pinned_pages; i++) {
499                 SetPageDirty(user_pages[i]);
500                 mark_page_accessed(user_pages[i]);
501                 page_cache_release(user_pages[i]);
502         }
503         drm_free_large(user_pages);
504
505         return ret;
506 }
507
508 /**
509  * Reads data from the object referenced by handle.
510  *
511  * On error, the contents of *data are undefined.
512  */
513 int
514 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
515                      struct drm_file *file)
516 {
517         struct drm_i915_gem_pread *args = data;
518         struct drm_i915_gem_object *obj;
519         int ret = 0;
520
521         if (args->size == 0)
522                 return 0;
523
524         if (!access_ok(VERIFY_WRITE,
525                        (char __user *)(uintptr_t)args->data_ptr,
526                        args->size))
527                 return -EFAULT;
528
529         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
530                                        args->size);
531         if (ret)
532                 return -EFAULT;
533
534         ret = i915_mutex_lock_interruptible(dev);
535         if (ret)
536                 return ret;
537
538         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
539         if (&obj->base == NULL) {
540                 ret = -ENOENT;
541                 goto unlock;
542         }
543
544         /* Bounds check source.  */
545         if (args->offset > obj->base.size ||
546             args->size > obj->base.size - args->offset) {
547                 ret = -EINVAL;
548                 goto out;
549         }
550
551         trace_i915_gem_object_pread(obj, args->offset, args->size);
552
553         ret = i915_gem_object_set_cpu_read_domain_range(obj,
554                                                         args->offset,
555                                                         args->size);
556         if (ret)
557                 goto out;
558
559         ret = -EFAULT;
560         if (!i915_gem_object_needs_bit17_swizzle(obj))
561                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
562         if (ret == -EFAULT)
563                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
564
565 out:
566         drm_gem_object_unreference(&obj->base);
567 unlock:
568         mutex_unlock(&dev->struct_mutex);
569         return ret;
570 }
571
572 /* This is the fast write path which cannot handle
573  * page faults in the source data
574  */
575
576 static inline int
577 fast_user_write(struct io_mapping *mapping,
578                 loff_t page_base, int page_offset,
579                 char __user *user_data,
580                 int length)
581 {
582         char *vaddr_atomic;
583         unsigned long unwritten;
584
585         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
586         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587                                                       user_data, length);
588         io_mapping_unmap_atomic(vaddr_atomic);
589         return unwritten;
590 }
591
592 /* Here's the write path which can sleep for
593  * page faults
594  */
595
596 static inline void
597 slow_kernel_write(struct io_mapping *mapping,
598                   loff_t gtt_base, int gtt_offset,
599                   struct page *user_page, int user_offset,
600                   int length)
601 {
602         char __iomem *dst_vaddr;
603         char *src_vaddr;
604
605         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
606         src_vaddr = kmap(user_page);
607
608         memcpy_toio(dst_vaddr + gtt_offset,
609                     src_vaddr + user_offset,
610                     length);
611
612         kunmap(user_page);
613         io_mapping_unmap(dst_vaddr);
614 }
615
616 /**
617  * This is the fast pwrite path, where we copy the data directly from the
618  * user into the GTT, uncached.
619  */
620 static int
621 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
622                          struct drm_i915_gem_object *obj,
623                          struct drm_i915_gem_pwrite *args,
624                          struct drm_file *file)
625 {
626         drm_i915_private_t *dev_priv = dev->dev_private;
627         ssize_t remain;
628         loff_t offset, page_base;
629         char __user *user_data;
630         int page_offset, page_length;
631
632         user_data = (char __user *) (uintptr_t) args->data_ptr;
633         remain = args->size;
634
635         offset = obj->gtt_offset + args->offset;
636
637         while (remain > 0) {
638                 /* Operation in this page
639                  *
640                  * page_base = page offset within aperture
641                  * page_offset = offset within page
642                  * page_length = bytes to copy for this page
643                  */
644                 page_base = offset & PAGE_MASK;
645                 page_offset = offset_in_page(offset);
646                 page_length = remain;
647                 if ((page_offset + remain) > PAGE_SIZE)
648                         page_length = PAGE_SIZE - page_offset;
649
650                 /* If we get a fault while copying data, then (presumably) our
651                  * source page isn't available.  Return the error and we'll
652                  * retry in the slow path.
653                  */
654                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
655                                     page_offset, user_data, page_length))
656                         return -EFAULT;
657
658                 remain -= page_length;
659                 user_data += page_length;
660                 offset += page_length;
661         }
662
663         return 0;
664 }
665
666 /**
667  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
668  * the memory and maps it using kmap for copying.
669  *
670  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
671  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
672  */
673 static int
674 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
675                          struct drm_i915_gem_object *obj,
676                          struct drm_i915_gem_pwrite *args,
677                          struct drm_file *file)
678 {
679         drm_i915_private_t *dev_priv = dev->dev_private;
680         ssize_t remain;
681         loff_t gtt_page_base, offset;
682         loff_t first_data_page, last_data_page, num_pages;
683         loff_t pinned_pages, i;
684         struct page **user_pages;
685         struct mm_struct *mm = current->mm;
686         int gtt_page_offset, data_page_offset, data_page_index, page_length;
687         int ret;
688         uint64_t data_ptr = args->data_ptr;
689
690         remain = args->size;
691
692         /* Pin the user pages containing the data.  We can't fault while
693          * holding the struct mutex, and all of the pwrite implementations
694          * want to hold it while dereferencing the user data.
695          */
696         first_data_page = data_ptr / PAGE_SIZE;
697         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
698         num_pages = last_data_page - first_data_page + 1;
699
700         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
701         if (user_pages == NULL)
702                 return -ENOMEM;
703
704         mutex_unlock(&dev->struct_mutex);
705         down_read(&mm->mmap_sem);
706         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
707                                       num_pages, 0, 0, user_pages, NULL);
708         up_read(&mm->mmap_sem);
709         mutex_lock(&dev->struct_mutex);
710         if (pinned_pages < num_pages) {
711                 ret = -EFAULT;
712                 goto out_unpin_pages;
713         }
714
715         ret = i915_gem_object_set_to_gtt_domain(obj, true);
716         if (ret)
717                 goto out_unpin_pages;
718
719         ret = i915_gem_object_put_fence(obj);
720         if (ret)
721                 goto out_unpin_pages;
722
723         offset = obj->gtt_offset + args->offset;
724
725         while (remain > 0) {
726                 /* Operation in this page
727                  *
728                  * gtt_page_base = page offset within aperture
729                  * gtt_page_offset = offset within page in aperture
730                  * data_page_index = page number in get_user_pages return
731                  * data_page_offset = offset within the data_page_index page.
732                  * page_length = bytes to copy for this page
733                  */
734                 gtt_page_base = offset & PAGE_MASK;
735                 gtt_page_offset = offset_in_page(offset);
736                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
737                 data_page_offset = offset_in_page(data_ptr);
738
739                 page_length = remain;
740                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
741                         page_length = PAGE_SIZE - gtt_page_offset;
742                 if ((data_page_offset + page_length) > PAGE_SIZE)
743                         page_length = PAGE_SIZE - data_page_offset;
744
745                 slow_kernel_write(dev_priv->mm.gtt_mapping,
746                                   gtt_page_base, gtt_page_offset,
747                                   user_pages[data_page_index],
748                                   data_page_offset,
749                                   page_length);
750
751                 remain -= page_length;
752                 offset += page_length;
753                 data_ptr += page_length;
754         }
755
756 out_unpin_pages:
757         for (i = 0; i < pinned_pages; i++)
758                 page_cache_release(user_pages[i]);
759         drm_free_large(user_pages);
760
761         return ret;
762 }
763
764 /**
765  * This is the fast shmem pwrite path, which attempts to directly
766  * copy_from_user into the kmapped pages backing the object.
767  */
768 static int
769 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
770                            struct drm_i915_gem_object *obj,
771                            struct drm_i915_gem_pwrite *args,
772                            struct drm_file *file)
773 {
774         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
775         ssize_t remain;
776         loff_t offset;
777         char __user *user_data;
778         int page_offset, page_length;
779
780         user_data = (char __user *) (uintptr_t) args->data_ptr;
781         remain = args->size;
782
783         offset = args->offset;
784         obj->dirty = 1;
785
786         while (remain > 0) {
787                 struct page *page;
788                 char *vaddr;
789                 int ret;
790
791                 /* Operation in this page
792                  *
793                  * page_offset = offset within page
794                  * page_length = bytes to copy for this page
795                  */
796                 page_offset = offset_in_page(offset);
797                 page_length = remain;
798                 if ((page_offset + remain) > PAGE_SIZE)
799                         page_length = PAGE_SIZE - page_offset;
800
801                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
802                 if (IS_ERR(page))
803                         return PTR_ERR(page);
804
805                 vaddr = kmap_atomic(page);
806                 ret = __copy_from_user_inatomic(vaddr + page_offset,
807                                                 user_data,
808                                                 page_length);
809                 kunmap_atomic(vaddr);
810
811                 set_page_dirty(page);
812                 mark_page_accessed(page);
813                 page_cache_release(page);
814
815                 /* If we get a fault while copying data, then (presumably) our
816                  * source page isn't available.  Return the error and we'll
817                  * retry in the slow path.
818                  */
819                 if (ret)
820                         return -EFAULT;
821
822                 remain -= page_length;
823                 user_data += page_length;
824                 offset += page_length;
825         }
826
827         return 0;
828 }
829
830 /**
831  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
832  * the memory and maps it using kmap for copying.
833  *
834  * This avoids taking mmap_sem for faulting on the user's address while the
835  * struct_mutex is held.
836  */
837 static int
838 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
839                            struct drm_i915_gem_object *obj,
840                            struct drm_i915_gem_pwrite *args,
841                            struct drm_file *file)
842 {
843         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
844         struct mm_struct *mm = current->mm;
845         struct page **user_pages;
846         ssize_t remain;
847         loff_t offset, pinned_pages, i;
848         loff_t first_data_page, last_data_page, num_pages;
849         int shmem_page_offset;
850         int data_page_index, data_page_offset;
851         int page_length;
852         int ret;
853         uint64_t data_ptr = args->data_ptr;
854         int do_bit17_swizzling;
855
856         remain = args->size;
857
858         /* Pin the user pages containing the data.  We can't fault while
859          * holding the struct mutex, and all of the pwrite implementations
860          * want to hold it while dereferencing the user data.
861          */
862         first_data_page = data_ptr / PAGE_SIZE;
863         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
864         num_pages = last_data_page - first_data_page + 1;
865
866         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
867         if (user_pages == NULL)
868                 return -ENOMEM;
869
870         mutex_unlock(&dev->struct_mutex);
871         down_read(&mm->mmap_sem);
872         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
873                                       num_pages, 0, 0, user_pages, NULL);
874         up_read(&mm->mmap_sem);
875         mutex_lock(&dev->struct_mutex);
876         if (pinned_pages < num_pages) {
877                 ret = -EFAULT;
878                 goto out;
879         }
880
881         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
882         if (ret)
883                 goto out;
884
885         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886
887         offset = args->offset;
888         obj->dirty = 1;
889
890         while (remain > 0) {
891                 struct page *page;
892
893                 /* Operation in this page
894                  *
895                  * shmem_page_offset = offset within page in shmem file
896                  * data_page_index = page number in get_user_pages return
897                  * data_page_offset = offset within the data_page_index page.
898                  * page_length = bytes to copy for this page
899                  */
900                 shmem_page_offset = offset_in_page(offset);
901                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
902                 data_page_offset = offset_in_page(data_ptr);
903
904                 page_length = remain;
905                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
906                         page_length = PAGE_SIZE - shmem_page_offset;
907                 if ((data_page_offset + page_length) > PAGE_SIZE)
908                         page_length = PAGE_SIZE - data_page_offset;
909
910                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
911                 if (IS_ERR(page)) {
912                         ret = PTR_ERR(page);
913                         goto out;
914                 }
915
916                 if (do_bit17_swizzling) {
917                         slow_shmem_bit17_copy(page,
918                                               shmem_page_offset,
919                                               user_pages[data_page_index],
920                                               data_page_offset,
921                                               page_length,
922                                               0);
923                 } else {
924                         slow_shmem_copy(page,
925                                         shmem_page_offset,
926                                         user_pages[data_page_index],
927                                         data_page_offset,
928                                         page_length);
929                 }
930
931                 set_page_dirty(page);
932                 mark_page_accessed(page);
933                 page_cache_release(page);
934
935                 remain -= page_length;
936                 data_ptr += page_length;
937                 offset += page_length;
938         }
939
940 out:
941         for (i = 0; i < pinned_pages; i++)
942                 page_cache_release(user_pages[i]);
943         drm_free_large(user_pages);
944
945         return ret;
946 }
947
948 /**
949  * Writes data to the object referenced by handle.
950  *
951  * On error, the contents of the buffer that were to be modified are undefined.
952  */
953 int
954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
955                       struct drm_file *file)
956 {
957         struct drm_i915_gem_pwrite *args = data;
958         struct drm_i915_gem_object *obj;
959         int ret;
960
961         if (args->size == 0)
962                 return 0;
963
964         if (!access_ok(VERIFY_READ,
965                        (char __user *)(uintptr_t)args->data_ptr,
966                        args->size))
967                 return -EFAULT;
968
969         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
970                                       args->size);
971         if (ret)
972                 return -EFAULT;
973
974         ret = i915_mutex_lock_interruptible(dev);
975         if (ret)
976                 return ret;
977
978         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
979         if (&obj->base == NULL) {
980                 ret = -ENOENT;
981                 goto unlock;
982         }
983
984         /* Bounds check destination. */
985         if (args->offset > obj->base.size ||
986             args->size > obj->base.size - args->offset) {
987                 ret = -EINVAL;
988                 goto out;
989         }
990
991         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
992
993         /* We can only do the GTT pwrite on untiled buffers, as otherwise
994          * it would end up going through the fenced access, and we'll get
995          * different detiling behavior between reading and writing.
996          * pread/pwrite currently are reading and writing from the CPU
997          * perspective, requiring manual detiling by the client.
998          */
999         if (obj->phys_obj)
1000                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1001         else if (obj->gtt_space &&
1002                  obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1003                 ret = i915_gem_object_pin(obj, 0, true);
1004                 if (ret)
1005                         goto out;
1006
1007                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1008                 if (ret)
1009                         goto out_unpin;
1010
1011                 ret = i915_gem_object_put_fence(obj);
1012                 if (ret)
1013                         goto out_unpin;
1014
1015                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1016                 if (ret == -EFAULT)
1017                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1018
1019 out_unpin:
1020                 i915_gem_object_unpin(obj);
1021         } else {
1022                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1023                 if (ret)
1024                         goto out;
1025
1026                 ret = -EFAULT;
1027                 if (!i915_gem_object_needs_bit17_swizzle(obj))
1028                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1029                 if (ret == -EFAULT)
1030                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1031         }
1032
1033 out:
1034         drm_gem_object_unreference(&obj->base);
1035 unlock:
1036         mutex_unlock(&dev->struct_mutex);
1037         return ret;
1038 }
1039
1040 /**
1041  * Called when user space prepares to use an object with the CPU, either
1042  * through the mmap ioctl's mapping or a GTT mapping.
1043  */
1044 int
1045 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1046                           struct drm_file *file)
1047 {
1048         struct drm_i915_gem_set_domain *args = data;
1049         struct drm_i915_gem_object *obj;
1050         uint32_t read_domains = args->read_domains;
1051         uint32_t write_domain = args->write_domain;
1052         int ret;
1053
1054         if (!(dev->driver->driver_features & DRIVER_GEM))
1055                 return -ENODEV;
1056
1057         /* Only handle setting domains to types used by the CPU. */
1058         if (write_domain & I915_GEM_GPU_DOMAINS)
1059                 return -EINVAL;
1060
1061         if (read_domains & I915_GEM_GPU_DOMAINS)
1062                 return -EINVAL;
1063
1064         /* Having something in the write domain implies it's in the read
1065          * domain, and only that read domain.  Enforce that in the request.
1066          */
1067         if (write_domain != 0 && read_domains != write_domain)
1068                 return -EINVAL;
1069
1070         ret = i915_mutex_lock_interruptible(dev);
1071         if (ret)
1072                 return ret;
1073
1074         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1075         if (&obj->base == NULL) {
1076                 ret = -ENOENT;
1077                 goto unlock;
1078         }
1079
1080         if (read_domains & I915_GEM_DOMAIN_GTT) {
1081                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1082
1083                 /* Silently promote "you're not bound, there was nothing to do"
1084                  * to success, since the client was just asking us to
1085                  * make sure everything was done.
1086                  */
1087                 if (ret == -EINVAL)
1088                         ret = 0;
1089         } else {
1090                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1091         }
1092
1093         drm_gem_object_unreference(&obj->base);
1094 unlock:
1095         mutex_unlock(&dev->struct_mutex);
1096         return ret;
1097 }
1098
1099 /**
1100  * Called when user space has done writes to this buffer
1101  */
1102 int
1103 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1104                          struct drm_file *file)
1105 {
1106         struct drm_i915_gem_sw_finish *args = data;
1107         struct drm_i915_gem_object *obj;
1108         int ret = 0;
1109
1110         if (!(dev->driver->driver_features & DRIVER_GEM))
1111                 return -ENODEV;
1112
1113         ret = i915_mutex_lock_interruptible(dev);
1114         if (ret)
1115                 return ret;
1116
1117         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1118         if (&obj->base == NULL) {
1119                 ret = -ENOENT;
1120                 goto unlock;
1121         }
1122
1123         /* Pinned buffers may be scanout, so flush the cache */
1124         if (obj->pin_count)
1125                 i915_gem_object_flush_cpu_write_domain(obj);
1126
1127         drm_gem_object_unreference(&obj->base);
1128 unlock:
1129         mutex_unlock(&dev->struct_mutex);
1130         return ret;
1131 }
1132
1133 /**
1134  * Maps the contents of an object, returning the address it is mapped
1135  * into.
1136  *
1137  * While the mapping holds a reference on the contents of the object, it doesn't
1138  * imply a ref on the object itself.
1139  */
1140 int
1141 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1142                     struct drm_file *file)
1143 {
1144         struct drm_i915_private *dev_priv = dev->dev_private;
1145         struct drm_i915_gem_mmap *args = data;
1146         struct drm_gem_object *obj;
1147         unsigned long addr;
1148
1149         if (!(dev->driver->driver_features & DRIVER_GEM))
1150                 return -ENODEV;
1151
1152         obj = drm_gem_object_lookup(dev, file, args->handle);
1153         if (obj == NULL)
1154                 return -ENOENT;
1155
1156         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1157                 drm_gem_object_unreference_unlocked(obj);
1158                 return -E2BIG;
1159         }
1160
1161         down_write(&current->mm->mmap_sem);
1162         addr = do_mmap(obj->filp, 0, args->size,
1163                        PROT_READ | PROT_WRITE, MAP_SHARED,
1164                        args->offset);
1165         up_write(&current->mm->mmap_sem);
1166         drm_gem_object_unreference_unlocked(obj);
1167         if (IS_ERR((void *)addr))
1168                 return addr;
1169
1170         args->addr_ptr = (uint64_t) addr;
1171
1172         return 0;
1173 }
1174
1175 /**
1176  * i915_gem_fault - fault a page into the GTT
1177  * @vma: VMA in question
1178  * @vmf: fault info
1179  *
1180  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1181  * from userspace.  The fault handler takes care of binding the object to
1182  * the GTT (if needed), allocating and programming a fence register (again,
1183  * only if needed based on whether the old reg is still valid or the object
1184  * is tiled) and inserting a new PTE into the faulting process.
1185  *
1186  * Note that the faulting process may involve evicting existing objects
1187  * from the GTT and/or fence registers to make room.  So performance may
1188  * suffer if the GTT working set is large or there are few fence registers
1189  * left.
1190  */
1191 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1192 {
1193         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1194         struct drm_device *dev = obj->base.dev;
1195         drm_i915_private_t *dev_priv = dev->dev_private;
1196         pgoff_t page_offset;
1197         unsigned long pfn;
1198         int ret = 0;
1199         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1200
1201         /* We don't use vmf->pgoff since that has the fake offset */
1202         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1203                 PAGE_SHIFT;
1204
1205         ret = i915_mutex_lock_interruptible(dev);
1206         if (ret)
1207                 goto out;
1208
1209         trace_i915_gem_object_fault(obj, page_offset, true, write);
1210
1211         /* Now bind it into the GTT if needed */
1212         if (!obj->map_and_fenceable) {
1213                 ret = i915_gem_object_unbind(obj);
1214                 if (ret)
1215                         goto unlock;
1216         }
1217         if (!obj->gtt_space) {
1218                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1219                 if (ret)
1220                         goto unlock;
1221
1222                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1223                 if (ret)
1224                         goto unlock;
1225         }
1226
1227         if (obj->tiling_mode == I915_TILING_NONE)
1228                 ret = i915_gem_object_put_fence(obj);
1229         else
1230                 ret = i915_gem_object_get_fence(obj, NULL);
1231         if (ret)
1232                 goto unlock;
1233
1234         if (i915_gem_object_is_inactive(obj))
1235                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1236
1237         obj->fault_mappable = true;
1238
1239         pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1240                 page_offset;
1241
1242         /* Finally, remap it using the new GTT offset */
1243         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1244 unlock:
1245         mutex_unlock(&dev->struct_mutex);
1246 out:
1247         switch (ret) {
1248         case -EIO:
1249         case -EAGAIN:
1250                 /* Give the error handler a chance to run and move the
1251                  * objects off the GPU active list. Next time we service the
1252                  * fault, we should be able to transition the page into the
1253                  * GTT without touching the GPU (and so avoid further
1254                  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1255                  * with coherency, just lost writes.
1256                  */
1257                 set_need_resched();
1258         case 0:
1259         case -ERESTARTSYS:
1260         case -EINTR:
1261                 return VM_FAULT_NOPAGE;
1262         case -ENOMEM:
1263                 return VM_FAULT_OOM;
1264         default:
1265                 return VM_FAULT_SIGBUS;
1266         }
1267 }
1268
1269 /**
1270  * i915_gem_release_mmap - remove physical page mappings
1271  * @obj: obj in question
1272  *
1273  * Preserve the reservation of the mmapping with the DRM core code, but
1274  * relinquish ownership of the pages back to the system.
1275  *
1276  * It is vital that we remove the page mapping if we have mapped a tiled
1277  * object through the GTT and then lose the fence register due to
1278  * resource pressure. Similarly if the object has been moved out of the
1279  * aperture, then pages mapped into userspace must be revoked. Removing the
1280  * mapping will then trigger a page fault on the next user access, allowing
1281  * fixup by i915_gem_fault().
1282  */
1283 void
1284 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1285 {
1286         if (!obj->fault_mappable)
1287                 return;
1288
1289         if (obj->base.dev->dev_mapping)
1290                 unmap_mapping_range(obj->base.dev->dev_mapping,
1291                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1292                                     obj->base.size, 1);
1293
1294         obj->fault_mappable = false;
1295 }
1296
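/* Return the size of the GTT region a fence would need to cover for an
 * object of this size and tiling mode: the object size itself on gen4+
 * or for untiled objects, otherwise the next power of two of at least
 * 512KiB (1MiB on gen3).
 */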
1297 static uint32_t
1298 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1299 {
1300         uint32_t gtt_size;
1301
1302         if (INTEL_INFO(dev)->gen >= 4 ||
1303             tiling_mode == I915_TILING_NONE)
1304                 return size;
1305
1306         /* Previous chips need a power-of-two fence region when tiling */
1307         if (INTEL_INFO(dev)->gen == 3)
1308                 gtt_size = 1024*1024;
1309         else
1310                 gtt_size = 512*1024;
1311
1312         while (gtt_size < size)
1313                 gtt_size <<= 1;
1314
1315         return gtt_size;
1316 }
1317
1318 /**
1319  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1320  * @obj: object to check
1321  *
1322  * Return the required GTT alignment for an object, taking into account
1323  * potential fence register mapping.
1324  */
1325 static uint32_t
1326 i915_gem_get_gtt_alignment(struct drm_device *dev,
1327                            uint32_t size,
1328                            int tiling_mode)
1329 {
1330         /*
1331          * Minimum alignment is 4k (GTT page size), but might be greater
1332          * if a fence register is needed for the object.
1333          */
1334         if (INTEL_INFO(dev)->gen >= 4 ||
1335             tiling_mode == I915_TILING_NONE)
1336                 return 4096;
1337
1338         /*
1339          * Previous chips need to be aligned to the size of the smallest
1340          * fence register that can contain the object.
1341          */
1342         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1343 }
1344
1345 /**
1346  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1347  *                                       unfenced object
1348  * @dev: the device
1349  * @size: size of the object
1350  * @tiling_mode: tiling mode of the object
1351  *
1352  * Return the required GTT alignment for an object, only taking into account
1353  * unfenced tiled surface requirements.
1354  */
1355 uint32_t
1356 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1357                                     uint32_t size,
1358                                     int tiling_mode)
1359 {
1360         /*
1361          * Minimum alignment is 4k (GTT page size) for sane hw.
1362          */
1363         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1364             tiling_mode == I915_TILING_NONE)
1365                 return 4096;
1366
1367         /* Previous hardware however needs to be aligned to a power-of-two
1368          * tile height. The simplest method for determining this is to reuse
1369  * the power-of-two fence-region size from i915_gem_get_gtt_size().
1370          */
1371         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1372 }
1373
1374 int
1375 i915_gem_mmap_gtt(struct drm_file *file,
1376                   struct drm_device *dev,
1377                   uint32_t handle,
1378                   uint64_t *offset)
1379 {
1380         struct drm_i915_private *dev_priv = dev->dev_private;
1381         struct drm_i915_gem_object *obj;
1382         int ret;
1383
1384         if (!(dev->driver->driver_features & DRIVER_GEM))
1385                 return -ENODEV;
1386
1387         ret = i915_mutex_lock_interruptible(dev);
1388         if (ret)
1389                 return ret;
1390
1391         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1392         if (&obj->base == NULL) {
1393                 ret = -ENOENT;
1394                 goto unlock;
1395         }
1396
1397         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398                 ret = -E2BIG;
1399                 goto out;
1400         }
1401
1402         if (obj->madv != I915_MADV_WILLNEED) {
1403                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1404                 ret = -EINVAL;
1405                 goto out;
1406         }
1407
1408         if (!obj->base.map_list.map) {
1409                 ret = drm_gem_create_mmap_offset(&obj->base);
1410                 if (ret)
1411                         goto out;
1412         }
1413
1414         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1415
1416 out:
1417         drm_gem_object_unreference(&obj->base);
1418 unlock:
1419         mutex_unlock(&dev->struct_mutex);
1420         return ret;
1421 }
1422
1423 /**
1424  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1425  * @dev: DRM device
1426  * @data: GTT mapping ioctl data
1427  * @file: GEM object info
1428  *
1429  * Simply returns the fake offset to userspace so it can mmap it.
1430  * The mmap call will end up in drm_gem_mmap(), which will set things
1431  * up so we can get faults in the handler above.
1432  *
1433  * The fault handler will take care of binding the object into the GTT
1434  * (since it may have been evicted to make room for something), allocating
1435  * a fence register, and mapping the appropriate aperture address into
1436  * userspace.
1437  */
1438 int
1439 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1440                         struct drm_file *file)
1441 {
1442         struct drm_i915_gem_mmap_gtt *args = data;
1443
1444         if (!(dev->driver->driver_features & DRIVER_GEM))
1445                 return -ENODEV;
1446
1447         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1448 }
1449
1450
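/* Populate obj->pages with every shmem backing page of the object,
 * taking a reference on each and applying the bit-17 swizzle fixup if
 * required; on failure the pages acquired so far are released again.
 */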
1451 static int
1452 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1453                               gfp_t gfpmask)
1454 {
1455         int page_count, i;
1456         struct address_space *mapping;
1457         struct inode *inode;
1458         struct page *page;
1459
1460         /* Get the list of pages out of our struct file.  They'll be pinned
1461          * at this point until we release them.
1462          */
1463         page_count = obj->base.size / PAGE_SIZE;
1464         BUG_ON(obj->pages != NULL);
1465         obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1466         if (obj->pages == NULL)
1467                 return -ENOMEM;
1468
1469         inode = obj->base.filp->f_path.dentry->d_inode;
1470         mapping = inode->i_mapping;
1471         gfpmask |= mapping_gfp_mask(mapping);
1472
1473         for (i = 0; i < page_count; i++) {
1474                 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1475                 if (IS_ERR(page))
1476                         goto err_pages;
1477
1478                 obj->pages[i] = page;
1479         }
1480
1481         if (i915_gem_object_needs_bit17_swizzle(obj))
1482                 i915_gem_object_do_bit_17_swizzle(obj);
1483
1484         return 0;
1485
1486 err_pages:
1487         while (i--)
1488                 page_cache_release(obj->pages[i]);
1489
1490         drm_free_large(obj->pages);
1491         obj->pages = NULL;
1492         return PTR_ERR(page);
1493 }
1494
1495 static void
1496 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1497 {
1498         int page_count = obj->base.size / PAGE_SIZE;
1499         int i;
1500
1501         BUG_ON(obj->madv == __I915_MADV_PURGED);
1502
1503         if (i915_gem_object_needs_bit17_swizzle(obj))
1504                 i915_gem_object_save_bit_17_swizzle(obj);
1505
1506         if (obj->madv == I915_MADV_DONTNEED)
1507                 obj->dirty = 0;
1508
1509         for (i = 0; i < page_count; i++) {
1510                 if (obj->dirty)
1511                         set_page_dirty(obj->pages[i]);
1512
1513                 if (obj->madv == I915_MADV_WILLNEED)
1514                         mark_page_accessed(obj->pages[i]);
1515
1516                 page_cache_release(obj->pages[i]);
1517         }
1518         obj->dirty = 0;
1519
1520         drm_free_large(obj->pages);
1521         obj->pages = NULL;
1522 }
1523
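/*
 * Summary of the list shuffling done by the helpers below: while an object
 * has outstanding rendering it sits on dev_priv->mm.active_list (and on the
 * owning ring's active_list); once its last request retires it moves either
 * to mm.flushing_list (a GPU write domain is still dirty) or to
 * mm.inactive_list; pinned objects are parked on mm.pinned_list instead of
 * the inactive list.
 */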
1524 void
1525 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1526                                struct intel_ring_buffer *ring,
1527                                u32 seqno)
1528 {
1529         struct drm_device *dev = obj->base.dev;
1530         struct drm_i915_private *dev_priv = dev->dev_private;
1531
1532         BUG_ON(ring == NULL);
1533         obj->ring = ring;
1534
1535         /* Add a reference if we're newly entering the active list. */
1536         if (!obj->active) {
1537                 drm_gem_object_reference(&obj->base);
1538                 obj->active = 1;
1539         }
1540
1541         /* Move from whatever list we were on to the tail of execution. */
1542         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1543         list_move_tail(&obj->ring_list, &ring->active_list);
1544
1545         obj->last_rendering_seqno = seqno;
1546         if (obj->fenced_gpu_access) {
1547                 struct drm_i915_fence_reg *reg;
1548
1549                 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1550
1551                 obj->last_fenced_seqno = seqno;
1552                 obj->last_fenced_ring = ring;
1553
1554                 reg = &dev_priv->fence_regs[obj->fence_reg];
1555                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1556         }
1557 }
1558
1559 static void
1560 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1561 {
1562         list_del_init(&obj->ring_list);
1563         obj->last_rendering_seqno = 0;
1564 }
1565
1566 static void
1567 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1568 {
1569         struct drm_device *dev = obj->base.dev;
1570         drm_i915_private_t *dev_priv = dev->dev_private;
1571
1572         BUG_ON(!obj->active);
1573         list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1574
1575         i915_gem_object_move_off_active(obj);
1576 }
1577
1578 static void
1579 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1580 {
1581         struct drm_device *dev = obj->base.dev;
1582         struct drm_i915_private *dev_priv = dev->dev_private;
1583
1584         if (obj->pin_count != 0)
1585                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1586         else
1587                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1588
1589         BUG_ON(!list_empty(&obj->gpu_write_list));
1590         BUG_ON(!obj->active);
1591         obj->ring = NULL;
1592
1593         i915_gem_object_move_off_active(obj);
1594         obj->fenced_gpu_access = false;
1595
1596         obj->active = 0;
1597         obj->pending_gpu_write = false;
1598         drm_gem_object_unreference(&obj->base);
1599
1600         WARN_ON(i915_verify_lists(dev));
1601 }
1602
1603 /* Immediately discard the backing storage */
1604 static void
1605 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1606 {
1607         struct inode *inode;
1608
1609         /* Our goal here is to return as much of the memory as
1610          * possible back to the system, since we may be called from
1611          * the OOM path.  To do this we must instruct the shmfs to
1612          * drop all of its backing pages, *now*.
1613          */
1614         inode = obj->base.filp->f_path.dentry->d_inode;
1615         shmem_truncate_range(inode, 0, (loff_t)-1);
1616
1617         obj->madv = __I915_MADV_PURGED;
1618 }
1619
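/*
 * obj->madv tracks the userspace madvise hint: I915_MADV_WILLNEED (default,
 * backing pages must be preserved), I915_MADV_DONTNEED (purgeable, the pages
 * may be discarded under memory pressure) and the internal
 * __I915_MADV_PURGED state set above once the shmem backing store has
 * actually been dropped.
 */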
1620 static inline int
1621 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1622 {
1623         return obj->madv == I915_MADV_DONTNEED;
1624 }
1625
1626 static void
1627 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1628                                uint32_t flush_domains)
1629 {
1630         struct drm_i915_gem_object *obj, *next;
1631
1632         list_for_each_entry_safe(obj, next,
1633                                  &ring->gpu_write_list,
1634                                  gpu_write_list) {
1635                 if (obj->base.write_domain & flush_domains) {
1636                         uint32_t old_write_domain = obj->base.write_domain;
1637
1638                         obj->base.write_domain = 0;
1639                         list_del_init(&obj->gpu_write_list);
1640                         i915_gem_object_move_to_active(obj, ring,
1641                                                        i915_gem_next_request_seqno(ring));
1642
1643                         trace_i915_gem_object_change_domain(obj,
1644                                                             obj->base.read_domains,
1645                                                             old_write_domain);
1646                 }
1647         }
1648 }
1649
1650 static u32
1651 i915_gem_get_seqno(struct drm_device *dev)
1652 {
1653         drm_i915_private_t *dev_priv = dev->dev_private;
1654         u32 seqno = dev_priv->next_seqno;
1655
1656         /* reserve 0 for non-seqno */
1657         if (++dev_priv->next_seqno == 0)
1658                 dev_priv->next_seqno = 1;
1659
1660         return seqno;
1661 }
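
/*
 * Seqnos handed out here are compared with i915_seqno_passed() (a helper in
 * i915_drv.h, roughly "(s32)(seq1 - seq2) >= 0"), so the u32 counter is free
 * to wrap as long as far fewer than 2^31 requests are in flight at once.
 */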
1662
1663 u32
1664 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1665 {
1666         if (ring->outstanding_lazy_request == 0)
1667                 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1668
1669         return ring->outstanding_lazy_request;
1670 }
1671
1672 int
1673 i915_add_request(struct intel_ring_buffer *ring,
1674                  struct drm_file *file,
1675                  struct drm_i915_gem_request *request)
1676 {
1677         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1678         uint32_t seqno;
1679         int was_empty;
1680         int ret;
1681
1682         BUG_ON(request == NULL);
1683         seqno = i915_gem_next_request_seqno(ring);
1684
1685         ret = ring->add_request(ring, &seqno);
1686         if (ret)
1687             return ret;
1688
1689         trace_i915_gem_request_add(ring, seqno);
1690
1691         request->seqno = seqno;
1692         request->ring = ring;
1693         request->emitted_jiffies = jiffies;
1694         was_empty = list_empty(&ring->request_list);
1695         list_add_tail(&request->list, &ring->request_list);
1696
1697         if (file) {
1698                 struct drm_i915_file_private *file_priv = file->driver_priv;
1699
1700                 spin_lock(&file_priv->mm.lock);
1701                 request->file_priv = file_priv;
1702                 list_add_tail(&request->client_list,
1703                               &file_priv->mm.request_list);
1704                 spin_unlock(&file_priv->mm.lock);
1705         }
1706
1707         ring->outstanding_lazy_request = 0;
1708
1709         if (!dev_priv->mm.suspended) {
1710                 if (i915_enable_hangcheck) {
1711                         mod_timer(&dev_priv->hangcheck_timer,
1712                                   jiffies +
1713                                   msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1714                 }
1715                 if (was_empty)
1716                         queue_delayed_work(dev_priv->wq,
1717                                            &dev_priv->mm.retire_work, HZ);
1718         }
1719         return 0;
1720 }
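
/*
 * Note on ownership: callers allocate the request (kzalloc) and keep
 * ownership on failure, i.e. they must kfree() it themselves if this
 * returns an error; on success the request is linked onto the ring's
 * request_list and is freed during request retirement.  See
 * i915_gem_retire_work_handler() and i915_wait_request() below for the
 * usage pattern.
 */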
1721
1722 static inline void
1723 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1724 {
1725         struct drm_i915_file_private *file_priv = request->file_priv;
1726
1727         if (!file_priv)
1728                 return;
1729
1730         spin_lock(&file_priv->mm.lock);
1731         if (request->file_priv) {
1732                 list_del(&request->client_list);
1733                 request->file_priv = NULL;
1734         }
1735         spin_unlock(&file_priv->mm.lock);
1736 }
1737
1738 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1739                                       struct intel_ring_buffer *ring)
1740 {
1741         while (!list_empty(&ring->request_list)) {
1742                 struct drm_i915_gem_request *request;
1743
1744                 request = list_first_entry(&ring->request_list,
1745                                            struct drm_i915_gem_request,
1746                                            list);
1747
1748                 list_del(&request->list);
1749                 i915_gem_request_remove_from_client(request);
1750                 kfree(request);
1751         }
1752
1753         while (!list_empty(&ring->active_list)) {
1754                 struct drm_i915_gem_object *obj;
1755
1756                 obj = list_first_entry(&ring->active_list,
1757                                        struct drm_i915_gem_object,
1758                                        ring_list);
1759
1760                 obj->base.write_domain = 0;
1761                 list_del_init(&obj->gpu_write_list);
1762                 i915_gem_object_move_to_inactive(obj);
1763         }
1764 }
1765
1766 static void i915_gem_reset_fences(struct drm_device *dev)
1767 {
1768         struct drm_i915_private *dev_priv = dev->dev_private;
1769         int i;
1770
1771         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1772                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1773                 struct drm_i915_gem_object *obj = reg->obj;
1774
1775                 if (!obj)
1776                         continue;
1777
1778                 if (obj->tiling_mode)
1779                         i915_gem_release_mmap(obj);
1780
1781                 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1782                 reg->obj->fenced_gpu_access = false;
1783                 reg->obj->last_fenced_seqno = 0;
1784                 reg->obj->last_fenced_ring = NULL;
1785                 i915_gem_clear_fence_reg(dev, reg);
1786         }
1787 }
1788
1789 void i915_gem_reset(struct drm_device *dev)
1790 {
1791         struct drm_i915_private *dev_priv = dev->dev_private;
1792         struct drm_i915_gem_object *obj;
1793         int i;
1794
1795         for (i = 0; i < I915_NUM_RINGS; i++)
1796                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1797
1798         /* Remove anything from the flushing lists. The GPU cache is likely
1799          * to be lost on reset along with the data, so simply move the
1800          * lost bo to the inactive list.
1801          */
1802         while (!list_empty(&dev_priv->mm.flushing_list)) {
1803                 obj = list_first_entry(&dev_priv->mm.flushing_list,
1804                                       struct drm_i915_gem_object,
1805                                       mm_list);
1806
1807                 obj->base.write_domain = 0;
1808                 list_del_init(&obj->gpu_write_list);
1809                 i915_gem_object_move_to_inactive(obj);
1810         }
1811
1812         /* Move everything out of the GPU domains to ensure we do any
1813          * necessary invalidation upon reuse.
1814          */
1815         list_for_each_entry(obj,
1816                             &dev_priv->mm.inactive_list,
1817                             mm_list)
1818         {
1819                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1820         }
1821
1822         /* The fence registers are invalidated so clear them out */
1823         i915_gem_reset_fences(dev);
1824 }
1825
1826 /**
1827  * This function clears the request list as sequence numbers are passed.
1828  */
1829 static void
1830 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1831 {
1832         uint32_t seqno;
1833         int i;
1834
1835         if (list_empty(&ring->request_list))
1836                 return;
1837
1838         WARN_ON(i915_verify_lists(ring->dev));
1839
1840         seqno = ring->get_seqno(ring);
1841
1842         for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1843                 if (seqno >= ring->sync_seqno[i])
1844                         ring->sync_seqno[i] = 0;
1845
1846         while (!list_empty(&ring->request_list)) {
1847                 struct drm_i915_gem_request *request;
1848
1849                 request = list_first_entry(&ring->request_list,
1850                                            struct drm_i915_gem_request,
1851                                            list);
1852
1853                 if (!i915_seqno_passed(seqno, request->seqno))
1854                         break;
1855
1856                 trace_i915_gem_request_retire(ring, request->seqno);
1857
1858                 list_del(&request->list);
1859                 i915_gem_request_remove_from_client(request);
1860                 kfree(request);
1861         }
1862
1863         /* Move any buffers on the active list that are no longer referenced
1864          * by the ringbuffer to the flushing/inactive lists as appropriate.
1865          */
1866         while (!list_empty(&ring->active_list)) {
1867                 struct drm_i915_gem_object *obj;
1868
1869                 obj = list_first_entry(&ring->active_list,
1870                                       struct drm_i915_gem_object,
1871                                       ring_list);
1872
1873                 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1874                         break;
1875
1876                 if (obj->base.write_domain != 0)
1877                         i915_gem_object_move_to_flushing(obj);
1878                 else
1879                         i915_gem_object_move_to_inactive(obj);
1880         }
1881
1882         if (unlikely(ring->trace_irq_seqno &&
1883                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1884                 ring->irq_put(ring);
1885                 ring->trace_irq_seqno = 0;
1886         }
1887
1888         WARN_ON(i915_verify_lists(ring->dev));
1889 }
1890
1891 void
1892 i915_gem_retire_requests(struct drm_device *dev)
1893 {
1894         drm_i915_private_t *dev_priv = dev->dev_private;
1895         int i;
1896
1897         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1898             struct drm_i915_gem_object *obj, *next;
1899
1900             /* We must be careful that during unbind() we do not
1901              * accidentally infinitely recurse into retire requests.
1902              * Currently:
1903              *   retire -> free -> unbind -> wait -> retire_ring
1904              */
1905             list_for_each_entry_safe(obj, next,
1906                                      &dev_priv->mm.deferred_free_list,
1907                                      mm_list)
1908                     i915_gem_free_object_tail(obj);
1909         }
1910
1911         for (i = 0; i < I915_NUM_RINGS; i++)
1912                 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1913 }
1914
1915 static void
1916 i915_gem_retire_work_handler(struct work_struct *work)
1917 {
1918         drm_i915_private_t *dev_priv;
1919         struct drm_device *dev;
1920         bool idle;
1921         int i;
1922
1923         dev_priv = container_of(work, drm_i915_private_t,
1924                                 mm.retire_work.work);
1925         dev = dev_priv->dev;
1926
1927         /* Come back later if the device is busy... */
1928         if (!mutex_trylock(&dev->struct_mutex)) {
1929                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1930                 return;
1931         }
1932
1933         i915_gem_retire_requests(dev);
1934
1935         /* Send a periodic flush down the ring so we don't hold onto GEM
1936          * objects indefinitely.
1937          */
1938         idle = true;
1939         for (i = 0; i < I915_NUM_RINGS; i++) {
1940                 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1941
1942                 if (!list_empty(&ring->gpu_write_list)) {
1943                         struct drm_i915_gem_request *request;
1944                         int ret;
1945
1946                         ret = i915_gem_flush_ring(ring,
1947                                                   0, I915_GEM_GPU_DOMAINS);
1948                         request = kzalloc(sizeof(*request), GFP_KERNEL);
1949                         if (ret || request == NULL ||
1950                             i915_add_request(ring, NULL, request))
1951                             kfree(request);
1952                 }
1953
1954                 idle &= list_empty(&ring->request_list);
1955         }
1956
1957         if (!dev_priv->mm.suspended && !idle)
1958                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1959
1960         mutex_unlock(&dev->struct_mutex);
1961 }
1962
1963 /**
1964  * Waits for a sequence number to be signaled, and cleans up the
1965  * request and object lists appropriately for that event.
1966  */
1967 int
1968 i915_wait_request(struct intel_ring_buffer *ring,
1969                   uint32_t seqno)
1970 {
1971         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1972         u32 ier;
1973         int ret = 0;
1974
1975         BUG_ON(seqno == 0);
1976
1977         if (atomic_read(&dev_priv->mm.wedged)) {
1978                 struct completion *x = &dev_priv->error_completion;
1979                 bool recovery_complete;
1980                 unsigned long flags;
1981
1982                 /* Give the error handler a chance to run. */
1983                 spin_lock_irqsave(&x->wait.lock, flags);
1984                 recovery_complete = x->done > 0;
1985                 spin_unlock_irqrestore(&x->wait.lock, flags);
1986
1987                 return recovery_complete ? -EIO : -EAGAIN;
1988         }
1989
1990         if (seqno == ring->outstanding_lazy_request) {
1991                 struct drm_i915_gem_request *request;
1992
1993                 request = kzalloc(sizeof(*request), GFP_KERNEL);
1994                 if (request == NULL)
1995                         return -ENOMEM;
1996
1997                 ret = i915_add_request(ring, NULL, request);
1998                 if (ret) {
1999                         kfree(request);
2000                         return ret;
2001                 }
2002
2003                 seqno = request->seqno;
2004         }
2005
2006         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2007                 if (HAS_PCH_SPLIT(ring->dev))
2008                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2009                 else
2010                         ier = I915_READ(IER);
2011                 if (!ier) {
2012                         DRM_ERROR("something (likely vbetool) disabled "
2013                                   "interrupts, re-enabling\n");
2014                         ring->dev->driver->irq_preinstall(ring->dev);
2015                         ring->dev->driver->irq_postinstall(ring->dev);
2016                 }
2017
2018                 trace_i915_gem_request_wait_begin(ring, seqno);
2019
2020                 ring->waiting_seqno = seqno;
2021                 if (ring->irq_get(ring)) {
2022                         if (dev_priv->mm.interruptible)
2023                                 ret = wait_event_interruptible(ring->irq_queue,
2024                                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
2025                                                                || atomic_read(&dev_priv->mm.wedged));
2026                         else
2027                                 wait_event(ring->irq_queue,
2028                                            i915_seqno_passed(ring->get_seqno(ring), seqno)
2029                                            || atomic_read(&dev_priv->mm.wedged));
2030
2031                         ring->irq_put(ring);
2032                 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2033                                                       seqno) ||
2034                                     atomic_read(&dev_priv->mm.wedged), 3000))
2035                         ret = -EBUSY;
2036                 ring->waiting_seqno = 0;
2037
2038                 trace_i915_gem_request_wait_end(ring, seqno);
2039         }
2040         if (atomic_read(&dev_priv->mm.wedged))
2041                 ret = -EAGAIN;
2042
2043         if (ret && ret != -ERESTARTSYS)
2044                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2045                           __func__, ret, seqno, ring->get_seqno(ring),
2046                           dev_priv->next_seqno);
2047
2048         /* Directly dispatch request retiring.  While we have the work queue
2049          * to handle this, the waiter on a request often wants an associated
2050          * buffer to have made it to the inactive list, and we would need
2051          * a separate wait queue to handle that.
2052          */
2053         if (ret == 0)
2054                 i915_gem_retire_requests_ring(ring);
2055
2056         return ret;
2057 }
2058
2059 /**
2060  * Ensures that all rendering to the object has completed and the object is
2061  * safe to unbind from the GTT or access from the CPU.
2062  */
2063 int
2064 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2065 {
2066         int ret;
2067
2068         /* This function only exists to support waiting for existing rendering,
2069          * not for emitting required flushes.
2070          */
2071         BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2072
2073         /* If there is rendering queued on the buffer being evicted, wait for
2074          * it.
2075          */
2076         if (obj->active) {
2077                 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2078                 if (ret)
2079                         return ret;
2080         }
2081
2082         return 0;
2083 }
2084
2085 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2086 {
2087         u32 old_write_domain, old_read_domains;
2088
2089         /* Act as a barrier for all accesses through the GTT */
2090         mb();
2091
2092         /* Force a pagefault for domain tracking on next user access */
2093         i915_gem_release_mmap(obj);
2094
2095         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2096                 return;
2097
2098         old_read_domains = obj->base.read_domains;
2099         old_write_domain = obj->base.write_domain;
2100
2101         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2102         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2103
2104         trace_i915_gem_object_change_domain(obj,
2105                                             old_read_domains,
2106                                             old_write_domain);
2107 }
2108
2109 /**
2110  * Unbinds an object from the GTT aperture.
2111  */
2112 int
2113 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2114 {
2115         int ret = 0;
2116
2117         if (obj->gtt_space == NULL)
2118                 return 0;
2119
2120         if (obj->pin_count != 0) {
2121                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2122                 return -EINVAL;
2123         }
2124
2125         ret = i915_gem_object_finish_gpu(obj);
2126         if (ret == -ERESTARTSYS)
2127                 return ret;
2128         /* Continue on if we fail due to EIO: the GPU is hung, so
2129          * continuing should be safe, and we need to clean up or else
2130          * we might cause memory corruption through use-after-free.
2131          */
2132
2133         i915_gem_object_finish_gtt(obj);
2134
2135         /* Move the object to the CPU domain to ensure that
2136          * any possible CPU writes while it's not in the GTT
2137          * are flushed when we go to remap it.
2138          */
2139         if (ret == 0)
2140                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2141         if (ret == -ERESTARTSYS)
2142                 return ret;
2143         if (ret) {
2144                 /* In the event of a disaster, abandon all caches and
2145                  * hope for the best.
2146                  */
2147                 i915_gem_clflush_object(obj);
2148                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2149         }
2150
2151         /* release the fence reg _after_ flushing */
2152         ret = i915_gem_object_put_fence(obj);
2153         if (ret == -ERESTARTSYS)
2154                 return ret;
2155
2156         trace_i915_gem_object_unbind(obj);
2157
2158         i915_gem_gtt_unbind_object(obj);
2159         i915_gem_object_put_pages_gtt(obj);
2160
2161         list_del_init(&obj->gtt_list);
2162         list_del_init(&obj->mm_list);
2163         /* Avoid an unnecessary call to unbind on rebind. */
2164         obj->map_and_fenceable = true;
2165
2166         drm_mm_put_block(obj->gtt_space);
2167         obj->gtt_space = NULL;
2168         obj->gtt_offset = 0;
2169
2170         if (i915_gem_object_is_purgeable(obj))
2171                 i915_gem_object_truncate(obj);
2172
2173         return ret;
2174 }
2175
2176 int
2177 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2178                     uint32_t invalidate_domains,
2179                     uint32_t flush_domains)
2180 {
2181         int ret;
2182
2183         if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2184                 return 0;
2185
2186         trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2187
2188         ret = ring->flush(ring, invalidate_domains, flush_domains);
2189         if (ret)
2190                 return ret;
2191
2192         if (flush_domains & I915_GEM_GPU_DOMAINS)
2193                 i915_gem_process_flushing_list(ring, flush_domains);
2194
2195         return 0;
2196 }
2197
2198 static int i915_ring_idle(struct intel_ring_buffer *ring)
2199 {
2200         int ret;
2201
2202         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2203                 return 0;
2204
2205         if (!list_empty(&ring->gpu_write_list)) {
2206                 ret = i915_gem_flush_ring(ring,
2207                                     I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2208                 if (ret)
2209                         return ret;
2210         }
2211
2212         return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2213 }
2214
2215 int
2216 i915_gpu_idle(struct drm_device *dev)
2217 {
2218         drm_i915_private_t *dev_priv = dev->dev_private;
2219         int ret, i;
2220
2221         /* Flush everything onto the inactive list. */
2222         for (i = 0; i < I915_NUM_RINGS; i++) {
2223                 ret = i915_ring_idle(&dev_priv->ring[i]);
2224                 if (ret)
2225                         return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2232                                        struct intel_ring_buffer *pipelined)
2233 {
2234         struct drm_device *dev = obj->base.dev;
2235         drm_i915_private_t *dev_priv = dev->dev_private;
2236         u32 size = obj->gtt_space->size;
2237         int regnum = obj->fence_reg;
2238         uint64_t val;
2239
2240         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2241                          0xfffff000) << 32;
2242         val |= obj->gtt_offset & 0xfffff000;
2243         val |= (uint64_t)((obj->stride / 128) - 1) <<
2244                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2245
2246         if (obj->tiling_mode == I915_TILING_Y)
2247                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2248         val |= I965_FENCE_REG_VALID;
2249
2250         if (pipelined) {
2251                 int ret = intel_ring_begin(pipelined, 6);
2252                 if (ret)
2253                         return ret;
2254
2255                 intel_ring_emit(pipelined, MI_NOOP);
2256                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2257                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2258                 intel_ring_emit(pipelined, (u32)val);
2259                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2260                 intel_ring_emit(pipelined, (u32)(val >> 32));
2261                 intel_ring_advance(pipelined);
2262         } else
2263                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2264
2265         return 0;
2266 }
2267
2268 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2269                                 struct intel_ring_buffer *pipelined)
2270 {
2271         struct drm_device *dev = obj->base.dev;
2272         drm_i915_private_t *dev_priv = dev->dev_private;
2273         u32 size = obj->gtt_space->size;
2274         int regnum = obj->fence_reg;
2275         uint64_t val;
2276
2277         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2278                     0xfffff000) << 32;
2279         val |= obj->gtt_offset & 0xfffff000;
2280         val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2281         if (obj->tiling_mode == I915_TILING_Y)
2282                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2283         val |= I965_FENCE_REG_VALID;
2284
2285         if (pipelined) {
2286                 int ret = intel_ring_begin(pipelined, 6);
2287                 if (ret)
2288                         return ret;
2289
2290                 intel_ring_emit(pipelined, MI_NOOP);
2291                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2292                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2293                 intel_ring_emit(pipelined, (u32)val);
2294                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2295                 intel_ring_emit(pipelined, (u32)(val >> 32));
2296                 intel_ring_advance(pipelined);
2297         } else
2298                 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2299
2300         return 0;
2301 }
2302
2303 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2304                                 struct intel_ring_buffer *pipelined)
2305 {
2306         struct drm_device *dev = obj->base.dev;
2307         drm_i915_private_t *dev_priv = dev->dev_private;
2308         u32 size = obj->gtt_space->size;
2309         u32 fence_reg, val, pitch_val;
2310         int tile_width;
2311
2312         if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2313                  (size & -size) != size ||
2314                  (obj->gtt_offset & (size - 1)),
2315                  "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2316                  obj->gtt_offset, obj->map_and_fenceable, size))
2317                 return -EINVAL;
2318
2319         if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2320                 tile_width = 128;
2321         else
2322                 tile_width = 512;
2323
2324         /* Note: the pitch must be a power-of-two number of tile widths */
2325         pitch_val = obj->stride / tile_width;
2326         pitch_val = ffs(pitch_val) - 1;
2327
2328         val = obj->gtt_offset;
2329         if (obj->tiling_mode == I915_TILING_Y)
2330                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2331         val |= I915_FENCE_SIZE_BITS(size);
2332         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2333         val |= I830_FENCE_REG_VALID;
2334
2335         fence_reg = obj->fence_reg;
2336         if (fence_reg < 8)
2337                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2338         else
2339                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2340
2341         if (pipelined) {
2342                 int ret = intel_ring_begin(pipelined, 4);
2343                 if (ret)
2344                         return ret;
2345
2346                 intel_ring_emit(pipelined, MI_NOOP);
2347                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2348                 intel_ring_emit(pipelined, fence_reg);
2349                 intel_ring_emit(pipelined, val);
2350                 intel_ring_advance(pipelined);
2351         } else
2352                 I915_WRITE(fence_reg, val);
2353
2354         return 0;
2355 }
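
/*
 * Worked example for the pitch encoding above: with 512-byte X tiles on
 * gen3, a stride of 2048 bytes is 4 tile widths, so
 * pitch_val = ffs(4) - 1 = 2 is what ends up shifted into
 * I830_FENCE_PITCH_SHIFT.
 */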
2356
2357 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2358                                 struct intel_ring_buffer *pipelined)
2359 {
2360         struct drm_device *dev = obj->base.dev;
2361         drm_i915_private_t *dev_priv = dev->dev_private;
2362         u32 size = obj->gtt_space->size;
2363         int regnum = obj->fence_reg;
2364         uint32_t val;
2365         uint32_t pitch_val;
2366
2367         if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2368                  (size & -size) != size ||
2369                  (obj->gtt_offset & (size - 1)),
2370                  "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2371                  obj->gtt_offset, size))
2372                 return -EINVAL;
2373
2374         pitch_val = obj->stride / 128;
2375         pitch_val = ffs(pitch_val) - 1;
2376
2377         val = obj->gtt_offset;
2378         if (obj->tiling_mode == I915_TILING_Y)
2379                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2380         val |= I830_FENCE_SIZE_BITS(size);
2381         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2382         val |= I830_FENCE_REG_VALID;
2383
2384         if (pipelined) {
2385                 int ret = intel_ring_begin(pipelined, 4);
2386                 if (ret)
2387                         return ret;
2388
2389                 intel_ring_emit(pipelined, MI_NOOP);
2390                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2391                 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2392                 intel_ring_emit(pipelined, val);
2393                 intel_ring_advance(pipelined);
2394         } else
2395                 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2396
2397         return 0;
2398 }
2399
2400 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2401 {
2402         return i915_seqno_passed(ring->get_seqno(ring), seqno);
2403 }
2404
2405 static int
2406 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2407                             struct intel_ring_buffer *pipelined)
2408 {
2409         int ret;
2410
2411         if (obj->fenced_gpu_access) {
2412                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2413                         ret = i915_gem_flush_ring(obj->last_fenced_ring,
2414                                                   0, obj->base.write_domain);
2415                         if (ret)
2416                                 return ret;
2417                 }
2418
2419                 obj->fenced_gpu_access = false;
2420         }
2421
2422         if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2423                 if (!ring_passed_seqno(obj->last_fenced_ring,
2424                                        obj->last_fenced_seqno)) {
2425                         ret = i915_wait_request(obj->last_fenced_ring,
2426                                                 obj->last_fenced_seqno);
2427                         if (ret)
2428                                 return ret;
2429                 }
2430
2431                 obj->last_fenced_seqno = 0;
2432                 obj->last_fenced_ring = NULL;
2433         }
2434
2435         /* Ensure that all CPU reads are completed before installing a fence
2436          * and all writes before removing the fence.
2437          */
2438         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2439                 mb();
2440
2441         return 0;
2442 }
2443
2444 int
2445 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2446 {
2447         int ret;
2448
2449         if (obj->tiling_mode)
2450                 i915_gem_release_mmap(obj);
2451
2452         ret = i915_gem_object_flush_fence(obj, NULL);
2453         if (ret)
2454                 return ret;
2455
2456         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2457                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2458                 i915_gem_clear_fence_reg(obj->base.dev,
2459                                          &dev_priv->fence_regs[obj->fence_reg]);
2460
2461                 obj->fence_reg = I915_FENCE_REG_NONE;
2462         }
2463
2464         return 0;
2465 }
2466
2467 static struct drm_i915_fence_reg *
2468 i915_find_fence_reg(struct drm_device *dev,
2469                     struct intel_ring_buffer *pipelined)
2470 {
2471         struct drm_i915_private *dev_priv = dev->dev_private;
2472         struct drm_i915_fence_reg *reg, *first, *avail;
2473         int i;
2474
2475         /* First try to find a free reg */
2476         avail = NULL;
2477         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2478                 reg = &dev_priv->fence_regs[i];
2479                 if (!reg->obj)
2480                         return reg;
2481
2482                 if (!reg->obj->pin_count)
2483                         avail = reg;
2484         }
2485
2486         if (avail == NULL)
2487                 return NULL;
2488
2489         /* None available, try to steal one or wait for a user to finish */
2490         avail = first = NULL;
2491         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2492                 if (reg->obj->pin_count)
2493                         continue;
2494
2495                 if (first == NULL)
2496                         first = reg;
2497
2498                 if (!pipelined ||
2499                     !reg->obj->last_fenced_ring ||
2500                     reg->obj->last_fenced_ring == pipelined) {
2501                         avail = reg;
2502                         break;
2503                 }
2504         }
2505
2506         if (avail == NULL)
2507                 avail = first;
2508
2509         return avail;
2510 }
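
/*
 * Selection policy above: a completely unused register wins outright;
 * otherwise an unpinned entry on the fence LRU is stolen, preferring one
 * whose last user was NULL or the requested ring so the change can stay
 * pipelined, and falling back to the oldest unpinned entry if none matches.
 */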
2511
2512 /**
2513  * i915_gem_object_get_fence - set up a fence reg for an object
2514  * @obj: object to map through a fence reg
2515  * @pipelined: ring on which to queue the change, or NULL for CPU access
2516  *             (pipelining is currently disabled below, so fence updates always go out via MMIO)
2517  *
2518  * When mapping objects through the GTT, userspace wants to be able to write
2519  * to them without having to worry about swizzling if the object is tiled.
2520  *
2521  * This function walks the fence regs looking for a free one for @obj,
2522  * stealing one if it can't find any.
2523  *
2524  * It then sets up the reg based on the object's properties: address, pitch
2525  * and tiling format.
2526  */
2527 int
2528 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2529                           struct intel_ring_buffer *pipelined)
2530 {
2531         struct drm_device *dev = obj->base.dev;
2532         struct drm_i915_private *dev_priv = dev->dev_private;
2533         struct drm_i915_fence_reg *reg;
2534         int ret;
2535
2536         /* XXX disable pipelining. There are bugs. Shocking. */
2537         pipelined = NULL;
2538
2539         /* Just update our place in the LRU if our fence is getting reused. */
2540         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2541                 reg = &dev_priv->fence_regs[obj->fence_reg];
2542                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2543
2544                 if (obj->tiling_changed) {
2545                         ret = i915_gem_object_flush_fence(obj, pipelined);
2546                         if (ret)
2547                                 return ret;
2548
2549                         if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2550                                 pipelined = NULL;
2551
2552                         if (pipelined) {
2553                                 reg->setup_seqno =
2554                                         i915_gem_next_request_seqno(pipelined);
2555                                 obj->last_fenced_seqno = reg->setup_seqno;
2556                                 obj->last_fenced_ring = pipelined;
2557                         }
2558
2559                         goto update;
2560                 }
2561
2562                 if (!pipelined) {
2563                         if (reg->setup_seqno) {
2564                                 if (!ring_passed_seqno(obj->last_fenced_ring,
2565                                                        reg->setup_seqno)) {
2566                                         ret = i915_wait_request(obj->last_fenced_ring,
2567                                                                 reg->setup_seqno);
2568                                         if (ret)
2569                                                 return ret;
2570                                 }
2571
2572                                 reg->setup_seqno = 0;
2573                         }
2574                 } else if (obj->last_fenced_ring &&
2575                            obj->last_fenced_ring != pipelined) {
2576                         ret = i915_gem_object_flush_fence(obj, pipelined);
2577                         if (ret)
2578                                 return ret;
2579                 }
2580
2581                 return 0;
2582         }
2583
2584         reg = i915_find_fence_reg(dev, pipelined);
2585         if (reg == NULL)
2586                 return -ENOSPC;
2587
2588         ret = i915_gem_object_flush_fence(obj, pipelined);
2589         if (ret)
2590                 return ret;
2591
2592         if (reg->obj) {
2593                 struct drm_i915_gem_object *old = reg->obj;
2594
2595                 drm_gem_object_reference(&old->base);
2596
2597                 if (old->tiling_mode)
2598                         i915_gem_release_mmap(old);
2599
2600                 ret = i915_gem_object_flush_fence(old, pipelined);
2601                 if (ret) {
2602                         drm_gem_object_unreference(&old->base);
2603                         return ret;
2604                 }
2605
2606                 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2607                         pipelined = NULL;
2608
2609                 old->fence_reg = I915_FENCE_REG_NONE;
2610                 old->last_fenced_ring = pipelined;
2611                 old->last_fenced_seqno =
2612                         pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2613
2614                 drm_gem_object_unreference(&old->base);
2615         } else if (obj->last_fenced_seqno == 0)
2616                 pipelined = NULL;
2617
2618         reg->obj = obj;
2619         list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2620         obj->fence_reg = reg - dev_priv->fence_regs;
2621         obj->last_fenced_ring = pipelined;
2622
2623         reg->setup_seqno =
2624                 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2625         obj->last_fenced_seqno = reg->setup_seqno;
2626
2627 update:
2628         obj->tiling_changed = false;
2629         switch (INTEL_INFO(dev)->gen) {
2630         case 7:
2631         case 6:
2632                 ret = sandybridge_write_fence_reg(obj, pipelined);
2633                 break;
2634         case 5:
2635         case 4:
2636                 ret = i965_write_fence_reg(obj, pipelined);
2637                 break;
2638         case 3:
2639                 ret = i915_write_fence_reg(obj, pipelined);
2640                 break;
2641         case 2:
2642                 ret = i830_write_fence_reg(obj, pipelined);
2643                 break;
2644         }
2645
2646         return ret;
2647 }
2648
2649 /**
2650  * i915_gem_clear_fence_reg - clear out fence register info
2651  * @dev: DRM device
2652  * @reg: fence register to clear
2653  * Zeroes out the fence register itself and clears out the associated
2654  * data structures in dev_priv and obj.
2655  */
2656 static void
2657 i915_gem_clear_fence_reg(struct drm_device *dev,
2658                          struct drm_i915_fence_reg *reg)
2659 {
2660         drm_i915_private_t *dev_priv = dev->dev_private;
2661         uint32_t fence_reg = reg - dev_priv->fence_regs;
2662
2663         switch (INTEL_INFO(dev)->gen) {
2664         case 7:
2665         case 6:
2666                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2667                 break;
2668         case 5:
2669         case 4:
2670                 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2671                 break;
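        /*
         * Note the unusual construct below: the "case 2" label sits inside
         * the else branch of the gen3 register-offset check, so gen2 falls
         * straight into the FENCE_REG_830_0 calculation and both
         * generations share the single I915_WRITE()/break at the end.
         */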
2672         case 3:
2673                 if (fence_reg >= 8)
2674                         fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2675                 else
2676         case 2:
2677                         fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2678
2679                 I915_WRITE(fence_reg, 0);
2680                 break;
2681         }
2682
2683         list_del_init(&reg->lru_list);
2684         reg->obj = NULL;
2685         reg->setup_seqno = 0;
2686 }
2687
2688 /**
2689  * Finds free space in the GTT aperture and binds the object there.
2690  */
2691 static int
2692 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2693                             unsigned alignment,
2694                             bool map_and_fenceable)
2695 {
2696         struct drm_device *dev = obj->base.dev;
2697         drm_i915_private_t *dev_priv = dev->dev_private;
2698         struct drm_mm_node *free_space;
2699         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2700         u32 size, fence_size, fence_alignment, unfenced_alignment;
2701         bool mappable, fenceable;
2702         int ret;
2703
2704         if (obj->madv != I915_MADV_WILLNEED) {
2705                 DRM_ERROR("Attempting to bind a purgeable object\n");
2706                 return -EINVAL;
2707         }
2708
2709         fence_size = i915_gem_get_gtt_size(dev,
2710                                            obj->base.size,
2711                                            obj->tiling_mode);
2712         fence_alignment = i915_gem_get_gtt_alignment(dev,
2713                                                      obj->base.size,
2714                                                      obj->tiling_mode);
2715         unfenced_alignment =
2716                 i915_gem_get_unfenced_gtt_alignment(dev,
2717                                                     obj->base.size,
2718                                                     obj->tiling_mode);
2719
2720         if (alignment == 0)
2721                 alignment = map_and_fenceable ? fence_alignment :
2722                                                 unfenced_alignment;
2723         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2724                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2725                 return -EINVAL;
2726         }
2727
2728         size = map_and_fenceable ? fence_size : obj->base.size;
2729
2730         /* If the object is bigger than the entire aperture, reject it early
2731          * before evicting everything in a vain attempt to find space.
2732          */
2733         if (obj->base.size >
2734             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2735                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2736                 return -E2BIG;
2737         }
2738
2739  search_free:
2740         if (map_and_fenceable)
2741                 free_space =
2742                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2743                                                     size, alignment, 0,
2744                                                     dev_priv->mm.gtt_mappable_end,
2745                                                     0);
2746         else
2747                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2748                                                 size, alignment, 0);
2749
2750         if (free_space != NULL) {
2751                 if (map_and_fenceable)
2752                         obj->gtt_space =
2753                                 drm_mm_get_block_range_generic(free_space,
2754                                                                size, alignment, 0,
2755                                                                dev_priv->mm.gtt_mappable_end,
2756                                                                0);
2757                 else
2758                         obj->gtt_space =
2759                                 drm_mm_get_block(free_space, size, alignment);
2760         }
2761         if (obj->gtt_space == NULL) {
2762                 /* If the gtt is empty and we're still having trouble
2763                  * fitting our object in, we're out of memory.
2764                  */
2765                 ret = i915_gem_evict_something(dev, size, alignment,
2766                                                map_and_fenceable);
2767                 if (ret)
2768                         return ret;
2769
2770                 goto search_free;
2771         }
2772
2773         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2774         if (ret) {
2775                 drm_mm_put_block(obj->gtt_space);
2776                 obj->gtt_space = NULL;
2777
2778                 if (ret == -ENOMEM) {
2779                         /* first try to reclaim some memory by clearing the GTT */
2780                         ret = i915_gem_evict_everything(dev, false);
2781                         if (ret) {
2782                                 /* now try to shrink everyone else */
2783                                 if (gfpmask) {
2784                                         gfpmask = 0;
2785                                         goto search_free;
2786                                 }
2787
2788                                 return -ENOMEM;
2789                         }
2790
2791                         goto search_free;
2792                 }
2793
2794                 return ret;
2795         }
2796
2797         ret = i915_gem_gtt_bind_object(obj);
2798         if (ret) {
2799                 i915_gem_object_put_pages_gtt(obj);
2800                 drm_mm_put_block(obj->gtt_space);
2801                 obj->gtt_space = NULL;
2802
2803                 if (i915_gem_evict_everything(dev, false))
2804                         return ret;
2805
2806                 goto search_free;
2807         }
2808
2809         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2810         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2811
2812         /* Assert that the object is not currently in any GPU domain. As it
2813          * wasn't in the GTT, there shouldn't be any way it could have been in
2814          * a GPU cache.
2815          */
2816         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2817         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2818
2819         obj->gtt_offset = obj->gtt_space->start;
2820
2821         fenceable =
2822                 obj->gtt_space->size == fence_size &&
2823                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2824
2825         mappable =
2826                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2827
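        /*
         * map_and_fenceable thus means the object both fits inside the
         * CPU-mappable part of the aperture and is placed and sized such
         * that a fence register could cover it.
         */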
2828         obj->map_and_fenceable = mappable && fenceable;
2829
2830         trace_i915_gem_object_bind(obj, map_and_fenceable);
2831         return 0;
2832 }
2833
2834 void
2835 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2836 {
2837         /* If we don't have a page list set up, then we're not pinned
2838          * to GPU, and we can ignore the cache flush because it'll happen
2839          * again at bind time.
2840          */
2841         if (obj->pages == NULL)
2842                 return;
2843
2844         /* If the GPU is snooping the contents of the CPU cache,
2845          * we do not need to manually clear the CPU cache lines.  However,
2846          * the caches are only snooped when the render cache is
2847          * flushed/invalidated.  As we always have to emit invalidations
2848          * and flushes when moving into and out of the RENDER domain, correct
2849          * snooping behaviour occurs naturally as the result of our domain
2850          * tracking.
2851          */
2852         if (obj->cache_level != I915_CACHE_NONE)
2853                 return;
2854
2855         trace_i915_gem_object_clflush(obj);
2856
2857         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2858 }
2859
2860 /** Flushes any GPU write domain for the object if it's dirty. */
2861 static int
2862 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2863 {
2864         if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2865                 return 0;
2866
2867         /* Queue the GPU write cache flushing we need. */
2868         return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2869 }
2870
2871 /** Flushes the GTT write domain for the object if it's dirty. */
2872 static void
2873 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2874 {
2875         uint32_t old_write_domain;
2876
2877         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2878                 return;
2879
2880         /* No actual flushing is required for the GTT write domain.  Writes
2881          * to it immediately go to main memory as far as we know, so there's
2882          * no chipset flush.  It also doesn't land in render cache.
2883          *
2884          * However, we do have to enforce the order so that all writes through
2885          * the GTT land before any writes to the device, such as updates to
2886          * the GATT itself.
2887          */
2888         wmb();
2889
2890         old_write_domain = obj->base.write_domain;
2891         obj->base.write_domain = 0;
2892
2893         trace_i915_gem_object_change_domain(obj,
2894                                             obj->base.read_domains,
2895                                             old_write_domain);
2896 }
2897
2898 /** Flushes the CPU write domain for the object if it's dirty. */
2899 static void
2900 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2901 {
2902         uint32_t old_write_domain;
2903
2904         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2905                 return;
2906
2907         i915_gem_clflush_object(obj);
2908         intel_gtt_chipset_flush();
2909         old_write_domain = obj->base.write_domain;
2910         obj->base.write_domain = 0;
2911
2912         trace_i915_gem_object_change_domain(obj,
2913                                             obj->base.read_domains,
2914                                             old_write_domain);
2915 }
2916
2917 /**
2918  * Moves a single object to the GTT read, and possibly write domain.
2919  *
2920  * This function returns when the move is complete, including waiting on
2921  * flushes to occur.
2922  */
2923 int
2924 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2925 {
2926         uint32_t old_write_domain, old_read_domains;
2927         int ret;
2928
2929         /* Not valid to be called on unbound objects. */
2930         if (obj->gtt_space == NULL)
2931                 return -EINVAL;
2932
2933         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2934                 return 0;
2935
2936         ret = i915_gem_object_flush_gpu_write_domain(obj);
2937         if (ret)
2938                 return ret;
2939
2940         if (obj->pending_gpu_write || write) {
2941                 ret = i915_gem_object_wait_rendering(obj);
2942                 if (ret)
2943                         return ret;
2944         }
2945
2946         i915_gem_object_flush_cpu_write_domain(obj);
2947
2948         old_write_domain = obj->base.write_domain;
2949         old_read_domains = obj->base.read_domains;
2950
2951         /* It should now be out of any other write domains, and we can update
2952          * the domain values for our changes.
2953          */
2954         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2955         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2956         if (write) {
2957                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2958                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2959                 obj->dirty = 1;
2960         }
2961
2962         trace_i915_gem_object_change_domain(obj,
2963                                             old_read_domains,
2964                                             old_write_domain);
2965
2966         return 0;
2967 }
2968
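/*
 * Change the caching attributes (PTE cache level) of an object.  A bound
 * object must first be idled on the GPU, flushed out of the GTT domain and,
 * prior to SandyBridge, released from any fence register (tiling/fencing
 * does not work with snooped memory) before being rebound with the new
 * level.  Switching to uncached also moves the object to the CPU domain,
 * since the LLC-cached path skips the clflush tracking.
 */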
2969 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2970                                     enum i915_cache_level cache_level)
2971 {
2972         int ret;
2973
2974         if (obj->cache_level == cache_level)
2975                 return 0;
2976
2977         if (obj->pin_count) {
2978                 DRM_DEBUG("can not change the cache level of pinned objects\n");
2979                 return -EBUSY;
2980         }
2981
2982         if (obj->gtt_space) {
2983                 ret = i915_gem_object_finish_gpu(obj);
2984                 if (ret)
2985                         return ret;
2986
2987                 i915_gem_object_finish_gtt(obj);
2988
2989                 /* Before SandyBridge, you could not use tiling or fence
2990                  * registers with snooped memory, so relinquish any fences
2991                  * currently pointing to our region in the aperture.
2992                  */
2993                 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2994                         ret = i915_gem_object_put_fence(obj);
2995                         if (ret)
2996                                 return ret;
2997                 }
2998
2999                 i915_gem_gtt_rebind_object(obj, cache_level);
3000         }
3001
3002         if (cache_level == I915_CACHE_NONE) {
3003                 u32 old_read_domains, old_write_domain;
3004
3005                 /* If we're coming from LLC cached, then we haven't
3006                  * actually been tracking whether the data is in the
3007                  * CPU cache or not, since we only allow one bit set
3008                  * in obj->write_domain and have been skipping the clflushes.
3009                  * Just set it to the CPU cache for now.
3010                  */
3011                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3012                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3013
3014                 old_read_domains = obj->base.read_domains;
3015                 old_write_domain = obj->base.write_domain;
3016
3017                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3018                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3019
3020                 trace_i915_gem_object_change_domain(obj,
3021                                                     old_read_domains,
3022                                                     old_write_domain);
3023         }
3024
3025         obj->cache_level = cache_level;
3026         return 0;
3027 }
3028
3029 /*
3030  * Prepare buffer for display plane (scanout, cursors, etc).
3031  * Can be called from an uninterruptible phase (modesetting) and allows
3032  * any flushes to be pipelined (for pageflips).
3033  *
3034  * For the display plane, we want to be in the GTT but out of any write
3035  * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3036  * ability to pipeline the waits, pinning and any additional subtleties
3037  * that may differentiate the display plane from ordinary buffers.
3038  */
3039 int
3040 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3041                                      u32 alignment,
3042                                      struct intel_ring_buffer *pipelined)
3043 {
3044         u32 old_read_domains, old_write_domain;
3045         int ret;
3046
3047         ret = i915_gem_object_flush_gpu_write_domain(obj);
3048         if (ret)
3049                 return ret;
3050
3051         if (pipelined != obj->ring) {
3052                 ret = i915_gem_object_wait_rendering(obj);
3053                 if (ret == -ERESTARTSYS)
3054                         return ret;
3055         }
3056
3057         /* The display engine is not coherent with the LLC cache on gen6.  As
3058          * a result, we make sure that the pinning that is about to occur is
3059          * done with uncached PTEs. This is the lowest common denominator for all
3060          * chipsets.
3061          *
3062          * However for gen6+, we could do better by using the GFDT bit instead
3063          * of uncaching, which would allow us to flush all the LLC-cached data
3064          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3065          */
3066         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3067         if (ret)
3068                 return ret;
3069
3070         /* As the user may map the buffer once pinned in the display plane
3071          * (e.g. libkms for the bootup splash), we have to ensure that we
3072          * always use map_and_fenceable for all scanout buffers.
3073          */
3074         ret = i915_gem_object_pin(obj, alignment, true);
3075         if (ret)
3076                 return ret;
3077
3078         i915_gem_object_flush_cpu_write_domain(obj);
3079
3080         old_write_domain = obj->base.write_domain;
3081         old_read_domains = obj->base.read_domains;
3082
3083         /* It should now be out of any other write domains, and we can update
3084          * the domain values for our changes.
3085          */
3086         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3087         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3088
3089         trace_i915_gem_object_change_domain(obj,
3090                                             old_read_domains,
3091                                             old_write_domain);
3092
3093         return 0;
3094 }
3095
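/*
 * Release the object from all GPU domains: flush any pending GPU write,
 * wait for outstanding rendering, and clear the GPU read domains so the
 * relevant caches and TLBs are invalidated on next use.
 */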
3096 int
3097 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3098 {
3099         int ret;
3100
3101         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3102                 return 0;
3103
3104         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3105                 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3106                 if (ret)
3107                         return ret;
3108         }
3109
3110         ret = i915_gem_object_wait_rendering(obj);
3111         if (ret)
3112                 return ret;
3113
3114         /* Ensure that we invalidate the GPU's caches and TLBs. */
3115         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3116         return 0;
3117 }
3118
3119 /**
3120  * Moves a single object to the CPU read, and possibly write domain.
3121  *
3122  * This function returns when the move is complete, including waiting on
3123  * flushes to occur.
3124  */
3125 static int
3126 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3127 {
3128         uint32_t old_write_domain, old_read_domains;
3129         int ret;
3130
3131         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3132                 return 0;
3133
3134         ret = i915_gem_object_flush_gpu_write_domain(obj);
3135         if (ret)
3136                 return ret;
3137
3138         ret = i915_gem_object_wait_rendering(obj);
3139         if (ret)
3140                 return ret;
3141
3142         i915_gem_object_flush_gtt_write_domain(obj);
3143
3144         /* If we have a partially-valid cache of the object in the CPU,
3145          * finish invalidating it and free the per-page flags.
3146          */
3147         i915_gem_object_set_to_full_cpu_read_domain(obj);
3148
3149         old_write_domain = obj->base.write_domain;
3150         old_read_domains = obj->base.read_domains;
3151
3152         /* Flush the CPU cache if it's still invalid. */
3153         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3154                 i915_gem_clflush_object(obj);
3155
3156                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3157         }
3158
3159         /* It should now be out of any other write domains, and we can update
3160          * the domain values for our changes.
3161          */
3162         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3163
3164         /* If we're writing through the CPU, then the GPU read domains will
3165          * need to be invalidated at next use.
3166          */
3167         if (write) {
3168                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3169                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3170         }
3171
3172         trace_i915_gem_object_change_domain(obj,
3173                                             old_read_domains,
3174                                             old_write_domain);
3175
3176         return 0;
3177 }
3178
3179 /**
3180  * Moves the object from a partially CPU read to a full one.
3181  *
3182  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3183  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3184  */
3185 static void
3186 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3187 {
3188         if (!obj->page_cpu_valid)
3189                 return;
3190
3191         /* If we're partially in the CPU read domain, finish moving it in.
3192          */
3193         if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3194                 int i;
3195
3196                 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3197                         if (obj->page_cpu_valid[i])
3198                                 continue;
3199                         drm_clflush_pages(obj->pages + i, 1);
3200                 }
3201         }
3202
3203         /* Free the page_cpu_valid mappings which are now stale, whether
3204          * or not we've got I915_GEM_DOMAIN_CPU.
3205          */
3206         kfree(obj->page_cpu_valid);
3207         obj->page_cpu_valid = NULL;
3208 }
3209
3210 /**
3211  * Set the CPU read domain on a range of the object.
3212  *
3213  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3214  * not entirely valid.  The per-page page_cpu_valid array records which
3215  * pages have been flushed, and is respected by
3216  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid
3217  * mapping of the whole object.
3218  *
3219  * This function returns when the move is complete, including waiting on
3220  * flushes to occur.
3221  */
3222 static int
3223 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3224                                           uint64_t offset, uint64_t size)
3225 {
3226         uint32_t old_read_domains;
3227         int i, ret;
3228
3229         if (offset == 0 && size == obj->base.size)
3230                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3231
3232         ret = i915_gem_object_flush_gpu_write_domain(obj);
3233         if (ret)
3234                 return ret;
3235
3236         ret = i915_gem_object_wait_rendering(obj);
3237         if (ret)
3238                 return ret;
3239
3240         i915_gem_object_flush_gtt_write_domain(obj);
3241
3242         /* If we're already fully in the CPU read domain, we're done. */
3243         if (obj->page_cpu_valid == NULL &&
3244             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3245                 return 0;
3246
3247         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3248          * newly adding I915_GEM_DOMAIN_CPU
3249          */
3250         if (obj->page_cpu_valid == NULL) {
3251                 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3252                                               GFP_KERNEL);
3253                 if (obj->page_cpu_valid == NULL)
3254                         return -ENOMEM;
3255         } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3256                 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3257
3258         /* Flush the cache on any pages that are still invalid from the CPU's
3259          * perspective.
3260          */
3261         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3262              i++) {
3263                 if (obj->page_cpu_valid[i])
3264                         continue;
3265
3266                 drm_clflush_pages(obj->pages + i, 1);
3267
3268                 obj->page_cpu_valid[i] = 1;
3269         }
3270
3271         /* It should now be out of any other write domains, and we can update
3272          * the domain values for our changes.
3273          */
3274         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3275
3276         old_read_domains = obj->base.read_domains;
3277         obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3278
3279         trace_i915_gem_object_change_domain(obj,
3280                                             old_read_domains,
3281                                             obj->base.write_domain);
3282
3283         return 0;
3284 }
3285
3286 /* Throttle our rendering by waiting until the ring has completed our requests
3287  * emitted over 20 msec ago.
3288  *
3289  * Note that if we were to use the current jiffies each time around the loop,
3290  * we wouldn't escape the function with any frames outstanding if the time to
3291  * render a frame was over 20ms.
3292  *
3293  * This should get us reasonable parallelism between CPU and GPU but also
3294  * relatively low latency when blocking on a particular request to finish.
3295  */
3296 static int
3297 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3298 {
3299         struct drm_i915_private *dev_priv = dev->dev_private;
3300         struct drm_i915_file_private *file_priv = file->driver_priv;
3301         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3302         struct drm_i915_gem_request *request;
3303         struct intel_ring_buffer *ring = NULL;
3304         u32 seqno = 0;
3305         int ret;
3306
3307         if (atomic_read(&dev_priv->mm.wedged))
3308                 return -EIO;
3309
3310         spin_lock(&file_priv->mm.lock);
3311         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3312                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3313                         break;
3314
3315                 ring = request->ring;
3316                 seqno = request->seqno;
3317         }
3318         spin_unlock(&file_priv->mm.lock);
3319
3320         if (seqno == 0)
3321                 return 0;
3322
3323         ret = 0;
3324         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3325                 /* And wait for the seqno passing without holding any locks and
3326                  * causing extra latency for others. This is safe as the irq
3327                  * generation is designed to be run atomically and so is
3328                  * lockless.
3329                  */
3330                 if (ring->irq_get(ring)) {
3331                         ret = wait_event_interruptible(ring->irq_queue,
3332                                                        i915_seqno_passed(ring->get_seqno(ring), seqno)
3333                                                        || atomic_read(&dev_priv->mm.wedged));
3334                         ring->irq_put(ring);
3335
3336                         if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3337                                 ret = -EIO;
3338                 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
3339                                                       seqno) ||
3340                                     atomic_read(&dev_priv->mm.wedged), 3000)) {
3341                         ret = -EBUSY;
3342                 }
3343         }
3344
3345         if (ret == 0)
3346                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3347
3348         return ret;
3349 }
3350
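/*
 * Pin an object into the GTT.  If it is already bound but violates the
 * requested alignment or map-and-fenceable constraints it is unbound and
 * rebound first; the first pin also moves an inactive object onto the
 * pinned list so the eviction code will not touch it.
 */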
3351 int
3352 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3353                     uint32_t alignment,
3354                     bool map_and_fenceable)
3355 {
3356         struct drm_device *dev = obj->base.dev;
3357         struct drm_i915_private *dev_priv = dev->dev_private;
3358         int ret;
3359
3360         BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3361         WARN_ON(i915_verify_lists(dev));
3362
3363         if (obj->gtt_space != NULL) {
3364                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3365                     (map_and_fenceable && !obj->map_and_fenceable)) {
3366                         WARN(obj->pin_count,
3367                              "bo is already pinned with incorrect alignment:"
3368                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3369                              " obj->map_and_fenceable=%d\n",
3370                              obj->gtt_offset, alignment,
3371                              map_and_fenceable,
3372                              obj->map_and_fenceable);
3373                         ret = i915_gem_object_unbind(obj);
3374                         if (ret)
3375                                 return ret;
3376                 }
3377         }
3378
3379         if (obj->gtt_space == NULL) {
3380                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3381                                                   map_and_fenceable);
3382                 if (ret)
3383                         return ret;
3384         }
3385
3386         if (obj->pin_count++ == 0) {
3387                 if (!obj->active)
3388                         list_move_tail(&obj->mm_list,
3389                                        &dev_priv->mm.pinned_list);
3390         }
3391         obj->pin_mappable |= map_and_fenceable;
3392
3393         WARN_ON(i915_verify_lists(dev));
3394         return 0;
3395 }
3396
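/*
 * Drop one pin reference.  When the last reference goes, an inactive
 * object is put back on the inactive list and becomes evictable again.
 */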
3397 void
3398 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3399 {
3400         struct drm_device *dev = obj->base.dev;
3401         drm_i915_private_t *dev_priv = dev->dev_private;
3402
3403         WARN_ON(i915_verify_lists(dev));
3404         BUG_ON(obj->pin_count == 0);
3405         BUG_ON(obj->gtt_space == NULL);
3406
3407         if (--obj->pin_count == 0) {
3408                 if (!obj->active)
3409                         list_move_tail(&obj->mm_list,
3410                                        &dev_priv->mm.inactive_list);
3411                 obj->pin_mappable = false;
3412         }
3413         WARN_ON(i915_verify_lists(dev));
3414 }
3415
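/*
 * Userspace pin interface (legacy, for the X server): takes a per-file
 * pin count on the object and reports its GTT offset.  The matching
 * unpin ioctl below drops that count and releases the kernel pin once
 * it reaches zero.
 */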
3416 int
3417 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3418                    struct drm_file *file)
3419 {
3420         struct drm_i915_gem_pin *args = data;
3421         struct drm_i915_gem_object *obj;
3422         int ret;
3423
3424         ret = i915_mutex_lock_interruptible(dev);
3425         if (ret)
3426                 return ret;
3427
3428         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3429         if (&obj->base == NULL) {
3430                 ret = -ENOENT;
3431                 goto unlock;
3432         }
3433
3434         if (obj->madv != I915_MADV_WILLNEED) {
3435                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3436                 ret = -EINVAL;
3437                 goto out;
3438         }
3439
3440         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3441                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3442                           args->handle);
3443                 ret = -EINVAL;
3444                 goto out;
3445         }
3446
3447         obj->user_pin_count++;
3448         obj->pin_filp = file;
3449         if (obj->user_pin_count == 1) {
3450                 ret = i915_gem_object_pin(obj, args->alignment, true);
3451                 if (ret)
3452                         goto out;
3453         }
3454
3455         /* XXX - flush the CPU caches for pinned objects
3456          * as the X server doesn't manage domains yet
3457          */
3458         i915_gem_object_flush_cpu_write_domain(obj);
3459         args->offset = obj->gtt_offset;
3460 out:
3461         drm_gem_object_unreference(&obj->base);
3462 unlock:
3463         mutex_unlock(&dev->struct_mutex);
3464         return ret;
3465 }
3466
3467 int
3468 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3469                      struct drm_file *file)
3470 {
3471         struct drm_i915_gem_pin *args = data;
3472         struct drm_i915_gem_object *obj;
3473         int ret;
3474
3475         ret = i915_mutex_lock_interruptible(dev);
3476         if (ret)
3477                 return ret;
3478
3479         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3480         if (&obj->base == NULL) {
3481                 ret = -ENOENT;
3482                 goto unlock;
3483         }
3484
3485         if (obj->pin_filp != file) {
3486                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3487                           args->handle);
3488                 ret = -EINVAL;
3489                 goto out;
3490         }
3491         obj->user_pin_count--;
3492         if (obj->user_pin_count == 0) {
3493                 obj->pin_filp = NULL;
3494                 i915_gem_object_unpin(obj);
3495         }
3496
3497 out:
3498         drm_gem_object_unreference(&obj->base);
3499 unlock:
3500         mutex_unlock(&dev->struct_mutex);
3501         return ret;
3502 }
3503
3504 int
3505 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3506                     struct drm_file *file)
3507 {
3508         struct drm_i915_gem_busy *args = data;
3509         struct drm_i915_gem_object *obj;
3510         int ret;
3511
3512         ret = i915_mutex_lock_interruptible(dev);
3513         if (ret)
3514                 return ret;
3515
3516         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3517         if (&obj->base == NULL) {
3518                 ret = -ENOENT;
3519                 goto unlock;
3520         }
3521
3522         /* Count all active objects as busy, even if they are currently not used
3523          * by the gpu. Users of this interface expect objects to eventually
3524          * become non-busy without any further actions, therefore emit any
3525          * necessary flushes here.
3526          */
3527         args->busy = obj->active;
3528         if (args->busy) {
3529                 /* Unconditionally flush objects, even when the gpu still uses this
3530                  * object. Userspace calling this function indicates that it wants to
3531                  * use this buffer sooner rather than later, so issuing the required
3532                  * flush earlier is beneficial.
3533                  */
3534                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3535                         ret = i915_gem_flush_ring(obj->ring,
3536                                                   0, obj->base.write_domain);
3537                 } else if (obj->ring->outstanding_lazy_request ==
3538                            obj->last_rendering_seqno) {
3539                         struct drm_i915_gem_request *request;
3540
3541                         /* The object's last use is the ring's not-yet-emitted
3542                          * lazy request; nothing queued will retire it, so emit one.
3543                          */
3544                         request = kzalloc(sizeof(*request), GFP_KERNEL);
3545                         if (request) {
3546                                 ret = i915_add_request(obj->ring, NULL, request);
3547                                 if (ret)
3548                                         kfree(request);
3549                         } else
3550                                 ret = -ENOMEM;
3551                 }
3552
3553                 /* Update the active list for the hardware's current position.
3554                  * Otherwise this only updates on a delayed timer or when irqs
3555                  * are actually unmasked, and our working set ends up being
3556                  * larger than required.
3557                  */
3558                 i915_gem_retire_requests_ring(obj->ring);
3559
3560                 args->busy = obj->active;
3561         }
3562
3563         drm_gem_object_unreference(&obj->base);
3564 unlock:
3565         mutex_unlock(&dev->struct_mutex);
3566         return ret;
3567 }
3568
3569 int
3570 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3571                         struct drm_file *file_priv)
3572 {
3573         return i915_gem_ring_throttle(dev, file_priv);
3574 }
3575
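/*
 * Let userspace mark an object as needed (WILLNEED) or discardable
 * (DONTNEED).  A discardable object that is no longer bound has its
 * backing storage truncated straight away; args->retained tells the
 * caller whether the pages still exist.
 */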
3576 int
3577 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3578                        struct drm_file *file_priv)
3579 {
3580         struct drm_i915_gem_madvise *args = data;
3581         struct drm_i915_gem_object *obj;
3582         int ret;
3583
3584         switch (args->madv) {
3585         case I915_MADV_DONTNEED:
3586         case I915_MADV_WILLNEED:
3587                 break;
3588         default:
3589                 return -EINVAL;
3590         }
3591
3592         ret = i915_mutex_lock_interruptible(dev);
3593         if (ret)
3594                 return ret;
3595
3596         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3597         if (&obj->base == NULL) {
3598                 ret = -ENOENT;
3599                 goto unlock;
3600         }
3601
3602         if (obj->pin_count) {
3603                 ret = -EINVAL;
3604                 goto out;
3605         }
3606
3607         if (obj->madv != __I915_MADV_PURGED)
3608                 obj->madv = args->madv;
3609
3610         /* if the object is no longer bound, discard its backing storage */
3611         if (i915_gem_object_is_purgeable(obj) &&
3612             obj->gtt_space == NULL)
3613                 i915_gem_object_truncate(obj);
3614
3615         args->retained = obj->madv != __I915_MADV_PURGED;
3616
3617 out:
3618         drm_gem_object_unreference(&obj->base);
3619 unlock:
3620         mutex_unlock(&dev->struct_mutex);
3621         return ret;
3622 }
3623
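/*
 * Allocate a new GEM object of the given size, backed by shmemfs.  Fresh
 * objects start in the CPU domain and, on Gen6/7, default to LLC caching
 * so that most accesses need no explicit clflush.
 */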
3624 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3625                                                   size_t size)
3626 {
3627         struct drm_i915_private *dev_priv = dev->dev_private;
3628         struct drm_i915_gem_object *obj;
3629         struct address_space *mapping;
3630
3631         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3632         if (obj == NULL)
3633                 return NULL;
3634
3635         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3636                 kfree(obj);
3637                 return NULL;
3638         }
3639
3640         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3641         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3642
3643         i915_gem_info_add_obj(dev_priv, size);
3644
3645         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3646         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3647
3648         if (IS_GEN6(dev) || IS_GEN7(dev)) {
3649                 /* On Gen6, we can have the GPU use the LLC (the CPU
3650                  * cache) for about a 10% performance improvement
3651                  * compared to uncached.  Graphics requests other than
3652                  * display scanout are coherent with the CPU in
3653                  * accessing this cache.  This means in this mode we
3654                  * don't need to clflush on the CPU side, and on the
3655                  * GPU side we only need to flush internal caches to
3656                  * get data visible to the CPU.
3657                  *
3658                  * However, we maintain the display planes as UC, and so
3659                  * need to rebind when first used as such.
3660                  */
3661                 obj->cache_level = I915_CACHE_LLC;
3662         } else
3663                 obj->cache_level = I915_CACHE_NONE;
3664
3665         obj->base.driver_private = NULL;
3666         obj->fence_reg = I915_FENCE_REG_NONE;
3667         INIT_LIST_HEAD(&obj->mm_list);
3668         INIT_LIST_HEAD(&obj->gtt_list);
3669         INIT_LIST_HEAD(&obj->ring_list);
3670         INIT_LIST_HEAD(&obj->exec_list);
3671         INIT_LIST_HEAD(&obj->gpu_write_list);
3672         obj->madv = I915_MADV_WILLNEED;
3673         /* Avoid an unnecessary call to unbind on the first bind. */
3674         obj->map_and_fenceable = true;
3675
3676         return obj;
3677 }
3678
3679 int i915_gem_init_object(struct drm_gem_object *obj)
3680 {
3681         BUG();
3682
3683         return 0;
3684 }
3685
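/*
 * Final stage of object destruction.  If the unbind is interrupted by a
 * signal, the object is parked on the deferred_free_list and freed later
 * from the request-retirement path instead.
 */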
3686 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3687 {
3688         struct drm_device *dev = obj->base.dev;
3689         drm_i915_private_t *dev_priv = dev->dev_private;
3690         int ret;
3691
3692         ret = i915_gem_object_unbind(obj);
3693         if (ret == -ERESTARTSYS) {
3694                 list_move(&obj->mm_list,
3695                           &dev_priv->mm.deferred_free_list);
3696                 return;
3697         }
3698
3699         trace_i915_gem_object_destroy(obj);
3700
3701         if (obj->base.map_list.map)
3702                 drm_gem_free_mmap_offset(&obj->base);
3703
3704         drm_gem_object_release(&obj->base);
3705         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3706
3707         kfree(obj->page_cpu_valid);
3708         kfree(obj->bit_17);
3709         kfree(obj);
3710 }
3711
3712 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3713 {
3714         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3715         struct drm_device *dev = obj->base.dev;
3716
3717         while (obj->pin_count > 0)
3718                 i915_gem_object_unpin(obj);
3719
3720         if (obj->phys_obj)
3721                 i915_gem_detach_phys_object(dev, obj);
3722
3723         i915_gem_free_object_tail(obj);
3724 }
3725
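/*
 * Quiesce the GPU for suspend or unload: wait for all rendering, evict
 * everything when running UMS, drop the fence registers and tear the
 * rings down.  mm.suspended is set so execbuf stays out while we do not
 * control the chip.
 */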
3726 int
3727 i915_gem_idle(struct drm_device *dev)
3728 {
3729         drm_i915_private_t *dev_priv = dev->dev_private;
3730         int ret;
3731
3732         mutex_lock(&dev->struct_mutex);
3733
3734         if (dev_priv->mm.suspended) {
3735                 mutex_unlock(&dev->struct_mutex);
3736                 return 0;
3737         }
3738
3739         ret = i915_gpu_idle(dev);
3740         if (ret) {
3741                 mutex_unlock(&dev->struct_mutex);
3742                 return ret;
3743         }
3744
3745         /* Under UMS, be paranoid and evict. */
3746         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3747                 ret = i915_gem_evict_inactive(dev, false);
3748                 if (ret) {
3749                         mutex_unlock(&dev->struct_mutex);
3750                         return ret;
3751                 }
3752         }
3753
3754         i915_gem_reset_fences(dev);
3755
3756         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3757          * We need to replace this with a semaphore, or something.
3758          * And not confound mm.suspended!
3759          */
3760         dev_priv->mm.suspended = 1;
3761         del_timer_sync(&dev_priv->hangcheck_timer);
3762
3763         i915_kernel_lost_context(dev);
3764         i915_gem_cleanup_ringbuffer(dev);
3765
3766         mutex_unlock(&dev->struct_mutex);
3767
3768         /* Cancel the retire work handler, which should be idle now. */
3769         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3770
3771         return 0;
3772 }
3773
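/*
 * Bring up the render ring plus, where the hardware provides them, the
 * BSD and BLT rings, unwinding whatever was initialised on failure.
 */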
3774 int
3775 i915_gem_init_ringbuffer(struct drm_device *dev)
3776 {
3777         drm_i915_private_t *dev_priv = dev->dev_private;
3778         int ret;
3779
3780         ret = intel_init_render_ring_buffer(dev);
3781         if (ret)
3782                 return ret;
3783
3784         if (HAS_BSD(dev)) {
3785                 ret = intel_init_bsd_ring_buffer(dev);
3786                 if (ret)
3787                         goto cleanup_render_ring;
3788         }
3789
3790         if (HAS_BLT(dev)) {
3791                 ret = intel_init_blt_ring_buffer(dev);
3792                 if (ret)
3793                         goto cleanup_bsd_ring;
3794         }
3795
3796         dev_priv->next_seqno = 1;
3797
3798         return 0;
3799
3800 cleanup_bsd_ring:
3801         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3802 cleanup_render_ring:
3803         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3804         return ret;
3805 }
3806
3807 void
3808 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3809 {
3810         drm_i915_private_t *dev_priv = dev->dev_private;
3811         int i;
3812
3813         for (i = 0; i < I915_NUM_RINGS; i++)
3814                 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3815 }
3816
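/*
 * UMS only: called when the X server re-acquires the VT.  Clears any
 * wedged state, brings the rings back up and installs the IRQ handler.
 */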
3817 int
3818 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3819                        struct drm_file *file_priv)
3820 {
3821         drm_i915_private_t *dev_priv = dev->dev_private;
3822         int ret, i;
3823
3824         if (drm_core_check_feature(dev, DRIVER_MODESET))
3825                 return 0;
3826
3827         if (atomic_read(&dev_priv->mm.wedged)) {
3828                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3829                 atomic_set(&dev_priv->mm.wedged, 0);
3830         }
3831
3832         mutex_lock(&dev->struct_mutex);
3833         dev_priv->mm.suspended = 0;
3834
3835         ret = i915_gem_init_ringbuffer(dev);
3836         if (ret != 0) {
3837                 mutex_unlock(&dev->struct_mutex);
3838                 return ret;
3839         }
3840
3841         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3842         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3843         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3844         for (i = 0; i < I915_NUM_RINGS; i++) {
3845                 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3846                 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3847         }
3848         mutex_unlock(&dev->struct_mutex);
3849
3850         ret = drm_irq_install(dev);
3851         if (ret)
3852                 goto cleanup_ringbuffer;
3853
3854         return 0;
3855
3856 cleanup_ringbuffer:
3857         mutex_lock(&dev->struct_mutex);
3858         i915_gem_cleanup_ringbuffer(dev);
3859         dev_priv->mm.suspended = 1;
3860         mutex_unlock(&dev->struct_mutex);
3861
3862         return ret;
3863 }
3864
3865 int
3866 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3867                        struct drm_file *file_priv)
3868 {
3869         if (drm_core_check_feature(dev, DRIVER_MODESET))
3870                 return 0;
3871
3872         drm_irq_uninstall(dev);
3873         return i915_gem_idle(dev);
3874 }
3875
3876 void
3877 i915_gem_lastclose(struct drm_device *dev)
3878 {
3879         int ret;
3880
3881         if (drm_core_check_feature(dev, DRIVER_MODESET))
3882                 return;
3883
3884         ret = i915_gem_idle(dev);
3885         if (ret)
3886                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3887 }
3888
3889 static void
3890 init_ring_lists(struct intel_ring_buffer *ring)
3891 {
3892         INIT_LIST_HEAD(&ring->active_list);
3893         INIT_LIST_HEAD(&ring->request_list);
3894         INIT_LIST_HEAD(&ring->gpu_write_list);
3895 }
3896
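/*
 * One-time GEM initialisation at driver load: set up the memory-manager
 * lists, the retire worker, fence-register bookkeeping, bit-6 swizzle
 * detection and the inactive-list shrinker.
 */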
3897 void
3898 i915_gem_load(struct drm_device *dev)
3899 {
3900         int i;
3901         drm_i915_private_t *dev_priv = dev->dev_private;
3902
3903         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3904         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3905         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3906         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3907         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3908         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3909         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3910         for (i = 0; i < I915_NUM_RINGS; i++)
3911                 init_ring_lists(&dev_priv->ring[i]);
3912         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3913                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3914         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3915                           i915_gem_retire_work_handler);
3916         init_completion(&dev_priv->error_completion);
3917
3918         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3919         if (IS_GEN3(dev)) {
3920                 u32 tmp = I915_READ(MI_ARB_STATE);
3921                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3922                         /* arb state is a masked write, so set bit + bit in mask */
3923                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3924                         I915_WRITE(MI_ARB_STATE, tmp);
3925                 }
3926         }
3927
3928         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3929
3930         /* Old X drivers will take 0-2 for front, back, depth buffers */
3931         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3932                 dev_priv->fence_reg_start = 3;
3933
3934         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3935                 dev_priv->num_fence_regs = 16;
3936         else
3937                 dev_priv->num_fence_regs = 8;
3938
3939         /* Initialize fence registers to zero */
3940         for (i = 0; i < dev_priv->num_fence_regs; i++) {
3941                 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3942         }
3943
3944         i915_gem_detect_bit_6_swizzle(dev);
3945         init_waitqueue_head(&dev_priv->pending_flip_queue);
3946
3947         dev_priv->mm.interruptible = true;
3948
3949         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3950         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3951         register_shrinker(&dev_priv->mm.inactive_shrinker);
3952 }
3953
3954 /*
3955  * Create a physically contiguous memory object for this object
3956  * e.g. for cursor + overlay regs
3957  */
3958 static int i915_gem_init_phys_object(struct drm_device *dev,
3959                                      int id, int size, int align)
3960 {
3961         drm_i915_private_t *dev_priv = dev->dev_private;
3962         struct drm_i915_gem_phys_object *phys_obj;
3963         int ret;
3964
3965         if (dev_priv->mm.phys_objs[id - 1] || !size)
3966                 return 0;
3967
3968         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3969         if (!phys_obj)
3970                 return -ENOMEM;
3971
3972         phys_obj->id = id;
3973
3974         phys_obj->handle = drm_pci_alloc(dev, size, align);
3975         if (!phys_obj->handle) {
3976                 ret = -ENOMEM;
3977                 goto kfree_obj;
3978         }
3979 #ifdef CONFIG_X86
3980         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3981 #endif
3982
3983         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3984
3985         return 0;
3986 kfree_obj:
3987         kfree(phys_obj);
3988         return ret;
3989 }
3990
3991 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3992 {
3993         drm_i915_private_t *dev_priv = dev->dev_private;
3994         struct drm_i915_gem_phys_object *phys_obj;
3995
3996         if (!dev_priv->mm.phys_objs[id - 1])
3997                 return;
3998
3999         phys_obj = dev_priv->mm.phys_objs[id - 1];
4000         if (phys_obj->cur_obj) {
4001                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4002         }
4003
4004 #ifdef CONFIG_X86
4005         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4006 #endif
4007         drm_pci_free(dev, phys_obj->handle);
4008         kfree(phys_obj);
4009         dev_priv->mm.phys_objs[id - 1] = NULL;
4010 }
4011
4012 void i915_gem_free_all_phys_object(struct drm_device *dev)
4013 {
4014         int i;
4015
4016         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4017                 i915_gem_free_phys_object(dev, i);
4018 }
4019
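/*
 * Copy the contents of a physically contiguous (cursor/overlay) object
 * back into its shmemfs pages and drop the association with the phys
 * object.
 */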
4020 void i915_gem_detach_phys_object(struct drm_device *dev,
4021                                  struct drm_i915_gem_object *obj)
4022 {
4023         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4024         char *vaddr;
4025         int i;
4026         int page_count;
4027
4028         if (!obj->phys_obj)
4029                 return;
4030         vaddr = obj->phys_obj->handle->vaddr;
4031
4032         page_count = obj->base.size / PAGE_SIZE;
4033         for (i = 0; i < page_count; i++) {
4034                 struct page *page = shmem_read_mapping_page(mapping, i);
4035                 if (!IS_ERR(page)) {
4036                         char *dst = kmap_atomic(page);
4037                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4038                         kunmap_atomic(dst);
4039
4040                         drm_clflush_pages(&page, 1);
4041
4042                         set_page_dirty(page);
4043                         mark_page_accessed(page);
4044                         page_cache_release(page);
4045                 }
4046         }
4047         intel_gtt_chipset_flush();
4048
4049         obj->phys_obj->cur_obj = NULL;
4050         obj->phys_obj = NULL;
4051 }
4052
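/*
 * Back an object with one of the physically contiguous regions,
 * allocating the region on first use, and copy the current shmemfs
 * contents into it.
 */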
4053 int
4054 i915_gem_attach_phys_object(struct drm_device *dev,
4055                             struct drm_i915_gem_object *obj,
4056                             int id,
4057                             int align)
4058 {
4059         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4060         drm_i915_private_t *dev_priv = dev->dev_private;
4061         int ret = 0;
4062         int page_count;
4063         int i;
4064
4065         if (id > I915_MAX_PHYS_OBJECT)
4066                 return -EINVAL;
4067
4068         if (obj->phys_obj) {
4069                 if (obj->phys_obj->id == id)
4070                         return 0;
4071                 i915_gem_detach_phys_object(dev, obj);
4072         }
4073
4074         /* create a new object */
4075         if (!dev_priv->mm.phys_objs[id - 1]) {
4076                 ret = i915_gem_init_phys_object(dev, id,
4077                                                 obj->base.size, align);
4078                 if (ret) {
4079                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4080                                   id, obj->base.size);
4081                         return ret;
4082                 }
4083         }
4084
4085         /* bind to the object */
4086         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4087         obj->phys_obj->cur_obj = obj;
4088
4089         page_count = obj->base.size / PAGE_SIZE;
4090
4091         for (i = 0; i < page_count; i++) {
4092                 struct page *page;
4093                 char *dst, *src;
4094
4095                 page = shmem_read_mapping_page(mapping, i);
4096                 if (IS_ERR(page))
4097                         return PTR_ERR(page);
4098
4099                 src = kmap_atomic(page);
4100                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4101                 memcpy(dst, src, PAGE_SIZE);
4102                 kunmap_atomic(src);
4103
4104                 mark_page_accessed(page);
4105                 page_cache_release(page);
4106         }
4107
4108         return 0;
4109 }
4110
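/*
 * pwrite fast path for physically contiguous objects: copy directly into
 * the phys handle's vaddr, falling back to a faulting copy_from_user
 * outside struct_mutex if the atomic copy fails.
 */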
4111 static int
4112 i915_gem_phys_pwrite(struct drm_device *dev,
4113                      struct drm_i915_gem_object *obj,
4114                      struct drm_i915_gem_pwrite *args,
4115                      struct drm_file *file_priv)
4116 {
4117         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4118         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4119
4120         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4121                 unsigned long unwritten;
4122
4123                 /* The physical object once assigned is fixed for the lifetime
4124                  * of the obj, so we can safely drop the lock and continue
4125                  * to access vaddr.
4126                  */
4127                 mutex_unlock(&dev->struct_mutex);
4128                 unwritten = copy_from_user(vaddr, user_data, args->size);
4129                 mutex_lock(&dev->struct_mutex);
4130                 if (unwritten)
4131                         return -EFAULT;
4132         }
4133
4134         intel_gtt_chipset_flush();
4135         return 0;
4136 }
4137
4138 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4139 {
4140         struct drm_i915_file_private *file_priv = file->driver_priv;
4141
4142         /* Clean up our request list when the client is going away, so that
4143          * later retire_requests won't dereference our soon-to-be-gone
4144          * file_priv.
4145          */
4146         spin_lock(&file_priv->mm.lock);
4147         while (!list_empty(&file_priv->mm.request_list)) {
4148                 struct drm_i915_gem_request *request;
4149
4150                 request = list_first_entry(&file_priv->mm.request_list,
4151                                            struct drm_i915_gem_request,
4152                                            client_list);
4153                 list_del(&request->client_list);
4154                 request->file_priv = NULL;
4155         }
4156         spin_unlock(&file_priv->mm.lock);
4157 }
4158
4159 static int
4160 i915_gpu_is_active(struct drm_device *dev)
4161 {
4162         drm_i915_private_t *dev_priv = dev->dev_private;
4163         int lists_empty;
4164
4165         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4166                       list_empty(&dev_priv->mm.active_list);
4167
4168         return !lists_empty;
4169 }
4170
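/*
 * Memory-pressure shrinker.  With nr_to_scan == 0 it merely reports the
 * size of the inactive list; otherwise it retires requests, unbinds
 * purgeable (and then any) inactive objects, and idles the GPU as a last
 * resort before rescanning.
 */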
4171 static int
4172 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4173 {
4174         struct drm_i915_private *dev_priv =
4175                 container_of(shrinker,
4176                              struct drm_i915_private,
4177                              mm.inactive_shrinker);
4178         struct drm_device *dev = dev_priv->dev;
4179         struct drm_i915_gem_object *obj, *next;
4180         int nr_to_scan = sc->nr_to_scan;
4181         int cnt;
4182
4183         if (!mutex_trylock(&dev->struct_mutex))
4184                 return 0;
4185
4186         /* "fast-path" to count number of available objects */
4187         if (nr_to_scan == 0) {
4188                 cnt = 0;
4189                 list_for_each_entry(obj,
4190                                     &dev_priv->mm.inactive_list,
4191                                     mm_list)
4192                         cnt++;
4193                 mutex_unlock(&dev->struct_mutex);
4194                 return cnt / 100 * sysctl_vfs_cache_pressure;
4195         }
4196
4197 rescan:
4198         /* first scan for clean buffers */
4199         i915_gem_retire_requests(dev);
4200
4201         list_for_each_entry_safe(obj, next,
4202                                  &dev_priv->mm.inactive_list,
4203                                  mm_list) {
4204                 if (i915_gem_object_is_purgeable(obj)) {
4205                         if (i915_gem_object_unbind(obj) == 0 &&
4206                             --nr_to_scan == 0)
4207                                 break;
4208                 }
4209         }
4210
4211         /* second pass, evict/count anything still on the inactive list */
4212         cnt = 0;
4213         list_for_each_entry_safe(obj, next,
4214                                  &dev_priv->mm.inactive_list,
4215                                  mm_list) {
4216                 if (nr_to_scan &&
4217                     i915_gem_object_unbind(obj) == 0)
4218                         nr_to_scan--;
4219                 else
4220                         cnt++;
4221         }
4222
4223         if (nr_to_scan && i915_gpu_is_active(dev)) {
4224                 /*
4225                  * We are desperate for pages, so as a last resort, wait
4226                  * for the GPU to finish and discard whatever we can.
4227                  * This dramatically reduces the number of OOM-killer
4228                  * events seen while running the GPU aggressively.
4229                  */
4230                 if (i915_gpu_idle(dev) == 0)
4231                         goto rescan;
4232         }
4233         mutex_unlock(&dev->struct_mutex);
4234         return cnt / 100 * sysctl_vfs_cache_pressure;
4235 }