drivers/gpu/drm/i915/i915_gem.c  (pandora-kernel.git, commit 2865b44dfab2b05fef8bcabee459aa4cc30f5a92)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38
39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43                                                           bool write);
44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45                                                                   uint64_t offset,
46                                                                   uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49                                                     unsigned alignment,
50                                                     bool map_and_fenceable);
51 static void i915_gem_clear_fence_reg(struct drm_device *dev,
52                                      struct drm_i915_fence_reg *reg);
53 static int i915_gem_phys_pwrite(struct drm_device *dev,
54                                 struct drm_i915_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file);
57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58
59 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
60                                     struct shrink_control *sc);
61
62 /* some bookkeeping */
63 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
64                                   size_t size)
65 {
66         dev_priv->mm.object_count++;
67         dev_priv->mm.object_memory += size;
68 }
69
70 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
71                                      size_t size)
72 {
73         dev_priv->mm.object_count--;
74         dev_priv->mm.object_memory -= size;
75 }
76
77 static int
78 i915_gem_wait_for_error(struct drm_device *dev)
79 {
80         struct drm_i915_private *dev_priv = dev->dev_private;
81         struct completion *x = &dev_priv->error_completion;
82         unsigned long flags;
83         int ret;
84
85         if (!atomic_read(&dev_priv->mm.wedged))
86                 return 0;
87
88         ret = wait_for_completion_interruptible(x);
89         if (ret)
90                 return ret;
91
92         if (atomic_read(&dev_priv->mm.wedged)) {
93                 /* GPU is hung, bump the completion count to account for
94                  * the token we just consumed so that we never hit zero and
95                  * end up waiting upon a subsequent completion event that
96                  * will never happen.
97                  */
98                 spin_lock_irqsave(&x->wait.lock, flags);
99                 x->done++;
100                 spin_unlock_irqrestore(&x->wait.lock, flags);
101         }
102         return 0;
103 }
104
105 int i915_mutex_lock_interruptible(struct drm_device *dev)
106 {
107         int ret;
108
109         ret = i915_gem_wait_for_error(dev);
110         if (ret)
111                 return ret;
112
113         ret = mutex_lock_interruptible(&dev->struct_mutex);
114         if (ret)
115                 return ret;
116
117         WARN_ON(i915_verify_lists(dev));
118         return 0;
119 }
120
121 static inline bool
122 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
123 {
124         return obj->gtt_space && !obj->active && obj->pin_count == 0;
125 }
126
127 void i915_gem_do_init(struct drm_device *dev,
128                       unsigned long start,
129                       unsigned long mappable_end,
130                       unsigned long end)
131 {
132         drm_i915_private_t *dev_priv = dev->dev_private;
133
134         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135
136         dev_priv->mm.gtt_start = start;
137         dev_priv->mm.gtt_mappable_end = mappable_end;
138         dev_priv->mm.gtt_end = end;
139         dev_priv->mm.gtt_total = end - start;
140         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141
142         /* Take over this portion of the GTT */
143         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
144 }
145
146 int
147 i915_gem_init_ioctl(struct drm_device *dev, void *data,
148                     struct drm_file *file)
149 {
150         struct drm_i915_gem_init *args = data;
151
152         if (args->gtt_start >= args->gtt_end ||
153             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
154                 return -EINVAL;
155
156         mutex_lock(&dev->struct_mutex);
157         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
158         mutex_unlock(&dev->struct_mutex);
159
160         return 0;
161 }
162
163 int
164 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
165                             struct drm_file *file)
166 {
167         struct drm_i915_private *dev_priv = dev->dev_private;
168         struct drm_i915_gem_get_aperture *args = data;
169         struct drm_i915_gem_object *obj;
170         size_t pinned;
171
172         if (!(dev->driver->driver_features & DRIVER_GEM))
173                 return -ENODEV;
174
175         pinned = 0;
176         mutex_lock(&dev->struct_mutex);
177         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
178                 pinned += obj->gtt_space->size;
179         mutex_unlock(&dev->struct_mutex);
180
181         args->aper_size = dev_priv->mm.gtt_total;
182         args->aper_available_size = args->aper_size - pinned;
183
184         return 0;
185 }
186
187 static int
188 i915_gem_create(struct drm_file *file,
189                 struct drm_device *dev,
190                 uint64_t size,
191                 uint32_t *handle_p)
192 {
193         struct drm_i915_gem_object *obj;
194         int ret;
195         u32 handle;
196
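        /* Object sizes are always a whole number of pages; round the
         * requested size up and reject zero-sized requests.
         */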
197         size = roundup(size, PAGE_SIZE);
198         if (size == 0)
199                 return -EINVAL;
200
201         /* Allocate the new object */
202         obj = i915_gem_alloc_object(dev, size);
203         if (obj == NULL)
204                 return -ENOMEM;
205
206         ret = drm_gem_handle_create(file, &obj->base, &handle);
207         if (ret) {
208                 drm_gem_object_release(&obj->base);
209                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
210                 kfree(obj);
211                 return ret;
212         }
213
214         /* drop reference from allocate - handle holds it now */
215         drm_gem_object_unreference(&obj->base);
216         trace_i915_gem_object_create(obj);
217
218         *handle_p = handle;
219         return 0;
220 }
221
222 int
223 i915_gem_dumb_create(struct drm_file *file,
224                      struct drm_device *dev,
225                      struct drm_mode_create_dumb *args)
226 {
227         /* have to work out size/pitch and return them */
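        /* Stride is width times whole bytes per pixel (bpp rounded up),
         * aligned to 64 bytes.
         */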
228         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
229         args->size = args->pitch * args->height;
230         return i915_gem_create(file, dev,
231                                args->size, &args->handle);
232 }
233
234 int i915_gem_dumb_destroy(struct drm_file *file,
235                           struct drm_device *dev,
236                           uint32_t handle)
237 {
238         return drm_gem_handle_delete(file, handle);
239 }
240
241 /**
242  * Creates a new mm object and returns a handle to it.
243  */
244 int
245 i915_gem_create_ioctl(struct drm_device *dev, void *data,
246                       struct drm_file *file)
247 {
248         struct drm_i915_gem_create *args = data;
249         return i915_gem_create(file, dev,
250                                args->size, &args->handle);
251 }
252
253 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
254 {
255         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
256
257         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
258                 obj->tiling_mode != I915_TILING_NONE;
259 }
260
261 static inline void
262 slow_shmem_copy(struct page *dst_page,
263                 int dst_offset,
264                 struct page *src_page,
265                 int src_offset,
266                 int length)
267 {
268         char *dst_vaddr, *src_vaddr;
269
270         dst_vaddr = kmap(dst_page);
271         src_vaddr = kmap(src_page);
272
273         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
274
275         kunmap(src_page);
276         kunmap(dst_page);
277 }
278
279 static inline void
280 slow_shmem_bit17_copy(struct page *gpu_page,
281                       int gpu_offset,
282                       struct page *cpu_page,
283                       int cpu_offset,
284                       int length,
285                       int is_read)
286 {
287         char *gpu_vaddr, *cpu_vaddr;
288
289         /* Use the unswizzled path if this page isn't affected. */
290         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
291                 if (is_read)
292                         return slow_shmem_copy(cpu_page, cpu_offset,
293                                                gpu_page, gpu_offset, length);
294                 else
295                         return slow_shmem_copy(gpu_page, gpu_offset,
296                                                cpu_page, cpu_offset, length);
297         }
298
299         gpu_vaddr = kmap(gpu_page);
300         cpu_vaddr = kmap(cpu_page);
301
302         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
303          * XORing with the other bits (A9 for Y, A9 and A10 for X)
304          */
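        /* Walk the range in chunks that never cross a 64-byte cacheline:
         * within each chunk bit 6 of the offset is flipped (gpu_offset ^ 64),
         * matching the bit-17 contribution to the swizzle described above.
         */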
305         while (length > 0) {
306                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307                 int this_length = min(cacheline_end - gpu_offset, length);
308                 int swizzled_gpu_offset = gpu_offset ^ 64;
309
310                 if (is_read) {
311                         memcpy(cpu_vaddr + cpu_offset,
312                                gpu_vaddr + swizzled_gpu_offset,
313                                this_length);
314                 } else {
315                         memcpy(gpu_vaddr + swizzled_gpu_offset,
316                                cpu_vaddr + cpu_offset,
317                                this_length);
318                 }
319                 cpu_offset += this_length;
320                 gpu_offset += this_length;
321                 length -= this_length;
322         }
323
324         kunmap(cpu_page);
325         kunmap(gpu_page);
326 }
327
328 /**
329  * This is the fast shmem pread path, which attempts to copy_to_user directly
330  * from the backing pages of the object to the user's address space.  On a
331  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
332  */
333 static int
334 i915_gem_shmem_pread_fast(struct drm_device *dev,
335                           struct drm_i915_gem_object *obj,
336                           struct drm_i915_gem_pread *args,
337                           struct drm_file *file)
338 {
339         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
340         ssize_t remain;
341         loff_t offset;
342         char __user *user_data;
343         int page_offset, page_length;
344
345         user_data = (char __user *) (uintptr_t) args->data_ptr;
346         remain = args->size;
347
348         offset = args->offset;
349
350         while (remain > 0) {
351                 struct page *page;
352                 char *vaddr;
353                 int ret;
354
355                 /* Operation in this page
356                  *
357                  * page_offset = offset within page
358                  * page_length = bytes to copy for this page
359                  */
360                 page_offset = offset_in_page(offset);
361                 page_length = remain;
362                 if ((page_offset + remain) > PAGE_SIZE)
363                         page_length = PAGE_SIZE - page_offset;
364
365                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
366                 if (IS_ERR(page))
367                         return PTR_ERR(page);
368
369                 vaddr = kmap_atomic(page);
370                 ret = __copy_to_user_inatomic(user_data,
371                                               vaddr + page_offset,
372                                               page_length);
373                 kunmap_atomic(vaddr);
374
375                 mark_page_accessed(page);
376                 page_cache_release(page);
377                 if (ret)
378                         return -EFAULT;
379
380                 remain -= page_length;
381                 user_data += page_length;
382                 offset += page_length;
383         }
384
385         return 0;
386 }
387
388 /**
389  * This is the fallback shmem pread path, which pins the user pages with
390  * get_user_pages() outside of the struct_mutex, so we can copy into them
391  * from the object's backing pages while holding the struct mutex without
392  * taking page faults.
393  */
394 static int
395 i915_gem_shmem_pread_slow(struct drm_device *dev,
396                           struct drm_i915_gem_object *obj,
397                           struct drm_i915_gem_pread *args,
398                           struct drm_file *file)
399 {
400         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
401         struct mm_struct *mm = current->mm;
402         struct page **user_pages;
403         ssize_t remain;
404         loff_t offset, pinned_pages, i;
405         loff_t first_data_page, last_data_page, num_pages;
406         int shmem_page_offset;
407         int data_page_index, data_page_offset;
408         int page_length;
409         int ret;
410         uint64_t data_ptr = args->data_ptr;
411         int do_bit17_swizzling;
412
413         remain = args->size;
414
415         /* Pin the user pages containing the data.  We can't fault while
416          * holding the struct mutex, yet we want to hold it while
417          * dereferencing the user data.
418          */
419         first_data_page = data_ptr / PAGE_SIZE;
420         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
421         num_pages = last_data_page - first_data_page + 1;
422
423         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
424         if (user_pages == NULL)
425                 return -ENOMEM;
426
427         mutex_unlock(&dev->struct_mutex);
428         down_read(&mm->mmap_sem);
429         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
430                                       num_pages, 1, 0, user_pages, NULL);
431         up_read(&mm->mmap_sem);
432         mutex_lock(&dev->struct_mutex);
433         if (pinned_pages < num_pages) {
434                 ret = -EFAULT;
435                 goto out;
436         }
437
438         ret = i915_gem_object_set_cpu_read_domain_range(obj,
439                                                         args->offset,
440                                                         args->size);
441         if (ret)
442                 goto out;
443
444         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
445
446         offset = args->offset;
447
448         while (remain > 0) {
449                 struct page *page;
450
451                 /* Operation in this page
452                  *
453                  * shmem_page_offset = offset within page in shmem file
454                  * data_page_index = page number in get_user_pages return
455                  * data_page_offset = offset within data_page_index page.
456                  * page_length = bytes to copy for this page
457                  */
458                 shmem_page_offset = offset_in_page(offset);
459                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
460                 data_page_offset = offset_in_page(data_ptr);
461
462                 page_length = remain;
463                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
464                         page_length = PAGE_SIZE - shmem_page_offset;
465                 if ((data_page_offset + page_length) > PAGE_SIZE)
466                         page_length = PAGE_SIZE - data_page_offset;
467
468                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
469                 if (IS_ERR(page)) {
470                         ret = PTR_ERR(page);
471                         goto out;
472                 }
473
474                 if (do_bit17_swizzling) {
475                         slow_shmem_bit17_copy(page,
476                                               shmem_page_offset,
477                                               user_pages[data_page_index],
478                                               data_page_offset,
479                                               page_length,
480                                               1);
481                 } else {
482                         slow_shmem_copy(user_pages[data_page_index],
483                                         data_page_offset,
484                                         page,
485                                         shmem_page_offset,
486                                         page_length);
487                 }
488
489                 mark_page_accessed(page);
490                 page_cache_release(page);
491
492                 remain -= page_length;
493                 data_ptr += page_length;
494                 offset += page_length;
495         }
496
497 out:
498         for (i = 0; i < pinned_pages; i++) {
499                 SetPageDirty(user_pages[i]);
500                 mark_page_accessed(user_pages[i]);
501                 page_cache_release(user_pages[i]);
502         }
503         drm_free_large(user_pages);
504
505         return ret;
506 }
507
508 /**
509  * Reads data from the object referenced by handle.
510  *
511  * On error, the contents of *data are undefined.
512  */
513 int
514 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
515                      struct drm_file *file)
516 {
517         struct drm_i915_gem_pread *args = data;
518         struct drm_i915_gem_object *obj;
519         int ret = 0;
520
521         if (args->size == 0)
522                 return 0;
523
524         if (!access_ok(VERIFY_WRITE,
525                        (char __user *)(uintptr_t)args->data_ptr,
526                        args->size))
527                 return -EFAULT;
528
529         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
530                                        args->size);
531         if (ret)
532                 return -EFAULT;
533
534         ret = i915_mutex_lock_interruptible(dev);
535         if (ret)
536                 return ret;
537
538         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
539         if (&obj->base == NULL) {
540                 ret = -ENOENT;
541                 goto unlock;
542         }
543
544         /* Bounds check source.  */
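        /* Written this way round to avoid integer overflow of
         * offset + size.
         */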
545         if (args->offset > obj->base.size ||
546             args->size > obj->base.size - args->offset) {
547                 ret = -EINVAL;
548                 goto out;
549         }
550
551         trace_i915_gem_object_pread(obj, args->offset, args->size);
552
553         ret = i915_gem_object_set_cpu_read_domain_range(obj,
554                                                         args->offset,
555                                                         args->size);
556         if (ret)
557                 goto out;
558
559         ret = -EFAULT;
560         if (!i915_gem_object_needs_bit17_swizzle(obj))
561                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
562         if (ret == -EFAULT)
563                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
564
565 out:
566         drm_gem_object_unreference(&obj->base);
567 unlock:
568         mutex_unlock(&dev->struct_mutex);
569         return ret;
570 }
571
572 /* This is the fast write path which cannot handle
573  * page faults in the source data
574  */
575
576 static inline int
577 fast_user_write(struct io_mapping *mapping,
578                 loff_t page_base, int page_offset,
579                 char __user *user_data,
580                 int length)
581 {
582         char *vaddr_atomic;
583         unsigned long unwritten;
584
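        /* Atomic write-combining mapping plus a non-temporal copy that must
         * not fault; the return value is the number of bytes left uncopied,
         * so non-zero means the caller should fall back to the slow path.
         */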
585         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
586         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587                                                       user_data, length);
588         io_mapping_unmap_atomic(vaddr_atomic);
589         return unwritten;
590 }
591
592 /* Here's the write path which can sleep for
593  * page faults
594  */
595
596 static inline void
597 slow_kernel_write(struct io_mapping *mapping,
598                   loff_t gtt_base, int gtt_offset,
599                   struct page *user_page, int user_offset,
600                   int length)
601 {
602         char __iomem *dst_vaddr;
603         char *src_vaddr;
604
605         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
606         src_vaddr = kmap(user_page);
607
608         memcpy_toio(dst_vaddr + gtt_offset,
609                     src_vaddr + user_offset,
610                     length);
611
612         kunmap(user_page);
613         io_mapping_unmap(dst_vaddr);
614 }
615
616 /**
617  * This is the fast pwrite path, where we copy the data directly from the
618  * user into the GTT, uncached.
619  */
620 static int
621 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
622                          struct drm_i915_gem_object *obj,
623                          struct drm_i915_gem_pwrite *args,
624                          struct drm_file *file)
625 {
626         drm_i915_private_t *dev_priv = dev->dev_private;
627         ssize_t remain;
628         loff_t offset, page_base;
629         char __user *user_data;
630         int page_offset, page_length;
631
632         user_data = (char __user *) (uintptr_t) args->data_ptr;
633         remain = args->size;
634
635         offset = obj->gtt_offset + args->offset;
636
637         while (remain > 0) {
638                 /* Operation in this page
639                  *
640                  * page_base = page offset within aperture
641                  * page_offset = offset within page
642                  * page_length = bytes to copy for this page
643                  */
644                 page_base = offset & PAGE_MASK;
645                 page_offset = offset_in_page(offset);
646                 page_length = remain;
647                 if ((page_offset + remain) > PAGE_SIZE)
648                         page_length = PAGE_SIZE - page_offset;
649
650                 /* If we get a fault while copying data, then (presumably) our
651                  * source page isn't available.  Return the error and we'll
652                  * retry in the slow path.
653                  */
654                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
655                                     page_offset, user_data, page_length))
656                         return -EFAULT;
657
658                 remain -= page_length;
659                 user_data += page_length;
660                 offset += page_length;
661         }
662
663         return 0;
664 }
665
666 /**
667  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
668  * the user memory and copies it into the aperture via kmap and a WC io-mapping.
669  *
670  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
671  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
672  */
673 static int
674 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
675                          struct drm_i915_gem_object *obj,
676                          struct drm_i915_gem_pwrite *args,
677                          struct drm_file *file)
678 {
679         drm_i915_private_t *dev_priv = dev->dev_private;
680         ssize_t remain;
681         loff_t gtt_page_base, offset;
682         loff_t first_data_page, last_data_page, num_pages;
683         loff_t pinned_pages, i;
684         struct page **user_pages;
685         struct mm_struct *mm = current->mm;
686         int gtt_page_offset, data_page_offset, data_page_index, page_length;
687         int ret;
688         uint64_t data_ptr = args->data_ptr;
689
690         remain = args->size;
691
692         /* Pin the user pages containing the data.  We can't fault while
693          * holding the struct mutex, and all of the pwrite implementations
694          * want to hold it while dereferencing the user data.
695          */
696         first_data_page = data_ptr / PAGE_SIZE;
697         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
698         num_pages = last_data_page - first_data_page + 1;
699
700         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
701         if (user_pages == NULL)
702                 return -ENOMEM;
703
704         mutex_unlock(&dev->struct_mutex);
705         down_read(&mm->mmap_sem);
706         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
707                                       num_pages, 0, 0, user_pages, NULL);
708         up_read(&mm->mmap_sem);
709         mutex_lock(&dev->struct_mutex);
710         if (pinned_pages < num_pages) {
711                 ret = -EFAULT;
712                 goto out_unpin_pages;
713         }
714
715         ret = i915_gem_object_set_to_gtt_domain(obj, true);
716         if (ret)
717                 goto out_unpin_pages;
718
719         ret = i915_gem_object_put_fence(obj);
720         if (ret)
721                 goto out_unpin_pages;
722
723         offset = obj->gtt_offset + args->offset;
724
725         while (remain > 0) {
726                 /* Operation in this page
727                  *
728                  * gtt_page_base = page offset within aperture
729                  * gtt_page_offset = offset within page in aperture
730                  * data_page_index = page number in get_user_pages return
731                  * data_page_offset = offset within data_page_index page.
732                  * page_length = bytes to copy for this page
733                  */
734                 gtt_page_base = offset & PAGE_MASK;
735                 gtt_page_offset = offset_in_page(offset);
736                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
737                 data_page_offset = offset_in_page(data_ptr);
738
739                 page_length = remain;
740                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
741                         page_length = PAGE_SIZE - gtt_page_offset;
742                 if ((data_page_offset + page_length) > PAGE_SIZE)
743                         page_length = PAGE_SIZE - data_page_offset;
744
745                 slow_kernel_write(dev_priv->mm.gtt_mapping,
746                                   gtt_page_base, gtt_page_offset,
747                                   user_pages[data_page_index],
748                                   data_page_offset,
749                                   page_length);
750
751                 remain -= page_length;
752                 offset += page_length;
753                 data_ptr += page_length;
754         }
755
756 out_unpin_pages:
757         for (i = 0; i < pinned_pages; i++)
758                 page_cache_release(user_pages[i]);
759         drm_free_large(user_pages);
760
761         return ret;
762 }
763
764 /**
765  * This is the fast shmem pwrite path, which attempts to directly
766  * copy_from_user into the kmapped pages backing the object.
767  */
768 static int
769 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
770                            struct drm_i915_gem_object *obj,
771                            struct drm_i915_gem_pwrite *args,
772                            struct drm_file *file)
773 {
774         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
775         ssize_t remain;
776         loff_t offset;
777         char __user *user_data;
778         int page_offset, page_length;
779
780         user_data = (char __user *) (uintptr_t) args->data_ptr;
781         remain = args->size;
782
783         offset = args->offset;
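        /* Writing through the CPU to the backing pages, so mark the object
         * dirty; the pages will then be written back rather than discarded
         * when they are released.
         */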
784         obj->dirty = 1;
785
786         while (remain > 0) {
787                 struct page *page;
788                 char *vaddr;
789                 int ret;
790
791                 /* Operation in this page
792                  *
793                  * page_offset = offset within page
794                  * page_length = bytes to copy for this page
795                  */
796                 page_offset = offset_in_page(offset);
797                 page_length = remain;
798                 if ((page_offset + remain) > PAGE_SIZE)
799                         page_length = PAGE_SIZE - page_offset;
800
801                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
802                 if (IS_ERR(page))
803                         return PTR_ERR(page);
804
805                 vaddr = kmap_atomic(page);
806                 ret = __copy_from_user_inatomic(vaddr + page_offset,
807                                                 user_data,
808                                                 page_length);
809                 kunmap_atomic(vaddr);
810
811                 set_page_dirty(page);
812                 mark_page_accessed(page);
813                 page_cache_release(page);
814
815                 /* If we get a fault while copying data, then (presumably) our
816                  * source page isn't available.  Return the error and we'll
817                  * retry in the slow path.
818                  */
819                 if (ret)
820                         return -EFAULT;
821
822                 remain -= page_length;
823                 user_data += page_length;
824                 offset += page_length;
825         }
826
827         return 0;
828 }
829
830 /**
831  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
832  * the user memory and maps it with kmap for copying.
833  *
834  * This avoids taking mmap_sem for faulting on the user's address while the
835  * struct_mutex is held.
836  */
837 static int
838 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
839                            struct drm_i915_gem_object *obj,
840                            struct drm_i915_gem_pwrite *args,
841                            struct drm_file *file)
842 {
843         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
844         struct mm_struct *mm = current->mm;
845         struct page **user_pages;
846         ssize_t remain;
847         loff_t offset, pinned_pages, i;
848         loff_t first_data_page, last_data_page, num_pages;
849         int shmem_page_offset;
850         int data_page_index, data_page_offset;
851         int page_length;
852         int ret;
853         uint64_t data_ptr = args->data_ptr;
854         int do_bit17_swizzling;
855
856         remain = args->size;
857
858         /* Pin the user pages containing the data.  We can't fault while
859          * holding the struct mutex, and all of the pwrite implementations
860          * want to hold it while dereferencing the user data.
861          */
862         first_data_page = data_ptr / PAGE_SIZE;
863         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
864         num_pages = last_data_page - first_data_page + 1;
865
866         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
867         if (user_pages == NULL)
868                 return -ENOMEM;
869
870         mutex_unlock(&dev->struct_mutex);
871         down_read(&mm->mmap_sem);
872         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
873                                       num_pages, 0, 0, user_pages, NULL);
874         up_read(&mm->mmap_sem);
875         mutex_lock(&dev->struct_mutex);
876         if (pinned_pages < num_pages) {
877                 ret = -EFAULT;
878                 goto out;
879         }
880
881         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
882         if (ret)
883                 goto out;
884
885         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886
887         offset = args->offset;
888         obj->dirty = 1;
889
890         while (remain > 0) {
891                 struct page *page;
892
893                 /* Operation in this page
894                  *
895                  * shmem_page_offset = offset within page in shmem file
896                  * data_page_index = page number in get_user_pages return
897                  * data_page_offset = offset within data_page_index page.
898                  * page_length = bytes to copy for this page
899                  */
900                 shmem_page_offset = offset_in_page(offset);
901                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
902                 data_page_offset = offset_in_page(data_ptr);
903
904                 page_length = remain;
905                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
906                         page_length = PAGE_SIZE - shmem_page_offset;
907                 if ((data_page_offset + page_length) > PAGE_SIZE)
908                         page_length = PAGE_SIZE - data_page_offset;
909
910                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
911                 if (IS_ERR(page)) {
912                         ret = PTR_ERR(page);
913                         goto out;
914                 }
915
916                 if (do_bit17_swizzling) {
917                         slow_shmem_bit17_copy(page,
918                                               shmem_page_offset,
919                                               user_pages[data_page_index],
920                                               data_page_offset,
921                                               page_length,
922                                               0);
923                 } else {
924                         slow_shmem_copy(page,
925                                         shmem_page_offset,
926                                         user_pages[data_page_index],
927                                         data_page_offset,
928                                         page_length);
929                 }
930
931                 set_page_dirty(page);
932                 mark_page_accessed(page);
933                 page_cache_release(page);
934
935                 remain -= page_length;
936                 data_ptr += page_length;
937                 offset += page_length;
938         }
939
940 out:
941         for (i = 0; i < pinned_pages; i++)
942                 page_cache_release(user_pages[i]);
943         drm_free_large(user_pages);
944
945         return ret;
946 }
947
948 /**
949  * Writes data to the object referenced by handle.
950  *
951  * On error, the contents of the buffer that were to be modified are undefined.
952  */
953 int
954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
955                       struct drm_file *file)
956 {
957         struct drm_i915_gem_pwrite *args = data;
958         struct drm_i915_gem_object *obj;
959         int ret;
960
961         if (args->size == 0)
962                 return 0;
963
964         if (!access_ok(VERIFY_READ,
965                        (char __user *)(uintptr_t)args->data_ptr,
966                        args->size))
967                 return -EFAULT;
968
969         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
970                                       args->size);
971         if (ret)
972                 return -EFAULT;
973
974         ret = i915_mutex_lock_interruptible(dev);
975         if (ret)
976                 return ret;
977
978         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
979         if (&obj->base == NULL) {
980                 ret = -ENOENT;
981                 goto unlock;
982         }
983
984         /* Bounds check destination. */
985         if (args->offset > obj->base.size ||
986             args->size > obj->base.size - args->offset) {
987                 ret = -EINVAL;
988                 goto out;
989         }
990
991         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
992
993         /* We can only do the GTT pwrite on untiled buffers, as otherwise
994          * it would end up going through the fenced access, and we'll get
995          * different detiling behavior between reading and writing.
996          * pread/pwrite currently are reading and writing from the CPU
997          * perspective, requiring manual detiling by the client.
998          */
999         if (obj->phys_obj)
1000                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1001         else if (obj->gtt_space &&
1002                  obj->tiling_mode == I915_TILING_NONE &&
1003                  obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1004                 ret = i915_gem_object_pin(obj, 0, true);
1005                 if (ret)
1006                         goto out;
1007
1008                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1009                 if (ret)
1010                         goto out_unpin;
1011
1012                 ret = i915_gem_object_put_fence(obj);
1013                 if (ret)
1014                         goto out_unpin;
1015
1016                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1017                 if (ret == -EFAULT)
1018                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1019
1020 out_unpin:
1021                 i915_gem_object_unpin(obj);
1022         } else {
1023                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1024                 if (ret)
1025                         goto out;
1026
1027                 ret = -EFAULT;
1028                 if (!i915_gem_object_needs_bit17_swizzle(obj))
1029                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1030                 if (ret == -EFAULT)
1031                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1032         }
1033
1034 out:
1035         drm_gem_object_unreference(&obj->base);
1036 unlock:
1037         mutex_unlock(&dev->struct_mutex);
1038         return ret;
1039 }
1040
1041 /**
1042  * Called when user space prepares to use an object with the CPU, either
1043  * through the mmap ioctl's mapping or a GTT mapping.
1044  */
1045 int
1046 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1047                           struct drm_file *file)
1048 {
1049         struct drm_i915_gem_set_domain *args = data;
1050         struct drm_i915_gem_object *obj;
1051         uint32_t read_domains = args->read_domains;
1052         uint32_t write_domain = args->write_domain;
1053         int ret;
1054
1055         if (!(dev->driver->driver_features & DRIVER_GEM))
1056                 return -ENODEV;
1057
1058         /* Only handle setting domains to types used by the CPU. */
1059         if (write_domain & I915_GEM_GPU_DOMAINS)
1060                 return -EINVAL;
1061
1062         if (read_domains & I915_GEM_GPU_DOMAINS)
1063                 return -EINVAL;
1064
1065         /* Having something in the write domain implies it's in the read
1066          * domain, and only that read domain.  Enforce that in the request.
1067          */
1068         if (write_domain != 0 && read_domains != write_domain)
1069                 return -EINVAL;
1070
1071         ret = i915_mutex_lock_interruptible(dev);
1072         if (ret)
1073                 return ret;
1074
1075         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1076         if (&obj->base == NULL) {
1077                 ret = -ENOENT;
1078                 goto unlock;
1079         }
1080
1081         if (read_domains & I915_GEM_DOMAIN_GTT) {
1082                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1083
1084                 /* Silently promote "you're not bound, there was nothing to do"
1085                  * to success, since the client was just asking us to
1086                  * make sure everything was done.
1087                  */
1088                 if (ret == -EINVAL)
1089                         ret = 0;
1090         } else {
1091                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1092         }
1093
1094         drm_gem_object_unreference(&obj->base);
1095 unlock:
1096         mutex_unlock(&dev->struct_mutex);
1097         return ret;
1098 }
1099
1100 /**
1101  * Called when user space has done writes to this buffer
1102  */
1103 int
1104 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1105                          struct drm_file *file)
1106 {
1107         struct drm_i915_gem_sw_finish *args = data;
1108         struct drm_i915_gem_object *obj;
1109         int ret = 0;
1110
1111         if (!(dev->driver->driver_features & DRIVER_GEM))
1112                 return -ENODEV;
1113
1114         ret = i915_mutex_lock_interruptible(dev);
1115         if (ret)
1116                 return ret;
1117
1118         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1119         if (&obj->base == NULL) {
1120                 ret = -ENOENT;
1121                 goto unlock;
1122         }
1123
1124         /* Pinned buffers may be scanout, so flush the cache */
1125         if (obj->pin_count)
1126                 i915_gem_object_flush_cpu_write_domain(obj);
1127
1128         drm_gem_object_unreference(&obj->base);
1129 unlock:
1130         mutex_unlock(&dev->struct_mutex);
1131         return ret;
1132 }
1133
1134 /**
1135  * Maps the contents of an object, returning the address it is mapped
1136  * into.
1137  *
1138  * While the mapping holds a reference on the contents of the object, it doesn't
1139  * imply a ref on the object itself.
1140  */
1141 int
1142 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1143                     struct drm_file *file)
1144 {
1145         struct drm_i915_private *dev_priv = dev->dev_private;
1146         struct drm_i915_gem_mmap *args = data;
1147         struct drm_gem_object *obj;
1148         unsigned long addr;
1149
1150         if (!(dev->driver->driver_features & DRIVER_GEM))
1151                 return -ENODEV;
1152
1153         obj = drm_gem_object_lookup(dev, file, args->handle);
1154         if (obj == NULL)
1155                 return -ENOENT;
1156
1157         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1158                 drm_gem_object_unreference_unlocked(obj);
1159                 return -E2BIG;
1160         }
1161
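        /* Map the object's shmem backing file straight into the caller's
         * address space; this is a CPU mapping of the backing pages, not a
         * GTT mapping.
         */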
1162         down_write(&current->mm->mmap_sem);
1163         addr = do_mmap(obj->filp, 0, args->size,
1164                        PROT_READ | PROT_WRITE, MAP_SHARED,
1165                        args->offset);
1166         up_write(&current->mm->mmap_sem);
1167         drm_gem_object_unreference_unlocked(obj);
1168         if (IS_ERR((void *)addr))
1169                 return addr;
1170
1171         args->addr_ptr = (uint64_t) addr;
1172
1173         return 0;
1174 }
1175
1176 /**
1177  * i915_gem_fault - fault a page into the GTT
1178  * @vma: VMA in question
1179  * @vmf: fault info
1180  *
1181  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1182  * from userspace.  The fault handler takes care of binding the object to
1183  * the GTT (if needed), allocating and programming a fence register (again,
1184  * only if needed based on whether the old reg is still valid or the object
1185  * is tiled) and inserting a new PTE into the faulting process.
1186  *
1187  * Note that the faulting process may involve evicting existing objects
1188  * from the GTT and/or fence registers to make room.  So performance may
1189  * suffer if the GTT working set is large or there are few fence registers
1190  * left.
1191  */
1192 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1193 {
1194         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1195         struct drm_device *dev = obj->base.dev;
1196         drm_i915_private_t *dev_priv = dev->dev_private;
1197         pgoff_t page_offset;
1198         unsigned long pfn;
1199         int ret = 0;
1200         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1201
1202         /* We don't use vmf->pgoff since that has the fake offset */
1203         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1204                 PAGE_SHIFT;
1205
1206         ret = i915_mutex_lock_interruptible(dev);
1207         if (ret)
1208                 goto out;
1209
1210         trace_i915_gem_object_fault(obj, page_offset, true, write);
1211
1212         /* Now bind it into the GTT if needed */
1213         if (!obj->map_and_fenceable) {
1214                 ret = i915_gem_object_unbind(obj);
1215                 if (ret)
1216                         goto unlock;
1217         }
1218         if (!obj->gtt_space) {
1219                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1220                 if (ret)
1221                         goto unlock;
1222
1223                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1224                 if (ret)
1225                         goto unlock;
1226         }
1227
1228         if (obj->tiling_mode == I915_TILING_NONE)
1229                 ret = i915_gem_object_put_fence(obj);
1230         else
1231                 ret = i915_gem_object_get_fence(obj, NULL);
1232         if (ret)
1233                 goto unlock;
1234
1235         if (i915_gem_object_is_inactive(obj))
1236                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1237
1238         obj->fault_mappable = true;
1239
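        /* Page to map: aperture base plus the object's GTT offset, plus the
         * faulting page's offset within the object.
         */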
1240         pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1241                 page_offset;
1242
1243         /* Finally, remap it using the new GTT offset */
1244         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1245 unlock:
1246         mutex_unlock(&dev->struct_mutex);
1247 out:
1248         switch (ret) {
1249         case -EIO:
1250         case -EAGAIN:
1251                 /* Give the error handler a chance to run and move the
1252                  * objects off the GPU active list. Next time we service the
1253                  * fault, we should be able to transition the page into the
1254                  * GTT without touching the GPU (and so avoid further
1255                  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1256                  * with coherency, just lost writes.
1257                  */
1258                 set_need_resched();
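                /* fall through */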
1259         case 0:
1260         case -ERESTARTSYS:
1261         case -EINTR:
1262         case -EBUSY:
1263                 /*
1264                  * EBUSY is ok: this just means that another thread
1265                  * already did the job.
1266                  */
1267                 return VM_FAULT_NOPAGE;
1268         case -ENOMEM:
1269                 return VM_FAULT_OOM;
1270         default:
1271                 return VM_FAULT_SIGBUS;
1272         }
1273 }
1274
1275 /**
1276  * i915_gem_release_mmap - remove physical page mappings
1277  * @obj: obj in question
1278  *
1279  * Preserve the reservation of the mmapping with the DRM core code, but
1280  * relinquish ownership of the pages back to the system.
1281  *
1282  * It is vital that we remove the page mapping if we have mapped a tiled
1283  * object through the GTT and then lose the fence register due to
1284  * resource pressure. Similarly if the object has been moved out of the
1285  * aperture, then pages mapped into userspace must be revoked. Removing the
1286  * mapping will then trigger a page fault on the next user access, allowing
1287  * fixup by i915_gem_fault().
1288  */
1289 void
1290 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1291 {
1292         if (!obj->fault_mappable)
1293                 return;
1294
1295         if (obj->base.dev->dev_mapping)
1296                 unmap_mapping_range(obj->base.dev->dev_mapping,
1297                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1298                                     obj->base.size, 1);
1299
1300         obj->fault_mappable = false;
1301 }
1302
1303 static uint32_t
1304 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1305 {
1306         uint32_t gtt_size;
1307
1308         if (INTEL_INFO(dev)->gen >= 4 ||
1309             tiling_mode == I915_TILING_NONE)
1310                 return size;
1311
1312         /* Previous chips need a power-of-two fence region when tiling */
1313         if (INTEL_INFO(dev)->gen == 3)
1314                 gtt_size = 1024*1024;
1315         else
1316                 gtt_size = 512*1024;
1317
1318         while (gtt_size < size)
1319                 gtt_size <<= 1;
1320
1321         return gtt_size;
1322 }
1323
1324 /**
1325  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1326  * @obj: object to check
1327  *
1328  * Return the required GTT alignment for an object, taking into account
1329  * potential fence register mapping.
1330  */
1331 static uint32_t
1332 i915_gem_get_gtt_alignment(struct drm_device *dev,
1333                            uint32_t size,
1334                            int tiling_mode)
1335 {
1336         /*
1337          * Minimum alignment is 4k (GTT page size), but might be greater
1338          * if a fence register is needed for the object.
1339          */
1340         if (INTEL_INFO(dev)->gen >= 4 ||
1341             tiling_mode == I915_TILING_NONE)
1342                 return 4096;
1343
1344         /*
1345          * Previous chips need to be aligned to the size of the smallest
1346          * fence register that can contain the object.
1347          */
1348         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1349 }
1350
1351 /**
1352  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1353  *                                       unfenced object
1354  * @dev: the device
1355  * @size: size of the object
1356  * @tiling_mode: tiling mode of the object
1357  *
1358  * Return the required GTT alignment for an object, only taking into account
1359  * unfenced tiled surface requirements.
1360  */
1361 uint32_t
1362 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1363                                     uint32_t size,
1364                                     int tiling_mode)
1365 {
1366         /*
1367          * Minimum alignment is 4k (GTT page size) for sane hw.
1368          */
1369         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1370             tiling_mode == I915_TILING_NONE)
1371                 return 4096;
1372
1373         /* Previous hardware however needs to be aligned to a power-of-two
1374          * tile height. The simplest method for determining this is to reuse
1375          * the power-of-two fence-region size from i915_gem_get_gtt_size().
1376          */
1377         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1378 }
1379
1380 int
1381 i915_gem_mmap_gtt(struct drm_file *file,
1382                   struct drm_device *dev,
1383                   uint32_t handle,
1384                   uint64_t *offset)
1385 {
1386         struct drm_i915_private *dev_priv = dev->dev_private;
1387         struct drm_i915_gem_object *obj;
1388         int ret;
1389
1390         if (!(dev->driver->driver_features & DRIVER_GEM))
1391                 return -ENODEV;
1392
1393         ret = i915_mutex_lock_interruptible(dev);
1394         if (ret)
1395                 return ret;
1396
1397         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1398         if (&obj->base == NULL) {
1399                 ret = -ENOENT;
1400                 goto unlock;
1401         }
1402
1403         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1404                 ret = -E2BIG;
1405                 goto out;
1406         }
1407
1408         if (obj->madv != I915_MADV_WILLNEED) {
1409                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1410                 ret = -EINVAL;
1411                 goto out;
1412         }
1413
1414         if (!obj->base.map_list.map) {
1415                 ret = drm_gem_create_mmap_offset(&obj->base);
1416                 if (ret)
1417                         goto out;
1418         }
1419
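        /* Hand back the fake offset for this object; drm_gem_mmap() will
         * translate it back to the object when userspace calls mmap().
         */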
1420         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1421
1422 out:
1423         drm_gem_object_unreference(&obj->base);
1424 unlock:
1425         mutex_unlock(&dev->struct_mutex);
1426         return ret;
1427 }
1428
1429 /**
1430  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1431  * @dev: DRM device
1432  * @data: GTT mapping ioctl data
1433  * @file: GEM object info
1434  *
1435  * Simply returns the fake offset to userspace so it can mmap it.
1436  * The mmap call will end up in drm_gem_mmap(), which will set things
1437  * up so we can get faults in the handler above.
1438  *
1439  * The fault handler will take care of binding the object into the GTT
1440  * (since it may have been evicted to make room for something), allocating
1441  * a fence register, and mapping the appropriate aperture address into
1442  * userspace.
1443  */
1444 int
1445 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1446                         struct drm_file *file)
1447 {
1448         struct drm_i915_gem_mmap_gtt *args = data;
1449
1450         if (!(dev->driver->driver_features & DRIVER_GEM))
1451                 return -ENODEV;
1452
1453         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1454 }
1455
1456
1457 static int
1458 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1459                               gfp_t gfpmask)
1460 {
1461         int page_count, i;
1462         struct address_space *mapping;
1463         struct inode *inode;
1464         struct page *page;
1465
1466         /* Get the list of pages out of our struct file.  They'll be pinned
1467          * at this point until we release them.
1468          */
1469         page_count = obj->base.size / PAGE_SIZE;
1470         BUG_ON(obj->pages != NULL);
1471         obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1472         if (obj->pages == NULL)
1473                 return -ENOMEM;
1474
1475         inode = obj->base.filp->f_path.dentry->d_inode;
1476         mapping = inode->i_mapping;
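        /* Combine the caller's GFP flags with the mapping's default
         * allocation flags.
         */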
1477         gfpmask |= mapping_gfp_mask(mapping);
1478
1479         for (i = 0; i < page_count; i++) {
1480                 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1481                 if (IS_ERR(page))
1482                         goto err_pages;
1483
1484                 obj->pages[i] = page;
1485         }
1486
1487         if (i915_gem_object_needs_bit17_swizzle(obj))
1488                 i915_gem_object_do_bit_17_swizzle(obj);
1489
1490         return 0;
1491
1492 err_pages:
1493         while (i--)
1494                 page_cache_release(obj->pages[i]);
1495
1496         drm_free_large(obj->pages);
1497         obj->pages = NULL;
1498         return PTR_ERR(page);
1499 }
1500
1501 static void
1502 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1503 {
1504         int page_count = obj->base.size / PAGE_SIZE;
1505         int i;
1506
1507         BUG_ON(obj->madv == __I915_MADV_PURGED);
1508
1509         if (i915_gem_object_needs_bit17_swizzle(obj))
1510                 i915_gem_object_save_bit_17_swizzle(obj);
1511
1512         if (obj->madv == I915_MADV_DONTNEED)
1513                 obj->dirty = 0;
1514
1515         for (i = 0; i < page_count; i++) {
1516                 if (obj->dirty)
1517                         set_page_dirty(obj->pages[i]);
1518
1519                 if (obj->madv == I915_MADV_WILLNEED)
1520                         mark_page_accessed(obj->pages[i]);
1521
1522                 page_cache_release(obj->pages[i]);
1523         }
1524         obj->dirty = 0;
1525
1526         drm_free_large(obj->pages);
1527         obj->pages = NULL;
1528 }
1529
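     /*
      * Mark @obj as active on @ring: take a reference the first time the
      * object enters the active state, move it to the tail of the relevant
      * active lists and record the request seqno (plus fence bookkeeping)
      * so that retirement can later move it back off again.
      */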
1530 void
1531 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1532                                struct intel_ring_buffer *ring,
1533                                u32 seqno)
1534 {
1535         struct drm_device *dev = obj->base.dev;
1536         struct drm_i915_private *dev_priv = dev->dev_private;
1537
1538         BUG_ON(ring == NULL);
1539         obj->ring = ring;
1540
1541         /* Add a reference if we're newly entering the active list. */
1542         if (!obj->active) {
1543                 drm_gem_object_reference(&obj->base);
1544                 obj->active = 1;
1545         }
1546
1547         /* Move from whatever list we were on to the tail of execution. */
1548         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1549         list_move_tail(&obj->ring_list, &ring->active_list);
1550
1551         obj->last_rendering_seqno = seqno;
1552
1553         if (obj->fenced_gpu_access) {
1554                 obj->last_fenced_seqno = seqno;
1555                 obj->last_fenced_ring = ring;
1556
1557                 /* Bump MRU to take account of the delayed flush */
1558                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1559                         struct drm_i915_fence_reg *reg;
1560
1561                         reg = &dev_priv->fence_regs[obj->fence_reg];
1562                         list_move_tail(&reg->lru_list,
1563                                        &dev_priv->mm.fence_list);
1564                 }
1565         }
1566 }
1567
1568 static void
1569 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1570 {
1571         list_del_init(&obj->ring_list);
1572         obj->last_rendering_seqno = 0;
1573         obj->last_fenced_seqno = 0;
1574 }
1575
1576 static void
1577 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1578 {
1579         struct drm_device *dev = obj->base.dev;
1580         drm_i915_private_t *dev_priv = dev->dev_private;
1581
1582         BUG_ON(!obj->active);
1583         list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1584
1585         i915_gem_object_move_off_active(obj);
1586 }
1587
1588 static void
1589 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1590 {
1591         struct drm_device *dev = obj->base.dev;
1592         struct drm_i915_private *dev_priv = dev->dev_private;
1593
1594         if (obj->pin_count != 0)
1595                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1596         else
1597                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1598
1599         BUG_ON(!list_empty(&obj->gpu_write_list));
1600         BUG_ON(!obj->active);
1601         obj->ring = NULL;
1602         obj->last_fenced_ring = NULL;
1603
1604         i915_gem_object_move_off_active(obj);
1605         obj->fenced_gpu_access = false;
1606
1607         obj->active = 0;
1608         obj->pending_gpu_write = false;
1609         drm_gem_object_unreference(&obj->base);
1610
1611         WARN_ON(i915_verify_lists(dev));
1612 }
1613
1614 /* Immediately discard the backing storage */
1615 static void
1616 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1617 {
1618         struct inode *inode;
1619
1620         /* Our goal here is to return as much memory as possible back
1621          * to the system, as we are called from OOM.
1622          * To do this we must instruct the shmfs to drop all of its
1623          * backing pages, *now*.
1624          */
1625         inode = obj->base.filp->f_path.dentry->d_inode;
1626         shmem_truncate_range(inode, 0, (loff_t)-1);
1627
1628         obj->madv = __I915_MADV_PURGED;
1629 }
1630
1631 static inline int
1632 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1633 {
1634         return obj->madv == I915_MADV_DONTNEED;
1635 }
1636
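     /*
      * Retire the GPU write domains covered by @flush_domains: each object on
      * the ring's gpu_write_list whose write domain has just been flushed is
      * moved onto the active list, tagged with the upcoming request seqno.
      */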
1637 static void
1638 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1639                                uint32_t flush_domains)
1640 {
1641         struct drm_i915_gem_object *obj, *next;
1642
1643         list_for_each_entry_safe(obj, next,
1644                                  &ring->gpu_write_list,
1645                                  gpu_write_list) {
1646                 if (obj->base.write_domain & flush_domains) {
1647                         uint32_t old_write_domain = obj->base.write_domain;
1648
1649                         obj->base.write_domain = 0;
1650                         list_del_init(&obj->gpu_write_list);
1651                         i915_gem_object_move_to_active(obj, ring,
1652                                                        i915_gem_next_request_seqno(ring));
1653
1654                         trace_i915_gem_object_change_domain(obj,
1655                                                             obj->base.read_domains,
1656                                                             old_write_domain);
1657                 }
1658         }
1659 }
1660
1661 static u32
1662 i915_gem_get_seqno(struct drm_device *dev)
1663 {
1664         drm_i915_private_t *dev_priv = dev->dev_private;
1665         u32 seqno = dev_priv->next_seqno;
1666
1667         /* reserve 0 for non-seqno */
1668         if (++dev_priv->next_seqno == 0)
1669                 dev_priv->next_seqno = 1;
1670
1671         return seqno;
1672 }
1673
1674 u32
1675 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1676 {
1677         if (ring->outstanding_lazy_request == 0)
1678                 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1679
1680         return ring->outstanding_lazy_request;
1681 }
1682
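     /*
      * Emit a request onto @ring and queue it for retirement: record the new
      * seqno and submission time, optionally associate the request with the
      * submitting file, and kick the hangcheck timer and retire worker where
      * appropriate.
      */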
1683 int
1684 i915_add_request(struct intel_ring_buffer *ring,
1685                  struct drm_file *file,
1686                  struct drm_i915_gem_request *request)
1687 {
1688         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1689         uint32_t seqno;
1690         int was_empty;
1691         int ret;
1692
1693         BUG_ON(request == NULL);
1694         seqno = i915_gem_next_request_seqno(ring);
1695
1696         ret = ring->add_request(ring, &seqno);
1697         if (ret)
1698                 return ret;
1699
1700         trace_i915_gem_request_add(ring, seqno);
1701
1702         request->seqno = seqno;
1703         request->ring = ring;
1704         request->emitted_jiffies = jiffies;
1705         was_empty = list_empty(&ring->request_list);
1706         list_add_tail(&request->list, &ring->request_list);
1707
1708         if (file) {
1709                 struct drm_i915_file_private *file_priv = file->driver_priv;
1710
1711                 spin_lock(&file_priv->mm.lock);
1712                 request->file_priv = file_priv;
1713                 list_add_tail(&request->client_list,
1714                               &file_priv->mm.request_list);
1715                 spin_unlock(&file_priv->mm.lock);
1716         }
1717
1718         ring->outstanding_lazy_request = false;
1719
1720         if (!dev_priv->mm.suspended) {
1721                 if (i915_enable_hangcheck) {
1722                         mod_timer(&dev_priv->hangcheck_timer,
1723                                   jiffies +
1724                                   msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1725                 }
1726                 if (was_empty)
1727                         queue_delayed_work(dev_priv->wq,
1728                                            &dev_priv->mm.retire_work, HZ);
1729         }
1730         return 0;
1731 }
1732
1733 static inline void
1734 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1735 {
1736         struct drm_i915_file_private *file_priv = request->file_priv;
1737
1738         if (!file_priv)
1739                 return;
1740
1741         spin_lock(&file_priv->mm.lock);
1742         if (request->file_priv) {
1743                 list_del(&request->client_list);
1744                 request->file_priv = NULL;
1745         }
1746         spin_unlock(&file_priv->mm.lock);
1747 }
1748
1749 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1750                                       struct intel_ring_buffer *ring)
1751 {
1752         while (!list_empty(&ring->request_list)) {
1753                 struct drm_i915_gem_request *request;
1754
1755                 request = list_first_entry(&ring->request_list,
1756                                            struct drm_i915_gem_request,
1757                                            list);
1758
1759                 list_del(&request->list);
1760                 i915_gem_request_remove_from_client(request);
1761                 kfree(request);
1762         }
1763
1764         while (!list_empty(&ring->active_list)) {
1765                 struct drm_i915_gem_object *obj;
1766
1767                 obj = list_first_entry(&ring->active_list,
1768                                        struct drm_i915_gem_object,
1769                                        ring_list);
1770
1771                 obj->base.write_domain = 0;
1772                 list_del_init(&obj->gpu_write_list);
1773                 i915_gem_object_move_to_inactive(obj);
1774         }
1775 }
1776
1777 static void i915_gem_reset_fences(struct drm_device *dev)
1778 {
1779         struct drm_i915_private *dev_priv = dev->dev_private;
1780         int i;
1781
1782         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1783                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1784                 struct drm_i915_gem_object *obj = reg->obj;
1785
1786                 if (!obj)
1787                         continue;
1788
1789                 if (obj->tiling_mode)
1790                         i915_gem_release_mmap(obj);
1791
1792                 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1793                 reg->obj->fenced_gpu_access = false;
1794                 reg->obj->last_fenced_seqno = 0;
1795                 reg->obj->last_fenced_ring = NULL;
1796                 i915_gem_clear_fence_reg(dev, reg);
1797         }
1798 }
1799
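     /*
      * Clean up GEM state after a GPU reset: drop every outstanding request,
      * move objects off the per-ring active lists and the flushing list,
      * strip stale GPU read domains from inactive objects and invalidate the
      * fence registers.
      */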
1800 void i915_gem_reset(struct drm_device *dev)
1801 {
1802         struct drm_i915_private *dev_priv = dev->dev_private;
1803         struct drm_i915_gem_object *obj;
1804         int i;
1805
1806         for (i = 0; i < I915_NUM_RINGS; i++)
1807                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1808
1809         /* Remove anything from the flushing lists. The GPU cache is likely
1810          * to be lost on reset along with the data, so simply move the
1811          * lost bo to the inactive list.
1812          */
1813         while (!list_empty(&dev_priv->mm.flushing_list)) {
1814                 obj = list_first_entry(&dev_priv->mm.flushing_list,
1815                                       struct drm_i915_gem_object,
1816                                       mm_list);
1817
1818                 obj->base.write_domain = 0;
1819                 list_del_init(&obj->gpu_write_list);
1820                 i915_gem_object_move_to_inactive(obj);
1821         }
1822
1823         /* Move everything out of the GPU domains to ensure we do any
1824          * necessary invalidation upon reuse.
1825          */
1826         list_for_each_entry(obj,
1827                             &dev_priv->mm.inactive_list,
1828                             mm_list)
1829         {
1830                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1831         }
1832
1833         /* The fence registers are invalidated so clear them out */
1834         i915_gem_reset_fences(dev);
1835 }
1836
1837 /**
1838  * This function clears the request list as sequence numbers are passed.
1839  */
1840 static void
1841 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1842 {
1843         uint32_t seqno;
1844         int i;
1845
1846         if (list_empty(&ring->request_list))
1847                 return;
1848
1849         WARN_ON(i915_verify_lists(ring->dev));
1850
1851         seqno = ring->get_seqno(ring);
1852
1853         for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1854                 if (seqno >= ring->sync_seqno[i])
1855                         ring->sync_seqno[i] = 0;
1856
1857         while (!list_empty(&ring->request_list)) {
1858                 struct drm_i915_gem_request *request;
1859
1860                 request = list_first_entry(&ring->request_list,
1861                                            struct drm_i915_gem_request,
1862                                            list);
1863
1864                 if (!i915_seqno_passed(seqno, request->seqno))
1865                         break;
1866
1867                 trace_i915_gem_request_retire(ring, request->seqno);
1868
1869                 list_del(&request->list);
1870                 i915_gem_request_remove_from_client(request);
1871                 kfree(request);
1872         }
1873
1874         /* Move any buffers on the active list that are no longer referenced
1875          * by the ringbuffer to the flushing/inactive lists as appropriate.
1876          */
1877         while (!list_empty(&ring->active_list)) {
1878                 struct drm_i915_gem_object *obj;
1879
1880                 obj = list_first_entry(&ring->active_list,
1881                                       struct drm_i915_gem_object,
1882                                       ring_list);
1883
1884                 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1885                         break;
1886
1887                 if (obj->base.write_domain != 0)
1888                         i915_gem_object_move_to_flushing(obj);
1889                 else
1890                         i915_gem_object_move_to_inactive(obj);
1891         }
1892
1893         if (unlikely(ring->trace_irq_seqno &&
1894                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1895                 ring->irq_put(ring);
1896                 ring->trace_irq_seqno = 0;
1897         }
1898
1899         WARN_ON(i915_verify_lists(ring->dev));
1900 }
1901
1902 void
1903 i915_gem_retire_requests(struct drm_device *dev)
1904 {
1905         drm_i915_private_t *dev_priv = dev->dev_private;
1906         int i;
1907
1908         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1909             struct drm_i915_gem_object *obj, *next;
1910
1911             /* We must be careful that during unbind() we do not
1912              * accidentally infinitely recurse into retire requests.
1913              * Currently:
1914              *   retire -> free -> unbind -> wait -> retire_ring
1915              */
1916             list_for_each_entry_safe(obj, next,
1917                                      &dev_priv->mm.deferred_free_list,
1918                                      mm_list)
1919                     i915_gem_free_object_tail(obj);
1920         }
1921
1922         for (i = 0; i < I915_NUM_RINGS; i++)
1923                 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1924 }
1925
1926 static void
1927 i915_gem_retire_work_handler(struct work_struct *work)
1928 {
1929         drm_i915_private_t *dev_priv;
1930         struct drm_device *dev;
1931         bool idle;
1932         int i;
1933
1934         dev_priv = container_of(work, drm_i915_private_t,
1935                                 mm.retire_work.work);
1936         dev = dev_priv->dev;
1937
1938         /* Come back later if the device is busy... */
1939         if (!mutex_trylock(&dev->struct_mutex)) {
1940                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1941                 return;
1942         }
1943
1944         i915_gem_retire_requests(dev);
1945
1946         /* Send a periodic flush down the ring so we don't hold onto GEM
1947          * objects indefinitely.
1948          */
1949         idle = true;
1950         for (i = 0; i < I915_NUM_RINGS; i++) {
1951                 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1952
1953                 if (!list_empty(&ring->gpu_write_list)) {
1954                         struct drm_i915_gem_request *request;
1955                         int ret;
1956
1957                         ret = i915_gem_flush_ring(ring,
1958                                                   0, I915_GEM_GPU_DOMAINS);
1959                         request = kzalloc(sizeof(*request), GFP_KERNEL);
1960                         if (ret || request == NULL ||
1961                             i915_add_request(ring, NULL, request))
1962                             kfree(request);
1963                 }
1964
1965                 idle &= list_empty(&ring->request_list);
1966         }
1967
1968         if (!dev_priv->mm.suspended && !idle)
1969                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1970
1971         mutex_unlock(&dev->struct_mutex);
1972 }
1973
1974 /**
1975  * Waits for a sequence number to be signaled, and cleans up the
1976  * request and object lists appropriately for that event.
1977  */
1978 int
1979 i915_wait_request(struct intel_ring_buffer *ring,
1980                   uint32_t seqno)
1981 {
1982         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1983         u32 ier;
1984         int ret = 0;
1985
1986         BUG_ON(seqno == 0);
1987
1988         if (atomic_read(&dev_priv->mm.wedged)) {
1989                 struct completion *x = &dev_priv->error_completion;
1990                 bool recovery_complete;
1991                 unsigned long flags;
1992
1993                 /* Give the error handler a chance to run. */
1994                 spin_lock_irqsave(&x->wait.lock, flags);
1995                 recovery_complete = x->done > 0;
1996                 spin_unlock_irqrestore(&x->wait.lock, flags);
1997
1998                 return recovery_complete ? -EIO : -EAGAIN;
1999         }
2000
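             /* The target seqno may belong to the ring's outstanding lazy
              * request, i.e. one that has not actually been emitted yet;
              * turn it into a real request before waiting on it.
              */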
2001         if (seqno == ring->outstanding_lazy_request) {
2002                 struct drm_i915_gem_request *request;
2003
2004                 request = kzalloc(sizeof(*request), GFP_KERNEL);
2005                 if (request == NULL)
2006                         return -ENOMEM;
2007
2008                 ret = i915_add_request(ring, NULL, request);
2009                 if (ret) {
2010                         kfree(request);
2011                         return ret;
2012                 }
2013
2014                 seqno = request->seqno;
2015         }
2016
2017         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2018                 if (HAS_PCH_SPLIT(ring->dev))
2019                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2020                 else
2021                         ier = I915_READ(IER);
2022                 if (!ier) {
2023                         DRM_ERROR("something (likely vbetool) disabled "
2024                                   "interrupts, re-enabling\n");
2025                         ring->dev->driver->irq_preinstall(ring->dev);
2026                         ring->dev->driver->irq_postinstall(ring->dev);
2027                 }
2028
2029                 trace_i915_gem_request_wait_begin(ring, seqno);
2030
2031                 ring->waiting_seqno = seqno;
2032                 if (ring->irq_get(ring)) {
2033                         if (dev_priv->mm.interruptible)
2034                                 ret = wait_event_interruptible(ring->irq_queue,
2035                                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
2036                                                                || atomic_read(&dev_priv->mm.wedged));
2037                         else
2038                                 wait_event(ring->irq_queue,
2039                                            i915_seqno_passed(ring->get_seqno(ring), seqno)
2040                                            || atomic_read(&dev_priv->mm.wedged));
2041
2042                         ring->irq_put(ring);
2043                 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2044                                                       seqno) ||
2045                                     atomic_read(&dev_priv->mm.wedged), 3000))
2046                         ret = -EBUSY;
2047                 ring->waiting_seqno = 0;
2048
2049                 trace_i915_gem_request_wait_end(ring, seqno);
2050         }
2051         if (atomic_read(&dev_priv->mm.wedged))
2052                 ret = -EAGAIN;
2053
2054         if (ret && ret != -ERESTARTSYS)
2055                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2056                           __func__, ret, seqno, ring->get_seqno(ring),
2057                           dev_priv->next_seqno);
2058
2059         /* Directly dispatch request retiring.  While we have the work queue
2060          * to handle this, the waiter on a request often wants an associated
2061          * buffer to have made it to the inactive list, and we would need
2062          * a separate wait queue to handle that.
2063          */
2064         if (ret == 0)
2065                 i915_gem_retire_requests_ring(ring);
2066
2067         return ret;
2068 }
2069
2070 /**
2071  * Ensures that all rendering to the object has completed and the object is
2072  * safe to unbind from the GTT or access from the CPU.
2073  */
2074 int
2075 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2076 {
2077         int ret;
2078
2079         /* This function only exists to support waiting for existing rendering,
2080          * not for emitting required flushes.
2081          */
2082         BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2083
2084         /* If there is rendering queued on the buffer, wait for it to
2085          * complete.
2086          */
2087         if (obj->active) {
2088                 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2089                 if (ret)
2090                         return ret;
2091         }
2092
2093         return 0;
2094 }
2095
2096 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2097 {
2098         u32 old_write_domain, old_read_domains;
2099
2100         /* Act as a barrier for all accesses through the GTT */
2101         mb();
2102
2103         /* Force a pagefault for domain tracking on next user access */
2104         i915_gem_release_mmap(obj);
2105
2106         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2107                 return;
2108
2109         old_read_domains = obj->base.read_domains;
2110         old_write_domain = obj->base.write_domain;
2111
2112         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2113         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2114
2115         trace_i915_gem_object_change_domain(obj,
2116                                             old_read_domains,
2117                                             old_write_domain);
2118 }
2119
2120 /**
2121  * Unbinds an object from the GTT aperture.
2122  */
2123 int
2124 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2125 {
2126         int ret = 0;
2127
2128         if (obj->gtt_space == NULL)
2129                 return 0;
2130
2131         if (obj->pin_count != 0) {
2132                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2133                 return -EINVAL;
2134         }
2135
2136         ret = i915_gem_object_finish_gpu(obj);
2137         if (ret == -ERESTARTSYS)
2138                 return ret;
2139         /* Continue on if we fail due to EIO, the GPU is hung so we
2140          * should be safe and we need to cleanup or else we might
2141          * cause memory corruption through use-after-free.
2142          */
2143
2144         i915_gem_object_finish_gtt(obj);
2145
2146         /* Move the object to the CPU domain to ensure that
2147          * any possible CPU writes while it's not in the GTT
2148          * are flushed when we go to remap it.
2149          */
2150         if (ret == 0)
2151                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2152         if (ret == -ERESTARTSYS)
2153                 return ret;
2154         if (ret) {
2155                 /* In the event of a disaster, abandon all caches and
2156                  * hope for the best.
2157                  */
2158                 i915_gem_clflush_object(obj);
2159                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2160         }
2161
2162         /* release the fence reg _after_ flushing */
2163         ret = i915_gem_object_put_fence(obj);
2164         if (ret == -ERESTARTSYS)
2165                 return ret;
2166
2167         trace_i915_gem_object_unbind(obj);
2168
2169         i915_gem_gtt_unbind_object(obj);
2170         i915_gem_object_put_pages_gtt(obj);
2171
2172         list_del_init(&obj->gtt_list);
2173         list_del_init(&obj->mm_list);
2174         /* Avoid an unnecessary call to unbind on rebind. */
2175         obj->map_and_fenceable = true;
2176
2177         drm_mm_put_block(obj->gtt_space);
2178         obj->gtt_space = NULL;
2179         obj->gtt_offset = 0;
2180
2181         if (i915_gem_object_is_purgeable(obj))
2182                 i915_gem_object_truncate(obj);
2183
2184         return ret;
2185 }
2186
2187 int
2188 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2189                     uint32_t invalidate_domains,
2190                     uint32_t flush_domains)
2191 {
2192         int ret;
2193
2194         if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2195                 return 0;
2196
2197         trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2198
2199         ret = ring->flush(ring, invalidate_domains, flush_domains);
2200         if (ret)
2201                 return ret;
2202
2203         if (flush_domains & I915_GEM_GPU_DOMAINS)
2204                 i915_gem_process_flushing_list(ring, flush_domains);
2205
2206         return 0;
2207 }
2208
2209 static int i915_ring_idle(struct intel_ring_buffer *ring)
2210 {
2211         int ret;
2212
2213         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2214                 return 0;
2215
2216         if (!list_empty(&ring->gpu_write_list)) {
2217                 ret = i915_gem_flush_ring(ring,
2218                                     I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2219                 if (ret)
2220                         return ret;
2221         }
2222
2223         return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2224 }
2225
2226 int
2227 i915_gpu_idle(struct drm_device *dev)
2228 {
2229         drm_i915_private_t *dev_priv = dev->dev_private;
2230         int ret, i;
2231
2232         /* Flush everything onto the inactive list. */
2233         for (i = 0; i < I915_NUM_RINGS; i++) {
2234                 ret = i915_ring_idle(&dev_priv->ring[i]);
2235                 if (ret)
2236                         return ret;
2237         }
2238
2239         return 0;
2240 }
2241
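     /*
      * The 965+/SNB fence register values assembled below pack the address of
      * the last page of the fenced range into the upper dword, the start
      * address into the lower dword, the pitch in 128-byte units (minus one),
      * a Y-tiling bit and the valid bit.
      */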
2242 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2243                                        struct intel_ring_buffer *pipelined)
2244 {
2245         struct drm_device *dev = obj->base.dev;
2246         drm_i915_private_t *dev_priv = dev->dev_private;
2247         u32 size = obj->gtt_space->size;
2248         int regnum = obj->fence_reg;
2249         uint64_t val;
2250
2251         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2252                          0xfffff000) << 32;
2253         val |= obj->gtt_offset & 0xfffff000;
2254         val |= (uint64_t)((obj->stride / 128) - 1) <<
2255                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2256
2257         if (obj->tiling_mode == I915_TILING_Y)
2258                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2259         val |= I965_FENCE_REG_VALID;
2260
2261         if (pipelined) {
2262                 int ret = intel_ring_begin(pipelined, 6);
2263                 if (ret)
2264                         return ret;
2265
2266                 intel_ring_emit(pipelined, MI_NOOP);
2267                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2268                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2269                 intel_ring_emit(pipelined, (u32)val);
2270                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2271                 intel_ring_emit(pipelined, (u32)(val >> 32));
2272                 intel_ring_advance(pipelined);
2273         } else
2274                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2275
2276         return 0;
2277 }
2278
2279 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2280                                 struct intel_ring_buffer *pipelined)
2281 {
2282         struct drm_device *dev = obj->base.dev;
2283         drm_i915_private_t *dev_priv = dev->dev_private;
2284         u32 size = obj->gtt_space->size;
2285         int regnum = obj->fence_reg;
2286         uint64_t val;
2287
2288         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2289                     0xfffff000) << 32;
2290         val |= obj->gtt_offset & 0xfffff000;
2291         val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2292         if (obj->tiling_mode == I915_TILING_Y)
2293                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2294         val |= I965_FENCE_REG_VALID;
2295
2296         if (pipelined) {
2297                 int ret = intel_ring_begin(pipelined, 6);
2298                 if (ret)
2299                         return ret;
2300
2301                 intel_ring_emit(pipelined, MI_NOOP);
2302                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2303                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2304                 intel_ring_emit(pipelined, (u32)val);
2305                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2306                 intel_ring_emit(pipelined, (u32)(val >> 32));
2307                 intel_ring_advance(pipelined);
2308         } else
2309                 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2310
2311         return 0;
2312 }
2313
2314 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2315                                 struct intel_ring_buffer *pipelined)
2316 {
2317         struct drm_device *dev = obj->base.dev;
2318         drm_i915_private_t *dev_priv = dev->dev_private;
2319         u32 size = obj->gtt_space->size;
2320         u32 fence_reg, val, pitch_val;
2321         int tile_width;
2322
2323         if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2324                  (size & -size) != size ||
2325                  (obj->gtt_offset & (size - 1)),
2326                  "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2327                  obj->gtt_offset, obj->map_and_fenceable, size))
2328                 return -EINVAL;
2329
2330         if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2331                 tile_width = 128;
2332         else
2333                 tile_width = 512;
2334
2335         /* Note: pitch better be a power of two tile widths */
2336         pitch_val = obj->stride / tile_width;
2337         pitch_val = ffs(pitch_val) - 1;
2338
2339         val = obj->gtt_offset;
2340         if (obj->tiling_mode == I915_TILING_Y)
2341                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2342         val |= I915_FENCE_SIZE_BITS(size);
2343         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2344         val |= I830_FENCE_REG_VALID;
2345
2346         fence_reg = obj->fence_reg;
2347         if (fence_reg < 8)
2348                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2349         else
2350                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2351
2352         if (pipelined) {
2353                 int ret = intel_ring_begin(pipelined, 4);
2354                 if (ret)
2355                         return ret;
2356
2357                 intel_ring_emit(pipelined, MI_NOOP);
2358                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2359                 intel_ring_emit(pipelined, fence_reg);
2360                 intel_ring_emit(pipelined, val);
2361                 intel_ring_advance(pipelined);
2362         } else
2363                 I915_WRITE(fence_reg, val);
2364
2365         return 0;
2366 }
2367
2368 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2369                                 struct intel_ring_buffer *pipelined)
2370 {
2371         struct drm_device *dev = obj->base.dev;
2372         drm_i915_private_t *dev_priv = dev->dev_private;
2373         u32 size = obj->gtt_space->size;
2374         int regnum = obj->fence_reg;
2375         uint32_t val;
2376         uint32_t pitch_val;
2377
2378         if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2379                  (size & -size) != size ||
2380                  (obj->gtt_offset & (size - 1)),
2381                  "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2382                  obj->gtt_offset, size))
2383                 return -EINVAL;
2384
2385         pitch_val = obj->stride / 128;
2386         pitch_val = ffs(pitch_val) - 1;
2387
2388         val = obj->gtt_offset;
2389         if (obj->tiling_mode == I915_TILING_Y)
2390                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2391         val |= I830_FENCE_SIZE_BITS(size);
2392         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2393         val |= I830_FENCE_REG_VALID;
2394
2395         if (pipelined) {
2396                 int ret = intel_ring_begin(pipelined, 4);
2397                 if (ret)
2398                         return ret;
2399
2400                 intel_ring_emit(pipelined, MI_NOOP);
2401                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2402                 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2403                 intel_ring_emit(pipelined, val);
2404                 intel_ring_advance(pipelined);
2405         } else
2406                 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2407
2408         return 0;
2409 }
2410
2411 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2412 {
2413         return i915_seqno_passed(ring->get_seqno(ring), seqno);
2414 }
2415
2416 static int
2417 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2418                             struct intel_ring_buffer *pipelined)
2419 {
2420         int ret;
2421
2422         if (obj->fenced_gpu_access) {
2423                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2424                         ret = i915_gem_flush_ring(obj->last_fenced_ring,
2425                                                   0, obj->base.write_domain);
2426                         if (ret)
2427                                 return ret;
2428                 }
2429
2430                 obj->fenced_gpu_access = false;
2431         }
2432
2433         if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2434                 if (!ring_passed_seqno(obj->last_fenced_ring,
2435                                        obj->last_fenced_seqno)) {
2436                         ret = i915_wait_request(obj->last_fenced_ring,
2437                                                 obj->last_fenced_seqno);
2438                         if (ret)
2439                                 return ret;
2440                 }
2441
2442                 obj->last_fenced_seqno = 0;
2443                 obj->last_fenced_ring = NULL;
2444         }
2445
2446         /* Ensure that all CPU reads are completed before installing a fence
2447          * and all writes before removing the fence.
2448          */
2449         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2450                 mb();
2451
2452         return 0;
2453 }
2454
2455 int
2456 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2457 {
2458         int ret;
2459
2460         if (obj->tiling_mode)
2461                 i915_gem_release_mmap(obj);
2462
2463         ret = i915_gem_object_flush_fence(obj, NULL);
2464         if (ret)
2465                 return ret;
2466
2467         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2468                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2469                 i915_gem_clear_fence_reg(obj->base.dev,
2470                                          &dev_priv->fence_regs[obj->fence_reg]);
2471
2472                 obj->fence_reg = I915_FENCE_REG_NONE;
2473         }
2474
2475         return 0;
2476 }
2477
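     /*
      * Pick a fence register to (re)use: prefer a completely free register,
      * otherwise steal the least-recently-used unpinned one, favouring a
      * register whose last user was fenced on @pipelined (or on no ring at
      * all) so that releasing it is less likely to stall.
      */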
2478 static struct drm_i915_fence_reg *
2479 i915_find_fence_reg(struct drm_device *dev,
2480                     struct intel_ring_buffer *pipelined)
2481 {
2482         struct drm_i915_private *dev_priv = dev->dev_private;
2483         struct drm_i915_fence_reg *reg, *first, *avail;
2484         int i;
2485
2486         /* First try to find a free reg */
2487         avail = NULL;
2488         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2489                 reg = &dev_priv->fence_regs[i];
2490                 if (!reg->obj)
2491                         return reg;
2492
2493                 if (!reg->obj->pin_count)
2494                         avail = reg;
2495         }
2496
2497         if (avail == NULL)
2498                 return NULL;
2499
2500         /* None available, try to steal one or wait for a user to finish */
2501         avail = first = NULL;
2502         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2503                 if (reg->obj->pin_count)
2504                         continue;
2505
2506                 if (first == NULL)
2507                         first = reg;
2508
2509                 if (!pipelined ||
2510                     !reg->obj->last_fenced_ring ||
2511                     reg->obj->last_fenced_ring == pipelined) {
2512                         avail = reg;
2513                         break;
2514                 }
2515         }
2516
2517         if (avail == NULL)
2518                 avail = first;
2519
2520         return avail;
2521 }
2522
2523 static void i915_gem_write_fence__ipi(void *data)
2524 {
2525         wbinvd();
2526 }
2527
2528 /**
2529  * i915_gem_object_get_fence - set up a fence reg for an object
2530  * @obj: object to map through a fence reg
2531  * @pipelined: ring on which to queue the change, or NULL for CPU access
2533  *
2534  * When mapping objects through the GTT, userspace wants to be able to write
2535  * to them without having to worry about swizzling if the object is tiled.
2536  *
2537  * This function walks the fence regs looking for a free one for @obj,
2538  * stealing one if it can't find any.
2539  *
2540  * It then sets up the reg based on the object's properties: address, pitch
2541  * and tiling format.
2542  */
2543 int
2544 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2545                           struct intel_ring_buffer *pipelined)
2546 {
2547         struct drm_device *dev = obj->base.dev;
2548         struct drm_i915_private *dev_priv = dev->dev_private;
2549         struct drm_i915_fence_reg *reg;
2550         int ret;
2551
2552         /* XXX disable pipelining. There are bugs. Shocking. */
2553         pipelined = NULL;
2554
2555         /* Just update our place in the LRU if our fence is getting reused. */
2556         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2557                 reg = &dev_priv->fence_regs[obj->fence_reg];
2558                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2559
2560                 if (obj->tiling_changed) {
2561                         ret = i915_gem_object_flush_fence(obj, pipelined);
2562                         if (ret)
2563                                 return ret;
2564
2565                         if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2566                                 pipelined = NULL;
2567
2568                         if (pipelined) {
2569                                 reg->setup_seqno =
2570                                         i915_gem_next_request_seqno(pipelined);
2571                                 obj->last_fenced_seqno = reg->setup_seqno;
2572                                 obj->last_fenced_ring = pipelined;
2573                         }
2574
2575                         goto update;
2576                 }
2577
2578                 if (!pipelined) {
2579                         if (reg->setup_seqno) {
2580                                 if (!ring_passed_seqno(obj->last_fenced_ring,
2581                                                        reg->setup_seqno)) {
2582                                         ret = i915_wait_request(obj->last_fenced_ring,
2583                                                                 reg->setup_seqno);
2584                                         if (ret)
2585                                                 return ret;
2586                                 }
2587
2588                                 reg->setup_seqno = 0;
2589                         }
2590                 } else if (obj->last_fenced_ring &&
2591                            obj->last_fenced_ring != pipelined) {
2592                         ret = i915_gem_object_flush_fence(obj, pipelined);
2593                         if (ret)
2594                                 return ret;
2595                 }
2596
2597                 return 0;
2598         }
2599
2600         reg = i915_find_fence_reg(dev, pipelined);
2601         if (reg == NULL)
2602                 return -ENOSPC;
2603
2604         ret = i915_gem_object_flush_fence(obj, pipelined);
2605         if (ret)
2606                 return ret;
2607
2608         if (reg->obj) {
2609                 struct drm_i915_gem_object *old = reg->obj;
2610
2611                 drm_gem_object_reference(&old->base);
2612
2613                 if (old->tiling_mode)
2614                         i915_gem_release_mmap(old);
2615
2616                 ret = i915_gem_object_flush_fence(old, pipelined);
2617                 if (ret) {
2618                         drm_gem_object_unreference(&old->base);
2619                         return ret;
2620                 }
2621
2622                 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2623                         pipelined = NULL;
2624
2625                 old->fence_reg = I915_FENCE_REG_NONE;
2626                 old->last_fenced_ring = pipelined;
2627                 old->last_fenced_seqno =
2628                         pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2629
2630                 drm_gem_object_unreference(&old->base);
2631         } else if (obj->last_fenced_seqno == 0)
2632                 pipelined = NULL;
2633
2634         reg->obj = obj;
2635         list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2636         obj->fence_reg = reg - dev_priv->fence_regs;
2637         obj->last_fenced_ring = pipelined;
2638
2639         reg->setup_seqno =
2640                 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2641         obj->last_fenced_seqno = reg->setup_seqno;
2642
2643 update:
2644         obj->tiling_changed = false;
2645         switch (INTEL_INFO(dev)->gen) {
2646         case 7:
2647         case 6:
2648                 /* In order to fully serialize access to the fenced region and
2649                  * the update to the fence register we need to take extreme
2650                  * measures on SNB+. In theory, the write to the fence register
2651                  * flushes all memory transactions before, and coupled with the
2652                  * mb() placed around the register write we serialise all memory
2653                  * operations with respect to the changes in the tiler. Yet, on
2654                  * SNB+ we need to take a step further and emit an explicit wbinvd()
2655                  * on each processor in order to manually flush all memory
2656                  * transactions before updating the fence register.
2657                  */
2658                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2659                 ret = sandybridge_write_fence_reg(obj, pipelined);
2660                 break;
2661         case 5:
2662         case 4:
2663                 ret = i965_write_fence_reg(obj, pipelined);
2664                 break;
2665         case 3:
2666                 ret = i915_write_fence_reg(obj, pipelined);
2667                 break;
2668         case 2:
2669                 ret = i830_write_fence_reg(obj, pipelined);
2670                 break;
2671         }
2672
2673         return ret;
2674 }
2675
2676 /**
2677  * i915_gem_clear_fence_reg - clear out fence register info
2678  * @dev: DRM device
      * @reg: fence register to clear
2679  *
2680  * Zeroes out the fence register itself and clears out the associated
2681  * data structures in dev_priv and obj.
2682  */
2683 static void
2684 i915_gem_clear_fence_reg(struct drm_device *dev,
2685                          struct drm_i915_fence_reg *reg)
2686 {
2687         drm_i915_private_t *dev_priv = dev->dev_private;
2688         uint32_t fence_reg = reg - dev_priv->fence_regs;
2689
2690         switch (INTEL_INFO(dev)->gen) {
2691         case 7:
2692         case 6:
2693                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2694                 break;
2695         case 5:
2696         case 4:
2697                 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2698                 break;
2699         case 3:
2700                 if (fence_reg >= 8)
2701                         fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2702                 else
2703         case 2:
2704                         fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2705
2706                 I915_WRITE(fence_reg, 0);
2707                 break;
2708         }
2709
2710         list_del_init(&reg->lru_list);
2711         reg->obj = NULL;
2712         reg->setup_seqno = 0;
2713 }
2714
2715 /**
2716  * Finds free space in the GTT aperture and binds the object there.
2717  */
2718 static int
2719 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2720                             unsigned alignment,
2721                             bool map_and_fenceable)
2722 {
2723         struct drm_device *dev = obj->base.dev;
2724         drm_i915_private_t *dev_priv = dev->dev_private;
2725         struct drm_mm_node *free_space;
2726         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2727         u32 size, fence_size, fence_alignment, unfenced_alignment;
2728         bool mappable, fenceable;
2729         int ret;
2730
2731         if (obj->madv != I915_MADV_WILLNEED) {
2732                 DRM_ERROR("Attempting to bind a purgeable object\n");
2733                 return -EINVAL;
2734         }
2735
2736         fence_size = i915_gem_get_gtt_size(dev,
2737                                            obj->base.size,
2738                                            obj->tiling_mode);
2739         fence_alignment = i915_gem_get_gtt_alignment(dev,
2740                                                      obj->base.size,
2741                                                      obj->tiling_mode);
2742         unfenced_alignment =
2743                 i915_gem_get_unfenced_gtt_alignment(dev,
2744                                                     obj->base.size,
2745                                                     obj->tiling_mode);
2746
2747         if (alignment == 0)
2748                 alignment = map_and_fenceable ? fence_alignment :
2749                                                 unfenced_alignment;
2750         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2751                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2752                 return -EINVAL;
2753         }
2754
2755         size = map_and_fenceable ? fence_size : obj->base.size;
2756
2757         /* If the object is bigger than the entire aperture, reject it early
2758          * before evicting everything in a vain attempt to find space.
2759          */
2760         if (obj->base.size >
2761             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2762                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2763                 return -E2BIG;
2764         }
2765
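             /* Look for a suitably sized and aligned hole, restricted to the
              * mappable aperture when required.  If no hole is found, evict
              * something and retry; if the backing pages cannot be allocated,
              * retry after clearing the GTT and, as a last resort, without
              * the NORETRY/NOWARN gfp flags.
              */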
2766  search_free:
2767         if (map_and_fenceable)
2768                 free_space =
2769                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2770                                                     size, alignment, 0,
2771                                                     dev_priv->mm.gtt_mappable_end,
2772                                                     0);
2773         else
2774                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2775                                                 size, alignment, 0);
2776
2777         if (free_space != NULL) {
2778                 if (map_and_fenceable)
2779                         obj->gtt_space =
2780                                 drm_mm_get_block_range_generic(free_space,
2781                                                                size, alignment, 0,
2782                                                                dev_priv->mm.gtt_mappable_end,
2783                                                                0);
2784                 else
2785                         obj->gtt_space =
2786                                 drm_mm_get_block(free_space, size, alignment);
2787         }
2788         if (obj->gtt_space == NULL) {
2789                 /* If the gtt is empty and we're still having trouble
2790                  * fitting our object in, we're out of memory.
2791                  */
2792                 ret = i915_gem_evict_something(dev, size, alignment,
2793                                                map_and_fenceable);
2794                 if (ret)
2795                         return ret;
2796
2797                 goto search_free;
2798         }
2799
2800         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2801         if (ret) {
2802                 drm_mm_put_block(obj->gtt_space);
2803                 obj->gtt_space = NULL;
2804
2805                 if (ret == -ENOMEM) {
2806                         /* first try to reclaim some memory by clearing the GTT */
2807                         ret = i915_gem_evict_everything(dev, false);
2808                         if (ret) {
2809                                 /* now try to shrink everyone else */
2810                                 if (gfpmask) {
2811                                         gfpmask = 0;
2812                                         goto search_free;
2813                                 }
2814
2815                                 return -ENOMEM;
2816                         }
2817
2818                         goto search_free;
2819                 }
2820
2821                 return ret;
2822         }
2823
2824         ret = i915_gem_gtt_bind_object(obj);
2825         if (ret) {
2826                 i915_gem_object_put_pages_gtt(obj);
2827                 drm_mm_put_block(obj->gtt_space);
2828                 obj->gtt_space = NULL;
2829
2830                 if (i915_gem_evict_everything(dev, false))
2831                         return ret;
2832
2833                 goto search_free;
2834         }
2835
2836         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2837         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2838
2839         /* Assert that the object is not currently in any GPU domain. As it
2840          * wasn't in the GTT, there shouldn't be any way it could have been in
2841          * a GPU cache
2842          */
2843         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2844         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2845
2846         obj->gtt_offset = obj->gtt_space->start;
2847
2848         fenceable =
2849                 obj->gtt_space->size == fence_size &&
2850                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2851
2852         mappable =
2853                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2854
2855         obj->map_and_fenceable = mappable && fenceable;
2856
2857         trace_i915_gem_object_bind(obj, map_and_fenceable);
2858         return 0;
2859 }
2860
2861 void
2862 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2863 {
2864         /* If we don't have a page list set up, then we're not pinned
2865          * to GPU, and we can ignore the cache flush because it'll happen
2866          * again at bind time.
2867          */
2868         if (obj->pages == NULL)
2869                 return;
2870
2871         /* If the GPU is snooping the contents of the CPU cache,
2872          * we do not need to manually clear the CPU cache lines.  However,
2873          * the caches are only snooped when the render cache is
2874          * flushed/invalidated.  As we always have to emit invalidations
2875          * and flushes when moving into and out of the RENDER domain, correct
2876          * snooping behaviour occurs naturally as the result of our domain
2877          * tracking.
2878          */
2879         if (obj->cache_level != I915_CACHE_NONE)
2880                 return;
2881
2882         trace_i915_gem_object_clflush(obj);
2883
2884         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2885 }
2886
2887 /** Flushes any GPU write domain for the object if it's dirty. */
2888 static int
2889 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2890 {
2891         if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2892                 return 0;
2893
2894         /* Queue the GPU write cache flushing we need. */
2895         return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2896 }
2897
2898 /** Flushes the GTT write domain for the object if it's dirty. */
2899 static void
2900 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2901 {
2902         uint32_t old_write_domain;
2903
2904         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2905                 return;
2906
2907         /* No actual flushing is required for the GTT write domain.  Writes
2908          * to it immediately go to main memory as far as we know, so there's
2909          * no chipset flush.  It also doesn't land in render cache.
2910          *
2911          * However, we do have to enforce the order so that all writes through
2912          * the GTT land before any writes to the device, such as updates to
2913          * the GATT itself.
2914          */
2915         wmb();
2916
2917         old_write_domain = obj->base.write_domain;
2918         obj->base.write_domain = 0;
2919
2920         trace_i915_gem_object_change_domain(obj,
2921                                             obj->base.read_domains,
2922                                             old_write_domain);
2923 }
2924
2925 /** Flushes the CPU write domain for the object if it's dirty. */
2926 static void
2927 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2928 {
2929         uint32_t old_write_domain;
2930
2931         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2932                 return;
2933
2934         i915_gem_clflush_object(obj);
2935         intel_gtt_chipset_flush();
2936         old_write_domain = obj->base.write_domain;
2937         obj->base.write_domain = 0;
2938
2939         trace_i915_gem_object_change_domain(obj,
2940                                             obj->base.read_domains,
2941                                             old_write_domain);
2942 }
2943
2944 /**
2945  * Moves a single object to the GTT read domain, and possibly the write domain.
2946  *
2947  * This function returns when the move is complete, including waiting on
2948  * flushes to occur.
2949  */
2950 int
2951 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2952 {
2953         uint32_t old_write_domain, old_read_domains;
2954         int ret;
2955
2956         /* Not valid to be called on unbound objects. */
2957         if (obj->gtt_space == NULL)
2958                 return -EINVAL;
2959
2960         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2961                 return 0;
2962
2963         ret = i915_gem_object_flush_gpu_write_domain(obj);
2964         if (ret)
2965                 return ret;
2966
2967         if (obj->pending_gpu_write || write) {
2968                 ret = i915_gem_object_wait_rendering(obj);
2969                 if (ret)
2970                         return ret;
2971         }
2972
2973         i915_gem_object_flush_cpu_write_domain(obj);
2974
2975         old_write_domain = obj->base.write_domain;
2976         old_read_domains = obj->base.read_domains;
2977
2978         /* It should now be out of any other write domains, and we can update
2979          * the domain values for our changes.
2980          */
2981         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2982         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2983         if (write) {
2984                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2985                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2986                 obj->dirty = 1;
2987         }
2988
2989         trace_i915_gem_object_change_domain(obj,
2990                                             old_read_domains,
2991                                             old_write_domain);
2992
2993         return 0;
2994 }
2995
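     /*
      * Changes the cache level (PTE caching attributes) used for the object's
      * GTT mapping.  If the object is currently bound, this waits for any GPU
      * access to finish, drops fences on pre-Sandybridge hardware (which cannot
      * combine fences with snooped memory) and rebinds the pages with the new
      * cache level.  Pinned objects cannot be changed and return -EBUSY.
      */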
2996 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2997                                     enum i915_cache_level cache_level)
2998 {
2999         int ret;
3000
3001         if (obj->cache_level == cache_level)
3002                 return 0;
3003
3004         if (obj->pin_count) {
3005                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3006                 return -EBUSY;
3007         }
3008
3009         if (obj->gtt_space) {
3010                 ret = i915_gem_object_finish_gpu(obj);
3011                 if (ret)
3012                         return ret;
3013
3014                 i915_gem_object_finish_gtt(obj);
3015
3016                 /* Before SandyBridge, you could not use tiling or fence
3017                  * registers with snooped memory, so relinquish any fences
3018                  * currently pointing to our region in the aperture.
3019                  */
3020                 if (INTEL_INFO(obj->base.dev)->gen < 6) {
3021                         ret = i915_gem_object_put_fence(obj);
3022                         if (ret)
3023                                 return ret;
3024                 }
3025
3026                 i915_gem_gtt_rebind_object(obj, cache_level);
3027         }
3028
3029         if (cache_level == I915_CACHE_NONE) {
3030                 u32 old_read_domains, old_write_domain;
3031
3032                 /* If we're coming from an LLC-cached level, then we haven't
3033                  * actually been tracking whether the data is in the
3034                  * CPU cache or not, since we only allow one bit set
3035                  * in obj->write_domain and have been skipping the clflushes.
3036                  * Just set it to the CPU cache for now.
3037                  */
3038                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3039                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3040
3041                 old_read_domains = obj->base.read_domains;
3042                 old_write_domain = obj->base.write_domain;
3043
3044                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3045                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3046
3047                 trace_i915_gem_object_change_domain(obj,
3048                                                     old_read_domains,
3049                                                     old_write_domain);
3050         }
3051
3052         obj->cache_level = cache_level;
3053         return 0;
3054 }
3055
3056 /*
3057  * Prepare buffer for display plane (scanout, cursors, etc.).
3058  * Can be called from an uninterruptible phase (modesetting) and allows
3059  * any flushes to be pipelined (for pageflips).
3060  *
3061  * For the display plane, we want to be in the GTT but out of any write
3062  * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3063  * ability to pipeline the waits, pinning and any additional subtleties
3064  * that may differentiate the display plane from ordinary buffers.
3065  */
3066 int
3067 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3068                                      u32 alignment,
3069                                      struct intel_ring_buffer *pipelined)
3070 {
3071         u32 old_read_domains, old_write_domain;
3072         int ret;
3073
3074         ret = i915_gem_object_flush_gpu_write_domain(obj);
3075         if (ret)
3076                 return ret;
3077
3078         if (pipelined != obj->ring) {
3079                 ret = i915_gem_object_wait_rendering(obj);
3080                 if (ret == -ERESTARTSYS)
3081                         return ret;
3082         }
3083
3084         /* The display engine is not coherent with the LLC cache on gen6.  As
3085          * a result, we make sure that the pinning that is about to occur is
3086          * done with uncached PTEs. This is the lowest common denominator
3087          * for all chipsets.
3088          *
3089          * However for gen6+, we could do better by using the GFDT bit instead
3090          * of uncaching, which would allow us to flush all the LLC-cached data
3091          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3092          */
3093         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3094         if (ret)
3095                 return ret;
3096
3097         /* As the user may map the buffer once pinned in the display plane
3098          * (e.g. libkms for the bootup splash), we have to ensure that we
3099          * always use map_and_fenceable for all scanout buffers.
3100          */
3101         ret = i915_gem_object_pin(obj, alignment, true);
3102         if (ret)
3103                 return ret;
3104
3105         i915_gem_object_flush_cpu_write_domain(obj);
3106
3107         old_write_domain = obj->base.write_domain;
3108         old_read_domains = obj->base.read_domains;
3109
3110         /* It should now be out of any other write domains, and we can update
3111          * the domain values for our changes.
3112          */
3113         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3114         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3115
3116         trace_i915_gem_object_change_domain(obj,
3117                                             old_read_domains,
3118                                             old_write_domain);
3119
3120         return 0;
3121 }
3122
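     /*
      * Drops all GPU read/write domains for the object: flushes any pending
      * GPU writes, waits for outstanding rendering and then clears the GPU
      * read domains so that the GPU caches/TLBs are invalidated on next use.
      */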
3123 int
3124 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3125 {
3126         int ret;
3127
3128         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3129                 return 0;
3130
3131         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3132                 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3133                 if (ret)
3134                         return ret;
3135         }
3136
3137         ret = i915_gem_object_wait_rendering(obj);
3138         if (ret)
3139                 return ret;
3140
3141         /* Ensure that we invalidate the GPU's caches and TLBs. */
3142         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3143         return 0;
3144 }
3145
3146 /**
3147  * Moves a single object to the CPU read domain, and possibly the write domain.
3148  *
3149  * This function returns when the move is complete, including waiting on
3150  * flushes to occur.
3151  */
3152 static int
3153 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3154 {
3155         uint32_t old_write_domain, old_read_domains;
3156         int ret;
3157
3158         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3159                 return 0;
3160
3161         ret = i915_gem_object_flush_gpu_write_domain(obj);
3162         if (ret)
3163                 return ret;
3164
3165         ret = i915_gem_object_wait_rendering(obj);
3166         if (ret)
3167                 return ret;
3168
3169         i915_gem_object_flush_gtt_write_domain(obj);
3170
3171         /* If we have a partially-valid cache of the object in the CPU,
3172          * finish invalidating it and free the per-page flags.
3173          */
3174         i915_gem_object_set_to_full_cpu_read_domain(obj);
3175
3176         old_write_domain = obj->base.write_domain;
3177         old_read_domains = obj->base.read_domains;
3178
3179         /* Flush the CPU cache if it's still invalid. */
3180         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3181                 i915_gem_clflush_object(obj);
3182
3183                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3184         }
3185
3186         /* It should now be out of any other write domains, and we can update
3187          * the domain values for our changes.
3188          */
3189         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3190
3191         /* If we're writing through the CPU, then the GPU read domains will
3192          * need to be invalidated at next use.
3193          */
3194         if (write) {
3195                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3196                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3197         }
3198
3199         trace_i915_gem_object_change_domain(obj,
3200                                             old_read_domains,
3201                                             old_write_domain);
3202
3203         return 0;
3204 }
3205
3206 /**
3207  * Moves the object from a partially valid CPU read domain to a fully valid one.
3208  *
3209  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3210  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3211  */
3212 static void
3213 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3214 {
3215         if (!obj->page_cpu_valid)
3216                 return;
3217
3218         /* If we're partially in the CPU read domain, finish moving it in.
3219          */
3220         if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3221                 int i;
3222
3223                 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3224                         if (obj->page_cpu_valid[i])
3225                                 continue;
3226                         drm_clflush_pages(obj->pages + i, 1);
3227                 }
3228         }
3229
3230         /* Free the page_cpu_valid mappings which are now stale, whether
3231          * or not we've got I915_GEM_DOMAIN_CPU.
3232          */
3233         kfree(obj->page_cpu_valid);
3234         obj->page_cpu_valid = NULL;
3235 }
3236
3237 /**
3238  * Set the CPU read domain on a range of the object.
3239  *
3240  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3241  * not entirely valid.  The page_cpu_valid member of the object tracks which
3242  * pages have been flushed, and will be respected by
3243  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid mapping
3244  * of the whole object.
3245  *
3246  * This function returns when the move is complete, including waiting on
3247  * flushes to occur.
3248  */
3249 static int
3250 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3251                                           uint64_t offset, uint64_t size)
3252 {
3253         uint32_t old_read_domains;
3254         int i, ret;
3255
3256         if (offset == 0 && size == obj->base.size)
3257                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3258
3259         ret = i915_gem_object_flush_gpu_write_domain(obj);
3260         if (ret)
3261                 return ret;
3262
3263         ret = i915_gem_object_wait_rendering(obj);
3264         if (ret)
3265                 return ret;
3266
3267         i915_gem_object_flush_gtt_write_domain(obj);
3268
3269         /* If we're already fully in the CPU read domain, we're done. */
3270         if (obj->page_cpu_valid == NULL &&
3271             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3272                 return 0;
3273
3274         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3275          * newly adding I915_GEM_DOMAIN_CPU
3276          */
3277         if (obj->page_cpu_valid == NULL) {
3278                 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3279                                               GFP_KERNEL);
3280                 if (obj->page_cpu_valid == NULL)
3281                         return -ENOMEM;
3282         } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3283                 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3284
3285         /* Flush the cache on any pages that are still invalid from the CPU's
3286          * perspective.
3287          */
3288         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3289              i++) {
3290                 if (obj->page_cpu_valid[i])
3291                         continue;
3292
3293                 drm_clflush_pages(obj->pages + i, 1);
3294
3295                 obj->page_cpu_valid[i] = 1;
3296         }
3297
3298         /* It should now be out of any other write domains, and we can update
3299          * the domain values for our changes.
3300          */
3301         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3302
3303         old_read_domains = obj->base.read_domains;
3304         obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3305
3306         trace_i915_gem_object_change_domain(obj,
3307                                             old_read_domains,
3308                                             obj->base.write_domain);
3309
3310         return 0;
3311 }
3312
3313 /* Throttle our rendering by waiting until the ring has completed our requests
3314  * emitted over 20 msec ago.
3315  *
3316  * Note that if we were to use the current jiffies each time around the loop,
3317  * we wouldn't escape the function with any frames outstanding if the time to
3318  * render a frame was over 20ms.
3319  *
3320  * This should get us reasonable parallelism between CPU and GPU but also
3321  * relatively low latency when blocking on a particular request to finish.
3322  */
3323 static int
3324 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3325 {
3326         struct drm_i915_private *dev_priv = dev->dev_private;
3327         struct drm_i915_file_private *file_priv = file->driver_priv;
3328         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3329         struct drm_i915_gem_request *request;
3330         struct intel_ring_buffer *ring = NULL;
3331         u32 seqno = 0;
3332         int ret;
3333
3334         if (atomic_read(&dev_priv->mm.wedged))
3335                 return -EIO;
3336
3337         spin_lock(&file_priv->mm.lock);
3338         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3339                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3340                         break;
3341
3342                 ring = request->ring;
3343                 seqno = request->seqno;
3344         }
3345         spin_unlock(&file_priv->mm.lock);
3346
3347         if (seqno == 0)
3348                 return 0;
3349
3350         ret = 0;
3351         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3352                 /* Wait for the seqno to pass without holding any locks, so we
3353                  * avoid causing extra latency for others. This is safe as the irq
3354                  * generation is designed to be run atomically and so is
3355                  * lockless.
3356                  */
3357                 if (ring->irq_get(ring)) {
3358                         ret = wait_event_interruptible(ring->irq_queue,
3359                                                        i915_seqno_passed(ring->get_seqno(ring), seqno)
3360                                                        || atomic_read(&dev_priv->mm.wedged));
3361                         ring->irq_put(ring);
3362
3363                         if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3364                                 ret = -EIO;
3365                 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
3366                                                       seqno) ||
3367                                     atomic_read(&dev_priv->mm.wedged), 3000)) {
3368                         ret = -EBUSY;
3369                 }
3370         }
3371
3372         if (ret == 0)
3373                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3374
3375         return ret;
3376 }
3377
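     /*
      * Pins the object into the GTT, binding it first if necessary.  If the
      * object is already bound at an offset that violates the requested
      * alignment, or is not mappable/fenceable when that was requested, it is
      * unbound and rebound.  Each successful call must be balanced by a call
      * to i915_gem_object_unpin().
      *
      * Illustrative sketch of a caller (not taken from this file):
      *
      *     ret = i915_gem_object_pin(obj, 4096, true);
      *     if (ret)
      *             return ret;
      *     ... access the object through the mappable GTT ...
      *     i915_gem_object_unpin(obj);
      */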
3378 int
3379 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3380                     uint32_t alignment,
3381                     bool map_and_fenceable)
3382 {
3383         struct drm_device *dev = obj->base.dev;
3384         struct drm_i915_private *dev_priv = dev->dev_private;
3385         int ret;
3386
3387         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3388                 return -EBUSY;
3389         WARN_ON(i915_verify_lists(dev));
3390
3391         if (obj->gtt_space != NULL) {
3392                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3393                     (map_and_fenceable && !obj->map_and_fenceable)) {
3394                         WARN(obj->pin_count,
3395                              "bo is already pinned with incorrect alignment:"
3396                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3397                              " obj->map_and_fenceable=%d\n",
3398                              obj->gtt_offset, alignment,
3399                              map_and_fenceable,
3400                              obj->map_and_fenceable);
3401                         ret = i915_gem_object_unbind(obj);
3402                         if (ret)
3403                                 return ret;
3404                 }
3405         }
3406
3407         if (obj->gtt_space == NULL) {
3408                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3409                                                   map_and_fenceable);
3410                 if (ret)
3411                         return ret;
3412         }
3413
3414         if (obj->pin_count++ == 0) {
3415                 if (!obj->active)
3416                         list_move_tail(&obj->mm_list,
3417                                        &dev_priv->mm.pinned_list);
3418         }
3419         obj->pin_mappable |= map_and_fenceable;
3420
3421         WARN_ON(i915_verify_lists(dev));
3422         return 0;
3423 }
3424
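     /*
      * Drops one pin reference taken by i915_gem_object_pin().  When the last
      * pin is released, an inactive object is moved back onto the inactive
      * list so that it becomes eligible for eviction again.
      */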
3425 void
3426 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3427 {
3428         struct drm_device *dev = obj->base.dev;
3429         drm_i915_private_t *dev_priv = dev->dev_private;
3430
3431         WARN_ON(i915_verify_lists(dev));
3432         BUG_ON(obj->pin_count == 0);
3433         BUG_ON(obj->gtt_space == NULL);
3434
3435         if (--obj->pin_count == 0) {
3436                 if (!obj->active)
3437                         list_move_tail(&obj->mm_list,
3438                                        &dev_priv->mm.inactive_list);
3439                 obj->pin_mappable = false;
3440         }
3441         WARN_ON(i915_verify_lists(dev));
3442 }
3443
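     /*
      * Handler for the GEM pin ioctl: pins a buffer on behalf of userspace.
      * The first user pin of an object also pins it into the GTT, and the
      * resulting GTT offset is returned in args->offset.  Only the file that
      * pinned an object may pin it again or unpin it, and purgeable buffers
      * are rejected.
      */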
3444 int
3445 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3446                    struct drm_file *file)
3447 {
3448         struct drm_i915_gem_pin *args = data;
3449         struct drm_i915_gem_object *obj;
3450         int ret;
3451
3452         ret = i915_mutex_lock_interruptible(dev);
3453         if (ret)
3454                 return ret;
3455
3456         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3457         if (&obj->base == NULL) {
3458                 ret = -ENOENT;
3459                 goto unlock;
3460         }
3461
3462         if (obj->madv != I915_MADV_WILLNEED) {
3463                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3464                 ret = -EINVAL;
3465                 goto out;
3466         }
3467
3468         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3469                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3470                           args->handle);
3471                 ret = -EINVAL;
3472                 goto out;
3473         }
3474
3475         if (obj->user_pin_count == 0) {
3476                 ret = i915_gem_object_pin(obj, args->alignment, true);
3477                 if (ret)
3478                         goto out;
3479         }
3480
3481         obj->user_pin_count++;
3482         obj->pin_filp = file;
3483
3484         /* XXX - flush the CPU caches for pinned objects
3485          * as the X server doesn't manage domains yet
3486          */
3487         i915_gem_object_flush_cpu_write_domain(obj);
3488         args->offset = obj->gtt_offset;
3489 out:
3490         drm_gem_object_unreference(&obj->base);
3491 unlock:
3492         mutex_unlock(&dev->struct_mutex);
3493         return ret;
3494 }
3495
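     /*
      * Handler for the GEM unpin ioctl: drops one user pin previously taken
      * through the pin ioctl.  When the last user pin is released, the object
      * itself is unpinned.
      */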
3496 int
3497 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3498                      struct drm_file *file)
3499 {
3500         struct drm_i915_gem_pin *args = data;
3501         struct drm_i915_gem_object *obj;
3502         int ret;
3503
3504         ret = i915_mutex_lock_interruptible(dev);
3505         if (ret)
3506                 return ret;
3507
3508         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3509         if (&obj->base == NULL) {
3510                 ret = -ENOENT;
3511                 goto unlock;
3512         }
3513
3514         if (obj->pin_filp != file) {
3515                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3516                           args->handle);
3517                 ret = -EINVAL;
3518                 goto out;
3519         }
3520         obj->user_pin_count--;
3521         if (obj->user_pin_count == 0) {
3522                 obj->pin_filp = NULL;
3523                 i915_gem_object_unpin(obj);
3524         }
3525
3526 out:
3527         drm_gem_object_unreference(&obj->base);
3528 unlock:
3529         mutex_unlock(&dev->struct_mutex);
3530         return ret;
3531 }
3532
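     /*
      * Handler for the GEM busy ioctl: reports in args->busy whether the
      * object is still in use by the GPU.  To guarantee that a busy object
      * eventually becomes idle without further userspace action, any pending
      * GPU writes are flushed and a retire pass is run before reporting.
      */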
3533 int
3534 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3535                     struct drm_file *file)
3536 {
3537         struct drm_i915_gem_busy *args = data;
3538         struct drm_i915_gem_object *obj;
3539         int ret;
3540
3541         ret = i915_mutex_lock_interruptible(dev);
3542         if (ret)
3543                 return ret;
3544
3545         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3546         if (&obj->base == NULL) {
3547                 ret = -ENOENT;
3548                 goto unlock;
3549         }
3550
3551         /* Count all active objects as busy, even if they are not currently in
3552          * use by the GPU. Users of this interface expect objects to eventually
3553          * become non-busy without any further actions, therefore emit any
3554          * necessary flushes here.
3555          */
3556         args->busy = obj->active;
3557         if (args->busy) {
3558                 /* Unconditionally flush objects, even when the GPU still uses this
3559                  * object. Userspace calling this function indicates that it wants to
3560                  * use this buffer sooner rather than later, so issuing the required
3561                  * flush earlier is beneficial.
3562                  */
3563                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3564                         ret = i915_gem_flush_ring(obj->ring,
3565                                                   0, obj->base.write_domain);
3566                 } else if (obj->ring->outstanding_lazy_request ==
3567                            obj->last_rendering_seqno) {
3568                         struct drm_i915_gem_request *request;
3569
3570                         /* This ring is not being cleared by active usage,
3571                          * so emit a request to do so.
3572                          */
3573                         request = kzalloc(sizeof(*request), GFP_KERNEL);
3574                         if (request) {
3575                                 ret = i915_add_request(obj->ring, NULL, request);
3576                                 if (ret)
3577                                         kfree(request);
3578                         } else
3579                                 ret = -ENOMEM;
3580                 }
3581
3582                 /* Update the active list for the hardware's current position.
3583                  * Otherwise this only updates on a delayed timer or when irqs
3584                  * are actually unmasked, and our working set ends up being
3585                  * larger than required.
3586                  */
3587                 i915_gem_retire_requests_ring(obj->ring);
3588
3589                 args->busy = obj->active;
3590         }
3591
3592         drm_gem_object_unreference(&obj->base);
3593 unlock:
3594         mutex_unlock(&dev->struct_mutex);
3595         return ret;
3596 }
3597
3598 int
3599 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3600                         struct drm_file *file_priv)
3601 {
3602         return i915_gem_ring_throttle(dev, file_priv);
3603 }
3604
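     /*
      * Handler for the GEM madvise ioctl: marks an object as WILLNEED or
      * DONTNEED.  DONTNEED (purgeable) objects that are not bound have their
      * backing storage discarded immediately; args->retained reports whether
      * the backing storage still exists.  Pinned objects cannot be marked.
      */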
3605 int
3606 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3607                        struct drm_file *file_priv)
3608 {
3609         struct drm_i915_gem_madvise *args = data;
3610         struct drm_i915_gem_object *obj;
3611         int ret;
3612
3613         switch (args->madv) {
3614         case I915_MADV_DONTNEED:
3615         case I915_MADV_WILLNEED:
3616             break;
3617         default:
3618             return -EINVAL;
3619         }
3620
3621         ret = i915_mutex_lock_interruptible(dev);
3622         if (ret)
3623                 return ret;
3624
3625         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3626         if (&obj->base == NULL) {
3627                 ret = -ENOENT;
3628                 goto unlock;
3629         }
3630
3631         if (obj->pin_count) {
3632                 ret = -EINVAL;
3633                 goto out;
3634         }
3635
3636         if (obj->madv != __I915_MADV_PURGED)
3637                 obj->madv = args->madv;
3638
3639         /* if the object is no longer bound, discard its backing storage */
3640         if (i915_gem_object_is_purgeable(obj) &&
3641             obj->gtt_space == NULL)
3642                 i915_gem_object_truncate(obj);
3643
3644         args->retained = obj->madv != __I915_MADV_PURGED;
3645
3646 out:
3647         drm_gem_object_unreference(&obj->base);
3648 unlock:
3649         mutex_unlock(&dev->struct_mutex);
3650         return ret;
3651 }
3652
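     /*
      * Allocates and initialises a new GEM object of the given size, backed
      * by shmemfs.  The object starts out in the CPU domain, with LLC caching
      * on Gen6/Gen7 and uncached PTEs elsewhere, and with no GTT binding.
      */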
3653 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3654                                                   size_t size)
3655 {
3656         struct drm_i915_private *dev_priv = dev->dev_private;
3657         struct drm_i915_gem_object *obj;
3658         struct address_space *mapping;
3659
3660         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3661         if (obj == NULL)
3662                 return NULL;
3663
3664         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3665                 kfree(obj);
3666                 return NULL;
3667         }
3668
3669         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3670         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3671
3672         i915_gem_info_add_obj(dev_priv, size);
3673
3674         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3675         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3676
3677         if (IS_GEN6(dev) || IS_GEN7(dev)) {
3678                 /* On Gen6 and Gen7, we can have the GPU use the LLC (the CPU
3679                  * cache) for about a 10% performance improvement
3680                  * compared to uncached.  Graphics requests other than
3681                  * display scanout are coherent with the CPU in
3682                  * accessing this cache.  This means in this mode we
3683                  * don't need to clflush on the CPU side, and on the
3684                  * GPU side we only need to flush internal caches to
3685                  * get data visible to the CPU.
3686                  *
3687                  * However, we maintain the display planes as UC, and so
3688                  * need to rebind when first used as such.
3689                  */
3690                 obj->cache_level = I915_CACHE_LLC;
3691         } else
3692                 obj->cache_level = I915_CACHE_NONE;
3693
3694         obj->base.driver_private = NULL;
3695         obj->fence_reg = I915_FENCE_REG_NONE;
3696         INIT_LIST_HEAD(&obj->mm_list);
3697         INIT_LIST_HEAD(&obj->gtt_list);
3698         INIT_LIST_HEAD(&obj->ring_list);
3699         INIT_LIST_HEAD(&obj->exec_list);
3700         INIT_LIST_HEAD(&obj->gpu_write_list);
3701         obj->madv = I915_MADV_WILLNEED;
3702         /* Avoid an unnecessary call to unbind on the first bind. */
3703         obj->map_and_fenceable = true;
3704
3705         return obj;
3706 }
3707
3708 int i915_gem_init_object(struct drm_gem_object *obj)
3709 {
3710         BUG();
3711
3712         return 0;
3713 }
3714
3715 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3716 {
3717         struct drm_device *dev = obj->base.dev;
3718         drm_i915_private_t *dev_priv = dev->dev_private;
3719         int ret;
3720
3721         ret = i915_gem_object_unbind(obj);
3722         if (ret == -ERESTARTSYS) {
3723                 list_move(&obj->mm_list,
3724                           &dev_priv->mm.deferred_free_list);
3725                 return;
3726         }
3727
3728         trace_i915_gem_object_destroy(obj);
3729
3730         if (obj->base.map_list.map)
3731                 drm_gem_free_mmap_offset(&obj->base);
3732
3733         drm_gem_object_release(&obj->base);
3734         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3735
3736         kfree(obj->page_cpu_valid);
3737         kfree(obj->bit_17);
3738         kfree(obj);
3739 }
3740
3741 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3742 {
3743         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3744         struct drm_device *dev = obj->base.dev;
3745
3746         while (obj->pin_count > 0)
3747                 i915_gem_object_unpin(obj);
3748
3749         if (obj->phys_obj)
3750                 i915_gem_detach_phys_object(dev, obj);
3751
3752         i915_gem_free_object_tail(obj);
3753 }
3754
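     /*
      * Quiesces the GPU: waits for all rings to go idle, evicts inactive
      * buffers under UMS, resets the fence registers and tears down the
      * ring buffers.  Called from i915_gem_lastclose() and
      * i915_gem_leavevt_ioctl(), among others.
      */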
3755 int
3756 i915_gem_idle(struct drm_device *dev)
3757 {
3758         drm_i915_private_t *dev_priv = dev->dev_private;
3759         int ret;
3760
3761         mutex_lock(&dev->struct_mutex);
3762
3763         if (dev_priv->mm.suspended) {
3764                 mutex_unlock(&dev->struct_mutex);
3765                 return 0;
3766         }
3767
3768         ret = i915_gpu_idle(dev);
3769         if (ret) {
3770                 mutex_unlock(&dev->struct_mutex);
3771                 return ret;
3772         }
3773
3774         /* Under UMS, be paranoid and evict. */
3775         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3776                 ret = i915_gem_evict_inactive(dev, false);
3777                 if (ret) {
3778                         mutex_unlock(&dev->struct_mutex);
3779                         return ret;
3780                 }
3781         }
3782
3783         i915_gem_reset_fences(dev);
3784
3785         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3786          * We need to replace this with a semaphore, or something.
3787          * And not confound mm.suspended!
3788          */
3789         dev_priv->mm.suspended = 1;
3790         del_timer_sync(&dev_priv->hangcheck_timer);
3791
3792         i915_kernel_lost_context(dev);
3793         i915_gem_cleanup_ringbuffer(dev);
3794
3795         mutex_unlock(&dev->struct_mutex);
3796
3797         /* Cancel the retire work handler, which should be idle now. */
3798         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3799
3800         return 0;
3801 }
3802
3803 int
3804 i915_gem_init_ringbuffer(struct drm_device *dev)
3805 {
3806         drm_i915_private_t *dev_priv = dev->dev_private;
3807         int ret;
3808
3809         ret = intel_init_render_ring_buffer(dev);
3810         if (ret)
3811                 return ret;
3812
3813         if (HAS_BSD(dev)) {
3814                 ret = intel_init_bsd_ring_buffer(dev);
3815                 if (ret)
3816                         goto cleanup_render_ring;
3817         }
3818
3819         if (HAS_BLT(dev)) {
3820                 ret = intel_init_blt_ring_buffer(dev);
3821                 if (ret)
3822                         goto cleanup_bsd_ring;
3823         }
3824
3825         dev_priv->next_seqno = 1;
3826
3827         return 0;
3828
3829 cleanup_bsd_ring:
3830         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3831 cleanup_render_ring:
3832         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3833         return ret;
3834 }
3835
3836 void
3837 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3838 {
3839         drm_i915_private_t *dev_priv = dev->dev_private;
3840         int i;
3841
3842         for (i = 0; i < I915_NUM_RINGS; i++)
3843                 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3844 }
3845
3846 int
3847 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3848                        struct drm_file *file_priv)
3849 {
3850         drm_i915_private_t *dev_priv = dev->dev_private;
3851         int ret, i;
3852
3853         if (drm_core_check_feature(dev, DRIVER_MODESET))
3854                 return 0;
3855
3856         if (atomic_read(&dev_priv->mm.wedged)) {
3857                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3858                 atomic_set(&dev_priv->mm.wedged, 0);
3859         }
3860
3861         mutex_lock(&dev->struct_mutex);
3862         dev_priv->mm.suspended = 0;
3863
3864         ret = i915_gem_init_ringbuffer(dev);
3865         if (ret != 0) {
3866                 mutex_unlock(&dev->struct_mutex);
3867                 return ret;
3868         }
3869
3870         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3871         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3872         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3873         for (i = 0; i < I915_NUM_RINGS; i++) {
3874                 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3875                 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3876         }
3877         mutex_unlock(&dev->struct_mutex);
3878
3879         ret = drm_irq_install(dev);
3880         if (ret)
3881                 goto cleanup_ringbuffer;
3882
3883         return 0;
3884
3885 cleanup_ringbuffer:
3886         mutex_lock(&dev->struct_mutex);
3887         i915_gem_cleanup_ringbuffer(dev);
3888         dev_priv->mm.suspended = 1;
3889         mutex_unlock(&dev->struct_mutex);
3890
3891         return ret;
3892 }
3893
3894 int
3895 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3896                        struct drm_file *file_priv)
3897 {
3898         if (drm_core_check_feature(dev, DRIVER_MODESET))
3899                 return 0;
3900
3901         drm_irq_uninstall(dev);
3902         return i915_gem_idle(dev);
3903 }
3904
3905 void
3906 i915_gem_lastclose(struct drm_device *dev)
3907 {
3908         int ret;
3909
3910         if (drm_core_check_feature(dev, DRIVER_MODESET))
3911                 return;
3912
3913         ret = i915_gem_idle(dev);
3914         if (ret)
3915                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3916 }
3917
3918 static void
3919 init_ring_lists(struct intel_ring_buffer *ring)
3920 {
3921         INIT_LIST_HEAD(&ring->active_list);
3922         INIT_LIST_HEAD(&ring->request_list);
3923         INIT_LIST_HEAD(&ring->gpu_write_list);
3924 }
3925
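     /*
      * One-time GEM setup at driver load: initialises the various object
      * lists, the retire work handler, the fence register state (8 or 16
      * fences depending on the generation) and the inactive-list shrinker.
      */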
3926 void
3927 i915_gem_load(struct drm_device *dev)
3928 {
3929         int i;
3930         drm_i915_private_t *dev_priv = dev->dev_private;
3931
3932         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3933         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3934         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3935         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3936         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3937         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3938         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3939         for (i = 0; i < I915_NUM_RINGS; i++)
3940                 init_ring_lists(&dev_priv->ring[i]);
3941         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3942                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3943         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3944                           i915_gem_retire_work_handler);
3945         init_completion(&dev_priv->error_completion);
3946
3947         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3948         if (IS_GEN3(dev)) {
3949                 u32 tmp = I915_READ(MI_ARB_STATE);
3950                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3951                         /* MI_ARB_STATE is a masked write, so set both the bit and its mask bit */
3952                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3953                         I915_WRITE(MI_ARB_STATE, tmp);
3954                 }
3955         }
3956
3957         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3958
3959         /* Old X drivers will take 0-2 for front, back, depth buffers */
3960         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3961                 dev_priv->fence_reg_start = 3;
3962
3963         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3964                 dev_priv->num_fence_regs = 16;
3965         else
3966                 dev_priv->num_fence_regs = 8;
3967
3968         /* Initialize fence registers to zero */
3969         for (i = 0; i < dev_priv->num_fence_regs; i++) {
3970                 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3971         }
3972
3973         i915_gem_detect_bit_6_swizzle(dev);
3974         init_waitqueue_head(&dev_priv->pending_flip_queue);
3975
3976         dev_priv->mm.interruptible = true;
3977
3978         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3979         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3980         register_shrinker(&dev_priv->mm.inactive_shrinker);
3981 }
3982
3983 /*
3984  * Create a physically contiguous memory object for this object
3985  * e.g. for cursor + overlay regs
3986  */
3987 static int i915_gem_init_phys_object(struct drm_device *dev,
3988                                      int id, int size, int align)
3989 {
3990         drm_i915_private_t *dev_priv = dev->dev_private;
3991         struct drm_i915_gem_phys_object *phys_obj;
3992         int ret;
3993
3994         if (dev_priv->mm.phys_objs[id - 1] || !size)
3995                 return 0;
3996
3997         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3998         if (!phys_obj)
3999                 return -ENOMEM;
4000
4001         phys_obj->id = id;
4002
4003         phys_obj->handle = drm_pci_alloc(dev, size, align);
4004         if (!phys_obj->handle) {
4005                 ret = -ENOMEM;
4006                 goto kfree_obj;
4007         }
4008 #ifdef CONFIG_X86
4009         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4010 #endif
4011
4012         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4013
4014         return 0;
4015 kfree_obj:
4016         kfree(phys_obj);
4017         return ret;
4018 }
4019
4020 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4021 {
4022         drm_i915_private_t *dev_priv = dev->dev_private;
4023         struct drm_i915_gem_phys_object *phys_obj;
4024
4025         if (!dev_priv->mm.phys_objs[id - 1])
4026                 return;
4027
4028         phys_obj = dev_priv->mm.phys_objs[id - 1];
4029         if (phys_obj->cur_obj) {
4030                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4031         }
4032
4033 #ifdef CONFIG_X86
4034         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4035 #endif
4036         drm_pci_free(dev, phys_obj->handle);
4037         kfree(phys_obj);
4038         dev_priv->mm.phys_objs[id - 1] = NULL;
4039 }
4040
4041 void i915_gem_free_all_phys_object(struct drm_device *dev)
4042 {
4043         int i;
4044
4045         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4046                 i915_gem_free_phys_object(dev, i);
4047 }
4048
4049 void i915_gem_detach_phys_object(struct drm_device *dev,
4050                                  struct drm_i915_gem_object *obj)
4051 {
4052         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4053         char *vaddr;
4054         int i;
4055         int page_count;
4056
4057         if (!obj->phys_obj)
4058                 return;
4059         vaddr = obj->phys_obj->handle->vaddr;
4060
4061         page_count = obj->base.size / PAGE_SIZE;
4062         for (i = 0; i < page_count; i++) {
4063                 struct page *page = shmem_read_mapping_page(mapping, i);
4064                 if (!IS_ERR(page)) {
4065                         char *dst = kmap_atomic(page);
4066                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4067                         kunmap_atomic(dst);
4068
4069                         drm_clflush_pages(&page, 1);
4070
4071                         set_page_dirty(page);
4072                         mark_page_accessed(page);
4073                         page_cache_release(page);
4074                 }
4075         }
4076         intel_gtt_chipset_flush();
4077
4078         obj->phys_obj->cur_obj = NULL;
4079         obj->phys_obj = NULL;
4080 }
4081
4082 int
4083 i915_gem_attach_phys_object(struct drm_device *dev,
4084                             struct drm_i915_gem_object *obj,
4085                             int id,
4086                             int align)
4087 {
4088         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4089         drm_i915_private_t *dev_priv = dev->dev_private;
4090         int ret = 0;
4091         int page_count;
4092         int i;
4093
4094         if (id > I915_MAX_PHYS_OBJECT)
4095                 return -EINVAL;
4096
4097         if (obj->phys_obj) {
4098                 if (obj->phys_obj->id == id)
4099                         return 0;
4100                 i915_gem_detach_phys_object(dev, obj);
4101         }
4102
4103         /* create a new object */
4104         if (!dev_priv->mm.phys_objs[id - 1]) {
4105                 ret = i915_gem_init_phys_object(dev, id,
4106                                                 obj->base.size, align);
4107                 if (ret) {
4108                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4109                                   id, obj->base.size);
4110                         return ret;
4111                 }
4112         }
4113
4114         /* bind to the object */
4115         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4116         obj->phys_obj->cur_obj = obj;
4117
4118         page_count = obj->base.size / PAGE_SIZE;
4119
4120         for (i = 0; i < page_count; i++) {
4121                 struct page *page;
4122                 char *dst, *src;
4123
4124                 page = shmem_read_mapping_page(mapping, i);
4125                 if (IS_ERR(page))
4126                         return PTR_ERR(page);
4127
4128                 src = kmap_atomic(page);
4129                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4130                 memcpy(dst, src, PAGE_SIZE);
4131                 kunmap_atomic(src);
4132
4133                 mark_page_accessed(page);
4134                 page_cache_release(page);
4135         }
4136
4137         return 0;
4138 }
4139
4140 static int
4141 i915_gem_phys_pwrite(struct drm_device *dev,
4142                      struct drm_i915_gem_object *obj,
4143                      struct drm_i915_gem_pwrite *args,
4144                      struct drm_file *file_priv)
4145 {
4146         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4147         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4148
4149         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4150                 unsigned long unwritten;
4151
4152                 /* The physical object once assigned is fixed for the lifetime
4153                  * of the obj, so we can safely drop the lock and continue
4154                  * to access vaddr.
4155                  */
4156                 mutex_unlock(&dev->struct_mutex);
4157                 unwritten = copy_from_user(vaddr, user_data, args->size);
4158                 mutex_lock(&dev->struct_mutex);
4159                 if (unwritten)
4160                         return -EFAULT;
4161         }
4162
4163         intel_gtt_chipset_flush();
4164         return 0;
4165 }
4166
4167 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4168 {
4169         struct drm_i915_file_private *file_priv = file->driver_priv;
4170
4171         /* Clean up our request list when the client is going away, so that
4172          * later retire_requests won't dereference our soon-to-be-gone
4173          * file_priv.
4174          */
4175         spin_lock(&file_priv->mm.lock);
4176         while (!list_empty(&file_priv->mm.request_list)) {
4177                 struct drm_i915_gem_request *request;
4178
4179                 request = list_first_entry(&file_priv->mm.request_list,
4180                                            struct drm_i915_gem_request,
4181                                            client_list);
4182                 list_del(&request->client_list);
4183                 request->file_priv = NULL;
4184         }
4185         spin_unlock(&file_priv->mm.lock);
4186 }
4187
4188 static int
4189 i915_gpu_is_active(struct drm_device *dev)
4190 {
4191         drm_i915_private_t *dev_priv = dev->dev_private;
4192         int lists_empty;
4193
4194         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4195                       list_empty(&dev_priv->mm.active_list);
4196
4197         return !lists_empty;
4198 }
4199
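     /*
      * Shrinker callback invoked under memory pressure.  With nr_to_scan == 0
      * it only reports how many inactive objects could be freed; otherwise it
      * unbinds purgeable buffers first, then any other inactive buffers, and
      * as a last resort waits for the GPU to idle and rescans.
      */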
4200 static int
4201 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4202 {
4203         struct drm_i915_private *dev_priv =
4204                 container_of(shrinker,
4205                              struct drm_i915_private,
4206                              mm.inactive_shrinker);
4207         struct drm_device *dev = dev_priv->dev;
4208         struct drm_i915_gem_object *obj, *next;
4209         int nr_to_scan = sc->nr_to_scan;
4210         int cnt;
4211
4212         if (!mutex_trylock(&dev->struct_mutex))
4213                 return 0;
4214
4215         /* "fast path": only count the number of available objects */
4216         if (nr_to_scan == 0) {
4217                 cnt = 0;
4218                 list_for_each_entry(obj,
4219                                     &dev_priv->mm.inactive_list,
4220                                     mm_list)
4221                         cnt++;
4222                 mutex_unlock(&dev->struct_mutex);
4223                 return cnt / 100 * sysctl_vfs_cache_pressure;
4224         }
4225
4226 rescan:
4227         /* first scan for clean buffers */
4228         i915_gem_retire_requests(dev);
4229
4230         list_for_each_entry_safe(obj, next,
4231                                  &dev_priv->mm.inactive_list,
4232                                  mm_list) {
4233                 if (i915_gem_object_is_purgeable(obj)) {
4234                         if (i915_gem_object_unbind(obj) == 0 &&
4235                             --nr_to_scan == 0)
4236                                 break;
4237                 }
4238         }
4239
4240         /* second pass, evict/count anything still on the inactive list */
4241         cnt = 0;
4242         list_for_each_entry_safe(obj, next,
4243                                  &dev_priv->mm.inactive_list,
4244                                  mm_list) {
4245                 if (nr_to_scan &&
4246                     i915_gem_object_unbind(obj) == 0)
4247                         nr_to_scan--;
4248                 else
4249                         cnt++;
4250         }
4251
4252         if (nr_to_scan && i915_gpu_is_active(dev)) {
4253                 /*
4254                  * We are desperate for pages, so as a last resort, wait
4255                  * for the GPU to finish and discard whatever we can.
4256                  * This dramatically reduces the number of OOM-killer
4257                  * events whilst running the GPU aggressively.
4258                  */
4259                 if (i915_gpu_idle(dev) == 0)
4260                         goto rescan;
4261         }
4262         mutex_unlock(&dev->struct_mutex);
4263         return cnt / 100 * sysctl_vfs_cache_pressure;
4264 }