[pandora-kernel.git] / drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38
39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43                                                           bool write);
44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45                                                                   uint64_t offset,
46                                                                   uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49                                                     unsigned alignment,
50                                                     bool map_and_fenceable);
51 static void i915_gem_clear_fence_reg(struct drm_device *dev,
52                                      struct drm_i915_fence_reg *reg);
53 static int i915_gem_phys_pwrite(struct drm_device *dev,
54                                 struct drm_i915_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file);
57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58
59 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
60                                     struct shrink_control *sc);
61
62 /* some bookkeeping */
63 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
64                                   size_t size)
65 {
66         dev_priv->mm.object_count++;
67         dev_priv->mm.object_memory += size;
68 }
69
70 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
71                                      size_t size)
72 {
73         dev_priv->mm.object_count--;
74         dev_priv->mm.object_memory -= size;
75 }
76
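/*
 * Wait for any in-progress GPU error handling to complete.  If the GPU is
 * still wedged afterwards, the completion token we consumed is put back so
 * that later waiters do not block on a count that will never be signalled.
 */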
77 static int
78 i915_gem_wait_for_error(struct drm_device *dev)
79 {
80         struct drm_i915_private *dev_priv = dev->dev_private;
81         struct completion *x = &dev_priv->error_completion;
82         unsigned long flags;
83         int ret;
84
85         if (!atomic_read(&dev_priv->mm.wedged))
86                 return 0;
87
88         ret = wait_for_completion_interruptible(x);
89         if (ret)
90                 return ret;
91
92         if (atomic_read(&dev_priv->mm.wedged)) {
93                 /* GPU is hung, bump the completion count to account for
94                  * the token we just consumed so that we never hit zero and
95                  * end up waiting upon a subsequent completion event that
96                  * will never happen.
97                  */
98                 spin_lock_irqsave(&x->wait.lock, flags);
99                 x->done++;
100                 spin_unlock_irqrestore(&x->wait.lock, flags);
101         }
102         return 0;
103 }
104
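/*
 * Take struct_mutex interruptibly, first giving any pending GPU error
 * handling a chance to finish.
 */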
105 int i915_mutex_lock_interruptible(struct drm_device *dev)
106 {
107         int ret;
108
109         ret = i915_gem_wait_for_error(dev);
110         if (ret)
111                 return ret;
112
113         ret = mutex_lock_interruptible(&dev->struct_mutex);
114         if (ret)
115                 return ret;
116
117         WARN_ON(i915_verify_lists(dev));
118         return 0;
119 }
120
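/* An object is inactive when it is bound in the GTT, is not being used by
 * the GPU and is not pinned.
 */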
121 static inline bool
122 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
123 {
124         return obj->gtt_space && !obj->active && obj->pin_count == 0;
125 }
126
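/*
 * Initialise the GTT address space manager for the given range, record the
 * aperture bookkeeping and clear any stale PTEs in the range we take over.
 */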
127 void i915_gem_do_init(struct drm_device *dev,
128                       unsigned long start,
129                       unsigned long mappable_end,
130                       unsigned long end)
131 {
132         drm_i915_private_t *dev_priv = dev->dev_private;
133
134         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135
136         dev_priv->mm.gtt_start = start;
137         dev_priv->mm.gtt_mappable_end = mappable_end;
138         dev_priv->mm.gtt_end = end;
139         dev_priv->mm.gtt_total = end - start;
140         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141
142         /* Take over this portion of the GTT */
143         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
144 }
145
146 int
147 i915_gem_init_ioctl(struct drm_device *dev, void *data,
148                     struct drm_file *file)
149 {
150         struct drm_i915_gem_init *args = data;
151
152         if (args->gtt_start >= args->gtt_end ||
153             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
154                 return -EINVAL;
155
156         mutex_lock(&dev->struct_mutex);
157         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
158         mutex_unlock(&dev->struct_mutex);
159
160         return 0;
161 }
162
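/*
 * Report the total GTT aperture size and how much of it is not currently
 * taken up by pinned objects.
 */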
163 int
164 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
165                             struct drm_file *file)
166 {
167         struct drm_i915_private *dev_priv = dev->dev_private;
168         struct drm_i915_gem_get_aperture *args = data;
169         struct drm_i915_gem_object *obj;
170         size_t pinned;
171
172         if (!(dev->driver->driver_features & DRIVER_GEM))
173                 return -ENODEV;
174
175         pinned = 0;
176         mutex_lock(&dev->struct_mutex);
177         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
178                 pinned += obj->gtt_space->size;
179         mutex_unlock(&dev->struct_mutex);
180
181         args->aper_size = dev_priv->mm.gtt_total;
182         args->aper_available_size = args->aper_size - pinned;
183
184         return 0;
185 }
186
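/*
 * Common backend for the create and dumb_create ioctls: allocate a new
 * object of the page-aligned size and return a handle to it through the
 * caller's file.
 */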
187 static int
188 i915_gem_create(struct drm_file *file,
189                 struct drm_device *dev,
190                 uint64_t size,
191                 uint32_t *handle_p)
192 {
193         struct drm_i915_gem_object *obj;
194         int ret;
195         u32 handle;
196
197         size = roundup(size, PAGE_SIZE);
198         if (size == 0)
199                 return -EINVAL;
200
201         /* Allocate the new object */
202         obj = i915_gem_alloc_object(dev, size);
203         if (obj == NULL)
204                 return -ENOMEM;
205
206         ret = drm_gem_handle_create(file, &obj->base, &handle);
207         if (ret) {
208                 drm_gem_object_release(&obj->base);
209                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
210                 kfree(obj);
211                 return ret;
212         }
213
214         /* drop reference from allocate - handle holds it now */
215         drm_gem_object_unreference(&obj->base);
216         trace_i915_gem_object_create(obj);
217
218         *handle_p = handle;
219         return 0;
220 }
221
222 int
223 i915_gem_dumb_create(struct drm_file *file,
224                      struct drm_device *dev,
225                      struct drm_mode_create_dumb *args)
226 {
227         /* have to work out size/pitch and return them */
228         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
229         args->size = args->pitch * args->height;
230         return i915_gem_create(file, dev,
231                                args->size, &args->handle);
232 }
233
234 int i915_gem_dumb_destroy(struct drm_file *file,
235                           struct drm_device *dev,
236                           uint32_t handle)
237 {
238         return drm_gem_handle_delete(file, handle);
239 }
240
241 /**
242  * Creates a new mm object and returns a handle to it.
243  */
244 int
245 i915_gem_create_ioctl(struct drm_device *dev, void *data,
246                       struct drm_file *file)
247 {
248         struct drm_i915_gem_create *args = data;
249         return i915_gem_create(file, dev,
250                                args->size, &args->handle);
251 }
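
/*
 * Illustrative userspace-side sketch (editorial addition, not part of the
 * driver): one way a client could exercise the create and pwrite ioctls
 * handled in this file through libdrm's drmIoctl().  The device path,
 * include paths and error handling are assumptions made for the example;
 * the argument structs and ioctl numbers are the ones from i915_drm.h.
 */
#if 0	/* userspace-side example, not compiled with the driver */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_create_and_write(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	struct drm_i915_gem_create create = { .size = 4096 };
	struct drm_i915_gem_pwrite pwrite = { 0 };
	char data[64] = "hello GEM";

	/* Serviced by i915_gem_create_ioctl() above. */
	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return -1;

	/* Serviced by i915_gem_pwrite_ioctl() below. */
	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = sizeof(data);
	pwrite.data_ptr = (uintptr_t)data;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
#endif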
252
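/* Objects tiled on a machine that swizzles on bit 17 of the page address
 * cannot use the fast pread/pwrite paths; they need the manual bit-17
 * copy routines below.
 */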
253 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
254 {
255         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
256
257         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
258                 obj->tiling_mode != I915_TILING_NONE;
259 }
260
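/* Copy between two kmap()ed pages, used by the slow pread/pwrite paths. */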
261 static inline void
262 slow_shmem_copy(struct page *dst_page,
263                 int dst_offset,
264                 struct page *src_page,
265                 int src_offset,
266                 int length)
267 {
268         char *dst_vaddr, *src_vaddr;
269
270         dst_vaddr = kmap(dst_page);
271         src_vaddr = kmap(src_page);
272
273         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
274
275         kunmap(src_page);
276         kunmap(dst_page);
277 }
278
279 static inline void
280 slow_shmem_bit17_copy(struct page *gpu_page,
281                       int gpu_offset,
282                       struct page *cpu_page,
283                       int cpu_offset,
284                       int length,
285                       int is_read)
286 {
287         char *gpu_vaddr, *cpu_vaddr;
288
289         /* Use the unswizzled path if this page isn't affected. */
290         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
291                 if (is_read)
292                         return slow_shmem_copy(cpu_page, cpu_offset,
293                                                gpu_page, gpu_offset, length);
294                 else
295                         return slow_shmem_copy(gpu_page, gpu_offset,
296                                                cpu_page, cpu_offset, length);
297         }
298
299         gpu_vaddr = kmap(gpu_page);
300         cpu_vaddr = kmap(cpu_page);
301
302         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
303          * XORing with the other bits (A9 for Y, A9 and A10 for X)
304          */
305         while (length > 0) {
306                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307                 int this_length = min(cacheline_end - gpu_offset, length);
308                 int swizzled_gpu_offset = gpu_offset ^ 64;
309
310                 if (is_read) {
311                         memcpy(cpu_vaddr + cpu_offset,
312                                gpu_vaddr + swizzled_gpu_offset,
313                                this_length);
314                 } else {
315                         memcpy(gpu_vaddr + swizzled_gpu_offset,
316                                cpu_vaddr + cpu_offset,
317                                this_length);
318                 }
319                 cpu_offset += this_length;
320                 gpu_offset += this_length;
321                 length -= this_length;
322         }
323
324         kunmap(cpu_page);
325         kunmap(gpu_page);
326 }
327
328 /**
329  * This is the fast shmem pread path, which attempts to copy_to_user directly
330  * from the backing pages of the object into the user's address space.  On a
331  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
332  */
333 static int
334 i915_gem_shmem_pread_fast(struct drm_device *dev,
335                           struct drm_i915_gem_object *obj,
336                           struct drm_i915_gem_pread *args,
337                           struct drm_file *file)
338 {
339         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
340         ssize_t remain;
341         loff_t offset;
342         char __user *user_data;
343         int page_offset, page_length;
344
345         user_data = (char __user *) (uintptr_t) args->data_ptr;
346         remain = args->size;
347
348         offset = args->offset;
349
350         while (remain > 0) {
351                 struct page *page;
352                 char *vaddr;
353                 int ret;
354
355                 /* Operation in this page
356                  *
357                  * page_offset = offset within page
358                  * page_length = bytes to copy for this page
359                  */
360                 page_offset = offset_in_page(offset);
361                 page_length = remain;
362                 if ((page_offset + remain) > PAGE_SIZE)
363                         page_length = PAGE_SIZE - page_offset;
364
365                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
366                 if (IS_ERR(page))
367                         return PTR_ERR(page);
368
369                 vaddr = kmap_atomic(page);
370                 ret = __copy_to_user_inatomic(user_data,
371                                               vaddr + page_offset,
372                                               page_length);
373                 kunmap_atomic(vaddr);
374
375                 mark_page_accessed(page);
376                 page_cache_release(page);
377                 if (ret)
378                         return -EFAULT;
379
380                 remain -= page_length;
381                 user_data += page_length;
382                 offset += page_length;
383         }
384
385         return 0;
386 }
387
388 /**
389  * This is the fallback shmem pread path, which pins the user pages with
390  * get_user_pages() outside of the struct_mutex, so that we can then copy
391  * out of the object's backing pages into them while holding the mutex
392  * without taking page faults.
393  */
394 static int
395 i915_gem_shmem_pread_slow(struct drm_device *dev,
396                           struct drm_i915_gem_object *obj,
397                           struct drm_i915_gem_pread *args,
398                           struct drm_file *file)
399 {
400         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
401         struct mm_struct *mm = current->mm;
402         struct page **user_pages;
403         ssize_t remain;
404         loff_t offset, pinned_pages, i;
405         loff_t first_data_page, last_data_page, num_pages;
406         int shmem_page_offset;
407         int data_page_index, data_page_offset;
408         int page_length;
409         int ret;
410         uint64_t data_ptr = args->data_ptr;
411         int do_bit17_swizzling;
412
413         remain = args->size;
414
415         /* Pin the user pages containing the data.  We can't fault while
416          * holding the struct mutex, yet we want to hold it while
417          * dereferencing the user data.
418          */
419         first_data_page = data_ptr / PAGE_SIZE;
420         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
421         num_pages = last_data_page - first_data_page + 1;
422
423         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
424         if (user_pages == NULL)
425                 return -ENOMEM;
426
427         mutex_unlock(&dev->struct_mutex);
428         down_read(&mm->mmap_sem);
429         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
430                                       num_pages, 1, 0, user_pages, NULL);
431         up_read(&mm->mmap_sem);
432         mutex_lock(&dev->struct_mutex);
433         if (pinned_pages < num_pages) {
434                 ret = -EFAULT;
435                 goto out;
436         }
437
438         ret = i915_gem_object_set_cpu_read_domain_range(obj,
439                                                         args->offset,
440                                                         args->size);
441         if (ret)
442                 goto out;
443
444         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
445
446         offset = args->offset;
447
448         while (remain > 0) {
449                 struct page *page;
450
451                 /* Operation in this page
452                  *
453                  * shmem_page_offset = offset within page in shmem file
454                  * data_page_index = page number in get_user_pages return
455                  * data_page_offset = offset within data_page_index page.
456                  * page_length = bytes to copy for this page
457                  */
458                 shmem_page_offset = offset_in_page(offset);
459                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
460                 data_page_offset = offset_in_page(data_ptr);
461
462                 page_length = remain;
463                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
464                         page_length = PAGE_SIZE - shmem_page_offset;
465                 if ((data_page_offset + page_length) > PAGE_SIZE)
466                         page_length = PAGE_SIZE - data_page_offset;
467
468                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
469                 if (IS_ERR(page)) {
470                         ret = PTR_ERR(page);
471                         goto out;
472                 }
473
474                 if (do_bit17_swizzling) {
475                         slow_shmem_bit17_copy(page,
476                                               shmem_page_offset,
477                                               user_pages[data_page_index],
478                                               data_page_offset,
479                                               page_length,
480                                               1);
481                 } else {
482                         slow_shmem_copy(user_pages[data_page_index],
483                                         data_page_offset,
484                                         page,
485                                         shmem_page_offset,
486                                         page_length);
487                 }
488
489                 mark_page_accessed(page);
490                 page_cache_release(page);
491
492                 remain -= page_length;
493                 data_ptr += page_length;
494                 offset += page_length;
495         }
496
497 out:
498         for (i = 0; i < pinned_pages; i++) {
499                 SetPageDirty(user_pages[i]);
500                 mark_page_accessed(user_pages[i]);
501                 page_cache_release(user_pages[i]);
502         }
503         drm_free_large(user_pages);
504
505         return ret;
506 }
507
508 /**
509  * Reads data from the object referenced by handle.
510  *
511  * On error, the contents of *data are undefined.
512  */
513 int
514 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
515                      struct drm_file *file)
516 {
517         struct drm_i915_gem_pread *args = data;
518         struct drm_i915_gem_object *obj;
519         int ret = 0;
520
521         if (args->size == 0)
522                 return 0;
523
524         if (!access_ok(VERIFY_WRITE,
525                        (char __user *)(uintptr_t)args->data_ptr,
526                        args->size))
527                 return -EFAULT;
528
529         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
530                                        args->size);
531         if (ret)
532                 return -EFAULT;
533
534         ret = i915_mutex_lock_interruptible(dev);
535         if (ret)
536                 return ret;
537
538         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
539         if (&obj->base == NULL) {
540                 ret = -ENOENT;
541                 goto unlock;
542         }
543
544         /* Bounds check source.  */
545         if (args->offset > obj->base.size ||
546             args->size > obj->base.size - args->offset) {
547                 ret = -EINVAL;
548                 goto out;
549         }
550
551         trace_i915_gem_object_pread(obj, args->offset, args->size);
552
553         ret = i915_gem_object_set_cpu_read_domain_range(obj,
554                                                         args->offset,
555                                                         args->size);
556         if (ret)
557                 goto out;
558
559         ret = -EFAULT;
560         if (!i915_gem_object_needs_bit17_swizzle(obj))
561                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
562         if (ret == -EFAULT)
563                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
564
565 out:
566         drm_gem_object_unreference(&obj->base);
567 unlock:
568         mutex_unlock(&dev->struct_mutex);
569         return ret;
570 }
571
572 /* This is the fast write path which cannot handle
573  * page faults in the source data
574  */
575
576 static inline int
577 fast_user_write(struct io_mapping *mapping,
578                 loff_t page_base, int page_offset,
579                 char __user *user_data,
580                 int length)
581 {
582         char *vaddr_atomic;
583         unsigned long unwritten;
584
585         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
586         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587                                                       user_data, length);
588         io_mapping_unmap_atomic(vaddr_atomic);
589         return unwritten;
590 }
591
592 /* Here's the write path which can sleep for
593  * page faults
594  */
595
596 static inline void
597 slow_kernel_write(struct io_mapping *mapping,
598                   loff_t gtt_base, int gtt_offset,
599                   struct page *user_page, int user_offset,
600                   int length)
601 {
602         char __iomem *dst_vaddr;
603         char *src_vaddr;
604
605         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
606         src_vaddr = kmap(user_page);
607
608         memcpy_toio(dst_vaddr + gtt_offset,
609                     src_vaddr + user_offset,
610                     length);
611
612         kunmap(user_page);
613         io_mapping_unmap(dst_vaddr);
614 }
615
616 /**
617  * This is the fast pwrite path, where we copy the data directly from the
618  * user into the GTT, uncached.
619  */
620 static int
621 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
622                          struct drm_i915_gem_object *obj,
623                          struct drm_i915_gem_pwrite *args,
624                          struct drm_file *file)
625 {
626         drm_i915_private_t *dev_priv = dev->dev_private;
627         ssize_t remain;
628         loff_t offset, page_base;
629         char __user *user_data;
630         int page_offset, page_length;
631
632         user_data = (char __user *) (uintptr_t) args->data_ptr;
633         remain = args->size;
634
635         offset = obj->gtt_offset + args->offset;
636
637         while (remain > 0) {
638                 /* Operation in this page
639                  *
640                  * page_base = page offset within aperture
641                  * page_offset = offset within page
642                  * page_length = bytes to copy for this page
643                  */
644                 page_base = offset & PAGE_MASK;
645                 page_offset = offset_in_page(offset);
646                 page_length = remain;
647                 if ((page_offset + remain) > PAGE_SIZE)
648                         page_length = PAGE_SIZE - page_offset;
649
650                 /* If we get a fault while copying data, then (presumably) our
651                  * source page isn't available.  Return the error and we'll
652                  * retry in the slow path.
653                  */
654                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
655                                     page_offset, user_data, page_length))
656                         return -EFAULT;
657
658                 remain -= page_length;
659                 user_data += page_length;
660                 offset += page_length;
661         }
662
663         return 0;
664 }
665
666 /**
667  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
668  * the user pages and then copies them into the aperture through a WC io mapping.
669  *
670  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
671  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
672  */
673 static int
674 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
675                          struct drm_i915_gem_object *obj,
676                          struct drm_i915_gem_pwrite *args,
677                          struct drm_file *file)
678 {
679         drm_i915_private_t *dev_priv = dev->dev_private;
680         ssize_t remain;
681         loff_t gtt_page_base, offset;
682         loff_t first_data_page, last_data_page, num_pages;
683         loff_t pinned_pages, i;
684         struct page **user_pages;
685         struct mm_struct *mm = current->mm;
686         int gtt_page_offset, data_page_offset, data_page_index, page_length;
687         int ret;
688         uint64_t data_ptr = args->data_ptr;
689
690         remain = args->size;
691
692         /* Pin the user pages containing the data.  We can't fault while
693          * holding the struct mutex, and all of the pwrite implementations
694          * want to hold it while dereferencing the user data.
695          */
696         first_data_page = data_ptr / PAGE_SIZE;
697         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
698         num_pages = last_data_page - first_data_page + 1;
699
700         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
701         if (user_pages == NULL)
702                 return -ENOMEM;
703
704         mutex_unlock(&dev->struct_mutex);
705         down_read(&mm->mmap_sem);
706         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
707                                       num_pages, 0, 0, user_pages, NULL);
708         up_read(&mm->mmap_sem);
709         mutex_lock(&dev->struct_mutex);
710         if (pinned_pages < num_pages) {
711                 ret = -EFAULT;
712                 goto out_unpin_pages;
713         }
714
715         ret = i915_gem_object_set_to_gtt_domain(obj, true);
716         if (ret)
717                 goto out_unpin_pages;
718
719         ret = i915_gem_object_put_fence(obj);
720         if (ret)
721                 goto out_unpin_pages;
722
723         offset = obj->gtt_offset + args->offset;
724
725         while (remain > 0) {
726                 /* Operation in this page
727                  *
728                  * gtt_page_base = page offset within aperture
729                  * gtt_page_offset = offset within page in aperture
730                  * data_page_index = page number in get_user_pages return
731                  * data_page_offset = offset within data_page_index page.
732                  * page_length = bytes to copy for this page
733                  */
734                 gtt_page_base = offset & PAGE_MASK;
735                 gtt_page_offset = offset_in_page(offset);
736                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
737                 data_page_offset = offset_in_page(data_ptr);
738
739                 page_length = remain;
740                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
741                         page_length = PAGE_SIZE - gtt_page_offset;
742                 if ((data_page_offset + page_length) > PAGE_SIZE)
743                         page_length = PAGE_SIZE - data_page_offset;
744
745                 slow_kernel_write(dev_priv->mm.gtt_mapping,
746                                   gtt_page_base, gtt_page_offset,
747                                   user_pages[data_page_index],
748                                   data_page_offset,
749                                   page_length);
750
751                 remain -= page_length;
752                 offset += page_length;
753                 data_ptr += page_length;
754         }
755
756 out_unpin_pages:
757         for (i = 0; i < pinned_pages; i++)
758                 page_cache_release(user_pages[i]);
759         drm_free_large(user_pages);
760
761         return ret;
762 }
763
764 /**
765  * This is the fast shmem pwrite path, which attempts to directly
766  * copy_from_user into the kmapped pages backing the object.
767  */
768 static int
769 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
770                            struct drm_i915_gem_object *obj,
771                            struct drm_i915_gem_pwrite *args,
772                            struct drm_file *file)
773 {
774         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
775         ssize_t remain;
776         loff_t offset;
777         char __user *user_data;
778         int page_offset, page_length;
779
780         user_data = (char __user *) (uintptr_t) args->data_ptr;
781         remain = args->size;
782
783         offset = args->offset;
784         obj->dirty = 1;
785
786         while (remain > 0) {
787                 struct page *page;
788                 char *vaddr;
789                 int ret;
790
791                 /* Operation in this page
792                  *
793                  * page_offset = offset within page
794                  * page_length = bytes to copy for this page
795                  */
796                 page_offset = offset_in_page(offset);
797                 page_length = remain;
798                 if ((page_offset + remain) > PAGE_SIZE)
799                         page_length = PAGE_SIZE - page_offset;
800
801                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
802                 if (IS_ERR(page))
803                         return PTR_ERR(page);
804
805                 vaddr = kmap_atomic(page);
806                 ret = __copy_from_user_inatomic(vaddr + page_offset,
807                                                 user_data,
808                                                 page_length);
809                 kunmap_atomic(vaddr);
810
811                 set_page_dirty(page);
812                 mark_page_accessed(page);
813                 page_cache_release(page);
814
815                 /* If we get a fault while copying data, then (presumably) our
816                  * source page isn't available.  Return the error and we'll
817                  * retry in the slow path.
818                  */
819                 if (ret)
820                         return -EFAULT;
821
822                 remain -= page_length;
823                 user_data += page_length;
824                 offset += page_length;
825         }
826
827         return 0;
828 }
829
830 /**
831  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
832  * the user pages and then copies them into the object's backing pages with kmap().
833  *
834  * This avoids taking mmap_sem for faulting on the user's address while the
835  * struct_mutex is held.
836  */
837 static int
838 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
839                            struct drm_i915_gem_object *obj,
840                            struct drm_i915_gem_pwrite *args,
841                            struct drm_file *file)
842 {
843         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
844         struct mm_struct *mm = current->mm;
845         struct page **user_pages;
846         ssize_t remain;
847         loff_t offset, pinned_pages, i;
848         loff_t first_data_page, last_data_page, num_pages;
849         int shmem_page_offset;
850         int data_page_index,  data_page_offset;
851         int page_length;
852         int ret;
853         uint64_t data_ptr = args->data_ptr;
854         int do_bit17_swizzling;
855
856         remain = args->size;
857
858         /* Pin the user pages containing the data.  We can't fault while
859          * holding the struct mutex, and all of the pwrite implementations
860          * want to hold it while dereferencing the user data.
861          */
862         first_data_page = data_ptr / PAGE_SIZE;
863         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
864         num_pages = last_data_page - first_data_page + 1;
865
866         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
867         if (user_pages == NULL)
868                 return -ENOMEM;
869
870         mutex_unlock(&dev->struct_mutex);
871         down_read(&mm->mmap_sem);
872         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
873                                       num_pages, 0, 0, user_pages, NULL);
874         up_read(&mm->mmap_sem);
875         mutex_lock(&dev->struct_mutex);
876         if (pinned_pages < num_pages) {
877                 ret = -EFAULT;
878                 goto out;
879         }
880
881         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
882         if (ret)
883                 goto out;
884
885         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886
887         offset = args->offset;
888         obj->dirty = 1;
889
890         while (remain > 0) {
891                 struct page *page;
892
893                 /* Operation in this page
894                  *
895                  * shmem_page_offset = offset within page in shmem file
896                  * data_page_index = page number in get_user_pages return
897                  * data_page_offset = offset within data_page_index page.
898                  * page_length = bytes to copy for this page
899                  */
900                 shmem_page_offset = offset_in_page(offset);
901                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
902                 data_page_offset = offset_in_page(data_ptr);
903
904                 page_length = remain;
905                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
906                         page_length = PAGE_SIZE - shmem_page_offset;
907                 if ((data_page_offset + page_length) > PAGE_SIZE)
908                         page_length = PAGE_SIZE - data_page_offset;
909
910                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
911                 if (IS_ERR(page)) {
912                         ret = PTR_ERR(page);
913                         goto out;
914                 }
915
916                 if (do_bit17_swizzling) {
917                         slow_shmem_bit17_copy(page,
918                                               shmem_page_offset,
919                                               user_pages[data_page_index],
920                                               data_page_offset,
921                                               page_length,
922                                               0);
923                 } else {
924                         slow_shmem_copy(page,
925                                         shmem_page_offset,
926                                         user_pages[data_page_index],
927                                         data_page_offset,
928                                         page_length);
929                 }
930
931                 set_page_dirty(page);
932                 mark_page_accessed(page);
933                 page_cache_release(page);
934
935                 remain -= page_length;
936                 data_ptr += page_length;
937                 offset += page_length;
938         }
939
940 out:
941         for (i = 0; i < pinned_pages; i++)
942                 page_cache_release(user_pages[i]);
943         drm_free_large(user_pages);
944
945         return ret;
946 }
947
948 /**
949  * Writes data to the object referenced by handle.
950  *
951  * On error, the contents of the buffer that were to be modified are undefined.
952  */
953 int
954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
955                       struct drm_file *file)
956 {
957         struct drm_i915_gem_pwrite *args = data;
958         struct drm_i915_gem_object *obj;
959         int ret;
960
961         if (args->size == 0)
962                 return 0;
963
964         if (!access_ok(VERIFY_READ,
965                        (char __user *)(uintptr_t)args->data_ptr,
966                        args->size))
967                 return -EFAULT;
968
969         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
970                                       args->size);
971         if (ret)
972                 return -EFAULT;
973
974         ret = i915_mutex_lock_interruptible(dev);
975         if (ret)
976                 return ret;
977
978         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
979         if (&obj->base == NULL) {
980                 ret = -ENOENT;
981                 goto unlock;
982         }
983
984         /* Bounds check destination. */
985         if (args->offset > obj->base.size ||
986             args->size > obj->base.size - args->offset) {
987                 ret = -EINVAL;
988                 goto out;
989         }
990
991         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
992
993         /* We can only do the GTT pwrite on untiled buffers, as otherwise
994          * it would end up going through the fenced access, and we'll get
995          * different detiling behavior between reading and writing.
996          * pread/pwrite currently are reading and writing from the CPU
997          * perspective, requiring manual detiling by the client.
998          */
999         if (obj->phys_obj)
1000                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1001         else if (obj->gtt_space &&
1002                  obj->tiling_mode == I915_TILING_NONE &&
1003                  obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1004                 ret = i915_gem_object_pin(obj, 0, true);
1005                 if (ret)
1006                         goto out;
1007
1008                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1009                 if (ret)
1010                         goto out_unpin;
1011
1012                 ret = i915_gem_object_put_fence(obj);
1013                 if (ret)
1014                         goto out_unpin;
1015
1016                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1017                 if (ret == -EFAULT)
1018                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1019
1020 out_unpin:
1021                 i915_gem_object_unpin(obj);
1022         } else {
1023                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1024                 if (ret)
1025                         goto out;
1026
1027                 ret = -EFAULT;
1028                 if (!i915_gem_object_needs_bit17_swizzle(obj))
1029                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1030                 if (ret == -EFAULT)
1031                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1032         }
1033
1034 out:
1035         drm_gem_object_unreference(&obj->base);
1036 unlock:
1037         mutex_unlock(&dev->struct_mutex);
1038         return ret;
1039 }
1040
1041 /**
1042  * Called when user space prepares to use an object with the CPU, either
1043  * through the mmap ioctl's mapping or a GTT mapping.
1044  */
1045 int
1046 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1047                           struct drm_file *file)
1048 {
1049         struct drm_i915_gem_set_domain *args = data;
1050         struct drm_i915_gem_object *obj;
1051         uint32_t read_domains = args->read_domains;
1052         uint32_t write_domain = args->write_domain;
1053         int ret;
1054
1055         if (!(dev->driver->driver_features & DRIVER_GEM))
1056                 return -ENODEV;
1057
1058         /* Only handle setting domains to types used by the CPU. */
1059         if (write_domain & I915_GEM_GPU_DOMAINS)
1060                 return -EINVAL;
1061
1062         if (read_domains & I915_GEM_GPU_DOMAINS)
1063                 return -EINVAL;
1064
1065         /* Having something in the write domain implies it's in the read
1066          * domain, and only that read domain.  Enforce that in the request.
1067          */
1068         if (write_domain != 0 && read_domains != write_domain)
1069                 return -EINVAL;
1070
1071         ret = i915_mutex_lock_interruptible(dev);
1072         if (ret)
1073                 return ret;
1074
1075         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1076         if (&obj->base == NULL) {
1077                 ret = -ENOENT;
1078                 goto unlock;
1079         }
1080
1081         if (read_domains & I915_GEM_DOMAIN_GTT) {
1082                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1083
1084                 /* Silently promote "you're not bound, there was nothing to do"
1085                  * to success, since the client was just asking us to
1086                  * make sure everything was done.
1087                  */
1088                 if (ret == -EINVAL)
1089                         ret = 0;
1090         } else {
1091                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1092         }
1093
1094         drm_gem_object_unreference(&obj->base);
1095 unlock:
1096         mutex_unlock(&dev->struct_mutex);
1097         return ret;
1098 }
1099
1100 /**
1101  * Called when user space has done writes to this buffer
1102  */
1103 int
1104 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1105                          struct drm_file *file)
1106 {
1107         struct drm_i915_gem_sw_finish *args = data;
1108         struct drm_i915_gem_object *obj;
1109         int ret = 0;
1110
1111         if (!(dev->driver->driver_features & DRIVER_GEM))
1112                 return -ENODEV;
1113
1114         ret = i915_mutex_lock_interruptible(dev);
1115         if (ret)
1116                 return ret;
1117
1118         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1119         if (&obj->base == NULL) {
1120                 ret = -ENOENT;
1121                 goto unlock;
1122         }
1123
1124         /* Pinned buffers may be scanout, so flush the cache */
1125         if (obj->pin_count)
1126                 i915_gem_object_flush_cpu_write_domain(obj);
1127
1128         drm_gem_object_unreference(&obj->base);
1129 unlock:
1130         mutex_unlock(&dev->struct_mutex);
1131         return ret;
1132 }
1133
1134 /**
1135  * Maps the contents of an object, returning the address it is mapped
1136  * into.
1137  *
1138  * While the mapping holds a reference on the contents of the object, it doesn't
1139  * imply a ref on the object itself.
1140  */
1141 int
1142 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1143                     struct drm_file *file)
1144 {
1145         struct drm_i915_private *dev_priv = dev->dev_private;
1146         struct drm_i915_gem_mmap *args = data;
1147         struct drm_gem_object *obj;
1148         unsigned long addr;
1149
1150         if (!(dev->driver->driver_features & DRIVER_GEM))
1151                 return -ENODEV;
1152
1153         obj = drm_gem_object_lookup(dev, file, args->handle);
1154         if (obj == NULL)
1155                 return -ENOENT;
1156
1157         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1158                 drm_gem_object_unreference_unlocked(obj);
1159                 return -E2BIG;
1160         }
1161
1162         down_write(&current->mm->mmap_sem);
1163         addr = do_mmap(obj->filp, 0, args->size,
1164                        PROT_READ | PROT_WRITE, MAP_SHARED,
1165                        args->offset);
1166         up_write(&current->mm->mmap_sem);
1167         drm_gem_object_unreference_unlocked(obj);
1168         if (IS_ERR((void *)addr))
1169                 return addr;
1170
1171         args->addr_ptr = (uint64_t) addr;
1172
1173         return 0;
1174 }
1175
1176 /**
1177  * i915_gem_fault - fault a page into the GTT
1178  * @vma: VMA in question
1179  * @vmf: fault info
1180  *
1181  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1182  * from userspace.  The fault handler takes care of binding the object to
1183  * the GTT (if needed), allocating and programming a fence register (again,
1184  * only if needed based on whether the old reg is still valid or the object
1185  * is tiled) and inserting a new PTE into the faulting process.
1186  *
1187  * Note that the faulting process may involve evicting existing objects
1188  * from the GTT and/or fence registers to make room.  So performance may
1189  * suffer if the GTT working set is large or there are few fence registers
1190  * left.
1191  */
1192 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1193 {
1194         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1195         struct drm_device *dev = obj->base.dev;
1196         drm_i915_private_t *dev_priv = dev->dev_private;
1197         pgoff_t page_offset;
1198         unsigned long pfn;
1199         int ret = 0;
1200         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1201
1202         /* We don't use vmf->pgoff since that has the fake offset */
1203         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1204                 PAGE_SHIFT;
1205
1206         ret = i915_mutex_lock_interruptible(dev);
1207         if (ret)
1208                 goto out;
1209
1210         trace_i915_gem_object_fault(obj, page_offset, true, write);
1211
1212         /* Now bind it into the GTT if needed */
1213         if (!obj->map_and_fenceable) {
1214                 ret = i915_gem_object_unbind(obj);
1215                 if (ret)
1216                         goto unlock;
1217         }
1218         if (!obj->gtt_space) {
1219                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1220                 if (ret)
1221                         goto unlock;
1222
1223                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1224                 if (ret)
1225                         goto unlock;
1226         }
1227
1228         if (obj->tiling_mode == I915_TILING_NONE)
1229                 ret = i915_gem_object_put_fence(obj);
1230         else
1231                 ret = i915_gem_object_get_fence(obj, NULL);
1232         if (ret)
1233                 goto unlock;
1234
1235         if (i915_gem_object_is_inactive(obj))
1236                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1237
1238         obj->fault_mappable = true;
1239
1240         pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1241                 page_offset;
1242
1243         /* Finally, remap it using the new GTT offset */
1244         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1245 unlock:
1246         mutex_unlock(&dev->struct_mutex);
1247 out:
1248         switch (ret) {
1249         case -EIO:
1250         case -EAGAIN:
1251                 /* Give the error handler a chance to run and move the
1252                  * objects off the GPU active list. Next time we service the
1253                  * fault, we should be able to transition the page into the
1254                  * GTT without touching the GPU (and so avoid further
1255  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1256                  * with coherency, just lost writes.
1257                  */
1258                 set_need_resched();
1259         case 0:
1260         case -ERESTARTSYS:
1261         case -EINTR:
1262         case -EBUSY:
1263                 /*
1264                  * EBUSY is ok: this just means that another thread
1265                  * already did the job.
1266                  */
1267                 return VM_FAULT_NOPAGE;
1268         case -ENOMEM:
1269                 return VM_FAULT_OOM;
1270         default:
1271                 return VM_FAULT_SIGBUS;
1272         }
1273 }
1274
1275 /**
1276  * i915_gem_release_mmap - remove physical page mappings
1277  * @obj: obj in question
1278  *
1279  * Preserve the reservation of the mmapping with the DRM core code, but
1280  * relinquish ownership of the pages back to the system.
1281  *
1282  * It is vital that we remove the page mapping if we have mapped a tiled
1283  * object through the GTT and then lose the fence register due to
1284  * resource pressure. Similarly if the object has been moved out of the
1285  * aperture, then pages mapped into userspace must be revoked. Removing the
1286  * mapping will then trigger a page fault on the next user access, allowing
1287  * fixup by i915_gem_fault().
1288  */
1289 void
1290 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1291 {
1292         if (!obj->fault_mappable)
1293                 return;
1294
1295         if (obj->base.dev->dev_mapping)
1296                 unmap_mapping_range(obj->base.dev->dev_mapping,
1297                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1298                                     obj->base.size, 1);
1299
1300         obj->fault_mappable = false;
1301 }
1302
1303 static uint32_t
1304 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1305 {
1306         uint32_t gtt_size;
1307
1308         if (INTEL_INFO(dev)->gen >= 4 ||
1309             tiling_mode == I915_TILING_NONE)
1310                 return size;
1311
1312         /* Previous chips need a power-of-two fence region when tiling */
1313         if (INTEL_INFO(dev)->gen == 3)
1314                 gtt_size = 1024*1024;
1315         else
1316                 gtt_size = 512*1024;
1317
1318         while (gtt_size < size)
1319                 gtt_size <<= 1;
1320
1321         return gtt_size;
1322 }
1323
1324 /**
1325  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1326  * @obj: object to check
1327  *
1328  * Return the required GTT alignment for an object, taking into account
1329  * potential fence register mapping.
1330  */
1331 static uint32_t
1332 i915_gem_get_gtt_alignment(struct drm_device *dev,
1333                            uint32_t size,
1334                            int tiling_mode)
1335 {
1336         /*
1337          * Minimum alignment is 4k (GTT page size), but might be greater
1338          * if a fence register is needed for the object.
1339          */
1340         if (INTEL_INFO(dev)->gen >= 4 ||
1341             tiling_mode == I915_TILING_NONE)
1342                 return 4096;
1343
1344         /*
1345          * Previous chips need to be aligned to the size of the smallest
1346          * fence register that can contain the object.
1347          */
1348         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1349 }
1350
1351 /**
1352  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1353  *                                       unfenced object
1354  * @dev: the device
1355  * @size: size of the object
1356  * @tiling_mode: tiling mode of the object
1357  *
1358  * Return the required GTT alignment for an object, only taking into account
1359  * unfenced tiled surface requirements.
1360  */
1361 uint32_t
1362 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1363                                     uint32_t size,
1364                                     int tiling_mode)
1365 {
1366         /*
1367          * Minimum alignment is 4k (GTT page size) for sane hw.
1368          */
1369         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1370             tiling_mode == I915_TILING_NONE)
1371                 return 4096;
1372
1373         /* Previous hardware however needs to be aligned to a power-of-two
1374          * tile height. The simplest method for determining this is to reuse
1375  * the power-of-two fenced object size.
1376          */
1377         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1378 }
1379
1380 int
1381 i915_gem_mmap_gtt(struct drm_file *file,
1382                   struct drm_device *dev,
1383                   uint32_t handle,
1384                   uint64_t *offset)
1385 {
1386         struct drm_i915_private *dev_priv = dev->dev_private;
1387         struct drm_i915_gem_object *obj;
1388         int ret;
1389
1390         if (!(dev->driver->driver_features & DRIVER_GEM))
1391                 return -ENODEV;
1392
1393         ret = i915_mutex_lock_interruptible(dev);
1394         if (ret)
1395                 return ret;
1396
1397         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1398         if (&obj->base == NULL) {
1399                 ret = -ENOENT;
1400                 goto unlock;
1401         }
1402
1403         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1404                 ret = -E2BIG;
1405                 goto out;
1406         }
1407
1408         if (obj->madv != I915_MADV_WILLNEED) {
1409                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1410                 ret = -EINVAL;
1411                 goto out;
1412         }
1413
1414         if (!obj->base.map_list.map) {
1415                 ret = drm_gem_create_mmap_offset(&obj->base);
1416                 if (ret)
1417                         goto out;
1418         }
1419
1420         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1421
1422 out:
1423         drm_gem_object_unreference(&obj->base);
1424 unlock:
1425         mutex_unlock(&dev->struct_mutex);
1426         return ret;
1427 }
1428
1429 /**
1430  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1431  * @dev: DRM device
1432  * @data: GTT mapping ioctl data
1433  * @file: GEM object info
1434  *
1435  * Simply returns the fake offset to userspace so it can mmap it.
1436  * The mmap call will end up in drm_gem_mmap(), which will set things
1437  * up so we can get faults in the handler above.
1438  *
1439  * The fault handler will take care of binding the object into the GTT
1440  * (since it may have been evicted to make room for something), allocating
1441  * a fence register, and mapping the appropriate aperture address into
1442  * userspace.
1443  */
1444 int
1445 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1446                         struct drm_file *file)
1447 {
1448         struct drm_i915_gem_mmap_gtt *args = data;
1449
1450         if (!(dev->driver->driver_features & DRIVER_GEM))
1451                 return -ENODEV;
1452
1453         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1454 }
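
/*
 * Illustrative userspace-side sketch (editorial addition, not part of the
 * driver): mapping a buffer through the GTT.  The fake offset returned by
 * the ioctl above is passed to mmap() on the DRM fd, and the first access
 * to the mapping then faults into i915_gem_fault().  Include paths and
 * error handling are assumptions made for the example.
 */
#if 0	/* userspace-side example, not compiled with the driver */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void *example_map_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* arg.offset is a fake offset token, not a physical address. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}
#endif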
1455
1456
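/*
 * Populate obj->pages with the object's backing pages from shmem, applying
 * the bit-17 swizzle fixup if the object needs it.
 */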
1457 static int
1458 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1459                               gfp_t gfpmask)
1460 {
1461         int page_count, i;
1462         struct address_space *mapping;
1463         struct inode *inode;
1464         struct page *page;
1465
1466         /* Get the list of pages out of our struct file.  They'll be pinned
1467          * at this point until we release them.
1468          */
1469         page_count = obj->base.size / PAGE_SIZE;
1470         BUG_ON(obj->pages != NULL);
1471         obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1472         if (obj->pages == NULL)
1473                 return -ENOMEM;
1474
1475         inode = obj->base.filp->f_path.dentry->d_inode;
1476         mapping = inode->i_mapping;
1477         gfpmask |= mapping_gfp_mask(mapping);
1478
1479         for (i = 0; i < page_count; i++) {
1480                 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1481                 if (IS_ERR(page))
1482                         goto err_pages;
1483
1484                 obj->pages[i] = page;
1485         }
1486
1487         if (i915_gem_object_needs_bit17_swizzle(obj))
1488                 i915_gem_object_do_bit_17_swizzle(obj);
1489
1490         return 0;
1491
1492 err_pages:
1493         while (i--)
1494                 page_cache_release(obj->pages[i]);
1495
1496         drm_free_large(obj->pages);
1497         obj->pages = NULL;
1498         return PTR_ERR(page);
1499 }
1500
1501 static void
1502 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1503 {
1504         int page_count = obj->base.size / PAGE_SIZE;
1505         int i;
1506
1507         BUG_ON(obj->madv == __I915_MADV_PURGED);
1508
1509         if (i915_gem_object_needs_bit17_swizzle(obj))
1510                 i915_gem_object_save_bit_17_swizzle(obj);
1511
1512         if (obj->madv == I915_MADV_DONTNEED)
1513                 obj->dirty = 0;
1514
1515         for (i = 0; i < page_count; i++) {
1516                 if (obj->dirty)
1517                         set_page_dirty(obj->pages[i]);
1518
1519                 if (obj->madv == I915_MADV_WILLNEED)
1520                         mark_page_accessed(obj->pages[i]);
1521
1522                 page_cache_release(obj->pages[i]);
1523         }
1524         obj->dirty = 0;
1525
1526         drm_free_large(obj->pages);
1527         obj->pages = NULL;
1528 }
1529
1530 void
1531 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1532                                struct intel_ring_buffer *ring,
1533                                u32 seqno)
1534 {
1535         struct drm_device *dev = obj->base.dev;
1536         struct drm_i915_private *dev_priv = dev->dev_private;
1537
1538         BUG_ON(ring == NULL);
1539         obj->ring = ring;
1540
1541         /* Add a reference if we're newly entering the active list. */
1542         if (!obj->active) {
1543                 drm_gem_object_reference(&obj->base);
1544                 obj->active = 1;
1545         }
1546
1547         /* Move from whatever list we were on to the tail of execution. */
1548         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1549         list_move_tail(&obj->ring_list, &ring->active_list);
1550
1551         obj->last_rendering_seqno = seqno;
1552
1553         if (obj->fenced_gpu_access) {
1554                 obj->last_fenced_seqno = seqno;
1555                 obj->last_fenced_ring = ring;
1556
1557                 /* Bump MRU to take account of the delayed flush */
1558                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1559                         struct drm_i915_fence_reg *reg;
1560
1561                         reg = &dev_priv->fence_regs[obj->fence_reg];
1562                         list_move_tail(&reg->lru_list,
1563                                        &dev_priv->mm.fence_list);
1564                 }
1565         }
1566 }
1567
1568 static void
1569 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1570 {
1571         list_del_init(&obj->ring_list);
1572         obj->last_rendering_seqno = 0;
1573         obj->last_fenced_seqno = 0;
1574 }
1575
1576 static void
1577 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1578 {
1579         struct drm_device *dev = obj->base.dev;
1580         drm_i915_private_t *dev_priv = dev->dev_private;
1581
1582         BUG_ON(!obj->active);
1583         list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1584
1585         i915_gem_object_move_off_active(obj);
1586 }
1587
1588 static void
1589 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1590 {
1591         struct drm_device *dev = obj->base.dev;
1592         struct drm_i915_private *dev_priv = dev->dev_private;
1593
1594         if (obj->pin_count != 0)
1595                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1596         else
1597                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1598
1599         BUG_ON(!list_empty(&obj->gpu_write_list));
1600         BUG_ON(!obj->active);
1601         obj->ring = NULL;
1602         obj->last_fenced_ring = NULL;
1603
1604         i915_gem_object_move_off_active(obj);
1605         obj->fenced_gpu_access = false;
1606
1607         obj->active = 0;
1608         obj->pending_gpu_write = false;
1609         drm_gem_object_unreference(&obj->base);
1610
1611         WARN_ON(i915_verify_lists(dev));
1612 }
1613
1614 /* Immediately discard the backing storage */
1615 static void
1616 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1617 {
1618         struct inode *inode;
1619
1620         /* Our goal here is to return as much of the memory as
1621          * possible back to the system, as we are called from OOM.
1622          * To do this we must instruct the shmfs to drop all of its
1623          * backing pages, *now*.
1624          */
1625         inode = obj->base.filp->f_path.dentry->d_inode;
1626         shmem_truncate_range(inode, 0, (loff_t)-1);
1627
1628         obj->madv = __I915_MADV_PURGED;
1629 }
1630
1631 static inline int
1632 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1633 {
1634         return obj->madv == I915_MADV_DONTNEED;
1635 }
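
/*
 * Editor's note: obj->madv is driven from userspace through the GEM
 * madvise ioctl.  A hedged sketch of the caller side (not part of this
 * file; "fd" and "handle" are assumed):
 *
 *      struct drm_i915_gem_madvise madv = {
 *              .handle = handle,
 *              .madv = I915_MADV_DONTNEED,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * A DONTNEED object may have its backing store truncated as above under
 * memory pressure; madv.retained reports on return whether the pages
 * still exist.
 */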
1636
1637 static void
1638 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1639                                uint32_t flush_domains)
1640 {
1641         struct drm_i915_gem_object *obj, *next;
1642
1643         list_for_each_entry_safe(obj, next,
1644                                  &ring->gpu_write_list,
1645                                  gpu_write_list) {
1646                 if (obj->base.write_domain & flush_domains) {
1647                         uint32_t old_write_domain = obj->base.write_domain;
1648
1649                         obj->base.write_domain = 0;
1650                         list_del_init(&obj->gpu_write_list);
1651                         i915_gem_object_move_to_active(obj, ring,
1652                                                        i915_gem_next_request_seqno(ring));
1653
1654                         trace_i915_gem_object_change_domain(obj,
1655                                                             obj->base.read_domains,
1656                                                             old_write_domain);
1657                 }
1658         }
1659 }
1660
1661 static u32
1662 i915_gem_get_seqno(struct drm_device *dev)
1663 {
1664         drm_i915_private_t *dev_priv = dev->dev_private;
1665         u32 seqno = dev_priv->next_seqno;
1666
1667         /* reserve 0 for non-seqno */
1668         if (++dev_priv->next_seqno == 0)
1669                 dev_priv->next_seqno = 1;
1670
1671         return seqno;
1672 }
1673
1674 u32
1675 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1676 {
1677         if (ring->outstanding_lazy_request == 0)
1678                 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1679
1680         return ring->outstanding_lazy_request;
1681 }
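
/*
 * Editor's note: seqnos are compared with wraparound in mind.  For
 * reference, i915_seqno_passed() (defined in i915_drv.h) is roughly:
 *
 *      static inline bool
 *      i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 *      {
 *              return (int32_t)(seq1 - seq2) >= 0;
 *      }
 *
 * so the allocator above only has to avoid handing out 0; it does not
 * need to worry about next_seqno eventually overflowing.
 */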
1682
1683 int
1684 i915_add_request(struct intel_ring_buffer *ring,
1685                  struct drm_file *file,
1686                  struct drm_i915_gem_request *request)
1687 {
1688         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1689         uint32_t seqno;
1690         int was_empty;
1691         int ret;
1692
1693         BUG_ON(request == NULL);
1694         seqno = i915_gem_next_request_seqno(ring);
1695
1696         ret = ring->add_request(ring, &seqno);
1697         if (ret)
1698                 return ret;
1699
1700         trace_i915_gem_request_add(ring, seqno);
1701
1702         request->seqno = seqno;
1703         request->ring = ring;
1704         request->emitted_jiffies = jiffies;
1705         was_empty = list_empty(&ring->request_list);
1706         list_add_tail(&request->list, &ring->request_list);
1707
1708         if (file) {
1709                 struct drm_i915_file_private *file_priv = file->driver_priv;
1710
1711                 spin_lock(&file_priv->mm.lock);
1712                 request->file_priv = file_priv;
1713                 list_add_tail(&request->client_list,
1714                               &file_priv->mm.request_list);
1715                 spin_unlock(&file_priv->mm.lock);
1716         }
1717
1718         ring->outstanding_lazy_request = false;
1719
1720         if (!dev_priv->mm.suspended) {
1721                 if (i915_enable_hangcheck) {
1722                         mod_timer(&dev_priv->hangcheck_timer,
1723                                   jiffies +
1724                                   msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1725                 }
1726                 if (was_empty)
1727                         queue_delayed_work(dev_priv->wq,
1728                                            &dev_priv->mm.retire_work, HZ);
1729         }
1730         return 0;
1731 }
1732
1733 static inline void
1734 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1735 {
1736         struct drm_i915_file_private *file_priv = request->file_priv;
1737
1738         if (!file_priv)
1739                 return;
1740
1741         spin_lock(&file_priv->mm.lock);
1742         if (request->file_priv) {
1743                 list_del(&request->client_list);
1744                 request->file_priv = NULL;
1745         }
1746         spin_unlock(&file_priv->mm.lock);
1747 }
1748
1749 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1750                                       struct intel_ring_buffer *ring)
1751 {
1752         while (!list_empty(&ring->request_list)) {
1753                 struct drm_i915_gem_request *request;
1754
1755                 request = list_first_entry(&ring->request_list,
1756                                            struct drm_i915_gem_request,
1757                                            list);
1758
1759                 list_del(&request->list);
1760                 i915_gem_request_remove_from_client(request);
1761                 kfree(request);
1762         }
1763
1764         while (!list_empty(&ring->active_list)) {
1765                 struct drm_i915_gem_object *obj;
1766
1767                 obj = list_first_entry(&ring->active_list,
1768                                        struct drm_i915_gem_object,
1769                                        ring_list);
1770
1771                 obj->base.write_domain = 0;
1772                 list_del_init(&obj->gpu_write_list);
1773                 i915_gem_object_move_to_inactive(obj);
1774         }
1775 }
1776
1777 static void i915_gem_reset_fences(struct drm_device *dev)
1778 {
1779         struct drm_i915_private *dev_priv = dev->dev_private;
1780         int i;
1781
1782         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1783                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1784                 struct drm_i915_gem_object *obj = reg->obj;
1785
1786                 if (!obj)
1787                         continue;
1788
1789                 if (obj->tiling_mode)
1790                         i915_gem_release_mmap(obj);
1791
1792                 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1793                 reg->obj->fenced_gpu_access = false;
1794                 reg->obj->last_fenced_seqno = 0;
1795                 reg->obj->last_fenced_ring = NULL;
1796                 i915_gem_clear_fence_reg(dev, reg);
1797         }
1798 }
1799
1800 void i915_gem_reset(struct drm_device *dev)
1801 {
1802         struct drm_i915_private *dev_priv = dev->dev_private;
1803         struct drm_i915_gem_object *obj;
1804         int i;
1805
1806         for (i = 0; i < I915_NUM_RINGS; i++)
1807                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1808
1809         /* Remove anything from the flushing lists. The GPU cache is likely
1810          * to be lost on reset along with the data, so simply move the
1811          * lost bo to the inactive list.
1812          */
1813         while (!list_empty(&dev_priv->mm.flushing_list)) {
1814                 obj = list_first_entry(&dev_priv->mm.flushing_list,
1815                                       struct drm_i915_gem_object,
1816                                       mm_list);
1817
1818                 obj->base.write_domain = 0;
1819                 list_del_init(&obj->gpu_write_list);
1820                 i915_gem_object_move_to_inactive(obj);
1821         }
1822
1823         /* Move everything out of the GPU domains to ensure we do any
1824          * necessary invalidation upon reuse.
1825          */
1826         list_for_each_entry(obj,
1827                             &dev_priv->mm.inactive_list,
1828                             mm_list)
1829         {
1830                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1831         }
1832
1833         /* The fence registers are invalidated so clear them out */
1834         i915_gem_reset_fences(dev);
1835 }
1836
1837 /**
1838  * This function retires completed requests from the ring's request list as their sequence numbers are passed.
1839  */
1840 static void
1841 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1842 {
1843         uint32_t seqno;
1844         int i;
1845
1846         if (list_empty(&ring->request_list))
1847                 return;
1848
1849         WARN_ON(i915_verify_lists(ring->dev));
1850
1851         seqno = ring->get_seqno(ring);
1852
1853         for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1854                 if (seqno >= ring->sync_seqno[i])
1855                         ring->sync_seqno[i] = 0;
1856
1857         while (!list_empty(&ring->request_list)) {
1858                 struct drm_i915_gem_request *request;
1859
1860                 request = list_first_entry(&ring->request_list,
1861                                            struct drm_i915_gem_request,
1862                                            list);
1863
1864                 if (!i915_seqno_passed(seqno, request->seqno))
1865                         break;
1866
1867                 trace_i915_gem_request_retire(ring, request->seqno);
1868
1869                 list_del(&request->list);
1870                 i915_gem_request_remove_from_client(request);
1871                 kfree(request);
1872         }
1873
1874         /* Move any buffers on the active list that are no longer referenced
1875          * by the ringbuffer to the flushing/inactive lists as appropriate.
1876          */
1877         while (!list_empty(&ring->active_list)) {
1878                 struct drm_i915_gem_object *obj;
1879
1880                 obj = list_first_entry(&ring->active_list,
1881                                       struct drm_i915_gem_object,
1882                                       ring_list);
1883
1884                 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1885                         break;
1886
1887                 if (obj->base.write_domain != 0)
1888                         i915_gem_object_move_to_flushing(obj);
1889                 else
1890                         i915_gem_object_move_to_inactive(obj);
1891         }
1892
1893         if (unlikely(ring->trace_irq_seqno &&
1894                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1895                 ring->irq_put(ring);
1896                 ring->trace_irq_seqno = 0;
1897         }
1898
1899         WARN_ON(i915_verify_lists(ring->dev));
1900 }
1901
1902 void
1903 i915_gem_retire_requests(struct drm_device *dev)
1904 {
1905         drm_i915_private_t *dev_priv = dev->dev_private;
1906         int i;
1907
1908         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1909                 struct drm_i915_gem_object *obj, *next;
1910
1911                 /* We must be careful that during unbind() we do not
1912                  * accidentally infinitely recurse into retire requests.
1913                  * Currently:
1914                  *   retire -> free -> unbind -> wait -> retire_ring
1915                  */
1916                 list_for_each_entry_safe(obj, next,
1917                                          &dev_priv->mm.deferred_free_list,
1918                                          mm_list)
1919                         i915_gem_free_object_tail(obj);
1920         }
1921
1922         for (i = 0; i < I915_NUM_RINGS; i++)
1923                 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1924 }
1925
1926 static void
1927 i915_gem_retire_work_handler(struct work_struct *work)
1928 {
1929         drm_i915_private_t *dev_priv;
1930         struct drm_device *dev;
1931         bool idle;
1932         int i;
1933
1934         dev_priv = container_of(work, drm_i915_private_t,
1935                                 mm.retire_work.work);
1936         dev = dev_priv->dev;
1937
1938         /* Come back later if the device is busy... */
1939         if (!mutex_trylock(&dev->struct_mutex)) {
1940                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1941                 return;
1942         }
1943
1944         i915_gem_retire_requests(dev);
1945
1946         /* Send a periodic flush down the ring so we don't hold onto GEM
1947          * objects indefinitely.
1948          */
1949         idle = true;
1950         for (i = 0; i < I915_NUM_RINGS; i++) {
1951                 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1952
1953                 if (!list_empty(&ring->gpu_write_list)) {
1954                         struct drm_i915_gem_request *request;
1955                         int ret;
1956
1957                         ret = i915_gem_flush_ring(ring,
1958                                                   0, I915_GEM_GPU_DOMAINS);
1959                         request = kzalloc(sizeof(*request), GFP_KERNEL);
1960                         if (ret || request == NULL ||
1961                             i915_add_request(ring, NULL, request))
1962                                 kfree(request);
1963                 }
1964
1965                 idle &= list_empty(&ring->request_list);
1966         }
1967
1968         if (!dev_priv->mm.suspended && !idle)
1969                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1970
1971         mutex_unlock(&dev->struct_mutex);
1972 }
1973
1974 /**
1975  * Waits for a sequence number to be signaled, and cleans up the
1976  * request and object lists appropriately for that event.
1977  */
1978 int
1979 i915_wait_request(struct intel_ring_buffer *ring,
1980                   uint32_t seqno)
1981 {
1982         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1983         u32 ier;
1984         int ret = 0;
1985
1986         BUG_ON(seqno == 0);
1987
1988         if (atomic_read(&dev_priv->mm.wedged)) {
1989                 struct completion *x = &dev_priv->error_completion;
1990                 bool recovery_complete;
1991                 unsigned long flags;
1992
1993                 /* Give the error handler a chance to run. */
1994                 spin_lock_irqsave(&x->wait.lock, flags);
1995                 recovery_complete = x->done > 0;
1996                 spin_unlock_irqrestore(&x->wait.lock, flags);
1997
1998                 return recovery_complete ? -EIO : -EAGAIN;
1999         }
2000
2001         if (seqno == ring->outstanding_lazy_request) {
2002                 struct drm_i915_gem_request *request;
2003
2004                 request = kzalloc(sizeof(*request), GFP_KERNEL);
2005                 if (request == NULL)
2006                         return -ENOMEM;
2007
2008                 ret = i915_add_request(ring, NULL, request);
2009                 if (ret) {
2010                         kfree(request);
2011                         return ret;
2012                 }
2013
2014                 seqno = request->seqno;
2015         }
2016
2017         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2018                 if (HAS_PCH_SPLIT(ring->dev))
2019                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2020                 else
2021                         ier = I915_READ(IER);
2022                 if (!ier) {
2023                         DRM_ERROR("something (likely vbetool) disabled "
2024                                   "interrupts, re-enabling\n");
2025                         ring->dev->driver->irq_preinstall(ring->dev);
2026                         ring->dev->driver->irq_postinstall(ring->dev);
2027                 }
2028
2029                 trace_i915_gem_request_wait_begin(ring, seqno);
2030
2031                 ring->waiting_seqno = seqno;
2032                 if (ring->irq_get(ring)) {
2033                         if (dev_priv->mm.interruptible)
2034                                 ret = wait_event_interruptible(ring->irq_queue,
2035                                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
2036                                                                || atomic_read(&dev_priv->mm.wedged));
2037                         else
2038                                 wait_event(ring->irq_queue,
2039                                            i915_seqno_passed(ring->get_seqno(ring), seqno)
2040                                            || atomic_read(&dev_priv->mm.wedged));
2041
2042                         ring->irq_put(ring);
2043                 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2044                                                       seqno) ||
2045                                     atomic_read(&dev_priv->mm.wedged), 3000))
2046                         ret = -EBUSY;
2047                 ring->waiting_seqno = 0;
2048
2049                 trace_i915_gem_request_wait_end(ring, seqno);
2050         }
2051         if (atomic_read(&dev_priv->mm.wedged))
2052                 ret = -EAGAIN;
2053
2054         if (ret && ret != -ERESTARTSYS)
2055                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2056                           __func__, ret, seqno, ring->get_seqno(ring),
2057                           dev_priv->next_seqno);
2058
2059         /* Directly dispatch request retiring.  While we have the work queue
2060          * to handle this, the waiter on a request often wants an associated
2061          * buffer to have made it to the inactive list, and we would need
2062          * a separate wait queue to handle that.
2063          */
2064         if (ret == 0)
2065                 i915_gem_retire_requests_ring(ring);
2066
2067         return ret;
2068 }
2069
2070 /**
2071  * Ensures that all rendering to the object has completed and the object is
2072  * safe to unbind from the GTT or access from the CPU.
2073  */
2074 int
2075 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2076 {
2077         int ret;
2078
2079         /* This function only exists to support waiting for existing rendering,
2080          * not for emitting required flushes.
2081          */
2082         BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2083
2084         /* If there is rendering queued on the buffer being evicted, wait for
2085          * it.
2086          */
2087         if (obj->active) {
2088                 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2089                 if (ret)
2090                         return ret;
2091         }
2092
2093         return 0;
2094 }
2095
2096 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2097 {
2098         u32 old_write_domain, old_read_domains;
2099
2100         /* Act as a barrier for all accesses through the GTT */
2101         mb();
2102
2103         /* Force a pagefault for domain tracking on next user access */
2104         i915_gem_release_mmap(obj);
2105
2106         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2107                 return;
2108
2109         old_read_domains = obj->base.read_domains;
2110         old_write_domain = obj->base.write_domain;
2111
2112         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2113         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2114
2115         trace_i915_gem_object_change_domain(obj,
2116                                             old_read_domains,
2117                                             old_write_domain);
2118 }
2119
2120 /**
2121  * Unbinds an object from the GTT aperture.
2122  */
2123 int
2124 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2125 {
2126         int ret = 0;
2127
2128         if (obj->gtt_space == NULL)
2129                 return 0;
2130
2131         if (obj->pin_count != 0) {
2132                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2133                 return -EINVAL;
2134         }
2135
2136         ret = i915_gem_object_finish_gpu(obj);
2137         if (ret == -ERESTARTSYS)
2138                 return ret;
2139         /* Continue on if we fail due to EIO: the GPU is hung, so we
2140          * should be safe, and we need to clean up or else we might
2141          * cause memory corruption through use-after-free.
2142          */
2143
2144         i915_gem_object_finish_gtt(obj);
2145
2146         /* Move the object to the CPU domain to ensure that
2147          * any possible CPU writes while it's not in the GTT
2148          * are flushed when we go to remap it.
2149          */
2150         if (ret == 0)
2151                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2152         if (ret == -ERESTARTSYS)
2153                 return ret;
2154         if (ret) {
2155                 /* In the event of a disaster, abandon all caches and
2156                  * hope for the best.
2157                  */
2158                 i915_gem_clflush_object(obj);
2159                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2160         }
2161
2162         /* release the fence reg _after_ flushing */
2163         ret = i915_gem_object_put_fence(obj);
2164         if (ret == -ERESTARTSYS)
2165                 return ret;
2166
2167         trace_i915_gem_object_unbind(obj);
2168
2169         i915_gem_gtt_unbind_object(obj);
2170         i915_gem_object_put_pages_gtt(obj);
2171
2172         list_del_init(&obj->gtt_list);
2173         list_del_init(&obj->mm_list);
2174         /* Avoid an unnecessary call to unbind on rebind. */
2175         obj->map_and_fenceable = true;
2176
2177         drm_mm_put_block(obj->gtt_space);
2178         obj->gtt_space = NULL;
2179         obj->gtt_offset = 0;
2180
2181         if (i915_gem_object_is_purgeable(obj))
2182                 i915_gem_object_truncate(obj);
2183
2184         return ret;
2185 }
2186
2187 int
2188 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2189                     uint32_t invalidate_domains,
2190                     uint32_t flush_domains)
2191 {
2192         int ret;
2193
2194         if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2195                 return 0;
2196
2197         trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2198
2199         ret = ring->flush(ring, invalidate_domains, flush_domains);
2200         if (ret)
2201                 return ret;
2202
2203         if (flush_domains & I915_GEM_GPU_DOMAINS)
2204                 i915_gem_process_flushing_list(ring, flush_domains);
2205
2206         return 0;
2207 }
2208
2209 static int i915_ring_idle(struct intel_ring_buffer *ring)
2210 {
2211         int ret;
2212
2213         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2214                 return 0;
2215
2216         if (!list_empty(&ring->gpu_write_list)) {
2217                 ret = i915_gem_flush_ring(ring,
2218                                     I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2219                 if (ret)
2220                         return ret;
2221         }
2222
2223         return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2224 }
2225
2226 int
2227 i915_gpu_idle(struct drm_device *dev)
2228 {
2229         drm_i915_private_t *dev_priv = dev->dev_private;
2230         int ret, i;
2231
2232         /* Flush everything onto the inactive list. */
2233         for (i = 0; i < I915_NUM_RINGS; i++) {
2234                 ret = i915_ring_idle(&dev_priv->ring[i]);
2235                 if (ret)
2236                         return ret;
2237         }
2238
2239         return 0;
2240 }
2241
2242 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2243                                        struct intel_ring_buffer *pipelined)
2244 {
2245         struct drm_device *dev = obj->base.dev;
2246         drm_i915_private_t *dev_priv = dev->dev_private;
2247         u32 size = obj->gtt_space->size;
2248         int regnum = obj->fence_reg;
2249         uint64_t val;
2250
2251         /* Adjust fence size to match tiled area */
2252         if (obj->tiling_mode != I915_TILING_NONE) {
2253                 uint32_t row_size = obj->stride *
2254                         (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
2255                 size = (size / row_size) * row_size;
2256         }
2257
2258         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2259                          0xfffff000) << 32;
2260         val |= obj->gtt_offset & 0xfffff000;
2261         val |= (uint64_t)((obj->stride / 128) - 1) <<
2262                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2263
2264         if (obj->tiling_mode == I915_TILING_Y)
2265                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2266         val |= I965_FENCE_REG_VALID;
2267
2268         if (pipelined) {
2269                 int ret = intel_ring_begin(pipelined, 6);
2270                 if (ret)
2271                         return ret;
2272
2273                 intel_ring_emit(pipelined, MI_NOOP);
2274                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2275                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2276                 intel_ring_emit(pipelined, (u32)val);
2277                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2278                 intel_ring_emit(pipelined, (u32)(val >> 32));
2279                 intel_ring_advance(pipelined);
2280         } else
2281                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2282
2283         return 0;
2284 }
2285
2286 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2287                                 struct intel_ring_buffer *pipelined)
2288 {
2289         struct drm_device *dev = obj->base.dev;
2290         drm_i915_private_t *dev_priv = dev->dev_private;
2291         u32 size = obj->gtt_space->size;
2292         int regnum = obj->fence_reg;
2293         uint64_t val;
2294
2295         /* Adjust fence size to match tiled area */
2296         if (obj->tiling_mode != I915_TILING_NONE) {
2297                 uint32_t row_size = obj->stride *
2298                         (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
2299                 size = (size / row_size) * row_size;
2300         }
2301
2302         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2303                     0xfffff000) << 32;
2304         val |= obj->gtt_offset & 0xfffff000;
2305         val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2306         if (obj->tiling_mode == I915_TILING_Y)
2307                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2308         val |= I965_FENCE_REG_VALID;
2309
2310         if (pipelined) {
2311                 int ret = intel_ring_begin(pipelined, 6);
2312                 if (ret)
2313                         return ret;
2314
2315                 intel_ring_emit(pipelined, MI_NOOP);
2316                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2317                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2318                 intel_ring_emit(pipelined, (u32)val);
2319                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2320                 intel_ring_emit(pipelined, (u32)(val >> 32));
2321                 intel_ring_advance(pipelined);
2322         } else
2323                 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2324
2325         return 0;
2326 }
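
/*
 * Editor's note: a worked example of the encoding above, for a hypothetical
 * 1 MiB X-tiled object at gtt_offset 0x00100000 with a 4096 byte stride:
 *
 *      row_size = 4096 * 8 = 32768, so size stays 0x00100000
 *      val  = ((0x00100000 + 0x00100000 - 4096) & 0xfffff000) << 32
 *      val |= 0x00100000
 *      val |= ((4096 / 128) - 1) << I965_FENCE_PITCH_SHIFT
 *      val |= I965_FENCE_REG_VALID
 *
 * i.e. the upper dword holds the start of the last page of the fenced
 * range, the lower dword the first page, plus the pitch in 128 byte units
 * (minus one) and the valid bit.
 */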
2327
2328 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2329                                 struct intel_ring_buffer *pipelined)
2330 {
2331         struct drm_device *dev = obj->base.dev;
2332         drm_i915_private_t *dev_priv = dev->dev_private;
2333         u32 size = obj->gtt_space->size;
2334         u32 fence_reg, val, pitch_val;
2335         int tile_width;
2336
2337         if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2338                  (size & -size) != size ||
2339                  (obj->gtt_offset & (size - 1)),
2340                  "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2341                  obj->gtt_offset, obj->map_and_fenceable, size))
2342                 return -EINVAL;
2343
2344         if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2345                 tile_width = 128;
2346         else
2347                 tile_width = 512;
2348
2349         /* Note: pitch better be a power of two tile widths */
2350         pitch_val = obj->stride / tile_width;
2351         pitch_val = ffs(pitch_val) - 1;
2352
2353         val = obj->gtt_offset;
2354         if (obj->tiling_mode == I915_TILING_Y)
2355                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2356         val |= I915_FENCE_SIZE_BITS(size);
2357         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2358         val |= I830_FENCE_REG_VALID;
2359
2360         fence_reg = obj->fence_reg;
2361         if (fence_reg < 8)
2362                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2363         else
2364                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2365
2366         if (pipelined) {
2367                 int ret = intel_ring_begin(pipelined, 4);
2368                 if (ret)
2369                         return ret;
2370
2371                 intel_ring_emit(pipelined, MI_NOOP);
2372                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2373                 intel_ring_emit(pipelined, fence_reg);
2374                 intel_ring_emit(pipelined, val);
2375                 intel_ring_advance(pipelined);
2376         } else
2377                 I915_WRITE(fence_reg, val);
2378
2379         return 0;
2380 }
2381
2382 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2383                                 struct intel_ring_buffer *pipelined)
2384 {
2385         struct drm_device *dev = obj->base.dev;
2386         drm_i915_private_t *dev_priv = dev->dev_private;
2387         u32 size = obj->gtt_space->size;
2388         int regnum = obj->fence_reg;
2389         uint32_t val;
2390         uint32_t pitch_val;
2391
2392         if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2393                  (size & -size) != size ||
2394                  (obj->gtt_offset & (size - 1)),
2395                  "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2396                  obj->gtt_offset, size))
2397                 return -EINVAL;
2398
2399         pitch_val = obj->stride / 128;
2400         pitch_val = ffs(pitch_val) - 1;
2401
2402         val = obj->gtt_offset;
2403         if (obj->tiling_mode == I915_TILING_Y)
2404                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2405         val |= I830_FENCE_SIZE_BITS(size);
2406         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2407         val |= I830_FENCE_REG_VALID;
2408
2409         if (pipelined) {
2410                 int ret = intel_ring_begin(pipelined, 4);
2411                 if (ret)
2412                         return ret;
2413
2414                 intel_ring_emit(pipelined, MI_NOOP);
2415                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2416                 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2417                 intel_ring_emit(pipelined, val);
2418                 intel_ring_advance(pipelined);
2419         } else
2420                 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2421
2422         return 0;
2423 }
2424
2425 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2426 {
2427         return i915_seqno_passed(ring->get_seqno(ring), seqno);
2428 }
2429
2430 static int
2431 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2432                             struct intel_ring_buffer *pipelined)
2433 {
2434         int ret;
2435
2436         if (obj->fenced_gpu_access) {
2437                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2438                         ret = i915_gem_flush_ring(obj->last_fenced_ring,
2439                                                   0, obj->base.write_domain);
2440                         if (ret)
2441                                 return ret;
2442                 }
2443
2444                 obj->fenced_gpu_access = false;
2445         }
2446
2447         if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2448                 if (!ring_passed_seqno(obj->last_fenced_ring,
2449                                        obj->last_fenced_seqno)) {
2450                         ret = i915_wait_request(obj->last_fenced_ring,
2451                                                 obj->last_fenced_seqno);
2452                         if (ret)
2453                                 return ret;
2454                 }
2455
2456                 obj->last_fenced_seqno = 0;
2457                 obj->last_fenced_ring = NULL;
2458         }
2459
2460         /* Ensure that all CPU reads are completed before installing a fence
2461          * and all writes before removing the fence.
2462          */
2463         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2464                 mb();
2465
2466         return 0;
2467 }
2468
2469 int
2470 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2471 {
2472         int ret;
2473
2474         if (obj->tiling_mode)
2475                 i915_gem_release_mmap(obj);
2476
2477         ret = i915_gem_object_flush_fence(obj, NULL);
2478         if (ret)
2479                 return ret;
2480
2481         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2482                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2483                 i915_gem_clear_fence_reg(obj->base.dev,
2484                                          &dev_priv->fence_regs[obj->fence_reg]);
2485
2486                 obj->fence_reg = I915_FENCE_REG_NONE;
2487         }
2488
2489         return 0;
2490 }
2491
2492 static struct drm_i915_fence_reg *
2493 i915_find_fence_reg(struct drm_device *dev,
2494                     struct intel_ring_buffer *pipelined)
2495 {
2496         struct drm_i915_private *dev_priv = dev->dev_private;
2497         struct drm_i915_fence_reg *reg, *first, *avail;
2498         int i;
2499
2500         /* First try to find a free reg */
2501         avail = NULL;
2502         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2503                 reg = &dev_priv->fence_regs[i];
2504                 if (!reg->obj)
2505                         return reg;
2506
2507                 if (!reg->obj->pin_count)
2508                         avail = reg;
2509         }
2510
2511         if (avail == NULL)
2512                 return NULL;
2513
2514         /* None available, try to steal one or wait for a user to finish */
2515         avail = first = NULL;
2516         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2517                 if (reg->obj->pin_count)
2518                         continue;
2519
2520                 if (first == NULL)
2521                         first = reg;
2522
2523                 if (!pipelined ||
2524                     !reg->obj->last_fenced_ring ||
2525                     reg->obj->last_fenced_ring == pipelined) {
2526                         avail = reg;
2527                         break;
2528                 }
2529         }
2530
2531         if (avail == NULL)
2532                 avail = first;
2533
2534         return avail;
2535 }
2536
2537 static void i915_gem_write_fence__ipi(void *data)
2538 {
2539         wbinvd();
2540 }
2541
2542 /**
2543  * i915_gem_object_get_fence - set up a fence reg for an object
2544  * @obj: object to map through a fence reg
2545  * @pipelined: ring on which to queue the change, or NULL for CPU access
2547  *
2548  * When mapping objects through the GTT, userspace wants to be able to write
2549  * to them without having to worry about swizzling if the object is tiled.
2550  *
2551  * This function walks the fence regs looking for a free one for @obj,
2552  * stealing one if it can't find any.
2553  *
2554  * It then sets up the reg based on the object's properties: address, pitch
2555  * and tiling format.
2556  */
2557 int
2558 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2559                           struct intel_ring_buffer *pipelined)
2560 {
2561         struct drm_device *dev = obj->base.dev;
2562         struct drm_i915_private *dev_priv = dev->dev_private;
2563         struct drm_i915_fence_reg *reg;
2564         int ret;
2565
2566         /* XXX disable pipelining. There are bugs. Shocking. */
2567         pipelined = NULL;
2568
2569         /* Just update our place in the LRU if our fence is getting reused. */
2570         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2571                 reg = &dev_priv->fence_regs[obj->fence_reg];
2572                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2573
2574                 if (obj->tiling_changed) {
2575                         ret = i915_gem_object_flush_fence(obj, pipelined);
2576                         if (ret)
2577                                 return ret;
2578
2579                         if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2580                                 pipelined = NULL;
2581
2582                         if (pipelined) {
2583                                 reg->setup_seqno =
2584                                         i915_gem_next_request_seqno(pipelined);
2585                                 obj->last_fenced_seqno = reg->setup_seqno;
2586                                 obj->last_fenced_ring = pipelined;
2587                         }
2588
2589                         goto update;
2590                 }
2591
2592                 if (!pipelined) {
2593                         if (reg->setup_seqno) {
2594                                 if (!ring_passed_seqno(obj->last_fenced_ring,
2595                                                        reg->setup_seqno)) {
2596                                         ret = i915_wait_request(obj->last_fenced_ring,
2597                                                                 reg->setup_seqno);
2598                                         if (ret)
2599                                                 return ret;
2600                                 }
2601
2602                                 reg->setup_seqno = 0;
2603                         }
2604                 } else if (obj->last_fenced_ring &&
2605                            obj->last_fenced_ring != pipelined) {
2606                         ret = i915_gem_object_flush_fence(obj, pipelined);
2607                         if (ret)
2608                                 return ret;
2609                 }
2610
2611                 return 0;
2612         }
2613
2614         reg = i915_find_fence_reg(dev, pipelined);
2615         if (reg == NULL)
2616                 return -ENOSPC;
2617
2618         ret = i915_gem_object_flush_fence(obj, pipelined);
2619         if (ret)
2620                 return ret;
2621
2622         if (reg->obj) {
2623                 struct drm_i915_gem_object *old = reg->obj;
2624
2625                 drm_gem_object_reference(&old->base);
2626
2627                 if (old->tiling_mode)
2628                         i915_gem_release_mmap(old);
2629
2630                 ret = i915_gem_object_flush_fence(old, pipelined);
2631                 if (ret) {
2632                         drm_gem_object_unreference(&old->base);
2633                         return ret;
2634                 }
2635
2636                 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2637                         pipelined = NULL;
2638
2639                 old->fence_reg = I915_FENCE_REG_NONE;
2640                 old->last_fenced_ring = pipelined;
2641                 old->last_fenced_seqno =
2642                         pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2643
2644                 drm_gem_object_unreference(&old->base);
2645         } else if (obj->last_fenced_seqno == 0)
2646                 pipelined = NULL;
2647
2648         reg->obj = obj;
2649         list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2650         obj->fence_reg = reg - dev_priv->fence_regs;
2651         obj->last_fenced_ring = pipelined;
2652
2653         reg->setup_seqno =
2654                 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2655         obj->last_fenced_seqno = reg->setup_seqno;
2656
2657 update:
2658         obj->tiling_changed = false;
2659         switch (INTEL_INFO(dev)->gen) {
2660         case 7:
2661         case 6:
2662                 /* In order to fully serialize access to the fenced region and
2663                  * the update to the fence register we need to take extreme
2664                  * measures on SNB+. In theory, the write to the fence register
2665                  * flushes all memory transactions before, and coupled with the
2666                  * mb() placed around the register write we serialise all memory
2667                  * operations with respect to the changes in the tiler. Yet, on
2668                  * SNB+ we need to take a step further and emit an explicit wbinvd()
2669                  * on each processor in order to manually flush all memory
2670                  * transactions before updating the fence register.
2671                  */
2672                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2673                 ret = sandybridge_write_fence_reg(obj, pipelined);
2674                 break;
2675         case 5:
2676         case 4:
2677                 ret = i965_write_fence_reg(obj, pipelined);
2678                 break;
2679         case 3:
2680                 ret = i915_write_fence_reg(obj, pipelined);
2681                 break;
2682         case 2:
2683                 ret = i830_write_fence_reg(obj, pipelined);
2684                 break;
2685         }
2686
2687         return ret;
2688 }
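
/*
 * Editor's note: a hedged sketch of the typical caller pattern (for
 * example the display code pinning a scanout buffer); the real call sites
 * live elsewhere and may differ in detail:
 *
 *      ret = i915_gem_object_pin(obj, alignment, true);
 *      if (ret)
 *              return ret;
 *
 *      ret = i915_gem_object_get_fence(obj, pipelined);
 *      if (ret) {
 *              i915_gem_object_unpin(obj);
 *              return ret;
 *      }
 */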
2689
2690 /**
2691  * i915_gem_clear_fence_reg - clear out fence register info
2692  * @obj: object to clear
2693  *
2694  * Zeroes out the fence register itself and clears out the associated
2695  * data structures in dev_priv and obj.
2696  */
2697 static void
2698 i915_gem_clear_fence_reg(struct drm_device *dev,
2699                          struct drm_i915_fence_reg *reg)
2700 {
2701         drm_i915_private_t *dev_priv = dev->dev_private;
2702         uint32_t fence_reg = reg - dev_priv->fence_regs;
2703
2704         switch (INTEL_INFO(dev)->gen) {
2705         case 7:
2706         case 6:
2707                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2708                 break;
2709         case 5:
2710         case 4:
2711                 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2712                 break;
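        /* Note for readers: the "else" below deliberately falls through
         * into the "case 2:" label, so gen2 and gen3 fence registers 0..7
         * share the FENCE_REG_830_0 path while gen3 registers 8..15 use
         * FENCE_REG_945_8.
         */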
2713         case 3:
2714                 if (fence_reg >= 8)
2715                         fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2716                 else
2717         case 2:
2718                         fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2719
2720                 I915_WRITE(fence_reg, 0);
2721                 break;
2722         }
2723
2724         list_del_init(&reg->lru_list);
2725         reg->obj = NULL;
2726         reg->setup_seqno = 0;
2727 }
2728
2729 /**
2730  * Finds free space in the GTT aperture and binds the object there.
2731  */
2732 static int
2733 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2734                             unsigned alignment,
2735                             bool map_and_fenceable)
2736 {
2737         struct drm_device *dev = obj->base.dev;
2738         drm_i915_private_t *dev_priv = dev->dev_private;
2739         struct drm_mm_node *free_space;
2740         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2741         u32 size, fence_size, fence_alignment, unfenced_alignment;
2742         bool mappable, fenceable;
2743         int ret;
2744
2745         if (obj->madv != I915_MADV_WILLNEED) {
2746                 DRM_ERROR("Attempting to bind a purgeable object\n");
2747                 return -EINVAL;
2748         }
2749
2750         fence_size = i915_gem_get_gtt_size(dev,
2751                                            obj->base.size,
2752                                            obj->tiling_mode);
2753         fence_alignment = i915_gem_get_gtt_alignment(dev,
2754                                                      obj->base.size,
2755                                                      obj->tiling_mode);
2756         unfenced_alignment =
2757                 i915_gem_get_unfenced_gtt_alignment(dev,
2758                                                     obj->base.size,
2759                                                     obj->tiling_mode);
2760
2761         if (alignment == 0)
2762                 alignment = map_and_fenceable ? fence_alignment :
2763                                                 unfenced_alignment;
2764         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2765                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2766                 return -EINVAL;
2767         }
2768
2769         size = map_and_fenceable ? fence_size : obj->base.size;
2770
2771         /* If the object is bigger than the entire aperture, reject it early
2772          * before evicting everything in a vain attempt to find space.
2773          */
2774         if (obj->base.size >
2775             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2776                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2777                 return -E2BIG;
2778         }
2779
2780  search_free:
2781         if (map_and_fenceable)
2782                 free_space =
2783                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2784                                                     size, alignment, 0,
2785                                                     dev_priv->mm.gtt_mappable_end,
2786                                                     0);
2787         else
2788                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2789                                                 size, alignment, 0);
2790
2791         if (free_space != NULL) {
2792                 if (map_and_fenceable)
2793                         obj->gtt_space =
2794                                 drm_mm_get_block_range_generic(free_space,
2795                                                                size, alignment, 0,
2796                                                                dev_priv->mm.gtt_mappable_end,
2797                                                                0);
2798                 else
2799                         obj->gtt_space =
2800                                 drm_mm_get_block(free_space, size, alignment);
2801         }
2802         if (obj->gtt_space == NULL) {
2803                 /* If the gtt is empty and we're still having trouble
2804                  * fitting our object in, we're out of memory.
2805                  */
2806                 ret = i915_gem_evict_something(dev, size, alignment,
2807                                                map_and_fenceable);
2808                 if (ret)
2809                         return ret;
2810
2811                 goto search_free;
2812         }
2813
2814         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2815         if (ret) {
2816                 drm_mm_put_block(obj->gtt_space);
2817                 obj->gtt_space = NULL;
2818
2819                 if (ret == -ENOMEM) {
2820                         /* first try to reclaim some memory by clearing the GTT */
2821                         ret = i915_gem_evict_everything(dev, false);
2822                         if (ret) {
2823                                 /* now try to shrink everyone else */
2824                                 if (gfpmask) {
2825                                         gfpmask = 0;
2826                                         goto search_free;
2827                                 }
2828
2829                                 return -ENOMEM;
2830                         }
2831
2832                         goto search_free;
2833                 }
2834
2835                 return ret;
2836         }
2837
2838         ret = i915_gem_gtt_bind_object(obj);
2839         if (ret) {
2840                 i915_gem_object_put_pages_gtt(obj);
2841                 drm_mm_put_block(obj->gtt_space);
2842                 obj->gtt_space = NULL;
2843
2844                 if (i915_gem_evict_everything(dev, false))
2845                         return ret;
2846
2847                 goto search_free;
2848         }
2849
2850         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2851         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2852
2853         /* Assert that the object is not currently in any GPU domain. As it
2854          * wasn't in the GTT, there shouldn't be any way it could have been in
2855          * a GPU cache
2856          */
2857         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2858         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2859
2860         obj->gtt_offset = obj->gtt_space->start;
2861
2862         fenceable =
2863                 obj->gtt_space->size == fence_size &&
2864                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2865
2866         mappable =
2867                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2868
2869         obj->map_and_fenceable = mappable && fenceable;
2870
2871         trace_i915_gem_object_bind(obj, map_and_fenceable);
2872         return 0;
2873 }
2874
2875 void
2876 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2877 {
2878         /* If we don't have a page list set up, then we're not pinned
2879          * to GPU, and we can ignore the cache flush because it'll happen
2880          * again at bind time.
2881          */
2882         if (obj->pages == NULL)
2883                 return;
2884
2885         /* If the GPU is snooping the contents of the CPU cache,
2886          * we do not need to manually clear the CPU cache lines.  However,
2887          * the caches are only snooped when the render cache is
2888          * flushed/invalidated.  As we always have to emit invalidations
2889          * and flushes when moving into and out of the RENDER domain, correct
2890          * snooping behaviour occurs naturally as the result of our domain
2891          * tracking.
2892          */
2893         if (obj->cache_level != I915_CACHE_NONE)
2894                 return;
2895
2896         trace_i915_gem_object_clflush(obj);
2897
2898         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2899 }
2900
2901 /** Flushes any GPU write domain for the object if it's dirty. */
2902 static int
2903 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2904 {
2905         if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2906                 return 0;
2907
2908         /* Queue the GPU write cache flushing we need. */
2909         return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2910 }
2911
2912 /** Flushes the GTT write domain for the object if it's dirty. */
2913 static void
2914 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2915 {
2916         uint32_t old_write_domain;
2917
2918         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2919                 return;
2920
2921         /* No actual flushing is required for the GTT write domain.  Writes
2922          * to it immediately go to main memory as far as we know, so there's
2923          * no chipset flush.  It also doesn't land in render cache.
2924          *
2925          * However, we do have to enforce the order so that all writes through
2926          * the GTT land before any writes to the device, such as updates to
2927          * the GATT itself.
2928          */
2929         wmb();
2930
2931         old_write_domain = obj->base.write_domain;
2932         obj->base.write_domain = 0;
2933
2934         trace_i915_gem_object_change_domain(obj,
2935                                             obj->base.read_domains,
2936                                             old_write_domain);
2937 }
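
/*
 * Illustrative note, not part of the original source: the wmb() above is
 * what lets callers rely on an ordering along the lines of
 *
 *	iowrite32(val, gtt_vaddr);			- write landing via the GTT
 *	i915_gem_object_flush_gtt_write_domain(obj);	- includes the wmb()
 *	I915_WRITE(reg, kick);				- later write to the device
 *
 * where gtt_vaddr, reg and kick are placeholders. The barrier only
 * guarantees that GTT traffic is globally visible before subsequent device
 * writes such as GATT updates; it does not flush any cache.
 */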
2938
2939 /** Flushes the CPU write domain for the object if it's dirty. */
2940 static void
2941 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2942 {
2943         uint32_t old_write_domain;
2944
2945         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2946                 return;
2947
2948         i915_gem_clflush_object(obj);
2949         intel_gtt_chipset_flush();
2950         old_write_domain = obj->base.write_domain;
2951         obj->base.write_domain = 0;
2952
2953         trace_i915_gem_object_change_domain(obj,
2954                                             obj->base.read_domains,
2955                                             old_write_domain);
2956 }
2957
2958 /**
2959  * Moves a single object to the GTT read, and possibly write, domain.
2960  *
2961  * This function returns when the move is complete, including waiting on
2962  * flushes to occur.
2963  */
2964 int
2965 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2966 {
2967         uint32_t old_write_domain, old_read_domains;
2968         int ret;
2969
2970         /* Not valid to be called on unbound objects. */
2971         if (obj->gtt_space == NULL)
2972                 return -EINVAL;
2973
2974         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2975                 return 0;
2976
2977         ret = i915_gem_object_flush_gpu_write_domain(obj);
2978         if (ret)
2979                 return ret;
2980
2981         if (obj->pending_gpu_write || write) {
2982                 ret = i915_gem_object_wait_rendering(obj);
2983                 if (ret)
2984                         return ret;
2985         }
2986
2987         i915_gem_object_flush_cpu_write_domain(obj);
2988
2989         old_write_domain = obj->base.write_domain;
2990         old_read_domains = obj->base.read_domains;
2991
2992         /* It should now be out of any other write domains, and we can update
2993          * the domain values for our changes.
2994          */
2995         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2996         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2997         if (write) {
2998                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2999                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3000                 obj->dirty = 1;
3001         }
3002
3003         trace_i915_gem_object_change_domain(obj,
3004                                             old_read_domains,
3005                                             old_write_domain);
3006
3007         return 0;
3008 }
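
/*
 * Usage sketch, illustrative only: a caller that wants coherent CPU access
 * through the aperture does, while holding struct_mutex:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	(writes through the GTT mapping are now ordered against the GPU)
 *
 * Passing write = true marks the object dirty and leaves the GTT domain as
 * the only read domain, matching the code above.
 */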
3009
3010 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3011                                     enum i915_cache_level cache_level)
3012 {
3013         int ret;
3014
3015         if (obj->cache_level == cache_level)
3016                 return 0;
3017
3018         if (obj->pin_count) {
3019                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3020                 return -EBUSY;
3021         }
3022
3023         if (obj->gtt_space) {
3024                 ret = i915_gem_object_finish_gpu(obj);
3025                 if (ret)
3026                         return ret;
3027
3028                 i915_gem_object_finish_gtt(obj);
3029
3030                 /* Before SandyBridge, you could not use tiling or fence
3031                  * registers with snooped memory, so relinquish any fences
3032                  * currently pointing to our region in the aperture.
3033                  */
3034                 if (INTEL_INFO(obj->base.dev)->gen < 6) {
3035                         ret = i915_gem_object_put_fence(obj);
3036                         if (ret)
3037                                 return ret;
3038                 }
3039
3040                 i915_gem_gtt_rebind_object(obj, cache_level);
3041         }
3042
3043         if (cache_level == I915_CACHE_NONE) {
3044                 u32 old_read_domains, old_write_domain;
3045
3046                 /* If we're coming from an LLC-cached state, then we haven't
3047                  * actually been tracking whether the data is in the
3048                  * CPU cache or not, since we only allow one bit set
3049                  * in obj->write_domain and have been skipping the clflushes.
3050                  * Just set it to the CPU cache for now.
3051                  */
3052                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3053                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3054
3055                 old_read_domains = obj->base.read_domains;
3056                 old_write_domain = obj->base.write_domain;
3057
3058                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3059                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3060
3061                 trace_i915_gem_object_change_domain(obj,
3062                                                     old_read_domains,
3063                                                     old_write_domain);
3064         }
3065
3066         obj->cache_level = cache_level;
3067         return 0;
3068 }
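
/*
 * Usage sketch, illustrative only: switching a bound buffer to uncached
 * before using it for scanout looks like
 *
 *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 *	if (ret)
 *		return ret;
 *
 * The object must be unpinned; if it is currently bound, its PTEs are
 * rewritten with the new cache bits via i915_gem_gtt_rebind_object().
 */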
3069
3070 /*
3071  * Prepare a buffer for the display plane (scanout, cursors, etc.).
3072  * Can be called from an uninterruptible phase (modesetting) and allows
3073  * any flushes to be pipelined (for pageflips).
3074  *
3075  * For the display plane, we want to be in the GTT but out of any write
3076  * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3077  * ability to pipeline the waits, pinning and any additional subtleties
3078  * that may differentiate the display plane from ordinary buffers.
3079  */
3080 int
3081 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3082                                      u32 alignment,
3083                                      struct intel_ring_buffer *pipelined)
3084 {
3085         u32 old_read_domains, old_write_domain;
3086         int ret;
3087
3088         ret = i915_gem_object_flush_gpu_write_domain(obj);
3089         if (ret)
3090                 return ret;
3091
3092         if (pipelined != obj->ring) {
3093                 ret = i915_gem_object_wait_rendering(obj);
3094                 if (ret == -ERESTARTSYS)
3095                         return ret;
3096         }
3097
3098         /* The display engine is not coherent with the LLC cache on gen6.  As
3099          * a result, we make sure that the pinning that is about to occur is
3100          * done with uncached PTEs. This is the lowest common denominator for all
3101          * chipsets.
3102          *
3103          * However for gen6+, we could do better by using the GFDT bit instead
3104          * of uncaching, which would allow us to flush all the LLC-cached data
3105          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3106          */
3107         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3108         if (ret)
3109                 return ret;
3110
3111         /* As the user may map the buffer once pinned in the display plane
3112          * (e.g. libkms for the bootup splash), we have to ensure that we
3113          * always use map_and_fenceable for all scanout buffers.
3114          */
3115         ret = i915_gem_object_pin(obj, alignment, true);
3116         if (ret)
3117                 return ret;
3118
3119         i915_gem_object_flush_cpu_write_domain(obj);
3120
3121         old_write_domain = obj->base.write_domain;
3122         old_read_domains = obj->base.read_domains;
3123
3124         /* It should now be out of any other write domains, and we can update
3125          * the domain values for our changes.
3126          */
3127         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3128         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3129
3130         trace_i915_gem_object_change_domain(obj,
3131                                             old_read_domains,
3132                                             old_write_domain);
3133
3134         return 0;
3135 }
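
/*
 * Usage sketch, illustrative only: the modesetting/pageflip path pins a
 * framebuffer roughly like
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, ring);
 *	if (ret)
 *		return ret;
 *	(program the plane registers with obj->gtt_offset)
 *
 * alignment and ring stand in for whatever the caller requires; when ring
 * matches obj->ring the wait for outstanding rendering is skipped so the
 * flip can be queued behind it. The pin is dropped later with
 * i915_gem_object_unpin() once the buffer is no longer being scanned out.
 */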
3136
3137 int
3138 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3139 {
3140         int ret;
3141
3142         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3143                 return 0;
3144
3145         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3146                 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3147                 if (ret)
3148                         return ret;
3149         }
3150
3151         ret = i915_gem_object_wait_rendering(obj);
3152         if (ret)
3153                 return ret;
3154
3155         /* Ensure that we invalidate the GPU's caches and TLBs. */
3156         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3157         return 0;
3158 }
3159
3160 /**
3161  * Moves a single object to the CPU read, and possibly write, domain.
3162  *
3163  * This function returns when the move is complete, including waiting on
3164  * flushes to occur.
3165  */
3166 static int
3167 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3168 {
3169         uint32_t old_write_domain, old_read_domains;
3170         int ret;
3171
3172         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3173                 return 0;
3174
3175         ret = i915_gem_object_flush_gpu_write_domain(obj);
3176         if (ret)
3177                 return ret;
3178
3179         ret = i915_gem_object_wait_rendering(obj);
3180         if (ret)
3181                 return ret;
3182
3183         i915_gem_object_flush_gtt_write_domain(obj);
3184
3185         /* If we have a partially-valid cache of the object in the CPU,
3186          * finish invalidating it and free the per-page flags.
3187          */
3188         i915_gem_object_set_to_full_cpu_read_domain(obj);
3189
3190         old_write_domain = obj->base.write_domain;
3191         old_read_domains = obj->base.read_domains;
3192
3193         /* Flush the CPU cache if it's still invalid. */
3194         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3195                 i915_gem_clflush_object(obj);
3196
3197                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3198         }
3199
3200         /* It should now be out of any other write domains, and we can update
3201          * the domain values for our changes.
3202          */
3203         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3204
3205         /* If we're writing through the CPU, then the GPU read domains will
3206          * need to be invalidated at next use.
3207          */
3208         if (write) {
3209                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3210                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3211         }
3212
3213         trace_i915_gem_object_change_domain(obj,
3214                                             old_read_domains,
3215                                             old_write_domain);
3216
3217         return 0;
3218 }
3219
3220 /**
3221  * Moves the object from a partially CPU read to a full one.
3222  *
3223  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3224  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3225  */
3226 static void
3227 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3228 {
3229         if (!obj->page_cpu_valid)
3230                 return;
3231
3232         /* If we're partially in the CPU read domain, finish moving it in.
3233          */
3234         if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3235                 int i;
3236
3237                 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3238                         if (obj->page_cpu_valid[i])
3239                                 continue;
3240                         drm_clflush_pages(obj->pages + i, 1);
3241                 }
3242         }
3243
3244         /* Free the page_cpu_valid mappings which are now stale, whether
3245          * or not we've got I915_GEM_DOMAIN_CPU.
3246          */
3247         kfree(obj->page_cpu_valid);
3248         obj->page_cpu_valid = NULL;
3249 }
3250
3251 /**
3252  * Set the CPU read domain on a range of the object.
3253  *
3254  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3255  * not entirely valid.  The page_cpu_valid member of the object records which
3256  * pages have been flushed, and will be respected by
3257  * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3258  * of the whole object.
3259  *
3260  * This function returns when the move is complete, including waiting on
3261  * flushes to occur.
3262  */
3263 static int
3264 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3265                                           uint64_t offset, uint64_t size)
3266 {
3267         uint32_t old_read_domains;
3268         int i, ret;
3269
3270         if (offset == 0 && size == obj->base.size)
3271                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3272
3273         ret = i915_gem_object_flush_gpu_write_domain(obj);
3274         if (ret)
3275                 return ret;
3276
3277         ret = i915_gem_object_wait_rendering(obj);
3278         if (ret)
3279                 return ret;
3280
3281         i915_gem_object_flush_gtt_write_domain(obj);
3282
3283         /* If we're already fully in the CPU read domain, we're done. */
3284         if (obj->page_cpu_valid == NULL &&
3285             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3286                 return 0;
3287
3288         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3289          * newly adding I915_GEM_DOMAIN_CPU.
3290          */
3291         if (obj->page_cpu_valid == NULL) {
3292                 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3293                                               GFP_KERNEL);
3294                 if (obj->page_cpu_valid == NULL)
3295                         return -ENOMEM;
3296         } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3297                 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3298
3299         /* Flush the cache on any pages that are still invalid from the CPU's
3300          * perspective.
3301          */
3302         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3303              i++) {
3304                 if (obj->page_cpu_valid[i])
3305                         continue;
3306
3307                 drm_clflush_pages(obj->pages + i, 1);
3308
3309                 obj->page_cpu_valid[i] = 1;
3310         }
3311
3312         /* It should now be out of any other write domains, and we can update
3313          * the domain values for our changes.
3314          */
3315         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3316
3317         old_read_domains = obj->base.read_domains;
3318         obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3319
3320         trace_i915_gem_object_change_domain(obj,
3321                                             old_read_domains,
3322                                             obj->base.write_domain);
3323
3324         return 0;
3325 }
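
/*
 * Illustrative note, not from the original source: a partial read only
 * needs the touched pages flushed, so a caller can use
 *
 *	ret = i915_gem_object_set_cpu_read_domain_range(obj, offset, size);
 *
 * rather than i915_gem_object_set_to_cpu_domain(obj, 0); only the pages
 * covering [offset, offset + size) are clflushed and marked valid in
 * page_cpu_valid.
 */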
3326
3327 /* Throttle our rendering by waiting until the ring has completed our requests
3328  * emitted over 20 msec ago.
3329  *
3330  * Note that if we were to use the current jiffies each time around the loop,
3331  * we wouldn't escape the function with any frames outstanding if the time to
3332  * render a frame was over 20ms.
3333  *
3334  * This should get us reasonable parallelism between CPU and GPU but also
3335  * relatively low latency when blocking on a particular request to finish.
3336  */
3337 static int
3338 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3339 {
3340         struct drm_i915_private *dev_priv = dev->dev_private;
3341         struct drm_i915_file_private *file_priv = file->driver_priv;
3342         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3343         struct drm_i915_gem_request *request;
3344         struct intel_ring_buffer *ring = NULL;
3345         u32 seqno = 0;
3346         int ret;
3347
3348         if (atomic_read(&dev_priv->mm.wedged))
3349                 return -EIO;
3350
3351         spin_lock(&file_priv->mm.lock);
3352         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3353                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3354                         break;
3355
3356                 ring = request->ring;
3357                 seqno = request->seqno;
3358         }
3359         spin_unlock(&file_priv->mm.lock);
3360
3361         if (seqno == 0)
3362                 return 0;
3363
3364         ret = 0;
3365         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3366                 /* Wait for the seqno to pass without holding any locks, which
3367                  * would cause extra latency for others. This is safe as the irq
3368                  * generation is designed to be run atomically and so is
3369                  * lockless.
3370                  */
3371                 if (ring->irq_get(ring)) {
3372                         ret = wait_event_interruptible(ring->irq_queue,
3373                                                        i915_seqno_passed(ring->get_seqno(ring), seqno)
3374                                                        || atomic_read(&dev_priv->mm.wedged));
3375                         ring->irq_put(ring);
3376
3377                         if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3378                                 ret = -EIO;
3379                 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
3380                                                       seqno) ||
3381                                     atomic_read(&dev_priv->mm.wedged), 3000)) {
3382                         ret = -EBUSY;
3383                 }
3384         }
3385
3386         if (ret == 0)
3387                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3388
3389         return ret;
3390 }
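
/*
 * Worked example, illustrative only: with HZ = 1000, recent_enough above is
 * jiffies - 20, so the loop picks the most recent request emitted more than
 * 20ms ago and waits for its seqno. Younger requests are left untouched,
 * which is what bounds each client to roughly one 20ms window of
 * outstanding rendering.
 */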
3391
3392 int
3393 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3394                     uint32_t alignment,
3395                     bool map_and_fenceable)
3396 {
3397         struct drm_device *dev = obj->base.dev;
3398         struct drm_i915_private *dev_priv = dev->dev_private;
3399         int ret;
3400
3401         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3402                 return -EBUSY;
3403         WARN_ON(i915_verify_lists(dev));
3404
3405         if (obj->gtt_space != NULL) {
3406                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3407                     (map_and_fenceable && !obj->map_and_fenceable)) {
3408                         WARN(obj->pin_count,
3409                              "bo is already pinned with incorrect alignment:"
3410                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3411                              " obj->map_and_fenceable=%d\n",
3412                              obj->gtt_offset, alignment,
3413                              map_and_fenceable,
3414                              obj->map_and_fenceable);
3415                         ret = i915_gem_object_unbind(obj);
3416                         if (ret)
3417                                 return ret;
3418                 }
3419         }
3420
3421         if (obj->gtt_space == NULL) {
3422                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3423                                                   map_and_fenceable);
3424                 if (ret)
3425                         return ret;
3426         }
3427
3428         if (obj->pin_count++ == 0) {
3429                 if (!obj->active)
3430                         list_move_tail(&obj->mm_list,
3431                                        &dev_priv->mm.pinned_list);
3432         }
3433         obj->pin_mappable |= map_and_fenceable;
3434
3435         WARN_ON(i915_verify_lists(dev));
3436         return 0;
3437 }
3438
3439 void
3440 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3441 {
3442         struct drm_device *dev = obj->base.dev;
3443         drm_i915_private_t *dev_priv = dev->dev_private;
3444
3445         WARN_ON(i915_verify_lists(dev));
3446         BUG_ON(obj->pin_count == 0);
3447         BUG_ON(obj->gtt_space == NULL);
3448
3449         if (--obj->pin_count == 0) {
3450                 if (!obj->active)
3451                         list_move_tail(&obj->mm_list,
3452                                        &dev_priv->mm.inactive_list);
3453                 obj->pin_mappable = false;
3454         }
3455         WARN_ON(i915_verify_lists(dev));
3456 }
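
/*
 * Usage sketch, illustrative only: pin and unpin must be balanced and both
 * run under struct_mutex; a transient user looks like
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret)
 *		return ret;
 *	(use obj->gtt_offset)
 *	i915_gem_object_unpin(obj);
 *
 * The alignment of 4096 and map_and_fenceable = true are example arguments
 * only; when the last pin is dropped, an inactive object moves back onto
 * the inactive list.
 */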
3457
3458 int
3459 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3460                    struct drm_file *file)
3461 {
3462         struct drm_i915_gem_pin *args = data;
3463         struct drm_i915_gem_object *obj;
3464         int ret;
3465
3466         ret = i915_mutex_lock_interruptible(dev);
3467         if (ret)
3468                 return ret;
3469
3470         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3471         if (&obj->base == NULL) {
3472                 ret = -ENOENT;
3473                 goto unlock;
3474         }
3475
3476         if (obj->madv != I915_MADV_WILLNEED) {
3477                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3478                 ret = -EINVAL;
3479                 goto out;
3480         }
3481
3482         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3483                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3484                           args->handle);
3485                 ret = -EINVAL;
3486                 goto out;
3487         }
3488
3489         if (obj->user_pin_count == 0) {
3490                 ret = i915_gem_object_pin(obj, args->alignment, true);
3491                 if (ret)
3492                         goto out;
3493         }
3494
3495         obj->user_pin_count++;
3496         obj->pin_filp = file;
3497
3498         /* XXX - flush the CPU caches for pinned objects
3499          * as the X server doesn't manage domains yet
3500          */
3501         i915_gem_object_flush_cpu_write_domain(obj);
3502         args->offset = obj->gtt_offset;
3503 out:
3504         drm_gem_object_unreference(&obj->base);
3505 unlock:
3506         mutex_unlock(&dev->struct_mutex);
3507         return ret;
3508 }
3509
3510 int
3511 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3512                      struct drm_file *file)
3513 {
3514         struct drm_i915_gem_pin *args = data;
3515         struct drm_i915_gem_object *obj;
3516         int ret;
3517
3518         ret = i915_mutex_lock_interruptible(dev);
3519         if (ret)
3520                 return ret;
3521
3522         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3523         if (&obj->base == NULL) {
3524                 ret = -ENOENT;
3525                 goto unlock;
3526         }
3527
3528         if (obj->pin_filp != file) {
3529                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3530                           args->handle);
3531                 ret = -EINVAL;
3532                 goto out;
3533         }
3534         obj->user_pin_count--;
3535         if (obj->user_pin_count == 0) {
3536                 obj->pin_filp = NULL;
3537                 i915_gem_object_unpin(obj);
3538         }
3539
3540 out:
3541         drm_gem_object_unreference(&obj->base);
3542 unlock:
3543         mutex_unlock(&dev->struct_mutex);
3544         return ret;
3545 }
3546
3547 int
3548 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3549                     struct drm_file *file)
3550 {
3551         struct drm_i915_gem_busy *args = data;
3552         struct drm_i915_gem_object *obj;
3553         int ret;
3554
3555         ret = i915_mutex_lock_interruptible(dev);
3556         if (ret)
3557                 return ret;
3558
3559         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3560         if (&obj->base == NULL) {
3561                 ret = -ENOENT;
3562                 goto unlock;
3563         }
3564
3565         /* Count all active objects as busy, even if they are currently not used
3566          * by the gpu. Users of this interface expect objects to eventually
3567          * become non-busy without any further actions; therefore, emit any
3568          * necessary flushes here.
3569          */
3570         args->busy = obj->active;
3571         if (args->busy) {
3572                 /* Unconditionally flush objects, even when the gpu still uses this
3573                  * object. Userspace calling this function indicates that it wants to
3574                  * use this buffer sooner rather than later, so issuing the required
3575                  * flush earlier is beneficial.
3576                  */
3577                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3578                         ret = i915_gem_flush_ring(obj->ring,
3579                                                   0, obj->base.write_domain);
3580                 } else if (obj->ring->outstanding_lazy_request ==
3581                            obj->last_rendering_seqno) {
3582                         struct drm_i915_gem_request *request;
3583
3584                         /* This ring is not being cleared by active usage,
3585                          * so emit a request to do so.
3586                          */
3587                         request = kzalloc(sizeof(*request), GFP_KERNEL);
3588                         if (request) {
3589                                 ret = i915_add_request(obj->ring, NULL, request);
3590                                 if (ret)
3591                                         kfree(request);
3592                         } else
3593                                 ret = -ENOMEM;
3594                 }
3595
3596                 /* Update the active list for the hardware's current position.
3597                  * Otherwise this only updates on a delayed timer or when irqs
3598                  * are actually unmasked, and our working set ends up being
3599                  * larger than required.
3600                  */
3601                 i915_gem_retire_requests_ring(obj->ring);
3602
3603                 args->busy = obj->active;
3604         }
3605
3606         drm_gem_object_unreference(&obj->base);
3607 unlock:
3608         mutex_unlock(&dev->struct_mutex);
3609         return ret;
3610 }
3611
3612 int
3613 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3614                         struct drm_file *file_priv)
3615 {
3616         return i915_gem_ring_throttle(dev, file_priv);
3617 }
3618
3619 int
3620 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3621                        struct drm_file *file_priv)
3622 {
3623         struct drm_i915_gem_madvise *args = data;
3624         struct drm_i915_gem_object *obj;
3625         int ret;
3626
3627         switch (args->madv) {
3628         case I915_MADV_DONTNEED:
3629         case I915_MADV_WILLNEED:
3630             break;
3631         default:
3632             return -EINVAL;
3633         }
3634
3635         ret = i915_mutex_lock_interruptible(dev);
3636         if (ret)
3637                 return ret;
3638
3639         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3640         if (&obj->base == NULL) {
3641                 ret = -ENOENT;
3642                 goto unlock;
3643         }
3644
3645         if (obj->pin_count) {
3646                 ret = -EINVAL;
3647                 goto out;
3648         }
3649
3650         if (obj->madv != __I915_MADV_PURGED)
3651                 obj->madv = args->madv;
3652
3653         /* if the object is no longer bound, discard its backing storage */
3654         if (i915_gem_object_is_purgeable(obj) &&
3655             obj->gtt_space == NULL)
3656                 i915_gem_object_truncate(obj);
3657
3658         args->retained = obj->madv != __I915_MADV_PURGED;
3659
3660 out:
3661         drm_gem_object_unreference(&obj->base);
3662 unlock:
3663         mutex_unlock(&dev->struct_mutex);
3664         return ret;
3665 }
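
/*
 * Illustrative note, not part of the original source: userspace buffer
 * caches are the typical consumer of this ioctl. A cache marks idle buffers
 * purgeable and revives them on reuse, roughly (userspace side):
 *
 *	struct drm_i915_gem_madvise madv = { .handle = handle,
 *					     .madv = I915_MADV_DONTNEED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * On a later I915_MADV_WILLNEED call, retained == 0 in the reply means the
 * backing storage was purged while unbound and the contents must be
 * reinitialised.
 */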
3666
3667 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3668                                                   size_t size)
3669 {
3670         struct drm_i915_private *dev_priv = dev->dev_private;
3671         struct drm_i915_gem_object *obj;
3672         struct address_space *mapping;
3673
3674         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3675         if (obj == NULL)
3676                 return NULL;
3677
3678         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3679                 kfree(obj);
3680                 return NULL;
3681         }
3682
3683         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3684         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3685
3686         i915_gem_info_add_obj(dev_priv, size);
3687
3688         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3689         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3690
3691         if (IS_GEN6(dev) || IS_GEN7(dev)) {
3692                 /* On Gen6, we can have the GPU use the LLC (the CPU
3693                  * cache) for about a 10% performance improvement
3694                  * compared to uncached.  Graphics requests other than
3695                  * display scanout are coherent with the CPU in
3696                  * accessing this cache.  This means in this mode we
3697                  * don't need to clflush on the CPU side, and on the
3698                  * GPU side we only need to flush internal caches to
3699                  * get data visible to the CPU.
3700                  *
3701                  * However, we maintain the display planes as UC, and so
3702                  * need to rebind when first used as such.
3703                  */
3704                 obj->cache_level = I915_CACHE_LLC;
3705         } else
3706                 obj->cache_level = I915_CACHE_NONE;
3707
3708         obj->base.driver_private = NULL;
3709         obj->fence_reg = I915_FENCE_REG_NONE;
3710         INIT_LIST_HEAD(&obj->mm_list);
3711         INIT_LIST_HEAD(&obj->gtt_list);
3712         INIT_LIST_HEAD(&obj->ring_list);
3713         INIT_LIST_HEAD(&obj->exec_list);
3714         INIT_LIST_HEAD(&obj->gpu_write_list);
3715         obj->madv = I915_MADV_WILLNEED;
3716         /* Avoid an unnecessary call to unbind on the first bind. */
3717         obj->map_and_fenceable = true;
3718
3719         return obj;
3720 }
3721
3722 int i915_gem_init_object(struct drm_gem_object *obj)
3723 {
3724         BUG();
3725
3726         return 0;
3727 }
3728
3729 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3730 {
3731         struct drm_device *dev = obj->base.dev;
3732         drm_i915_private_t *dev_priv = dev->dev_private;
3733         int ret;
3734
3735         ret = i915_gem_object_unbind(obj);
3736         if (ret == -ERESTARTSYS) {
3737                 list_move(&obj->mm_list,
3738                           &dev_priv->mm.deferred_free_list);
3739                 return;
3740         }
3741
3742         trace_i915_gem_object_destroy(obj);
3743
3744         if (obj->base.map_list.map)
3745                 drm_gem_free_mmap_offset(&obj->base);
3746
3747         drm_gem_object_release(&obj->base);
3748         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3749
3750         kfree(obj->page_cpu_valid);
3751         kfree(obj->bit_17);
3752         kfree(obj);
3753 }
3754
3755 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3756 {
3757         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3758         struct drm_device *dev = obj->base.dev;
3759
3760         while (obj->pin_count > 0)
3761                 i915_gem_object_unpin(obj);
3762
3763         if (obj->phys_obj)
3764                 i915_gem_detach_phys_object(dev, obj);
3765
3766         i915_gem_free_object_tail(obj);
3767 }
3768
3769 int
3770 i915_gem_idle(struct drm_device *dev)
3771 {
3772         drm_i915_private_t *dev_priv = dev->dev_private;
3773         int ret;
3774
3775         mutex_lock(&dev->struct_mutex);
3776
3777         if (dev_priv->mm.suspended) {
3778                 mutex_unlock(&dev->struct_mutex);
3779                 return 0;
3780         }
3781
3782         ret = i915_gpu_idle(dev);
3783         if (ret) {
3784                 mutex_unlock(&dev->struct_mutex);
3785                 return ret;
3786         }
3787
3788         /* Under UMS, be paranoid and evict. */
3789         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3790                 ret = i915_gem_evict_inactive(dev, false);
3791                 if (ret) {
3792                         mutex_unlock(&dev->struct_mutex);
3793                         return ret;
3794                 }
3795         }
3796
3797         i915_gem_reset_fences(dev);
3798
3799         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3800          * We need to replace this with a semaphore, or something.
3801          * And not confound mm.suspended!
3802          */
3803         dev_priv->mm.suspended = 1;
3804         del_timer_sync(&dev_priv->hangcheck_timer);
3805
3806         i915_kernel_lost_context(dev);
3807         i915_gem_cleanup_ringbuffer(dev);
3808
3809         mutex_unlock(&dev->struct_mutex);
3810
3811         /* Cancel the retire work handler, which should be idle now. */
3812         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3813
3814         return 0;
3815 }
3816
3817 int
3818 i915_gem_init_ringbuffer(struct drm_device *dev)
3819 {
3820         drm_i915_private_t *dev_priv = dev->dev_private;
3821         int ret;
3822
3823         ret = intel_init_render_ring_buffer(dev);
3824         if (ret)
3825                 return ret;
3826
3827         if (HAS_BSD(dev)) {
3828                 ret = intel_init_bsd_ring_buffer(dev);
3829                 if (ret)
3830                         goto cleanup_render_ring;
3831         }
3832
3833         if (HAS_BLT(dev)) {
3834                 ret = intel_init_blt_ring_buffer(dev);
3835                 if (ret)
3836                         goto cleanup_bsd_ring;
3837         }
3838
3839         dev_priv->next_seqno = 1;
3840
3841         return 0;
3842
3843 cleanup_bsd_ring:
3844         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3845 cleanup_render_ring:
3846         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3847         return ret;
3848 }
3849
3850 void
3851 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3852 {
3853         drm_i915_private_t *dev_priv = dev->dev_private;
3854         int i;
3855
3856         for (i = 0; i < I915_NUM_RINGS; i++)
3857                 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3858 }
3859
3860 int
3861 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3862                        struct drm_file *file_priv)
3863 {
3864         drm_i915_private_t *dev_priv = dev->dev_private;
3865         int ret, i;
3866
3867         if (drm_core_check_feature(dev, DRIVER_MODESET))
3868                 return 0;
3869
3870         if (atomic_read(&dev_priv->mm.wedged)) {
3871                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3872                 atomic_set(&dev_priv->mm.wedged, 0);
3873         }
3874
3875         mutex_lock(&dev->struct_mutex);
3876         dev_priv->mm.suspended = 0;
3877
3878         ret = i915_gem_init_ringbuffer(dev);
3879         if (ret != 0) {
3880                 mutex_unlock(&dev->struct_mutex);
3881                 return ret;
3882         }
3883
3884         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3885         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3886         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3887         for (i = 0; i < I915_NUM_RINGS; i++) {
3888                 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3889                 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3890         }
3891         mutex_unlock(&dev->struct_mutex);
3892
3893         ret = drm_irq_install(dev);
3894         if (ret)
3895                 goto cleanup_ringbuffer;
3896
3897         return 0;
3898
3899 cleanup_ringbuffer:
3900         mutex_lock(&dev->struct_mutex);
3901         i915_gem_cleanup_ringbuffer(dev);
3902         dev_priv->mm.suspended = 1;
3903         mutex_unlock(&dev->struct_mutex);
3904
3905         return ret;
3906 }
3907
3908 int
3909 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3910                        struct drm_file *file_priv)
3911 {
3912         if (drm_core_check_feature(dev, DRIVER_MODESET))
3913                 return 0;
3914
3915         drm_irq_uninstall(dev);
3916         return i915_gem_idle(dev);
3917 }
3918
3919 void
3920 i915_gem_lastclose(struct drm_device *dev)
3921 {
3922         int ret;
3923
3924         if (drm_core_check_feature(dev, DRIVER_MODESET))
3925                 return;
3926
3927         ret = i915_gem_idle(dev);
3928         if (ret)
3929                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3930 }
3931
3932 static void
3933 init_ring_lists(struct intel_ring_buffer *ring)
3934 {
3935         INIT_LIST_HEAD(&ring->active_list);
3936         INIT_LIST_HEAD(&ring->request_list);
3937         INIT_LIST_HEAD(&ring->gpu_write_list);
3938 }
3939
3940 void
3941 i915_gem_load(struct drm_device *dev)
3942 {
3943         int i;
3944         drm_i915_private_t *dev_priv = dev->dev_private;
3945
3946         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3947         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3948         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3949         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3950         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3951         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3952         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3953         for (i = 0; i < I915_NUM_RINGS; i++)
3954                 init_ring_lists(&dev_priv->ring[i]);
3955         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3956                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3957         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3958                           i915_gem_retire_work_handler);
3959         init_completion(&dev_priv->error_completion);
3960
3961         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3962         if (IS_GEN3(dev)) {
3963                 u32 tmp = I915_READ(MI_ARB_STATE);
3964                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3965                         /* arb state is a masked write, so set bit + bit in mask */
3966                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3967                         I915_WRITE(MI_ARB_STATE, tmp);
3968                 }
3969         }
3970
3971         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3972
3973         /* Old X drivers will take 0-2 for front, back, depth buffers */
3974         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3975                 dev_priv->fence_reg_start = 3;
3976
3977         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3978                 dev_priv->num_fence_regs = 16;
3979         else
3980                 dev_priv->num_fence_regs = 8;
3981
3982         /* Initialize fence registers to zero */
3983         for (i = 0; i < dev_priv->num_fence_regs; i++) {
3984                 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3985         }
3986
3987         i915_gem_detect_bit_6_swizzle(dev);
3988         init_waitqueue_head(&dev_priv->pending_flip_queue);
3989
3990         dev_priv->mm.interruptible = true;
3991
3992         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3993         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3994         register_shrinker(&dev_priv->mm.inactive_shrinker);
3995 }
3996
3997 /*
3998  * Create a physically contiguous memory object for this object
3999  * e.g. for cursor + overlay regs
4000  */
4001 static int i915_gem_init_phys_object(struct drm_device *dev,
4002                                      int id, int size, int align)
4003 {
4004         drm_i915_private_t *dev_priv = dev->dev_private;
4005         struct drm_i915_gem_phys_object *phys_obj;
4006         int ret;
4007
4008         if (dev_priv->mm.phys_objs[id - 1] || !size)
4009                 return 0;
4010
4011         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4012         if (!phys_obj)
4013                 return -ENOMEM;
4014
4015         phys_obj->id = id;
4016
4017         phys_obj->handle = drm_pci_alloc(dev, size, align);
4018         if (!phys_obj->handle) {
4019                 ret = -ENOMEM;
4020                 goto kfree_obj;
4021         }
4022 #ifdef CONFIG_X86
4023         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4024 #endif
4025
4026         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4027
4028         return 0;
4029 kfree_obj:
4030         kfree(phys_obj);
4031         return ret;
4032 }
4033
4034 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4035 {
4036         drm_i915_private_t *dev_priv = dev->dev_private;
4037         struct drm_i915_gem_phys_object *phys_obj;
4038
4039         if (!dev_priv->mm.phys_objs[id - 1])
4040                 return;
4041
4042         phys_obj = dev_priv->mm.phys_objs[id - 1];
4043         if (phys_obj->cur_obj) {
4044                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4045         }
4046
4047 #ifdef CONFIG_X86
4048         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4049 #endif
4050         drm_pci_free(dev, phys_obj->handle);
4051         kfree(phys_obj);
4052         dev_priv->mm.phys_objs[id - 1] = NULL;
4053 }
4054
4055 void i915_gem_free_all_phys_object(struct drm_device *dev)
4056 {
4057         int i;
4058
4059         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4060                 i915_gem_free_phys_object(dev, i);
4061 }
4062
4063 void i915_gem_detach_phys_object(struct drm_device *dev,
4064                                  struct drm_i915_gem_object *obj)
4065 {
4066         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4067         char *vaddr;
4068         int i;
4069         int page_count;
4070
4071         if (!obj->phys_obj)
4072                 return;
4073         vaddr = obj->phys_obj->handle->vaddr;
4074
4075         page_count = obj->base.size / PAGE_SIZE;
4076         for (i = 0; i < page_count; i++) {
4077                 struct page *page = shmem_read_mapping_page(mapping, i);
4078                 if (!IS_ERR(page)) {
4079                         char *dst = kmap_atomic(page);
4080                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4081                         kunmap_atomic(dst);
4082
4083                         drm_clflush_pages(&page, 1);
4084
4085                         set_page_dirty(page);
4086                         mark_page_accessed(page);
4087                         page_cache_release(page);
4088                 }
4089         }
4090         intel_gtt_chipset_flush();
4091
4092         obj->phys_obj->cur_obj = NULL;
4093         obj->phys_obj = NULL;
4094 }
4095
4096 int
4097 i915_gem_attach_phys_object(struct drm_device *dev,
4098                             struct drm_i915_gem_object *obj,
4099                             int id,
4100                             int align)
4101 {
4102         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4103         drm_i915_private_t *dev_priv = dev->dev_private;
4104         int ret = 0;
4105         int page_count;
4106         int i;
4107
4108         if (id > I915_MAX_PHYS_OBJECT)
4109                 return -EINVAL;
4110
4111         if (obj->phys_obj) {
4112                 if (obj->phys_obj->id == id)
4113                         return 0;
4114                 i915_gem_detach_phys_object(dev, obj);
4115         }
4116
4117         /* create a new object */
4118         if (!dev_priv->mm.phys_objs[id - 1]) {
4119                 ret = i915_gem_init_phys_object(dev, id,
4120                                                 obj->base.size, align);
4121                 if (ret) {
4122                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4123                                   id, obj->base.size);
4124                         return ret;
4125                 }
4126         }
4127
4128         /* bind to the object */
4129         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4130         obj->phys_obj->cur_obj = obj;
4131
4132         page_count = obj->base.size / PAGE_SIZE;
4133
4134         for (i = 0; i < page_count; i++) {
4135                 struct page *page;
4136                 char *dst, *src;
4137
4138                 page = shmem_read_mapping_page(mapping, i);
4139                 if (IS_ERR(page))
4140                         return PTR_ERR(page);
4141
4142                 src = kmap_atomic(page);
4143                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4144                 memcpy(dst, src, PAGE_SIZE);
4145                 kunmap_atomic(src);
4146
4147                 mark_page_accessed(page);
4148                 page_cache_release(page);
4149         }
4150
4151         return 0;
4152 }
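
/*
 * Usage sketch, illustrative only: hardware that needs physically
 * contiguous cursor or overlay memory attaches a GEM object to a phys slot:
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0, align);
 *
 * The object's shmem pages are copied into the drm_pci_alloc()ed buffer,
 * and i915_gem_detach_phys_object() copies them back out (and flushes)
 * when the association is dropped.
 */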
4153
4154 static int
4155 i915_gem_phys_pwrite(struct drm_device *dev,
4156                      struct drm_i915_gem_object *obj,
4157                      struct drm_i915_gem_pwrite *args,
4158                      struct drm_file *file_priv)
4159 {
4160         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4161         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4162
4163         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4164                 unsigned long unwritten;
4165
4166                 /* The physical object once assigned is fixed for the lifetime
4167                  * of the obj, so we can safely drop the lock and continue
4168                  * to access vaddr.
4169                  */
4170                 mutex_unlock(&dev->struct_mutex);
4171                 unwritten = copy_from_user(vaddr, user_data, args->size);
4172                 mutex_lock(&dev->struct_mutex);
4173                 if (unwritten)
4174                         return -EFAULT;
4175         }
4176
4177         intel_gtt_chipset_flush();
4178         return 0;
4179 }
4180
4181 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4182 {
4183         struct drm_i915_file_private *file_priv = file->driver_priv;
4184
4185         /* Clean up our request list when the client is going away, so that
4186          * later retire_requests won't dereference our soon-to-be-gone
4187          * file_priv.
4188          */
4189         spin_lock(&file_priv->mm.lock);
4190         while (!list_empty(&file_priv->mm.request_list)) {
4191                 struct drm_i915_gem_request *request;
4192
4193                 request = list_first_entry(&file_priv->mm.request_list,
4194                                            struct drm_i915_gem_request,
4195                                            client_list);
4196                 list_del(&request->client_list);
4197                 request->file_priv = NULL;
4198         }
4199         spin_unlock(&file_priv->mm.lock);
4200 }
4201
4202 static int
4203 i915_gpu_is_active(struct drm_device *dev)
4204 {
4205         drm_i915_private_t *dev_priv = dev->dev_private;
4206         int lists_empty;
4207
4208         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4209                       list_empty(&dev_priv->mm.active_list);
4210
4211         return !lists_empty;
4212 }
4213
4214 static int
4215 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4216 {
4217         struct drm_i915_private *dev_priv =
4218                 container_of(shrinker,
4219                              struct drm_i915_private,
4220                              mm.inactive_shrinker);
4221         struct drm_device *dev = dev_priv->dev;
4222         struct drm_i915_gem_object *obj, *next;
4223         int nr_to_scan = sc->nr_to_scan;
4224         int cnt;
4225
4226         if (!mutex_trylock(&dev->struct_mutex))
4227                 return 0;
4228
4229         /* "fast-path" to count number of available objects */
4230         if (nr_to_scan == 0) {
4231                 cnt = 0;
4232                 list_for_each_entry(obj,
4233                                     &dev_priv->mm.inactive_list,
4234                                     mm_list)
4235                         cnt++;
4236                 mutex_unlock(&dev->struct_mutex);
4237                 return cnt / 100 * sysctl_vfs_cache_pressure;
4238         }
4239
4240 rescan:
4241         /* first scan for clean buffers */
4242         i915_gem_retire_requests(dev);
4243
4244         list_for_each_entry_safe(obj, next,
4245                                  &dev_priv->mm.inactive_list,
4246                                  mm_list) {
4247                 if (i915_gem_object_is_purgeable(obj)) {
4248                         if (i915_gem_object_unbind(obj) == 0 &&
4249                             --nr_to_scan == 0)
4250                                 break;
4251                 }
4252         }
4253
4254         /* second pass, evict/count anything still on the inactive list */
4255         cnt = 0;
4256         list_for_each_entry_safe(obj, next,
4257                                  &dev_priv->mm.inactive_list,
4258                                  mm_list) {
4259                 if (nr_to_scan &&
4260                     i915_gem_object_unbind(obj) == 0)
4261                         nr_to_scan--;
4262                 else
4263                         cnt++;
4264         }
4265
4266         if (nr_to_scan && i915_gpu_is_active(dev)) {
4267                 /*
4268                  * We are desperate for pages, so as a last resort, wait
4269                  * for the GPU to finish and discard whatever we can.
4270                  * This dramatically reduces the number of
4271                  * OOM-killer events whilst running the GPU aggressively.
4272                  */
4273                 if (i915_gpu_idle(dev) == 0)
4274                         goto rescan;
4275         }
4276         mutex_unlock(&dev->struct_mutex);
4277         return cnt / 100 * sysctl_vfs_cache_pressure;
4278 }
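
/*
 * Illustrative summary, not from the original source: the shrinker above
 * follows the usual shrink_control contract. A call with sc->nr_to_scan == 0
 * is only a query and returns the scaled count of inactive objects; a
 * non-zero nr_to_scan first unbinds purgeable buffers, then other inactive
 * buffers, and as a last resort idles the GPU and rescans.
 */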