/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>

static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                     uint64_t offset,
                                                     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
                                          bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

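/* An object is "inactive" when it is bound into the GTT but the GPU is
 * neither reading from nor writing to it, and it is not pinned: only
 * such objects are eligible for eviction.
 */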
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
        return obj_priv->gtt_space &&
                !obj_priv->active &&
                obj_priv->pin_count == 0;
}

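/* Initialise the GTT memory manager with the page-aligned [start, end)
 * range that GEM may allocate from, and record the total GTT size.
 */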
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (start >= end ||
            (start & (PAGE_SIZE - 1)) != 0 ||
            (end & (PAGE_SIZE - 1)) != 0) {
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);

        dev->gtt_total = (uint32_t) (end - start);

        return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_init *args = data;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_gem_get_aperture *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = (args->aper_size -
                                     atomic_read(&dev->pin_memory));

        return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                return ret;
        }

        /* Sink the floating reference from kref_init(handlecount) */
        drm_gem_object_handle_unreference_unlocked(obj);

        args->handle = handle;
        return 0;
}

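/* Atomically kmap a single shmem backing page and copy it to userspace.
 * Returns -EFAULT if the copy faults, so callers can fall back to a
 * sleeping path.
 */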
static inline int
fast_shmem_read(struct page **pages,
                loff_t page_base, int page_offset,
                char __user *data,
                int length)
{
        char __iomem *vaddr;
        int unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;

        return 0;
}

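/* Objects tiled on chipsets that swizzle addresses with bit 17 need
 * extra care: bit 17 of a page's physical address may change when the
 * backing pages are swapped out and back in, so the CPU copy paths
 * must handle the swizzle manually.
 */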
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj_priv->tiling_mode != I915_TILING_NONE;
}

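/* Copy between two kmap()ed pages.  Used on the slow paths, where the
 * user pages have already been pinned with get_user_pages().
 */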
static inline void
slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
                int src_offset,
                int length)
{
        char *dst_vaddr, *src_vaddr;

        dst_vaddr = kmap(dst_page);
        src_vaddr = kmap(src_page);

        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

        kunmap(src_page);
        kunmap(dst_page);
}

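/* As slow_shmem_copy, but when the page's physical address has bit 17
 * set, XOR bit 6 of the GPU offset (gpu_offset ^ 64) to undo or apply
 * the hardware swizzling on 9_10_17 chipsets.
 */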
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
                      int cpu_offset,
                      int length,
                      int is_read)
{
        char *gpu_vaddr, *cpu_vaddr;

        /* Use the unswizzled path if this page isn't affected. */
        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
                if (is_read)
                        return slow_shmem_copy(cpu_page, cpu_offset,
                                               gpu_page, gpu_offset, length);
                else
                        return slow_shmem_copy(gpu_page, gpu_offset,
                                               cpu_page, cpu_offset, length);
        }

        gpu_vaddr = kmap(gpu_page);
        cpu_vaddr = kmap(cpu_page);

        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
         */
        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                if (is_read) {
                        memcpy(cpu_vaddr + cpu_offset,
                               gpu_vaddr + swizzled_gpu_offset,
                               this_length);
                } else {
                        memcpy(gpu_vaddr + swizzled_gpu_offset,
                               cpu_vaddr + cpu_offset,
                               this_length);
                }
                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        kunmap(cpu_page);
        kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_read(obj_priv->pages,
                                      page_base, page_offset,
                                      user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

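/* Try to grab the backing pages without waiting on direct reclaim; if
 * that fails, evict something from the GTT and try once more.
 */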
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
        int ret;

        ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

        /* If we've insufficient memory to map in the pages, attempt
         * to make some space by throwing out some old buffers.
         */
        if (ret == -ENOMEM) {
                struct drm_device *dev = obj->dev;

                ret = i915_gem_evict_something(dev, obj->size,
                                               i915_gem_get_gtt_alignment(obj));
                if (ret)
                        return ret;

                ret = i915_gem_object_get_pages(obj, 0);
        }

        return ret;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() up front, so we can copy out of the object's backing
 * pages while holding the struct mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, yet we want to hold it while
         * dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              1);
                } else {
                        slow_shmem_copy(user_pages[data_page_index],
                                        data_page_offset,
                                        obj_priv->pages[shmem_page_index],
                                        shmem_page_offset,
                                        page_length);
                }

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
                page_cache_release(user_pages[i]);
        }
        drm_free_large(user_pages);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
                if (ret != 0)
                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
                                                        file_priv);
        }

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
{
        char __iomem *dst_vaddr;
        char *src_vaddr;

        dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
        src_vaddr = kmap(user_page);

        memcpy_toio(dst_vaddr + gtt_offset,
                    src_vaddr + user_offset,
                    length);

        kunmap(user_page);
        io_mapping_unmap(dst_vaddr);
}

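/* Atomic-kmap counterpart of fast_shmem_read for the write direction;
 * returns -EFAULT on a fault so the caller can fall back to the slow
 * path.
 */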
static inline int
fast_shmem_write(struct page **pages,
                 loff_t page_base, int page_offset,
                 char __user *data,
                 int length)
{
        char __iomem *vaddr;
        unsigned long unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;
        return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto fail;

        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                      page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (ret)
                        goto fail;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
        loff_t first_data_page, last_data_page, num_pages;
        loff_t pinned_pages, i;
        struct page **user_pages;
        struct mm_struct *mm = current->mm;
        int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto out_unpin_object;

        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * gtt_page_base = page offset within aperture
                 * gtt_page_offset = offset within page in aperture
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                gtt_page_base = offset & PAGE_MASK;
                gtt_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((gtt_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - gtt_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                slow_kernel_write(dev_priv->mm.gtt_mapping,
                                  gtt_page_base, gtt_page_offset,
                                  user_pages[data_page_index],
                                  data_page_offset,
                                  page_length);

                remain -= page_length;
                offset += page_length;
                data_ptr += page_length;
        }

out_unpin_object:
        i915_gem_object_unpin(obj);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_write(obj_priv->pages,
                                       page_base, page_offset,
                                       user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              0);
                } else {
                        slow_shmem_copy(obj_priv->pages[shmem_page_index],
                                        shmem_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);
                }

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 dev->gtt_total != 0 &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                       file_priv);
                }
        } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
                                                         file_priv);
                }
        }

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        mutex_lock(&dev->struct_mutex);

        intel_mark_busy(dev, obj);

#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Update the LRU on the fence for the CPU access that's
                 * about to occur.
                 */
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
                        struct drm_i915_fence_reg *reg =
                                &dev_priv->fence_regs[obj_priv->fence_reg];
                        list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);
                }

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                if (ret == -EINVAL)
                        ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

        /* Maintain LRU order of "inactive" objects */
        if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = to_intel_bo(obj);

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret)
                        goto unlock;

                ret = i915_gem_object_set_to_gtt_domain(obj, write);
                if (ret)
                        goto unlock;
        }

        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret)
                        goto unlock;
        }

        if (i915_gem_object_is_inactive(obj_priv))
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;

        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
        mutex_unlock(&dev->struct_mutex);

        switch (ret) {
        case 0:
        case -ERESTARTSYS:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret = 0;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
                DRM_ERROR("failed to add to map hash\n");
                ret = -ENOMEM;
                goto out_free_mm;
        }

        /* By now we should be all set, any drm_mmap request on the offset
         * below will get to our mmap & fault handler */
        obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        kfree(list->map);

        return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        if (dev->dev_mapping)
                unmap_mapping_range(dev->dev_mapping,
                                    obj_priv->mmap_offset, obj->size, 1);
}

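/* Tear down the fake mmap offset: drop the hash entry and the offset
 * allocation created by i915_gem_create_mmap_offset().
 */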
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;

        list = &obj->map_list;
        drm_ht_remove_item(&mm->offset_hash, &list->hash);

        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        if (list->map) {
                kfree(list->map);
                list->map = NULL;
        }

        obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int start, i;

        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;

        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
        if (IS_I9XX(dev))
                start = 1024*1024;
        else
                start = 512*1024;

        for (i = start; i < obj->size; i <<= 1)
                ;

        return i;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap_gtt *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->struct_mutex);

        obj_priv = to_intel_bo(obj);

        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
        }

        args->offset = obj_priv->mmap_offset;

        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

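/* Drop a reference on the object's backing pages, writing back dirty
 * and accessed state to the shmem pages when the last reference goes
 * away.
 */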
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size / PAGE_SIZE;
        int i;

        BUG_ON(obj_priv->pages_refcount == 0);
        BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

        if (--obj_priv->pages_refcount != 0)
                return;

        if (obj_priv->tiling_mode != I915_TILING_NONE)
                i915_gem_object_save_bit_17_swizzle(obj);

        if (obj_priv->madv == I915_MADV_DONTNEED)
                obj_priv->dirty = 0;

        for (i = 0; i < page_count; i++) {
                if (obj_priv->dirty)
                        set_page_dirty(obj_priv->pages[i]);

                if (obj_priv->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(obj_priv->pages[i]);

                page_cache_release(obj_priv->pages[i]);
        }
        obj_priv->dirty = 0;

        drm_free_large(obj_priv->pages);
        obj_priv->pages = NULL;
}

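/* Return the seqno that the next request emitted on @ring will carry,
 * marking the ring as having an outstanding lazy request.
 */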
static uint32_t
i915_gem_next_request_seqno(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        ring->outstanding_lazy_request = true;

        return dev_priv->next_seqno;
}

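/* Mark the object as in use by the GPU: take a reference on first
 * activation, move it to the tail of the ring's active list and record
 * the seqno that must retire before it becomes idle again.
 */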
1483 static void
1484 i915_gem_object_move_to_active(struct drm_gem_object *obj,
1485                                struct intel_ring_buffer *ring)
1486 {
1487         struct drm_device *dev = obj->dev;
1488         drm_i915_private_t *dev_priv = dev->dev_private;
1489         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1490         uint32_t seqno;
1491
1492         BUG_ON(ring == NULL);
1493         obj_priv->ring = ring;
1494
1495         /* Add a reference if we're newly entering the active list. */
1496         if (!obj_priv->active) {
1497                 drm_gem_object_reference(obj);
1498                 obj_priv->active = 1;
1499         }
1500         seqno = i915_gem_next_request_seqno(dev, ring);
1501         /* Move from whatever list we were on to the tail of execution. */
1502         spin_lock(&dev_priv->mm.active_list_lock);
1503         list_move_tail(&obj_priv->list, &ring->active_list);
1504         spin_unlock(&dev_priv->mm.active_list_lock);
1505         obj_priv->last_rendering_seqno = seqno;
1506 }
1507
1508 static void
1509 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1510 {
1511         struct drm_device *dev = obj->dev;
1512         drm_i915_private_t *dev_priv = dev->dev_private;
1513         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1514
1515         BUG_ON(!obj_priv->active);
1516         list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1517         obj_priv->last_rendering_seqno = 0;
1518 }
1519
1520 /* Immediately discard the backing storage */
1521 static void
1522 i915_gem_object_truncate(struct drm_gem_object *obj)
1523 {
1524         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1525         struct inode *inode;
1526
1527         /* Our goal here is to return as much memory as possible
1528          * back to the system, as we are called from the OOM path.
1529          * To do this we must instruct the shmfs to drop all of its
1530          * backing pages, *now*. Here we mirror the actions taken
1531          * by shmem_delete_inode() to release the backing store.
1532          */
1533         inode = obj->filp->f_path.dentry->d_inode;
1534         truncate_inode_pages(inode->i_mapping, 0);
1535         if (inode->i_op->truncate_range)
1536                 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1537
1538         obj_priv->madv = __I915_MADV_PURGED;
1539 }
1540
1541 static inline int
1542 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1543 {
1544         return obj_priv->madv == I915_MADV_DONTNEED;
1545 }
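
/*
 * Illustrative summary (not part of the original source): madv is a
 * three-state flag.  Userspace marks a buffer I915_MADV_DONTNEED via
 * the madvise ioctl when its contents are expendable; while the
 * object is unbound, unbind or the shrinker may then call
 * i915_gem_object_truncate() above, which drops the shmem backing
 * store and latches the terminal __I915_MADV_PURGED state.  A
 * purgeable buffer must be marked I915_MADV_WILLNEED again before it
 * can be bound or mapped (see the checks in the ioctl and bind paths).
 */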
1546
1547 static void
1548 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1549 {
1550         struct drm_device *dev = obj->dev;
1551         drm_i915_private_t *dev_priv = dev->dev_private;
1552         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1553
1554         i915_verify_inactive(dev, __FILE__, __LINE__);
1555         if (obj_priv->pin_count != 0)
1556                 list_del_init(&obj_priv->list);
1557         else
1558                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1559
1560         BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1561
1562         obj_priv->last_rendering_seqno = 0;
1563         obj_priv->ring = NULL;
1564         if (obj_priv->active) {
1565                 obj_priv->active = 0;
1566                 drm_gem_object_unreference(obj);
1567         }
1568         i915_verify_inactive(dev, __FILE__, __LINE__);
1569 }
1570
1571 void
1572 i915_gem_process_flushing_list(struct drm_device *dev,
1573                                uint32_t flush_domains,
1574                                struct intel_ring_buffer *ring)
1575 {
1576         drm_i915_private_t *dev_priv = dev->dev_private;
1577         struct drm_i915_gem_object *obj_priv, *next;
1578
1579         list_for_each_entry_safe(obj_priv, next,
1580                                  &dev_priv->mm.gpu_write_list,
1581                                  gpu_write_list) {
1582                 struct drm_gem_object *obj = &obj_priv->base;
1583
1584                 if ((obj->write_domain & flush_domains) ==
1585                     obj->write_domain &&
1586                     obj_priv->ring->ring_flag == ring->ring_flag) {
1587                         uint32_t old_write_domain = obj->write_domain;
1588
1589                         obj->write_domain = 0;
1590                         list_del_init(&obj_priv->gpu_write_list);
1591                         i915_gem_object_move_to_active(obj, ring);
1592
1593                         /* update the fence lru list */
1594                         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1595                                 struct drm_i915_fence_reg *reg =
1596                                         &dev_priv->fence_regs[obj_priv->fence_reg];
1597                                 list_move_tail(&reg->lru_list,
1598                                                 &dev_priv->mm.fence_list);
1599                         }
1600
1601                         trace_i915_gem_object_change_domain(obj,
1602                                                             obj->read_domains,
1603                                                             old_write_domain);
1604                 }
1605         }
1606 }
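
/*
 * Illustrative note (not part of the original source): this is the
 * point at which a flushed GPU write turns back into "rendering" that
 * can be waited on.  A buffer with write_domain == RENDER sits on the
 * gpu_write_list; once a flush covering RENDER is emitted on its ring,
 * the buffer is moved to the active list tagged with the seqno of the
 * next request, so a later i915_gem_object_wait_rendering() waits for
 * the flush itself to retire.
 */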
1607
1608 uint32_t
1609 i915_add_request(struct drm_device *dev,
1610                  struct drm_file *file_priv,
1611                  struct intel_ring_buffer *ring)
1612 {
1613         drm_i915_private_t *dev_priv = dev->dev_private;
1614         struct drm_i915_file_private *i915_file_priv = NULL;
1615         struct drm_i915_gem_request *request;
1616         uint32_t seqno;
1617         int was_empty;
1618
1619         if (file_priv != NULL)
1620                 i915_file_priv = file_priv->driver_priv;
1621
1622         request = kzalloc(sizeof(*request), GFP_KERNEL);
1623         if (request == NULL)
1624                 return 0;
1625
1626         seqno = ring->add_request(dev, ring, file_priv, 0);
1627
1628         request->seqno = seqno;
1629         request->ring = ring;
1630         request->emitted_jiffies = jiffies;
1631         was_empty = list_empty(&ring->request_list);
1632         list_add_tail(&request->list, &ring->request_list);
1633
1634         if (i915_file_priv) {
1635                 list_add_tail(&request->client_list,
1636                               &i915_file_priv->mm.request_list);
1637         } else {
1638                 INIT_LIST_HEAD(&request->client_list);
1639         }
1640
1641         if (!dev_priv->mm.suspended) {
1642                 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1643                 if (was_empty)
1644                         queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1645         }
1646         return seqno;
1647 }
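
/*
 * Illustrative note (not part of the original source): the usual
 * request round trip, under the assumption that the caller already
 * holds struct_mutex:
 *
 *	seqno = i915_add_request(dev, file_priv, ring);
 *	...
 *	ret = i915_do_wait_request(dev, seqno, true, ring);
 *
 * The wait compares ring->get_gem_seqno() (the value the GPU wrote
 * back to the status page) against the request's seqno and, on
 * success, retires every older request on that ring.
 */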
1648
1649 /**
1650  * Command execution barrier
1651  *
1652  * Ensures that all commands in the ring are finished
1653  * before signalling the CPU
1654  */
1655 static void
1656 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1657 {
1658         uint32_t flush_domains = 0;
1659
1660         /* The sampler always gets flushed on i965 (sigh) */
1661         if (IS_I965G(dev))
1662                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1663
1664         ring->flush(dev, ring,
1665                         I915_GEM_DOMAIN_COMMAND, flush_domains);
1666 }
1667
1668 /**
1669  * Moves buffers associated only with the given active seqno from the
1670  * active list to the flushing or inactive lists, potentially freeing them.
1671  */
1672 static void
1673 i915_gem_retire_request(struct drm_device *dev,
1674                         struct drm_i915_gem_request *request)
1675 {
1676         drm_i915_private_t *dev_priv = dev->dev_private;
1677
1678         trace_i915_gem_request_retire(dev, request->seqno);
1679
1680         /* Move any buffers on the active list that are no longer referenced
1681          * by the ringbuffer to the flushing/inactive lists as appropriate.
1682          */
1683         spin_lock(&dev_priv->mm.active_list_lock);
1684         while (!list_empty(&request->ring->active_list)) {
1685                 struct drm_gem_object *obj;
1686                 struct drm_i915_gem_object *obj_priv;
1687
1688                 obj_priv = list_first_entry(&request->ring->active_list,
1689                                             struct drm_i915_gem_object,
1690                                             list);
1691                 obj = &obj_priv->base;
1692
1693                 /* If the seqno being retired doesn't match the oldest in the
1694                  * list, then the oldest in the list must still be newer than
1695                  * this seqno.
1696                  */
1697                 if (obj_priv->last_rendering_seqno != request->seqno)
1698                         goto out;
1699
1700 #if WATCH_LRU
1701                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1702                          __func__, request->seqno, obj);
1703 #endif
1704
1705                 if (obj->write_domain != 0)
1706                         i915_gem_object_move_to_flushing(obj);
1707                 else {
1708                         /* Take a reference on the object so it won't be
1709                          * freed while the spinlock is held.  The list
1710                          * protection for this spinlock is safe when breaking
1711                          * the lock like this since the next thing we do
1712                          * is just get the head of the list again.
1713                          */
1714                         drm_gem_object_reference(obj);
1715                         i915_gem_object_move_to_inactive(obj);
1716                         spin_unlock(&dev_priv->mm.active_list_lock);
1717                         drm_gem_object_unreference(obj);
1718                         spin_lock(&dev_priv->mm.active_list_lock);
1719                 }
1720         }
1721 out:
1722         spin_unlock(&dev_priv->mm.active_list_lock);
1723 }
1724
1725 /**
1726  * Returns true if seq1 is later than (or equal to) seq2, modulo wraparound.
1727  */
1728 bool
1729 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1730 {
1731         return (int32_t)(seq1 - seq2) >= 0;
1732 }
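
/*
 * Worked example (not part of the original source): with
 * seq1 = 0x00000003 and seq2 = 0xfffffffe the unsigned difference is
 * 0x00000005, which as int32_t is +5, so seq1 is correctly treated as
 * later even though it is numerically smaller.  Swapping the two gives
 * 0xfffffffb, i.e. -5, and the test fails as expected.  The comparison
 * only misbehaves if two in-flight seqnos ever drift more than 2^31
 * apart.
 */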
1733
1734 uint32_t
1735 i915_get_gem_seqno(struct drm_device *dev,
1736                    struct intel_ring_buffer *ring)
1737 {
1738         return ring->get_gem_seqno(dev, ring);
1739 }
1740
1741 /**
1742  * This function clears the request list as sequence numbers are passed.
1743  */
1744 static void
1745 i915_gem_retire_requests_ring(struct drm_device *dev,
1746                               struct intel_ring_buffer *ring)
1747 {
1748         drm_i915_private_t *dev_priv = dev->dev_private;
1749         uint32_t seqno;
1750
1751         if (!ring->status_page.page_addr ||
1752             list_empty(&ring->request_list))
1753                 return;
1754
1755         seqno = i915_get_gem_seqno(dev, ring);
1756
1757         while (!list_empty(&ring->request_list)) {
1758                 struct drm_i915_gem_request *request;
1759                 uint32_t retiring_seqno;
1760
1761                 request = list_first_entry(&ring->request_list,
1762                                            struct drm_i915_gem_request,
1763                                            list);
1764                 retiring_seqno = request->seqno;
1765
1766                 if (i915_seqno_passed(seqno, retiring_seqno) ||
1767                     atomic_read(&dev_priv->mm.wedged)) {
1768                         i915_gem_retire_request(dev, request);
1769
1770                         list_del(&request->list);
1771                         list_del(&request->client_list);
1772                         kfree(request);
1773                 } else
1774                         break;
1775         }
1776
1777         if (unlikely(dev_priv->trace_irq_seqno &&
1778                       i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1779
1780                 ring->user_irq_put(dev, ring);
1781                 dev_priv->trace_irq_seqno = 0;
1782         }
1783 }
1784
1785 void
1786 i915_gem_retire_requests(struct drm_device *dev)
1787 {
1788         drm_i915_private_t *dev_priv = dev->dev_private;
1789
1790         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1791             struct drm_i915_gem_object *obj_priv, *tmp;
1792
1793             /* We must be careful that during unbind() we do not
1794              * accidentally infinitely recurse into retire requests.
1795              * Currently:
1796              *   retire -> free -> unbind -> wait -> retire_ring
1797              */
1798             list_for_each_entry_safe(obj_priv, tmp,
1799                                      &dev_priv->mm.deferred_free_list,
1800                                      list)
1801                     i915_gem_free_object_tail(&obj_priv->base);
1802         }
1803
1804         i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1805         if (HAS_BSD(dev))
1806                 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1807 }
1808
1809 static void
1810 i915_gem_retire_work_handler(struct work_struct *work)
1811 {
1812         drm_i915_private_t *dev_priv;
1813         struct drm_device *dev;
1814
1815         dev_priv = container_of(work, drm_i915_private_t,
1816                                 mm.retire_work.work);
1817         dev = dev_priv->dev;
1818
1819         mutex_lock(&dev->struct_mutex);
1820         i915_gem_retire_requests(dev);
1821
1822         if (!dev_priv->mm.suspended &&
1823                 (!list_empty(&dev_priv->render_ring.request_list) ||
1824                         (HAS_BSD(dev) &&
1825                          !list_empty(&dev_priv->bsd_ring.request_list))))
1826                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1827         mutex_unlock(&dev->struct_mutex);
1828 }
1829
1830 int
1831 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1832                      bool interruptible, struct intel_ring_buffer *ring)
1833 {
1834         drm_i915_private_t *dev_priv = dev->dev_private;
1835         u32 ier;
1836         int ret = 0;
1837
1838         BUG_ON(seqno == 0);
1839
1840         if (seqno == dev_priv->next_seqno) {
1841                 seqno = i915_add_request(dev, NULL, ring);
1842                 if (seqno == 0)
1843                         return -ENOMEM;
1844         }
1845
1846         if (atomic_read(&dev_priv->mm.wedged))
1847                 return -EIO;
1848
1849         if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
1850                 if (HAS_PCH_SPLIT(dev))
1851                         ier = I915_READ(DEIER) | I915_READ(GTIER);
1852                 else
1853                         ier = I915_READ(IER);
1854                 if (!ier) {
1855                         DRM_ERROR("something (likely vbetool) disabled "
1856                                   "interrupts, re-enabling\n");
1857                         i915_driver_irq_preinstall(dev);
1858                         i915_driver_irq_postinstall(dev);
1859                 }
1860
1861                 trace_i915_gem_request_wait_begin(dev, seqno);
1862
1863                 ring->waiting_gem_seqno = seqno;
1864                 ring->user_irq_get(dev, ring);
1865                 if (interruptible)
1866                         ret = wait_event_interruptible(ring->irq_queue,
1867                                 i915_seqno_passed(
1868                                         ring->get_gem_seqno(dev, ring), seqno)
1869                                 || atomic_read(&dev_priv->mm.wedged));
1870                 else
1871                         wait_event(ring->irq_queue,
1872                                 i915_seqno_passed(
1873                                         ring->get_gem_seqno(dev, ring), seqno)
1874                                 || atomic_read(&dev_priv->mm.wedged));
1875
1876                 ring->user_irq_put(dev, ring);
1877                 ring->waiting_gem_seqno = 0;
1878
1879                 trace_i915_gem_request_wait_end(dev, seqno);
1880         }
1881         if (atomic_read(&dev_priv->mm.wedged))
1882                 ret = -EIO;
1883
1884         if (ret && ret != -ERESTARTSYS)
1885                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
1886                           __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
1887                           dev_priv->next_seqno);
1888
1889         /* Directly dispatch request retiring.  While we have the work queue
1890          * to handle this, the waiter on a request often wants an associated
1891          * buffer to have made it to the inactive list, and we would need
1892          * a separate wait queue to handle that.
1893          */
1894         if (ret == 0)
1895                 i915_gem_retire_requests_ring(dev, ring);
1896
1897         return ret;
1898 }
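
/*
 * Illustrative note (not part of the original source): the
 * seqno == dev_priv->next_seqno test above exists for "lazy" requests.
 * i915_gem_next_request_seqno() hands out next_seqno without emitting
 * anything into the ring, so a waiter can hold a seqno for which no
 * request has been written yet; in that case the request must be
 * emitted here, via i915_add_request(), before it can be waited on.
 */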
1899
1900 /**
1901  * Waits for a sequence number to be signaled, and cleans up the
1902  * request and object lists appropriately for that event.
1903  */
1904 static int
1905 i915_wait_request(struct drm_device *dev, uint32_t seqno,
1906                 struct intel_ring_buffer *ring)
1907 {
1908         return i915_do_wait_request(dev, seqno, 1, ring);
1909 }
1910
1911 static void
1912 i915_gem_flush(struct drm_device *dev,
1913                uint32_t invalidate_domains,
1914                uint32_t flush_domains)
1915 {
1916         drm_i915_private_t *dev_priv = dev->dev_private;
1917
1918         if (flush_domains & I915_GEM_DOMAIN_CPU)
1919                 drm_agp_chipset_flush(dev);
1920
1921         dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1922                         invalidate_domains,
1923                         flush_domains);
1924
1925         if (HAS_BSD(dev))
1926                 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1927                                 invalidate_domains,
1928                                 flush_domains);
1929 }
1930
1931 /**
1932  * Ensures that all rendering to the object has completed and the object is
1933  * safe to unbind from the GTT or access from the CPU.
1934  */
1935 static int
1936 i915_gem_object_wait_rendering(struct drm_gem_object *obj,
1937                                bool interruptible)
1938 {
1939         struct drm_device *dev = obj->dev;
1940         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1941         int ret;
1942
1943         /* This function only exists to support waiting for existing rendering,
1944          * not for emitting required flushes.
1945          */
1946         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1947
1948         /* If there is rendering queued on the buffer being evicted, wait for
1949          * it.
1950          */
1951         if (obj_priv->active) {
1952 #if WATCH_BUF
1953                 DRM_INFO("%s: object %p wait for seqno %08x\n",
1954                           __func__, obj, obj_priv->last_rendering_seqno);
1955 #endif
1956                 ret = i915_do_wait_request(dev,
1957                                            obj_priv->last_rendering_seqno,
1958                                            interruptible,
1959                                            obj_priv->ring);
1960                 if (ret != 0)
1961                         return ret;
1962         }
1963
1964         return 0;
1965 }
1966
1967 /**
1968  * Unbinds an object from the GTT aperture.
1969  */
1970 int
1971 i915_gem_object_unbind(struct drm_gem_object *obj)
1972 {
1973         struct drm_device *dev = obj->dev;
1974         drm_i915_private_t *dev_priv = dev->dev_private;
1975         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1976         int ret = 0;
1977
1978 #if WATCH_BUF
1979         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1980         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1981 #endif
1982         if (obj_priv->gtt_space == NULL)
1983                 return 0;
1984
1985         if (obj_priv->pin_count != 0) {
1986                 DRM_ERROR("Attempting to unbind pinned buffer\n");
1987                 return -EINVAL;
1988         }
1989
1990         /* blow away mappings if mapped through GTT */
1991         i915_gem_release_mmap(obj);
1992
1993         /* Move the object to the CPU domain to ensure that
1994          * any possible CPU writes while it's not in the GTT
1995          * are flushed when we go to remap it. This will
1996          * also ensure that all pending GPU writes are finished
1997          * before we unbind.
1998          */
1999         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2000         if (ret == -ERESTARTSYS)
2001                 return ret;
2002         /* Continue on if we fail due to EIO, the GPU is hung so we
2003          * should be safe and we need to cleanup or else we might
2004          * cause memory corruption through use-after-free.
2005          */
2006
2007         /* release the fence reg _after_ flushing */
2008         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2009                 i915_gem_clear_fence_reg(obj);
2010
2011         if (obj_priv->agp_mem != NULL) {
2012                 drm_unbind_agp(obj_priv->agp_mem);
2013                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2014                 obj_priv->agp_mem = NULL;
2015         }
2016
2017         i915_gem_object_put_pages(obj);
2018         BUG_ON(obj_priv->pages_refcount);
2019
2020         if (obj_priv->gtt_space) {
2021                 atomic_dec(&dev->gtt_count);
2022                 atomic_sub(obj->size, &dev->gtt_memory);
2023
2024                 drm_mm_put_block(obj_priv->gtt_space);
2025                 obj_priv->gtt_space = NULL;
2026         }
2027
2028         /* Remove ourselves from the LRU list if present. */
2029         spin_lock(&dev_priv->mm.active_list_lock);
2030         if (!list_empty(&obj_priv->list))
2031                 list_del_init(&obj_priv->list);
2032         spin_unlock(&dev_priv->mm.active_list_lock);
2033
2034         if (i915_gem_object_is_purgeable(obj_priv))
2035                 i915_gem_object_truncate(obj);
2036
2037         trace_i915_gem_object_unbind(obj);
2038
2039         return ret;
2040 }
2041
2042 int
2043 i915_gpu_idle(struct drm_device *dev)
2044 {
2045         drm_i915_private_t *dev_priv = dev->dev_private;
2046         bool lists_empty;
2047         uint32_t seqno1, seqno2;
2048         int ret;
2049
2050         spin_lock(&dev_priv->mm.active_list_lock);
2051         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2052                        list_empty(&dev_priv->render_ring.active_list) &&
2053                        (!HAS_BSD(dev) ||
2054                         list_empty(&dev_priv->bsd_ring.active_list)));
2055         spin_unlock(&dev_priv->mm.active_list_lock);
2056
2057         if (lists_empty)
2058                 return 0;
2059
2060         /* Flush everything onto the inactive list. */
2061         i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2062         seqno1 = i915_add_request(dev, NULL, &dev_priv->render_ring);
2063         if (seqno1 == 0)
2064                 return -ENOMEM;
2065         ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2066         if (ret)
2067                 return ret;
2068
2069         if (HAS_BSD(dev)) {
2070                 seqno2 = i915_add_request(dev, NULL, &dev_priv->bsd_ring);
2071                 if (seqno2 == 0)
2072                         return -ENOMEM;
2073                 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2074                 if (ret)
2075                         return ret;
2076         }
2077
2078         return 0;
2079 }
2080
2081 int
2082 i915_gem_object_get_pages(struct drm_gem_object *obj,
2083                           gfp_t gfpmask)
2084 {
2085         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2086         int page_count, i;
2087         struct address_space *mapping;
2088         struct inode *inode;
2089         struct page *page;
2090
2091         BUG_ON(obj_priv->pages_refcount
2092                         == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2093
2094         if (obj_priv->pages_refcount++ != 0)
2095                 return 0;
2096
2097         /* Get the list of pages out of our struct file.  They'll be pinned
2098          * at this point until we release them.
2099          */
2100         page_count = obj->size / PAGE_SIZE;
2101         BUG_ON(obj_priv->pages != NULL);
2102         obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2103         if (obj_priv->pages == NULL) {
2104                 obj_priv->pages_refcount--;
2105                 return -ENOMEM;
2106         }
2107
2108         inode = obj->filp->f_path.dentry->d_inode;
2109         mapping = inode->i_mapping;
2110         for (i = 0; i < page_count; i++) {
2111                 page = read_cache_page_gfp(mapping, i,
2112                                            GFP_HIGHUSER |
2113                                            __GFP_COLD |
2114                                            __GFP_RECLAIMABLE |
2115                                            gfpmask);
2116                 if (IS_ERR(page))
2117                         goto err_pages;
2118
2119                 obj_priv->pages[i] = page;
2120         }
2121
2122         if (obj_priv->tiling_mode != I915_TILING_NONE)
2123                 i915_gem_object_do_bit_17_swizzle(obj);
2124
2125         return 0;
2126
2127 err_pages:
2128         while (i--)
2129                 page_cache_release(obj_priv->pages[i]);
2130
2131         drm_free_large(obj_priv->pages);
2132         obj_priv->pages = NULL;
2133         obj_priv->pages_refcount--;
2134         return PTR_ERR(page);
2135 }
2136
2137 static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2138 {
2139         struct drm_gem_object *obj = reg->obj;
2140         struct drm_device *dev = obj->dev;
2141         drm_i915_private_t *dev_priv = dev->dev_private;
2142         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2143         int regnum = obj_priv->fence_reg;
2144         uint64_t val;
2145
2146         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2147                     0xfffff000) << 32;
2148         val |= obj_priv->gtt_offset & 0xfffff000;
2149         val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2150                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2151
2152         if (obj_priv->tiling_mode == I915_TILING_Y)
2153                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2154         val |= I965_FENCE_REG_VALID;
2155
2156         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2157 }
2158
2159 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2160 {
2161         struct drm_gem_object *obj = reg->obj;
2162         struct drm_device *dev = obj->dev;
2163         drm_i915_private_t *dev_priv = dev->dev_private;
2164         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2165         int regnum = obj_priv->fence_reg;
2166         uint64_t val;
2167
2168         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2169                     0xfffff000) << 32;
2170         val |= obj_priv->gtt_offset & 0xfffff000;
2171         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2172         if (obj_priv->tiling_mode == I915_TILING_Y)
2173                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2174         val |= I965_FENCE_REG_VALID;
2175
2176         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2177 }
2178
2179 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2180 {
2181         struct drm_gem_object *obj = reg->obj;
2182         struct drm_device *dev = obj->dev;
2183         drm_i915_private_t *dev_priv = dev->dev_private;
2184         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2185         int regnum = obj_priv->fence_reg;
2186         int tile_width;
2187         uint32_t fence_reg, val;
2188         uint32_t pitch_val;
2189
2190         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2191             (obj_priv->gtt_offset & (obj->size - 1))) {
2192                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2193                      __func__, obj_priv->gtt_offset, obj->size);
2194                 return;
2195         }
2196
2197         if (obj_priv->tiling_mode == I915_TILING_Y &&
2198             HAS_128_BYTE_Y_TILING(dev))
2199                 tile_width = 128;
2200         else
2201                 tile_width = 512;
2202
2203         /* Note: the pitch must be a power-of-two number of tile widths */
2204         pitch_val = obj_priv->stride / tile_width;
2205         pitch_val = ffs(pitch_val) - 1;
2206
2207         if (obj_priv->tiling_mode == I915_TILING_Y &&
2208             HAS_128_BYTE_Y_TILING(dev))
2209                 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2210         else
2211                 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2212
2213         val = obj_priv->gtt_offset;
2214         if (obj_priv->tiling_mode == I915_TILING_Y)
2215                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2216         val |= I915_FENCE_SIZE_BITS(obj->size);
2217         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2218         val |= I830_FENCE_REG_VALID;
2219
2220         if (regnum < 8)
2221                 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2222         else
2223                 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2224         I915_WRITE(fence_reg, val);
2225 }
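
/*
 * Worked example (not part of the original source): the register wants
 * log2 of the pitch measured in tiles.  For an X-tiled buffer with a
 * 2048-byte stride on 512-byte tiles:
 *
 *	pitch_val = 2048 / 512 = 4
 *	pitch_val = ffs(4) - 1 = 2	(i.e. log2(4))
 *
 * which is why the pitch must be a power-of-two number of tile widths:
 * a stride of, say, three tiles has no exact log2 encoding.
 */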
2226
2227 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2228 {
2229         struct drm_gem_object *obj = reg->obj;
2230         struct drm_device *dev = obj->dev;
2231         drm_i915_private_t *dev_priv = dev->dev_private;
2232         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2233         int regnum = obj_priv->fence_reg;
2234         uint32_t val;
2235         uint32_t pitch_val;
2236         uint32_t fence_size_bits;
2237
2238         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2239             (obj_priv->gtt_offset & (obj->size - 1))) {
2240                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2241                      __func__, obj_priv->gtt_offset);
2242                 return;
2243         }
2244
2245         pitch_val = obj_priv->stride / 128;
2246         pitch_val = ffs(pitch_val) - 1;
2247         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2248
2249         val = obj_priv->gtt_offset;
2250         if (obj_priv->tiling_mode == I915_TILING_Y)
2251                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2252         fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2253         WARN_ON(fence_size_bits & ~0x00000f00);
2254         val |= fence_size_bits;
2255         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2256         val |= I830_FENCE_REG_VALID;
2257
2258         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2259 }
2260
2261 static int i915_find_fence_reg(struct drm_device *dev)
2262 {
2263         struct drm_i915_fence_reg *reg = NULL;
2264         struct drm_i915_gem_object *obj_priv = NULL;
2265         struct drm_i915_private *dev_priv = dev->dev_private;
2266         struct drm_gem_object *obj = NULL;
2267         int i, avail, ret;
2268
2269         /* First try to find a free reg */
2270         avail = 0;
2271         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2272                 reg = &dev_priv->fence_regs[i];
2273                 if (!reg->obj)
2274                         return i;
2275
2276                 obj_priv = to_intel_bo(reg->obj);
2277                 if (!obj_priv->pin_count)
2278                     avail++;
2279         }
2280
2281         if (avail == 0)
2282                 return -ENOSPC;
2283
2284         /* None available, try to steal one or wait for a user to finish */
2285         i = I915_FENCE_REG_NONE;
2286         list_for_each_entry(reg, &dev_priv->mm.fence_list,
2287                             lru_list) {
2288                 obj = reg->obj;
2289                 obj_priv = to_intel_bo(obj);
2290
2291                 if (obj_priv->pin_count)
2292                         continue;
2293
2294                 /* found one! */
2295                 i = obj_priv->fence_reg;
2296                 break;
2297         }
2298
2299         BUG_ON(i == I915_FENCE_REG_NONE);
2300
2301         /* We only have a reference on obj from the active list. put_fence_reg
2302          * might drop that reference, leaving us with a use-after-free. So hold
2303          * a private reference to obj like the other callers of put_fence_reg
2304          * (the set_tiling ioctl) do. */
2305         drm_gem_object_reference(obj);
2306         ret = i915_gem_object_put_fence_reg(obj);
2307         drm_gem_object_unreference(obj);
2308         if (ret != 0)
2309                 return ret;
2310
2311         return i;
2312 }
2313
2314 /**
2315  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2316  * @obj: object to map through a fence reg
2317  *
2318  * When mapping objects through the GTT, userspace wants to be able to write
2319  * to them without having to worry about swizzling if the object is tiled.
2320  *
2321  * This function walks the fence regs looking for a free one for @obj,
2322  * stealing one if it can't find any.
2323  *
2324  * It then sets up the reg based on the object's properties: address, pitch
2325  * and tiling format.
2326  */
2327 int
2328 i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2329 {
2330         struct drm_device *dev = obj->dev;
2331         struct drm_i915_private *dev_priv = dev->dev_private;
2332         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2333         struct drm_i915_fence_reg *reg = NULL;
2334         int ret;
2335
2336         /* Just update our place in the LRU if our fence is getting used. */
2337         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2338                 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2339                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2340                 return 0;
2341         }
2342
2343         switch (obj_priv->tiling_mode) {
2344         case I915_TILING_NONE:
2345                 WARN(1, "allocating a fence for non-tiled object?\n");
2346                 break;
2347         case I915_TILING_X:
2348                 if (!obj_priv->stride)
2349                         return -EINVAL;
2350                 WARN((obj_priv->stride & (512 - 1)),
2351                      "object 0x%08x is X tiled but has non-512B pitch\n",
2352                      obj_priv->gtt_offset);
2353                 break;
2354         case I915_TILING_Y:
2355                 if (!obj_priv->stride)
2356                         return -EINVAL;
2357                 WARN((obj_priv->stride & (128 - 1)),
2358                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2359                      obj_priv->gtt_offset);
2360                 break;
2361         }
2362
2363         ret = i915_find_fence_reg(dev);
2364         if (ret < 0)
2365                 return ret;
2366
2367         obj_priv->fence_reg = ret;
2368         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2369         list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2370
2371         reg->obj = obj;
2372
2373         if (IS_GEN6(dev))
2374                 sandybridge_write_fence_reg(reg);
2375         else if (IS_I965G(dev))
2376                 i965_write_fence_reg(reg);
2377         else if (IS_I9XX(dev))
2378                 i915_write_fence_reg(reg);
2379         else
2380                 i830_write_fence_reg(reg);
2381
2382         trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2383                         obj_priv->tiling_mode);
2384
2385         return 0;
2386 }
2387
2388 /**
2389  * i915_gem_clear_fence_reg - clear out fence register info
2390  * @obj: object to clear
2391  *
2392  * Zeroes out the fence register itself and clears out the associated
2393  * data structures in dev_priv and obj_priv.
2394  */
2395 static void
2396 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2397 {
2398         struct drm_device *dev = obj->dev;
2399         drm_i915_private_t *dev_priv = dev->dev_private;
2400         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2401         struct drm_i915_fence_reg *reg =
2402                 &dev_priv->fence_regs[obj_priv->fence_reg];
2403
2404         if (IS_GEN6(dev)) {
2405                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2406                              (obj_priv->fence_reg * 8), 0);
2407         } else if (IS_I965G(dev)) {
2408                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2409         } else {
2410                 uint32_t fence_reg;
2411
2412                 if (obj_priv->fence_reg < 8)
2413                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2414                 else
2415                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2416                                                        8) * 4;
2417
2418                 I915_WRITE(fence_reg, 0);
2419         }
2420
2421         reg->obj = NULL;
2422         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2423         list_del_init(&reg->lru_list);
2424 }
2425
2426 /**
2427  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2428  * to the buffer to finish, and then resets the fence register.
2429  * @obj: tiled object holding a fence register.
2430  *
2431  * Zeroes out the fence register itself and clears out the associated
2432  * data structures in dev_priv and obj_priv.
2433  */
2434 int
2435 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2436 {
2437         struct drm_device *dev = obj->dev;
2438         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2439
2440         if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2441                 return 0;
2442
2443         /* If we've changed tiling, GTT-mappings of the object
2444          * need to re-fault to ensure that the correct fence register
2445          * setup is in place.
2446          */
2447         i915_gem_release_mmap(obj);
2448
2449         /* On the i915, GPU access to tiled buffers is via a fence,
2450          * therefore we must wait for any outstanding access to complete
2451          * before clearing the fence.
2452          */
2453         if (!IS_I965G(dev)) {
2454                 int ret;
2455
2456                 ret = i915_gem_object_flush_gpu_write_domain(obj);
2457                 if (ret != 0)
2458                         return ret;
2459
2460                 ret = i915_gem_object_wait_rendering(obj, true);
2461                 if (ret != 0)
2462                         return ret;
2463         }
2464
2465         i915_gem_object_flush_gtt_write_domain(obj);
2466         i915_gem_clear_fence_reg(obj);
2467
2468         return 0;
2469 }
2470
2471 /**
2472  * Finds free space in the GTT aperture and binds the object there.
2473  */
2474 static int
2475 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2476 {
2477         struct drm_device *dev = obj->dev;
2478         drm_i915_private_t *dev_priv = dev->dev_private;
2479         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2480         struct drm_mm_node *free_space;
2481         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2482         int ret;
2483
2484         if (obj_priv->madv != I915_MADV_WILLNEED) {
2485                 DRM_ERROR("Attempting to bind a purgeable object\n");
2486                 return -EINVAL;
2487         }
2488
2489         if (alignment == 0)
2490                 alignment = i915_gem_get_gtt_alignment(obj);
2491         if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2492                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2493                 return -EINVAL;
2494         }
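
        /*
         * Worked example (not part of the original source): the mask
         * test above assumes the minimum alignment is a power of two.
         * With a 4096-byte minimum, alignment = 8192 passes
         * (8192 & 4095 == 0) while alignment = 6144 is rejected
         * (6144 & 4095 == 2048); any whole multiple of the minimum
         * alignment is accepted.
         */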
2495
2496         /* If the object is bigger than the entire aperture, reject it early
2497          * before evicting everything in a vain attempt to find space.
2498          */
2499         if (obj->size > dev->gtt_total) {
2500                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2501                 return -E2BIG;
2502         }
2503
2504  search_free:
2505         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2506                                         obj->size, alignment, 0);
2507         if (free_space != NULL) {
2508                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2509                                                        alignment);
2510                 if (obj_priv->gtt_space != NULL)
2511                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2512         }
2513         if (obj_priv->gtt_space == NULL) {
2514                 /* If the gtt is empty and we're still having trouble
2515                  * fitting our object in, we're out of memory.
2516                  */
2517 #if WATCH_LRU
2518                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2519 #endif
2520                 ret = i915_gem_evict_something(dev, obj->size, alignment);
2521                 if (ret)
2522                         return ret;
2523
2524                 goto search_free;
2525         }
2526
2527 #if WATCH_BUF
2528         DRM_INFO("Binding object of size %zd at 0x%08x\n",
2529                  obj->size, obj_priv->gtt_offset);
2530 #endif
2531         ret = i915_gem_object_get_pages(obj, gfpmask);
2532         if (ret) {
2533                 drm_mm_put_block(obj_priv->gtt_space);
2534                 obj_priv->gtt_space = NULL;
2535
2536                 if (ret == -ENOMEM) {
2537                         /* first try to clear up some space from the GTT */
2538                         ret = i915_gem_evict_something(dev, obj->size,
2539                                                        alignment);
2540                         if (ret) {
2541                                 /* now try to shrink everyone else */
2542                                 if (gfpmask) {
2543                                         gfpmask = 0;
2544                                         goto search_free;
2545                                 }
2546
2547                                 return ret;
2548                         }
2549
2550                         goto search_free;
2551                 }
2552
2553                 return ret;
2554         }
2555
2556         /* Create an AGP memory structure pointing at our pages, and bind it
2557          * into the GTT.
2558          */
2559         obj_priv->agp_mem = drm_agp_bind_pages(dev,
2560                                                obj_priv->pages,
2561                                                obj->size >> PAGE_SHIFT,
2562                                                obj_priv->gtt_offset,
2563                                                obj_priv->agp_type);
2564         if (obj_priv->agp_mem == NULL) {
2565                 i915_gem_object_put_pages(obj);
2566                 drm_mm_put_block(obj_priv->gtt_space);
2567                 obj_priv->gtt_space = NULL;
2568
2569                 ret = i915_gem_evict_something(dev, obj->size, alignment);
2570                 if (ret)
2571                         return ret;
2572
2573                 goto search_free;
2574         }
2575         atomic_inc(&dev->gtt_count);
2576         atomic_add(obj->size, &dev->gtt_memory);
2577
2578         /* keep track of the bound object by adding it to the inactive list */
2579         list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2580
2581         /* Assert that the object is not currently in any GPU domain. As it
2582          * wasn't in the GTT, there shouldn't be any way it could have been in
2583          * a GPU cache
2584          */
2585         BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2586         BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2587
2588         trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2589
2590         return 0;
2591 }
2592
2593 void
2594 i915_gem_clflush_object(struct drm_gem_object *obj)
2595 {
2596         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
2597
2598         /* If we don't have a page list set up, then the object has no
2599          * pages visible to the GPU, and we can skip the cache flush
2600          * because it will happen again at bind time.
2601          */
2602         if (obj_priv->pages == NULL)
2603                 return;
2604
2605         trace_i915_gem_object_clflush(obj);
2606
2607         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2608 }
2609
2610 /** Flushes any GPU write domain for the object if it's dirty. */
2611 static int
2612 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2613 {
2614         struct drm_device *dev = obj->dev;
2615         uint32_t old_write_domain;
2616         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2617
2618         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2619                 return 0;
2620
2621         /* Queue the GPU write cache flushing we need. */
2622         old_write_domain = obj->write_domain;
2623         i915_gem_flush(dev, 0, obj->write_domain);
2624         if (i915_add_request(dev, NULL, obj_priv->ring) == 0)
2625                 return -ENOMEM;
2626
2627         trace_i915_gem_object_change_domain(obj,
2628                                             obj->read_domains,
2629                                             old_write_domain);
2630         return 0;
2631 }
2632
2633 /** Flushes the GTT write domain for the object if it's dirty. */
2634 static void
2635 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2636 {
2637         uint32_t old_write_domain;
2638
2639         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2640                 return;
2641
2642         /* No actual flushing is required for the GTT write domain.   Writes
2643          * to it immediately go to main memory as far as we know, so there's
2644          * no chipset flush.  It also doesn't land in render cache.
2645          */
2646         old_write_domain = obj->write_domain;
2647         obj->write_domain = 0;
2648
2649         trace_i915_gem_object_change_domain(obj,
2650                                             obj->read_domains,
2651                                             old_write_domain);
2652 }
2653
2654 /** Flushes the CPU write domain for the object if it's dirty. */
2655 static void
2656 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2657 {
2658         struct drm_device *dev = obj->dev;
2659         uint32_t old_write_domain;
2660
2661         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2662                 return;
2663
2664         i915_gem_clflush_object(obj);
2665         drm_agp_chipset_flush(dev);
2666         old_write_domain = obj->write_domain;
2667         obj->write_domain = 0;
2668
2669         trace_i915_gem_object_change_domain(obj,
2670                                             obj->read_domains,
2671                                             old_write_domain);
2672 }
2673
2674 int
2675 i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2676 {
2677         int ret = 0;
2678
2679         switch (obj->write_domain) {
2680         case I915_GEM_DOMAIN_GTT:
2681                 i915_gem_object_flush_gtt_write_domain(obj);
2682                 break;
2683         case I915_GEM_DOMAIN_CPU:
2684                 i915_gem_object_flush_cpu_write_domain(obj);
2685                 break;
2686         default:
2687                 ret = i915_gem_object_flush_gpu_write_domain(obj);
2688                 break;
2689         }
2690
2691         return ret;
2692 }
2693
2694 /**
2695  * Moves a single object to the GTT read, and possibly write domain.
2696  *
2697  * This function returns when the move is complete, including waiting on
2698  * flushes to occur.
2699  */
2700 int
2701 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2702 {
2703         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2704         uint32_t old_write_domain, old_read_domains;
2705         int ret;
2706
2707         /* Not valid to be called on unbound objects. */
2708         if (obj_priv->gtt_space == NULL)
2709                 return -EINVAL;
2710
2711         ret = i915_gem_object_flush_gpu_write_domain(obj);
2712         if (ret != 0)
2713                 return ret;
2714
2715         /* Wait on any GPU rendering and flushing to occur. */
2716         ret = i915_gem_object_wait_rendering(obj, true);
2717         if (ret != 0)
2718                 return ret;
2719
2720         old_write_domain = obj->write_domain;
2721         old_read_domains = obj->read_domains;
2722
2723         /* If we're writing through the GTT domain, then CPU and GPU caches
2724          * will need to be invalidated at next use.
2725          */
2726         if (write)
2727                 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2728
2729         i915_gem_object_flush_cpu_write_domain(obj);
2730
2731         /* It should now be out of any other write domains, and we can update
2732          * the domain values for our changes.
2733          */
2734         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2735         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2736         if (write) {
2737                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2738                 obj_priv->dirty = 1;
2739         }
2740
2741         trace_i915_gem_object_change_domain(obj,
2742                                             old_read_domains,
2743                                             old_write_domain);
2744
2745         return 0;
2746 }
2747
2748 /*
2749  * Prepare the buffer for use as a display plane. Use an uninterruptible
2750  * flush wait, as the modesetting process must not be interrupted.
2751  */
2752 int
2753 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2754 {
2755         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2756         uint32_t old_write_domain, old_read_domains;
2757         int ret;
2758
2759         /* Not valid to be called on unbound objects. */
2760         if (obj_priv->gtt_space == NULL)
2761                 return -EINVAL;
2762
2763         ret = i915_gem_object_flush_gpu_write_domain(obj);
2764         if (ret)
2765                 return ret;
2766
2767         /* Wait on any GPU rendering and flushing to occur. */
2768         ret = i915_gem_object_wait_rendering(obj, false);
2769         if (ret != 0)
2770                 return ret;
2771
2772         i915_gem_object_flush_cpu_write_domain(obj);
2773
2774         old_write_domain = obj->write_domain;
2775         old_read_domains = obj->read_domains;
2776
2777         /* It should now be out of any other write domains, and we can update
2778          * the domain values for our changes.
2779          */
2780         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2781         obj->read_domains = I915_GEM_DOMAIN_GTT;
2782         obj->write_domain = I915_GEM_DOMAIN_GTT;
2783         obj_priv->dirty = 1;
2784
2785         trace_i915_gem_object_change_domain(obj,
2786                                             old_read_domains,
2787                                             old_write_domain);
2788
2789         return 0;
2790 }
2791
2792 /**
2793  * Moves a single object to the CPU read, and possibly write domain.
2794  *
2795  * This function returns when the move is complete, including waiting on
2796  * flushes to occur.
2797  */
2798 static int
2799 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2800 {
2801         uint32_t old_write_domain, old_read_domains;
2802         int ret;
2803
2804         ret = i915_gem_object_flush_gpu_write_domain(obj);
2805         if (ret)
2806                 return ret;
2807
2808         /* Wait on any GPU rendering and flushing to occur. */
2809         ret = i915_gem_object_wait_rendering(obj, true);
2810         if (ret != 0)
2811                 return ret;
2812
2813         i915_gem_object_flush_gtt_write_domain(obj);
2814
2815         /* If we have a partially-valid cache of the object in the CPU,
2816          * finish invalidating it and free the per-page flags.
2817          */
2818         i915_gem_object_set_to_full_cpu_read_domain(obj);
2819
2820         old_write_domain = obj->write_domain;
2821         old_read_domains = obj->read_domains;
2822
2823         /* Flush the CPU cache if it's still invalid. */
2824         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2825                 i915_gem_clflush_object(obj);
2826
2827                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2828         }
2829
2830         /* It should now be out of any other write domains, and we can update
2831          * the domain values for our changes.
2832          */
2833         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2834
2835         /* If we're writing through the CPU, then the GPU read domains will
2836          * need to be invalidated at next use.
2837          */
2838         if (write) {
2839                 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2840                 obj->write_domain = I915_GEM_DOMAIN_CPU;
2841         }
2842
2843         trace_i915_gem_object_change_domain(obj,
2844                                             old_read_domains,
2845                                             old_write_domain);
2846
2847         return 0;
2848 }
2849
2850 /*
2851  * Set the next domain for the specified object. This
2852  * may not actually perform the necessary flushing/invalidating though,
2853  * as that may want to be batched with other set_domain operations.
2854  *
2855  * This is (we hope) the only really tricky part of gem. The goal
2856  * is fairly simple -- track which caches hold bits of the object
2857  * and make sure they remain coherent. A few concrete examples may
2858  * help to explain how it works. For shorthand, we use the notation
2859  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2860  * a pair of read and write domain masks.
2861  *
2862  * Case 1: the batch buffer
2863  *
2864  *      1. Allocated
2865  *      2. Written by CPU
2866  *      3. Mapped to GTT
2867  *      4. Read by GPU
2868  *      5. Unmapped from GTT
2869  *      6. Freed
2870  *
2871  *      Let's take these a step at a time
2872  *
2873  *      1. Allocated
2874  *              Pages allocated from the kernel may still have
2875  *              cache contents, so we set them to (CPU, CPU) always.
2876  *      2. Written by CPU (using pwrite)
2877  *              The pwrite function calls set_domain (CPU, CPU) and
2878  *              this function does nothing (as nothing changes)
2879  *      3. Mapped to GTT
2880  *              This function asserts that the object is not
2881  *              currently in any GPU-based read or write domains
2882  *      4. Read by GPU
2883  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
2884  *              As write_domain is zero, this function adds in the
2885  *              current read domains (CPU+COMMAND, 0).
2886  *              flush_domains is set to CPU.
2887  *              invalidate_domains is set to COMMAND
2888  *              clflush is run to get data out of the CPU caches
2889  *              then i915_dev_set_domain calls i915_gem_flush to
2890  *              emit an MI_FLUSH and drm_agp_chipset_flush
2891  *      5. Unmapped from GTT
2892  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
2893  *              flush_domains and invalidate_domains end up both zero
2894  *              so no flushing/invalidating happens
2895  *      6. Freed
2896  *              yay, done
2897  *
2898  * Case 2: The shared render buffer
2899  *
2900  *      1. Allocated
2901  *      2. Mapped to GTT
2902  *      3. Read/written by GPU
2903  *      4. set_domain to (CPU,CPU)
2904  *      5. Read/written by CPU
2905  *      6. Read/written by GPU
2906  *
2907  *      1. Allocated
2908  *              Same as last example, (CPU, CPU)
2909  *      2. Mapped to GTT
2910  *              Nothing changes (assertions find that it is not in the GPU)
2911  *      3. Read/written by GPU
2912  *              execbuffer calls set_domain (RENDER, RENDER)
2913  *              flush_domains gets CPU
2914  *              invalidate_domains gets GPU
2915  *              clflush (obj)
2916  *              MI_FLUSH and drm_agp_chipset_flush
2917  *      4. set_domain (CPU, CPU)
2918  *              flush_domains gets GPU
2919  *              invalidate_domains gets CPU
2920  *              wait_rendering (obj) to make sure all drawing is complete.
2921  *              This will include an MI_FLUSH to get the data from GPU
2922  *              to memory
2923  *              clflush (obj) to invalidate the CPU cache
2924  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2925  *      5. Read/written by CPU
2926  *              cache lines are loaded and dirtied
2927  *      6. Read/written by GPU
2928  *              Same as last GPU access
2929  *
2930  * Case 3: The constant buffer
2931  *
2932  *      1. Allocated
2933  *      2. Written by CPU
2934  *      3. Read by GPU
2935  *      4. Updated (written) by CPU again
2936  *      5. Read by GPU
2937  *
2938  *      1. Allocated
2939  *              (CPU, CPU)
2940  *      2. Written by CPU
2941  *              (CPU, CPU)
2942  *      3. Read by GPU
2943  *              (CPU+RENDER, 0)
2944  *              flush_domains = CPU
2945  *              invalidate_domains = RENDER
2946  *              clflush (obj)
2947  *              MI_FLUSH
2948  *              drm_agp_chipset_flush
2949  *      4. Updated (written) by CPU again
2950  *              (CPU, CPU)
2951  *              flush_domains = 0 (no previous write domain)
2952  *              invalidate_domains = 0 (no new read domains)
2953  *      5. Read by GPU
2954  *              (CPU+RENDER, 0)
2955  *              flush_domains = CPU
2956  *              invalidate_domains = RENDER
2957  *              clflush (obj)
2958  *              MI_FLUSH
2959  *              drm_agp_chipset_flush
2960  */
2961 static void
2962 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2963 {
2964         struct drm_device               *dev = obj->dev;
2965         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
2966         uint32_t                        invalidate_domains = 0;
2967         uint32_t                        flush_domains = 0;
2968         uint32_t                        old_read_domains;
2969
2970         BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2971         BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2972
2973         intel_mark_busy(dev, obj);
2974
2975 #if WATCH_BUF
2976         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2977                  __func__, obj,
2978                  obj->read_domains, obj->pending_read_domains,
2979                  obj->write_domain, obj->pending_write_domain);
2980 #endif
2981         /*
2982          * If the object isn't moving to a new write domain,
2983          * let the object stay in multiple read domains
2984          */
2985         if (obj->pending_write_domain == 0)
2986                 obj->pending_read_domains |= obj->read_domains;
2987         else
2988                 obj_priv->dirty = 1;
2989
2990         /*
2991          * Flush the current write domain if
2992          * the new read domains don't match. Invalidate
2993          * any read domains which differ from the old
2994          * write domain
2995          */
2996         if (obj->write_domain &&
2997             obj->write_domain != obj->pending_read_domains) {
2998                 flush_domains |= obj->write_domain;
2999                 invalidate_domains |=
3000                         obj->pending_read_domains & ~obj->write_domain;
3001         }
3002         /*
3003          * Invalidate any read caches which may have
3004          * stale data. That is, any new read domains.
3005          */
3006         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3007         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3008 #if WATCH_BUF
3009                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3010                          __func__, flush_domains, invalidate_domains);
3011 #endif
3012                 i915_gem_clflush_object(obj);
3013         }
3014
3015         old_read_domains = obj->read_domains;
3016
3017         /* The actual obj->write_domain will be updated with
3018          * pending_write_domain after we emit the accumulated flush for all
3019          * of our domain changes in execbuffers (which clears objects'
3020          * write_domains).  So if we have a current write domain that we
3021          * aren't changing, set pending_write_domain to that.
3022          */
3023         if (flush_domains == 0 && obj->pending_write_domain == 0)
3024                 obj->pending_write_domain = obj->write_domain;
3025         obj->read_domains = obj->pending_read_domains;
3026
3027         dev->invalidate_domains |= invalidate_domains;
3028         dev->flush_domains |= flush_domains;
3029 #if WATCH_BUF
3030         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3031                  __func__,
3032                  obj->read_domains, obj->write_domain,
3033                  dev->invalidate_domains, dev->flush_domains);
3034 #endif
3035
3036         trace_i915_gem_object_change_domain(obj,
3037                                             old_read_domains,
3038                                             obj->write_domain);
3039 }
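
/*
 * Worked example of the bookkeeping above (editor's sketch; the values
 * follow "Case 3" from the big comment): an object currently in
 * (CPU, CPU) being made readable by the render engine.
 *
 *   read_domains         = CPU,    write_domain         = CPU
 *   pending_read_domains = RENDER, pending_write_domain = 0
 *
 *   pending_write_domain == 0, so pending_read_domains |= read_domains,
 *   giving CPU+RENDER.  write_domain (CPU) differs from that, so
 *   flush_domains |= CPU and invalidate_domains |= RENDER; the final
 *   invalidate pass adds nothing new.  CPU is in the combined mask, so
 *   the object is clflushed, and the device-wide masks pick up
 *   (invalidate = RENDER, flush = CPU) for the batched i915_gem_flush.
 */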
3040
3041 /**
3042  * Moves the object from a partially valid CPU read domain to a fully valid one.
3043  *
3044  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3045  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3046  */
3047 static void
3048 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3049 {
3050         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3051
3052         if (!obj_priv->page_cpu_valid)
3053                 return;
3054
3055         /* If we're partially in the CPU read domain, finish moving it in.
3056          */
3057         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3058                 int i;
3059
3060                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3061                         if (obj_priv->page_cpu_valid[i])
3062                                 continue;
3063                         drm_clflush_pages(obj_priv->pages + i, 1);
3064                 }
3065         }
3066
3067         /* Free the page_cpu_valid mappings which are now stale, whether
3068          * or not we've got I915_GEM_DOMAIN_CPU.
3069          */
3070         kfree(obj_priv->page_cpu_valid);
3071         obj_priv->page_cpu_valid = NULL;
3072 }
3073
3074 /**
3075  * Set the CPU read domain on a range of the object.
3076  *
3077  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3078  * not entirely valid.  The page_cpu_valid member of the object tracks which
3079  * pages have been flushed, and will be respected by
3080  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid
3081  * mapping of the whole object.
3082  *
3083  * This function returns when the move is complete, including waiting on
3084  * flushes to occur.
3085  */
3086 static int
3087 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3088                                           uint64_t offset, uint64_t size)
3089 {
3090         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3091         uint32_t old_read_domains;
3092         int i, ret;
3093
3094         if (offset == 0 && size == obj->size)
3095                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3096
3097         ret = i915_gem_object_flush_gpu_write_domain(obj);
3098         if (ret)
3099                 return ret;
3100
3101         /* Wait on any GPU rendering and flushing to occur. */
3102         ret = i915_gem_object_wait_rendering(obj, true);
3103         if (ret != 0)
3104                 return ret;
3105         i915_gem_object_flush_gtt_write_domain(obj);
3106
3107         /* If we're already fully in the CPU read domain, we're done. */
3108         if (obj_priv->page_cpu_valid == NULL &&
3109             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3110                 return 0;
3111
3112         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3113          * newly adding I915_GEM_DOMAIN_CPU
3114          */
3115         if (obj_priv->page_cpu_valid == NULL) {
3116                 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3117                                                    GFP_KERNEL);
3118                 if (obj_priv->page_cpu_valid == NULL)
3119                         return -ENOMEM;
3120         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3121                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3122
3123         /* Flush the cache on any pages that are still invalid from the CPU's
3124          * perspective.
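         * For example, with 4 KiB pages, offset = 5000 and size = 3000
         * cover bytes 5000..7999, so only page index 1 is clflushed.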
3125          */
3126         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3127              i++) {
3128                 if (obj_priv->page_cpu_valid[i])
3129                         continue;
3130
3131                 drm_clflush_pages(obj_priv->pages + i, 1);
3132
3133                 obj_priv->page_cpu_valid[i] = 1;
3134         }
3135
3136         /* It should now be out of any other write domains, and we can update
3137          * the domain values for our changes.
3138          */
3139         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3140
3141         old_read_domains = obj->read_domains;
3142         obj->read_domains |= I915_GEM_DOMAIN_CPU;
3143
3144         trace_i915_gem_object_change_domain(obj,
3145                                             old_read_domains,
3146                                             obj->write_domain);
3147
3148         return 0;
3149 }
3150
3151 /**
3152  * Pin an object to the GTT and evaluate the relocations landing in it.
3153  */
3154 static int
3155 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3156                                  struct drm_file *file_priv,
3157                                  struct drm_i915_gem_exec_object2 *entry,
3158                                  struct drm_i915_gem_relocation_entry *relocs)
3159 {
3160         struct drm_device *dev = obj->dev;
3161         drm_i915_private_t *dev_priv = dev->dev_private;
3162         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3163         int i, ret;
3164         void __iomem *reloc_page;
3165         bool need_fence;
3166
3167         need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3168                      obj_priv->tiling_mode != I915_TILING_NONE;
3169
3170         /* Check fence reg constraints and rebind if necessary */
3171         if (need_fence &&
3172             !i915_gem_object_fence_offset_ok(obj,
3173                                              obj_priv->tiling_mode)) {
3174                 ret = i915_gem_object_unbind(obj);
3175                 if (ret)
3176                         return ret;
3177         }
3178
3179         /* Choose the GTT offset for our buffer and put it there. */
3180         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3181         if (ret)
3182                 return ret;
3183
3184         /*
3185          * Pre-965 chips need a fence register set up in order to
3186          * properly handle blits to/from tiled surfaces.
3187          */
3188         if (need_fence) {
3189                 ret = i915_gem_object_get_fence_reg(obj);
3190                 if (ret != 0) {
3191                         i915_gem_object_unpin(obj);
3192                         return ret;
3193                 }
3194         }
3195
3196         entry->offset = obj_priv->gtt_offset;
3197
3198         /* Apply the relocations, using the GTT aperture to avoid cache
3199          * flushing requirements.
3200          */
3201         for (i = 0; i < entry->relocation_count; i++) {
3202                 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
3203                 struct drm_gem_object *target_obj;
3204                 struct drm_i915_gem_object *target_obj_priv;
3205                 uint32_t reloc_val, reloc_offset;
3206                 uint32_t __iomem *reloc_entry;
3207
3208                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3209                                                    reloc->target_handle);
3210                 if (target_obj == NULL) {
3211                         i915_gem_object_unpin(obj);
3212                         return -ENOENT;
3213                 }
3214                 target_obj_priv = to_intel_bo(target_obj);
3215
3216 #if WATCH_RELOC
3217                 DRM_INFO("%s: obj %p offset %08x target %d "
3218                          "read %08x write %08x gtt %08x "
3219                          "presumed %08x delta %08x\n",
3220                          __func__,
3221                          obj,
3222                          (int) reloc->offset,
3223                          (int) reloc->target_handle,
3224                          (int) reloc->read_domains,
3225                          (int) reloc->write_domain,
3226                          (int) target_obj_priv->gtt_offset,
3227                          (int) reloc->presumed_offset,
3228                          reloc->delta);
3229 #endif
3230
3231                 /* The target buffer should have appeared before us in the
3232                  * exec_object list, so it should have a GTT space bound by now.
3233                  */
3234                 if (target_obj_priv->gtt_space == NULL) {
3235                         DRM_ERROR("No GTT space found for object %d\n",
3236                                   reloc->target_handle);
3237                         drm_gem_object_unreference(target_obj);
3238                         i915_gem_object_unpin(obj);
3239                         return -EINVAL;
3240                 }
3241
3242                 /* Validate that the target is in a valid r/w GPU domain */
3243                 if (reloc->write_domain & (reloc->write_domain - 1)) {
3244                         DRM_ERROR("reloc with multiple write domains: "
3245                                   "obj %p target %d offset %d "
3246                                   "read %08x write %08x\n",
3247                                   obj, reloc->target_handle,
3248                                   (int) reloc->offset,
3249                                   reloc->read_domains,
3250                                   reloc->write_domain);
                         drm_gem_object_unreference(target_obj);
                         i915_gem_object_unpin(obj);
3251                         return -EINVAL;
3252                 }
3253                 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3254                     reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3255                         DRM_ERROR("reloc with read/write CPU domains: "
3256                                   "obj %p target %d offset %d "
3257                                   "read %08x write %08x",
3258                                   obj, reloc->target_handle,
3259                                   (int) reloc->offset,
3260                                   reloc->read_domains,
3261                                   reloc->write_domain);
3262                         drm_gem_object_unreference(target_obj);
3263                         i915_gem_object_unpin(obj);
3264                         return -EINVAL;
3265                 }
3266                 if (reloc->write_domain && target_obj->pending_write_domain &&
3267                     reloc->write_domain != target_obj->pending_write_domain) {
3268                         DRM_ERROR("Write domain conflict: "
3269                                   "obj %p target %d offset %d "
3270                                   "new %08x old %08x\n",
3271                                   obj, reloc->target_handle,
3272                                   (int) reloc->offset,
3273                                   reloc->write_domain,
3274                                   target_obj->pending_write_domain);
3275                         drm_gem_object_unreference(target_obj);
3276                         i915_gem_object_unpin(obj);
3277                         return -EINVAL;
3278                 }
3279
3280                 target_obj->pending_read_domains |= reloc->read_domains;
3281                 target_obj->pending_write_domain |= reloc->write_domain;
3282
3283                 /* If the relocation already has the right value in it, no
3284                  * more work needs to be done.
3285                  */
3286                 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3287                         drm_gem_object_unreference(target_obj);
3288                         continue;
3289                 }
3290
3291                 /* Check that the relocation address is valid... */
3292                 if (reloc->offset > obj->size - 4) {
3293                         DRM_ERROR("Relocation beyond object bounds: "
3294                                   "obj %p target %d offset %d size %d.\n",
3295                                   obj, reloc->target_handle,
3296                                   (int) reloc->offset, (int) obj->size);
3297                         drm_gem_object_unreference(target_obj);
3298                         i915_gem_object_unpin(obj);
3299                         return -EINVAL;
3300                 }
3301                 if (reloc->offset & 3) {
3302                         DRM_ERROR("Relocation not 4-byte aligned: "
3303                                   "obj %p target %d offset %d.\n",
3304                                   obj, reloc->target_handle,
3305                                   (int) reloc->offset);
3306                         drm_gem_object_unreference(target_obj);
3307                         i915_gem_object_unpin(obj);
3308                         return -EINVAL;
3309                 }
3310
3311                 /* and points to somewhere within the target object. */
3312                 if (reloc->delta >= target_obj->size) {
3313                         DRM_ERROR("Relocation beyond target object bounds: "
3314                                   "obj %p target %d delta %d size %d.\n",
3315                                   obj, reloc->target_handle,
3316                                   (int) reloc->delta, (int) target_obj->size);
3317                         drm_gem_object_unreference(target_obj);
3318                         i915_gem_object_unpin(obj);
3319                         return -EINVAL;
3320                 }
3321
3322                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3323                 if (ret != 0) {
3324                         drm_gem_object_unreference(target_obj);
3325                         i915_gem_object_unpin(obj);
3326                         return ret;
3327                 }
3328
3329                 /* Map the page containing the relocation we're going to
3330                  * perform.
3331                  */
3332                 reloc_offset = obj_priv->gtt_offset + reloc->offset;
3333                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3334                                                       (reloc_offset &
3335                                                        ~(PAGE_SIZE - 1)),
3336                                                       KM_USER0);
3337                 reloc_entry = (uint32_t __iomem *)(reloc_page +
3338                                                    (reloc_offset & (PAGE_SIZE - 1)));
3339                 reloc_val = target_obj_priv->gtt_offset + reloc->delta; /* final GTT address */
3340
3341 #if WATCH_BUF
3342                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3343                           obj, (unsigned int) reloc->offset,
3344                           readl(reloc_entry), reloc_val);
3345 #endif
3346                 writel(reloc_val, reloc_entry);
3347                 io_mapping_unmap_atomic(reloc_page, KM_USER0);
3348
3349                 /* The updated presumed offset for this entry will be
3350                  * copied back out to the user.
3351                  */
3352                 reloc->presumed_offset = target_obj_priv->gtt_offset;
3353
3354                 drm_gem_object_unreference(target_obj);
3355         }
3356
3357 #if WATCH_BUF
3358         if (0)
3359                 i915_gem_dump_object(obj, 128, __func__, ~0);
3360 #endif
3361         return 0;
3362 }
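
#if 0
/*
 * Editor's illustration, not driver code: how userspace (libdrm, say)
 * might fill one of the relocation entries consumed by the loop above.
 * The handle and offsets are hypothetical.
 */
static void example_fill_reloc(struct drm_i915_gem_relocation_entry *reloc)
{
	reloc->target_handle = 2;	/* handle of the BO being pointed at */
	reloc->delta = 256;		/* byte offset inside the target BO */
	reloc->offset = 64;		/* dword in this BO to be patched */
	reloc->presumed_offset = 0;	/* guess; rewritten on the way out */
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = 0;	/* read-only use of the target */
}
#endif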
3363
3364 /* Throttle our rendering by waiting until the ring has completed our requests
3365  * emitted over 20 msec ago.
3366  *
3367  * Note that if we were to use the current jiffies each time around the loop,
3368  * we wouldn't escape the function with any frames outstanding if the time to
3369  * render a frame was over 20ms.
3370  *
3371  * This should get us reasonable parallelism between CPU and GPU but also
3372  * relatively low latency when blocking on a particular request to finish.
3373  */
3374 static int
3375 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3376 {
3377         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3378         int ret = 0;
3379         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3380
3381         mutex_lock(&dev->struct_mutex);
3382         while (!list_empty(&i915_file_priv->mm.request_list)) {
3383                 struct drm_i915_gem_request *request;
3384
3385                 request = list_first_entry(&i915_file_priv->mm.request_list,
3386                                            struct drm_i915_gem_request,
3387                                            client_list);
3388
3389                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3390                         break;
3391
3392                 ret = i915_wait_request(dev, request->seqno, request->ring);
3393                 if (ret != 0)
3394                         break;
3395         }
3396         mutex_unlock(&dev->struct_mutex);
3397
3398         return ret;
3399 }
3400
3401 static int
3402 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3403                               uint32_t buffer_count,
3404                               struct drm_i915_gem_relocation_entry **relocs)
3405 {
3406         uint32_t reloc_count = 0, reloc_index = 0, i;
3407         int ret;
3408
3409         *relocs = NULL;
3410         for (i = 0; i < buffer_count; i++) {
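                /* Guard against wrap-around while summing the per-buffer
                 * relocation counts; a wrapped total would undersize the
                 * allocation below.
                 */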
3411                 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3412                         return -EINVAL;
3413                 reloc_count += exec_list[i].relocation_count;
3414         }
3415
3416         *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3417         if (*relocs == NULL) {
3418                 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3419                 return -ENOMEM;
3420         }
3421
3422         for (i = 0; i < buffer_count; i++) {
3423                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3424
3425                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3426
3427                 ret = copy_from_user(&(*relocs)[reloc_index],
3428                                      user_relocs,
3429                                      exec_list[i].relocation_count *
3430                                      sizeof(**relocs));
3431                 if (ret != 0) {
3432                         drm_free_large(*relocs);
3433                         *relocs = NULL;
3434                         return -EFAULT;
3435                 }
3436
3437                 reloc_index += exec_list[i].relocation_count;
3438         }
3439
3440         return 0;
3441 }
3442
3443 static int
3444 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3445                             uint32_t buffer_count,
3446                             struct drm_i915_gem_relocation_entry *relocs)
3447 {
3448         uint32_t reloc_count = 0, i;
3449         int ret = 0;
3450
3451         if (relocs == NULL)
3452                 return 0;
3453
3454         for (i = 0; i < buffer_count; i++) {
3455                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3456                 int unwritten;
3457
3458                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3459
3460                 unwritten = copy_to_user(user_relocs,
3461                                          &relocs[reloc_count],
3462                                          exec_list[i].relocation_count *
3463                                          sizeof(*relocs));
3464
3465                 if (unwritten) {
3466                         ret = -EFAULT;
3467                         goto err;
3468                 }
3469
3470                 reloc_count += exec_list[i].relocation_count;
3471         }
3472
3473 err:
3474         drm_free_large(relocs);
3475
3476         return ret;
3477 }
3478
3479 static int
3480 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3481                            uint64_t exec_offset)
3482 {
3483         uint32_t exec_start, exec_len;
3484
3485         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3486         exec_len = (uint32_t) exec->batch_len;
3487
3488         if ((exec_start | exec_len) & 0x7)
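        /* Both the batch start and length must be 8-byte aligned; the
         * command streamer fetches qwords (editor's note).
         */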
3489                 return -EINVAL;
3490
3491         if (!exec_start)
3492                 return -EINVAL;
3493
3494         return 0;
3495 }
3496
3497 static int
3498 i915_gem_wait_for_pending_flip(struct drm_device *dev,
3499                                struct drm_gem_object **object_list,
3500                                int count)
3501 {
3502         drm_i915_private_t *dev_priv = dev->dev_private;
3503         struct drm_i915_gem_object *obj_priv;
3504         DEFINE_WAIT(wait);
3505         int i, ret = 0;
3506
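        /* Open-coded wait_event(): sleep on pending_flip_queue until none
         * of the objects has a page flip outstanding, dropping struct_mutex
         * across schedule() so the flip completion can make progress.
         */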
3507         for (;;) {
3508                 prepare_to_wait(&dev_priv->pending_flip_queue,
3509                                 &wait, TASK_INTERRUPTIBLE);
3510                 for (i = 0; i < count; i++) {
3511                         obj_priv = to_intel_bo(object_list[i]);
3512                         if (atomic_read(&obj_priv->pending_flip) > 0)
3513                                 break;
3514                 }
3515                 if (i == count)
3516                         break;
3517
3518                 if (!signal_pending(current)) {
3519                         mutex_unlock(&dev->struct_mutex);
3520                         schedule();
3521                         mutex_lock(&dev->struct_mutex);
3522                         continue;
3523                 }
3524                 ret = -ERESTARTSYS;
3525                 break;
3526         }
3527         finish_wait(&dev_priv->pending_flip_queue, &wait);
3528
3529         return ret;
3530 }
3531
3532
3533 int
3534 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3535                        struct drm_file *file_priv,
3536                        struct drm_i915_gem_execbuffer2 *args,
3537                        struct drm_i915_gem_exec_object2 *exec_list)
3538 {
3539         drm_i915_private_t *dev_priv = dev->dev_private;
3540         struct drm_gem_object **object_list = NULL;
3541         struct drm_gem_object *batch_obj;
3542         struct drm_i915_gem_object *obj_priv;
3543         struct drm_clip_rect *cliprects = NULL;
3544         struct drm_i915_gem_relocation_entry *relocs = NULL;
3545         int ret = 0, ret2, i, pinned = 0;
3546         uint64_t exec_offset;
3547         uint32_t seqno, reloc_index;
3548         int pin_tries, flips;
3549
3550         struct intel_ring_buffer *ring = NULL;
3551
3552 #if WATCH_EXEC
3553         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3554                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3555 #endif
3556         if (args->flags & I915_EXEC_BSD) {
3557                 if (!HAS_BSD(dev)) {
3558                         DRM_ERROR("execbuf with wrong flag\n");
3559                         return -EINVAL;
3560                 }
3561                 ring = &dev_priv->bsd_ring;
3562         } else {
3563                 ring = &dev_priv->render_ring;
3564         }
3565
3566         if (args->buffer_count < 1) {
3567                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3568                 return -EINVAL;
3569         }
3570         object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3571         if (object_list == NULL) {
3572                 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3573                           args->buffer_count);
3574                 ret = -ENOMEM;
3575                 goto pre_mutex_err;
3576         }
3577
3578         if (args->num_cliprects != 0) {
3579                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3580                                     GFP_KERNEL);
3581                 if (cliprects == NULL) {
3582                         ret = -ENOMEM;
3583                         goto pre_mutex_err;
3584                 }
3585
3586                 ret = copy_from_user(cliprects,
3587                                      (struct drm_clip_rect __user *)
3588                                      (uintptr_t) args->cliprects_ptr,
3589                                      sizeof(*cliprects) * args->num_cliprects);
3590                 if (ret != 0) {
3591                         DRM_ERROR("copy %d cliprects failed: %d\n",
3592                                   args->num_cliprects, ret);
3593                         ret = -EFAULT;
3594                         goto pre_mutex_err;
3595                 }
3596         }
3597
3598         ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3599                                             &relocs);
3600         if (ret != 0)
3601                 goto pre_mutex_err;
3602
3603         mutex_lock(&dev->struct_mutex);
3604
3605         i915_verify_inactive(dev, __FILE__, __LINE__);
3606
3607         if (atomic_read(&dev_priv->mm.wedged)) {
3608                 mutex_unlock(&dev->struct_mutex);
3609                 ret = -EIO;
3610                 goto pre_mutex_err;
3611         }
3612
3613         if (dev_priv->mm.suspended) {
3614                 mutex_unlock(&dev->struct_mutex);
3615                 ret = -EBUSY;
3616                 goto pre_mutex_err;
3617         }
3618
3619         /* Look up object handles */
3620         flips = 0;
3621         for (i = 0; i < args->buffer_count; i++) {
3622                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3623                                                        exec_list[i].handle);
3624                 if (object_list[i] == NULL) {
3625                         DRM_ERROR("Invalid object handle %d at index %d\n",
3626                                    exec_list[i].handle, i);
3627                         /* prevent error path from reading uninitialized data */
3628                         args->buffer_count = i + 1;
3629                         ret = -ENOENT;
3630                         goto err;
3631                 }
3632
3633                 obj_priv = to_intel_bo(object_list[i]);
3634                 if (obj_priv->in_execbuffer) {
3635                         DRM_ERROR("Object %p appears more than once in object list\n",
3636                                    object_list[i]);
3637                         /* prevent error path from reading uninitialized data */
3638                         args->buffer_count = i + 1;
3639                         ret = -EINVAL;
3640                         goto err;
3641                 }
3642                 obj_priv->in_execbuffer = true;
3643                 flips += atomic_read(&obj_priv->pending_flip);
3644         }
3645
3646         if (flips > 0) {
3647                 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3648                                                      args->buffer_count);
3649                 if (ret)
3650                         goto err;
3651         }
3652
3653         /* Pin and relocate */
3654         for (pin_tries = 0; ; pin_tries++) {
3655                 ret = 0;
3656                 reloc_index = 0;
3657
3658                 for (i = 0; i < args->buffer_count; i++) {
3659                         object_list[i]->pending_read_domains = 0;
3660                         object_list[i]->pending_write_domain = 0;
3661                         ret = i915_gem_object_pin_and_relocate(object_list[i],
3662                                                                file_priv,
3663                                                                &exec_list[i],
3664                                                                &relocs[reloc_index]);
3665                         if (ret)
3666                                 break;
3667                         pinned = i + 1;
3668                         reloc_index += exec_list[i].relocation_count;
3669                 }
3670                 /* success */
3671                 if (ret == 0)
3672                         break;
3673
3674                 /* error other than GTT full, or we've already tried again */
3675                 if (ret != -ENOSPC || pin_tries >= 1) {
3676                         if (ret != -ERESTARTSYS) {
3677                                 unsigned long long total_size = 0;
3678                                 int num_fences = 0;
3679                                 for (i = 0; i < args->buffer_count; i++) {
3680                                         obj_priv = to_intel_bo(object_list[i]);
3681
3682                                         total_size += object_list[i]->size;
3683                                         num_fences +=
3684                                                 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3685                                                 obj_priv->tiling_mode != I915_TILING_NONE;
3686                                 }
3687                                 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
3688                                           pinned+1, args->buffer_count,
3689                                           total_size, num_fences,
3690                                           ret);
3691                                 DRM_ERROR("%d objects [%d pinned], "
3692                                           "%d object bytes [%d pinned], "
3693                                           "%d/%d gtt bytes\n",
3694                                           atomic_read(&dev->object_count),
3695                                           atomic_read(&dev->pin_count),
3696                                           atomic_read(&dev->object_memory),
3697                                           atomic_read(&dev->pin_memory),
3698                                           atomic_read(&dev->gtt_memory),
3699                                           dev->gtt_total);
3700                         }
3701                         goto err;
3702                 }
3703
3704                 /* unpin all of our buffers */
3705                 for (i = 0; i < pinned; i++)
3706                         i915_gem_object_unpin(object_list[i]);
3707                 pinned = 0;
3708
3709                 /* evict everyone we can from the aperture */
3710                 ret = i915_gem_evict_everything(dev);
3711                 if (ret && ret != -ENOSPC)
3712                         goto err;
3713         }
3714
3715         /* Set the pending read domains for the batch buffer (the last object in the exec list, by convention) to COMMAND */
3716         batch_obj = object_list[args->buffer_count-1];
3717         if (batch_obj->pending_write_domain) {
3718                 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3719                 ret = -EINVAL;
3720                 goto err;
3721         }
3722         batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3723
3724         /* Sanity check the batch buffer, prior to moving objects */
3725         exec_offset = exec_list[args->buffer_count - 1].offset;
3726         ret = i915_gem_check_execbuffer(args, exec_offset);
3727         if (ret != 0) {
3728                 DRM_ERROR("execbuf with invalid offset/length\n");
3729                 goto err;
3730         }
3731
3732         i915_verify_inactive(dev, __FILE__, __LINE__);
3733
3734         /* Zero the global flush/invalidate flags. These
3735          * will be modified as new domains are computed
3736          * for each object
3737          */
3738         dev->invalidate_domains = 0;
3739         dev->flush_domains = 0;
3740
3741         for (i = 0; i < args->buffer_count; i++) {
3742                 struct drm_gem_object *obj = object_list[i];
3743
3744                 /* Compute new gpu domains and update invalidate/flush */
3745                 i915_gem_object_set_to_gpu_domain(obj);
3746         }
3747
3748         i915_verify_inactive(dev, __FILE__, __LINE__);
3749
3750         if (dev->invalidate_domains | dev->flush_domains) {
3751 #if WATCH_EXEC
3752                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3753                           __func__,
3754                          dev->invalidate_domains,
3755                          dev->flush_domains);
3756 #endif
3757                 i915_gem_flush(dev,
3758                                dev->invalidate_domains,
3759                                dev->flush_domains);
3760         }
3761
3762         if (dev_priv->render_ring.outstanding_lazy_request) {
3763                 (void)i915_add_request(dev, file_priv, &dev_priv->render_ring);
3764                 dev_priv->render_ring.outstanding_lazy_request = false;
3765         }
3766         if (dev_priv->bsd_ring.outstanding_lazy_request) {
3767                 (void)i915_add_request(dev, file_priv, &dev_priv->bsd_ring);
3768                 dev_priv->bsd_ring.outstanding_lazy_request = false;
3769         }
3770
3771         for (i = 0; i < args->buffer_count; i++) {
3772                 struct drm_gem_object *obj = object_list[i];
3773                 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3774                 uint32_t old_write_domain = obj->write_domain;
3775
3776                 obj->write_domain = obj->pending_write_domain;
3777                 if (obj->write_domain)
3778                         list_move_tail(&obj_priv->gpu_write_list,
3779                                        &dev_priv->mm.gpu_write_list);
3780                 else
3781                         list_del_init(&obj_priv->gpu_write_list);
3782
3783                 trace_i915_gem_object_change_domain(obj,
3784                                                     obj->read_domains,
3785                                                     old_write_domain);
3786         }
3787
3788         i915_verify_inactive(dev, __FILE__, __LINE__);
3789
3790 #if WATCH_COHERENCY
3791         for (i = 0; i < args->buffer_count; i++) {
3792                 i915_gem_object_check_coherency(object_list[i],
3793                                                 exec_list[i].handle);
3794         }
3795 #endif
3796
3797 #if WATCH_EXEC
3798         i915_gem_dump_object(batch_obj,
3799                               args->batch_len,
3800                               __func__,
3801                               ~0);
3802 #endif
3803
3804         /* Exec the batchbuffer */
3805         ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3806                         cliprects, exec_offset);
3807         if (ret) {
3808                 DRM_ERROR("dispatch failed %d\n", ret);
3809                 goto err;
3810         }
3811
3812         /*
3813          * Ensure that the commands in the batch buffer are
3814          * finished before the interrupt fires
3815          */
3816         i915_retire_commands(dev, ring);
3817
3818         i915_verify_inactive(dev, __FILE__, __LINE__);
3819
3820         for (i = 0; i < args->buffer_count; i++) {
3821                 struct drm_gem_object *obj = object_list[i];
3822                 obj_priv = to_intel_bo(obj);
3823
3824                 i915_gem_object_move_to_active(obj, ring);
3825 #if WATCH_LRU
3826                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3827 #endif
3828         }
3829
3830         /*
3831          * Get a seqno representing the execution of the current buffer,
3832          * which we can wait on.  We would like to mitigate these interrupts,
3833          * likely by only creating seqnos occasionally (so that we have
3834          * *some* interrupts representing completion of buffers that we can
3835          * wait on when trying to clear up gtt space).
3836          */
3837         seqno = i915_add_request(dev, file_priv, ring);
3838
3839 #if WATCH_LRU
3840         i915_dump_lru(dev, __func__);
3841 #endif
3842
3843         i915_verify_inactive(dev, __FILE__, __LINE__);
3844
3845 err:
3846         for (i = 0; i < pinned; i++)
3847                 i915_gem_object_unpin(object_list[i]);
3848
3849         for (i = 0; i < args->buffer_count; i++) {
3850                 if (object_list[i]) {
3851                         obj_priv = to_intel_bo(object_list[i]);
3852                         obj_priv->in_execbuffer = false;
3853                 }
3854                 drm_gem_object_unreference(object_list[i]);
3855         }
3856
3857         mutex_unlock(&dev->struct_mutex);
3858
3859 pre_mutex_err:
3860         /* Copy the updated relocations out regardless of current error
3861          * state.  Failure to update the relocs would mean that the next
3862          * time userland calls execbuf, it would do so with presumed offset
3863          * state that didn't match the actual object state.
3864          */
3865         ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3866                                            relocs);
3867         if (ret2 != 0) {
3868                 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3869
3870                 if (ret == 0)
3871                         ret = ret2;
3872         }
3873
3874         drm_free_large(object_list);
3875         kfree(cliprects);
3876
3877         return ret;
3878 }
3879
3880 /*
3881  * Legacy execbuffer just creates an exec2 list from the original exec object
3882  * list array and passes it to the real function.
3883  */
3884 int
3885 i915_gem_execbuffer(struct drm_device *dev, void *data,
3886                     struct drm_file *file_priv)
3887 {
3888         struct drm_i915_gem_execbuffer *args = data;
3889         struct drm_i915_gem_execbuffer2 exec2;
3890         struct drm_i915_gem_exec_object *exec_list = NULL;
3891         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3892         int ret, i;
3893
3894 #if WATCH_EXEC
3895         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3896                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3897 #endif
3898
3899         if (args->buffer_count < 1) {
3900                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3901                 return -EINVAL;
3902         }
3903
3904         /* Copy in the exec list from userland */
3905         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3906         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3907         if (exec_list == NULL || exec2_list == NULL) {
3908                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3909                           args->buffer_count);
3910                 drm_free_large(exec_list);
3911                 drm_free_large(exec2_list);
3912                 return -ENOMEM;
3913         }
3914         ret = copy_from_user(exec_list,
3915                              (struct drm_i915_relocation_entry __user *)
3916                              (uintptr_t) args->buffers_ptr,
3917                              sizeof(*exec_list) * args->buffer_count);
3918         if (ret != 0) {
3919                 DRM_ERROR("copy %d exec entries failed %d\n",
3920                           args->buffer_count, ret);
3921                 drm_free_large(exec_list);
3922                 drm_free_large(exec2_list);
3923                 return -EFAULT;
3924         }
3925
3926         for (i = 0; i < args->buffer_count; i++) {
3927                 exec2_list[i].handle = exec_list[i].handle;
3928                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3929                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3930                 exec2_list[i].alignment = exec_list[i].alignment;
3931                 exec2_list[i].offset = exec_list[i].offset;
3932                 if (!IS_I965G(dev))
3933                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3934                 else
3935                         exec2_list[i].flags = 0;
3936         }
3937
3938         exec2.buffers_ptr = args->buffers_ptr;
3939         exec2.buffer_count = args->buffer_count;
3940         exec2.batch_start_offset = args->batch_start_offset;
3941         exec2.batch_len = args->batch_len;
3942         exec2.DR1 = args->DR1;
3943         exec2.DR4 = args->DR4;
3944         exec2.num_cliprects = args->num_cliprects;
3945         exec2.cliprects_ptr = args->cliprects_ptr;
3946         exec2.flags = I915_EXEC_RENDER;
3947
3948         ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3949         if (!ret) {
3950                 /* Copy the new buffer offsets back to the user's exec list. */
3951                 for (i = 0; i < args->buffer_count; i++)
3952                         exec_list[i].offset = exec2_list[i].offset;
3953                 /* ... and back out to userspace */
3954                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3955                                    (uintptr_t) args->buffers_ptr,
3956                                    exec_list,
3957                                    sizeof(*exec_list) * args->buffer_count);
3958                 if (ret) {
3959                         ret = -EFAULT;
3960                         DRM_ERROR("failed to copy %d exec entries "
3961                                   "back to user (%d)\n",
3962                                   args->buffer_count, ret);
3963                 }
3964         }
3965
3966         drm_free_large(exec_list);
3967         drm_free_large(exec2_list);
3968         return ret;
3969 }
3970
3971 int
3972 i915_gem_execbuffer2(struct drm_device *dev, void *data,
3973                      struct drm_file *file_priv)
3974 {
3975         struct drm_i915_gem_execbuffer2 *args = data;
3976         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3977         int ret;
3978
3979 #if WATCH_EXEC
3980         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3981                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3982 #endif
3983
3984         if (args->buffer_count < 1) {
3985                 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
3986                 return -EINVAL;
3987         }
3988
3989         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3990         if (exec2_list == NULL) {
3991                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3992                           args->buffer_count);
3993                 return -ENOMEM;
3994         }
3995         ret = copy_from_user(exec2_list,
3996                              (struct drm_i915_relocation_entry __user *)
3997                              (uintptr_t) args->buffers_ptr,
3998                              sizeof(*exec2_list) * args->buffer_count);
3999         if (ret != 0) {
4000                 DRM_ERROR("copy %d exec entries failed %d\n",
4001                           args->buffer_count, ret);
4002                 drm_free_large(exec2_list);
4003                 return -EFAULT;
4004         }
4005
4006         ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4007         if (!ret) {
4008                 /* Copy the new buffer offsets back to the user's exec list. */
4009                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4010                                    (uintptr_t) args->buffers_ptr,
4011                                    exec2_list,
4012                                    sizeof(*exec2_list) * args->buffer_count);
4013                 if (ret) {
4014                         ret = -EFAULT;
4015                         DRM_ERROR("failed to copy %d exec entries "
4016                                   "back to user (%d)\n",
4017                                   args->buffer_count, ret);
4018                 }
4019         }
4020
4021         drm_free_large(exec2_list);
4022         return ret;
4023 }
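
#if 0
/*
 * Editor's illustration, not driver code: a minimal userspace call into
 * the execbuffer2 ioctl handled above.  Error handling is omitted and
 * fd/objs are hypothetical; the batch object must be last in the list.
 */
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int example_exec(int fd, struct drm_i915_gem_exec_object2 *objs,
			int count, __u32 batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = count;
	execbuf.batch_start_offset = 0;		/* 8-byte aligned */
	execbuf.batch_len = batch_len;		/* 8-byte aligned */
	execbuf.flags = I915_EXEC_RENDER;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif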
4024
4025 int
4026 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4027 {
4028         struct drm_device *dev = obj->dev;
4029         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4030         int ret;
4031
4032         BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4033
4034         i915_verify_inactive(dev, __FILE__, __LINE__);
4035
4036         if (obj_priv->gtt_space != NULL) {
4037                 if (alignment == 0)
4038                         alignment = i915_gem_get_gtt_alignment(obj);
4039                 if (obj_priv->gtt_offset & (alignment - 1)) {
4040                         WARN(obj_priv->pin_count,
4041                              "bo is already pinned with incorrect alignment:"
4042                              " offset=%x, req.alignment=%x\n",
4043                              obj_priv->gtt_offset, alignment);
4044                         ret = i915_gem_object_unbind(obj);
4045                         if (ret)
4046                                 return ret;
4047                 }
4048         }
4049
4050         if (obj_priv->gtt_space == NULL) {
4051                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4052                 if (ret)
4053                         return ret;
4054         }
4055
4056         obj_priv->pin_count++;
4057
4058         /* If the object is not active and not pending a flush,
4059          * remove it from the inactive list
4060          */
4061         if (obj_priv->pin_count == 1) {
4062                 atomic_inc(&dev->pin_count);
4063                 atomic_add(obj->size, &dev->pin_memory);
4064                 if (!obj_priv->active &&
4065                     (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4066                         list_del_init(&obj_priv->list);
4067         }
4068         i915_verify_inactive(dev, __FILE__, __LINE__);
4069
4070         return 0;
4071 }
4072
4073 void
4074 i915_gem_object_unpin(struct drm_gem_object *obj)
4075 {
4076         struct drm_device *dev = obj->dev;
4077         drm_i915_private_t *dev_priv = dev->dev_private;
4078         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4079
4080         i915_verify_inactive(dev, __FILE__, __LINE__);
4081         obj_priv->pin_count--;
4082         BUG_ON(obj_priv->pin_count < 0);
4083         BUG_ON(obj_priv->gtt_space == NULL);
4084
4085         /* If the object is no longer pinned, and is
4086          * neither active nor being flushed, then stick it on
4087          * the inactive list
4088          */
4089         if (obj_priv->pin_count == 0) {
4090                 if (!obj_priv->active &&
4091                     (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4092                         list_move_tail(&obj_priv->list,
4093                                        &dev_priv->mm.inactive_list);
4094                 atomic_dec(&dev->pin_count);
4095                 atomic_sub(obj->size, &dev->pin_memory);
4096         }
4097         i915_verify_inactive(dev, __FILE__, __LINE__);
4098 }
4099
4100 int
4101 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4102                    struct drm_file *file_priv)
4103 {
4104         struct drm_i915_gem_pin *args = data;
4105         struct drm_gem_object *obj;
4106         struct drm_i915_gem_object *obj_priv;
4107         int ret;
4108
4109         mutex_lock(&dev->struct_mutex);
4110
4111         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4112         if (obj == NULL) {
4113                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4114                           args->handle);
4115                 mutex_unlock(&dev->struct_mutex);
4116                 return -ENOENT;
4117         }
4118         obj_priv = to_intel_bo(obj);
4119
4120         if (obj_priv->madv != I915_MADV_WILLNEED) {
4121                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4122                 drm_gem_object_unreference(obj);
4123                 mutex_unlock(&dev->struct_mutex);
4124                 return -EINVAL;
4125         }
4126
4127         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4128                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4129                           args->handle);
4130                 drm_gem_object_unreference(obj);
4131                 mutex_unlock(&dev->struct_mutex);
4132                 return -EINVAL;
4133         }
4134
4135         obj_priv->user_pin_count++;
4136         obj_priv->pin_filp = file_priv;
4137         if (obj_priv->user_pin_count == 1) {
4138                 ret = i915_gem_object_pin(obj, args->alignment);
4139                 if (ret != 0) {
4140                         drm_gem_object_unreference(obj);
4141                         mutex_unlock(&dev->struct_mutex);
4142                         return ret;
4143                 }
4144         }
4145
4146         /* XXX - flush the CPU caches for pinned objects
4147          * as the X server doesn't manage domains yet
4148          */
4149         i915_gem_object_flush_cpu_write_domain(obj);
4150         args->offset = obj_priv->gtt_offset;
4151         drm_gem_object_unreference(obj);
4152         mutex_unlock(&dev->struct_mutex);
4153
4154         return 0;
4155 }
4156
4157 int
4158 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4159                      struct drm_file *file_priv)
4160 {
4161         struct drm_i915_gem_pin *args = data;
4162         struct drm_gem_object *obj;
4163         struct drm_i915_gem_object *obj_priv;
4164
4165         mutex_lock(&dev->struct_mutex);
4166
4167         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4168         if (obj == NULL) {
4169                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4170                           args->handle);
4171                 mutex_unlock(&dev->struct_mutex);
4172                 return -ENOENT;
4173         }
4174
4175         obj_priv = to_intel_bo(obj);
4176         if (obj_priv->pin_filp != file_priv) {
4177                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4178                           args->handle);
4179                 drm_gem_object_unreference(obj);
4180                 mutex_unlock(&dev->struct_mutex);
4181                 return -EINVAL;
4182         }
4183         obj_priv->user_pin_count--;
4184         if (obj_priv->user_pin_count == 0) {
4185                 obj_priv->pin_filp = NULL;
4186                 i915_gem_object_unpin(obj);
4187         }
4188
4189         drm_gem_object_unreference(obj);
4190         mutex_unlock(&dev->struct_mutex);
4191         return 0;
4192 }
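
#if 0
/*
 * Editor's illustration, not driver code: pairing the pin/unpin ioctls
 * above from userspace, as the X server historically did.  fd and
 * handle are hypothetical.
 */
#include <sys/ioctl.h>

static int example_pin(int fd, __u32 handle, __u64 *offset)
{
	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);

	if (ret == 0)
		*offset = pin.offset;	/* GTT offset, stable while pinned */
	return ret;
}

static void example_unpin(int fd, __u32 handle)
{
	struct drm_i915_gem_unpin unpin = { .handle = handle };

	ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
}
#endif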
4193
4194 int
4195 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4196                     struct drm_file *file_priv)
4197 {
4198         struct drm_i915_gem_busy *args = data;
4199         struct drm_gem_object *obj;
4200         struct drm_i915_gem_object *obj_priv;
4201
4202         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4203         if (obj == NULL) {
4204                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4205                           args->handle);
4206                 return -ENOENT;
4207         }
4208
4209         mutex_lock(&dev->struct_mutex);
4210
4211         /* Count all active objects as busy, even if they are currently not used
4212          * by the gpu. Users of this interface expect objects to eventually
4213          * become non-busy without any further actions, therefore emit any
4214          * necessary flushes here.
4215          */
4216         obj_priv = to_intel_bo(obj);
4217         args->busy = obj_priv->active;
4218         if (args->busy) {
4219                 /* Unconditionally flush objects, even when the gpu still uses this
4220                  * object. Userspace calling this function indicates that it wants to
4221                  * use this buffer rather sooner than later, so issuing the required
4222                  * flush earlier is beneficial.
4223                  */
4224                 if (obj->write_domain) {
4225                         i915_gem_flush(dev, 0, obj->write_domain);
4226                         (void)i915_add_request(dev, file_priv, obj_priv->ring);
4227                 }
4228
4229                 /* Update the active list for the hardware's current position.
4230                  * Otherwise this only updates on a delayed timer or when irqs
4231                  * are actually unmasked, and our working set ends up being
4232                  * larger than required.
4233                  */
4234                 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4235
4236                 args->busy = obj_priv->active;
4237         }
4238
4239         drm_gem_object_unreference(obj);
4240         mutex_unlock(&dev->struct_mutex);
4241         return 0;
4242 }
4243
4244 int
4245 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4246                         struct drm_file *file_priv)
4247 {
4248         return i915_gem_ring_throttle(dev, file_priv);
4249 }
4250
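     /* Update the caller's advice for the object's backing storage:
      * I915_MADV_WILLNEED keeps the pages, I915_MADV_DONTNEED marks them
      * purgeable.  A purgeable object that is no longer bound into the
      * GTT has its backing storage discarded immediately.
      */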
4251 int
4252 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4253                        struct drm_file *file_priv)
4254 {
4255         struct drm_i915_gem_madvise *args = data;
4256         struct drm_gem_object *obj;
4257         struct drm_i915_gem_object *obj_priv;
4258
4259         switch (args->madv) {
4260         case I915_MADV_DONTNEED:
4261         case I915_MADV_WILLNEED:
4262                 break;
4263         default:
4264                 return -EINVAL;
4265         }
4266
4267         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4268         if (obj == NULL) {
4269                 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4270                           args->handle);
4271                 return -ENOENT;
4272         }
4273
4274         mutex_lock(&dev->struct_mutex);
4275         obj_priv = to_intel_bo(obj);
4276
4277         if (obj_priv->pin_count) {
4278                 drm_gem_object_unreference(obj);
4279                 mutex_unlock(&dev->struct_mutex);
4280
4281                 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4282                 return -EINVAL;
4283         }
4284
4285         if (obj_priv->madv != __I915_MADV_PURGED)
4286                 obj_priv->madv = args->madv;
4287
4288         /* if the object is no longer bound, discard its backing storage */
4289         if (i915_gem_object_is_purgeable(obj_priv) &&
4290             obj_priv->gtt_space == NULL)
4291                 i915_gem_object_truncate(obj);
4292
4293         args->retained = obj_priv->madv != __I915_MADV_PURGED;
4294
4295         drm_gem_object_unreference(obj);
4296         mutex_unlock(&dev->struct_mutex);
4297
4298         return 0;
4299 }
4300
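     /* Allocate and initialize a new GEM object.  Fresh objects start out
      * entirely in the CPU domain, with their backing storage marked as
      * needed (I915_MADV_WILLNEED).
      */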
4301 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4302                                               size_t size)
4303 {
4304         struct drm_i915_gem_object *obj;
4305
4306         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4307         if (obj == NULL)
4308                 return NULL;
4309
4310         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4311                 kfree(obj);
4312                 return NULL;
4313         }
4314
4315         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4316         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4317
4318         obj->agp_type = AGP_USER_MEMORY;
4319         obj->base.driver_private = NULL;
4320         obj->fence_reg = I915_FENCE_REG_NONE;
4321         INIT_LIST_HEAD(&obj->list);
4322         INIT_LIST_HEAD(&obj->gpu_write_list);
4323         obj->madv = I915_MADV_WILLNEED;
4324
4325         trace_i915_gem_object_create(&obj->base);
4326
4327         return &obj->base;
4328 }
4329
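     /* The generic GEM constructor is never used: i915 allocates all of
      * its objects through i915_gem_alloc_object(), so reaching this
      * function is a driver bug.
      */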
4330 int i915_gem_init_object(struct drm_gem_object *obj)
4331 {
4332         BUG();
4333
4334         return 0;
4335 }
4336
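     /* Final stage of object destruction: unbind from the GTT and release
      * the backing storage.  If the unbind is interrupted by a signal, the
      * object is parked on the deferred_free_list and freed later instead.
      */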
4337 static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4338 {
4339         struct drm_device *dev = obj->dev;
4340         drm_i915_private_t *dev_priv = dev->dev_private;
4341         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4342         int ret;
4343
4344         ret = i915_gem_object_unbind(obj);
4345         if (ret == -ERESTARTSYS) {
4346                 list_move(&obj_priv->list,
4347                           &dev_priv->mm.deferred_free_list);
4348                 return;
4349         }
4350
4351         if (obj_priv->mmap_offset)
4352                 i915_gem_free_mmap_offset(obj);
4353
4354         drm_gem_object_release(obj);
4355
4356         kfree(obj_priv->page_cpu_valid);
4357         kfree(obj_priv->bit_17);
4358         kfree(obj_priv);
4359 }
4360
4361 void i915_gem_free_object(struct drm_gem_object *obj)
4362 {
4363         struct drm_device *dev = obj->dev;
4364         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4365
4366         trace_i915_gem_object_destroy(obj);
4367
4368         while (obj_priv->pin_count > 0)
4369                 i915_gem_object_unpin(obj);
4370
4371         if (obj_priv->phys_obj)
4372                 i915_gem_detach_phys_object(dev, obj);
4373
4374         i915_gem_free_object_tail(obj);
4375 }
4376
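     /* Quiesce the GPU before suspend or unload: wait for all outstanding
      * rendering, evict everything under UMS, and tear down the rings.
      */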
4377 int
4378 i915_gem_idle(struct drm_device *dev)
4379 {
4380         drm_i915_private_t *dev_priv = dev->dev_private;
4381         int ret;
4382
4383         mutex_lock(&dev->struct_mutex);
4384
4385         if (dev_priv->mm.suspended ||
4386             (dev_priv->render_ring.gem_object == NULL) ||
4387             (HAS_BSD(dev) &&
4388              dev_priv->bsd_ring.gem_object == NULL)) {
4389                 mutex_unlock(&dev->struct_mutex);
4390                 return 0;
4391         }
4392
4393         ret = i915_gpu_idle(dev);
4394         if (ret) {
4395                 mutex_unlock(&dev->struct_mutex);
4396                 return ret;
4397         }
4398
4399         /* Under UMS, be paranoid and evict. */
4400         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4401                 ret = i915_gem_evict_inactive(dev);
4402                 if (ret) {
4403                         mutex_unlock(&dev->struct_mutex);
4404                         return ret;
4405                 }
4406         }
4407
4408         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4409          * We need to replace this with a semaphore, or something.
4410          * And not confound mm.suspended!
4411          */
4412         dev_priv->mm.suspended = 1;
4413         del_timer_sync(&dev_priv->hangcheck_timer);
4414
4415         i915_kernel_lost_context(dev);
4416         i915_gem_cleanup_ringbuffer(dev);
4417
4418         mutex_unlock(&dev->struct_mutex);
4419
4420         /* Cancel the retire work handler, which should be idle now. */
4421         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4422
4423         return 0;
4424 }
4425
4426 /*
4427  * 965+ chipsets support PIPE_CONTROL commands, which provide finer-grained
4428  * control over cache flushing.
4429  */
4430 static int
4431 i915_gem_init_pipe_control(struct drm_device *dev)
4432 {
4433         drm_i915_private_t *dev_priv = dev->dev_private;
4434         struct drm_gem_object *obj;
4435         struct drm_i915_gem_object *obj_priv;
4436         int ret;
4437
4438         obj = i915_gem_alloc_object(dev, 4096);
4439         if (obj == NULL) {
4440                 DRM_ERROR("Failed to allocate seqno page\n");
4441                 ret = -ENOMEM;
4442                 goto err;
4443         }
4444         obj_priv = to_intel_bo(obj);
4445         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4446
4447         ret = i915_gem_object_pin(obj, 4096);
4448         if (ret)
4449                 goto err_unref;
4450
4451         dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4452         dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4453         if (dev_priv->seqno_page == NULL) {
                     ret = -ENOMEM;
4454                 goto err_unpin;
             }
4455
4456         dev_priv->seqno_obj = obj;
4457         memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4458
4459         return 0;
4460
4461 err_unpin:
4462         i915_gem_object_unpin(obj);
4463 err_unref:
4464         drm_gem_object_unreference(obj);
4465 err:
4466         return ret;
4467 }
4468
4469
4470 static void
4471 i915_gem_cleanup_pipe_control(struct drm_device *dev)
4472 {
4473         drm_i915_private_t *dev_priv = dev->dev_private;
4474         struct drm_gem_object *obj;
4475         struct drm_i915_gem_object *obj_priv;
4476
4477         obj = dev_priv->seqno_obj;
4478         obj_priv = to_intel_bo(obj);
4479         kunmap(obj_priv->pages[0]);
4480         i915_gem_object_unpin(obj);
4481         drm_gem_object_unreference(obj);
4482         dev_priv->seqno_obj = NULL;
4483
4484         dev_priv->seqno_page = NULL;
4485 }
4486
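     /* Set up the render ring (and the BSD ring where present), including
      * the PIPE_CONTROL seqno page on chipsets that need it.
      */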
4487 int
4488 i915_gem_init_ringbuffer(struct drm_device *dev)
4489 {
4490         drm_i915_private_t *dev_priv = dev->dev_private;
4491         int ret;
4492
4493         dev_priv->render_ring = render_ring;
4494
4495         if (!I915_NEED_GFX_HWS(dev)) {
4496                 dev_priv->render_ring.status_page.page_addr
4497                         = dev_priv->status_page_dmah->vaddr;
4498                 memset(dev_priv->render_ring.status_page.page_addr,
4499                        0, PAGE_SIZE);
4500         }
4501
4502         if (HAS_PIPE_CONTROL(dev)) {
4503                 ret = i915_gem_init_pipe_control(dev);
4504                 if (ret)
4505                         return ret;
4506         }
4507
4508         ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
4509         if (ret)
4510                 goto cleanup_pipe_control;
4511
4512         if (HAS_BSD(dev)) {
4513                 dev_priv->bsd_ring = bsd_ring;
4514                 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
4515                 if (ret)
4516                         goto cleanup_render_ring;
4517         }
4518
4519         dev_priv->next_seqno = 1;
4520
4521         return 0;
4522
4523 cleanup_render_ring:
4524         intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4525 cleanup_pipe_control:
4526         if (HAS_PIPE_CONTROL(dev))
4527                 i915_gem_cleanup_pipe_control(dev);
4528         return ret;
4529 }
4530
4531 void
4532 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4533 {
4534         drm_i915_private_t *dev_priv = dev->dev_private;
4535
4536         intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4537         if (HAS_BSD(dev))
4538                 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4539         if (HAS_PIPE_CONTROL(dev))
4540                 i915_gem_cleanup_pipe_control(dev);
4541 }
4542
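     /* Called when the X server acquires the device under UMS: clear any
      * wedged state, restart the rings and reinstall the interrupt
      * handler.  A no-op under KMS, where the kernel owns the hardware
      * throughout.
      */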
4543 int
4544 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4545                        struct drm_file *file_priv)
4546 {
4547         drm_i915_private_t *dev_priv = dev->dev_private;
4548         int ret;
4549
4550         if (drm_core_check_feature(dev, DRIVER_MODESET))
4551                 return 0;
4552
4553         if (atomic_read(&dev_priv->mm.wedged)) {
4554                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4555                 atomic_set(&dev_priv->mm.wedged, 0);
4556         }
4557
4558         mutex_lock(&dev->struct_mutex);
4559         dev_priv->mm.suspended = 0;
4560
4561         ret = i915_gem_init_ringbuffer(dev);
4562         if (ret != 0) {
4563                 mutex_unlock(&dev->struct_mutex);
4564                 return ret;
4565         }
4566
4567         spin_lock(&dev_priv->mm.active_list_lock);
4568         BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4569         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
4570         spin_unlock(&dev_priv->mm.active_list_lock);
4571
4572         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4573         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4574         BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4575         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4576         mutex_unlock(&dev->struct_mutex);
4577
4578         ret = drm_irq_install(dev);
4579         if (ret)
4580                 goto cleanup_ringbuffer;
4581
4582         return 0;
4583
4584 cleanup_ringbuffer:
4585         mutex_lock(&dev->struct_mutex);
4586         i915_gem_cleanup_ringbuffer(dev);
4587         dev_priv->mm.suspended = 1;
4588         mutex_unlock(&dev->struct_mutex);
4589
4590         return ret;
4591 }
4592
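     /* Called when the X server releases the device under UMS: uninstall
      * the interrupt handler and idle the GPU.
      */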
4593 int
4594 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4595                        struct drm_file *file_priv)
4596 {
4597         if (drm_core_check_feature(dev, DRIVER_MODESET))
4598                 return 0;
4599
4600         drm_irq_uninstall(dev);
4601         return i915_gem_idle(dev);
4602 }
4603
4604 void
4605 i915_gem_lastclose(struct drm_device *dev)
4606 {
4607         int ret;
4608
4609         if (drm_core_check_feature(dev, DRIVER_MODESET))
4610                 return;
4611
4612         ret = i915_gem_idle(dev);
4613         if (ret)
4614                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4615 }
4616
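     /* One-time GEM initialization at driver load: set up the object
      * lists, the retire work handler, this device's entry on the global
      * shrink list, and the fence registers.
      */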
4617 void
4618 i915_gem_load(struct drm_device *dev)
4619 {
4620         int i;
4621         drm_i915_private_t *dev_priv = dev->dev_private;
4622
4623         spin_lock_init(&dev_priv->mm.active_list_lock);
4624         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4625         INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4626         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4627         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4628         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4629         INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4630         INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4631         if (HAS_BSD(dev)) {
4632                 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4633                 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4634         }
4635         for (i = 0; i < 16; i++)
4636                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4637         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4638                           i915_gem_retire_work_handler);
4639         spin_lock(&shrink_list_lock);
4640         list_add(&dev_priv->mm.shrink_list, &shrink_list);
4641         spin_unlock(&shrink_list_lock);
4642
4643         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4644         if (IS_GEN3(dev)) {
4645                 u32 tmp = I915_READ(MI_ARB_STATE);
4646                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4647                         /* arb state is a masked write, so set bit + bit in mask */
4648                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4649                         I915_WRITE(MI_ARB_STATE, tmp);
4650                 }
4651         }
4652
4653         /* Old X drivers will take 0-2 for front, back, depth buffers */
4654         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4655                 dev_priv->fence_reg_start = 3;
4656
4657         if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4658                 dev_priv->num_fence_regs = 16;
4659         else
4660                 dev_priv->num_fence_regs = 8;
4661
4662         /* Initialize fence registers to zero */
4663         if (IS_I965G(dev)) {
4664                 for (i = 0; i < 16; i++)
4665                         I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4666         } else {
4667                 for (i = 0; i < 8; i++)
4668                         I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4669                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4670                         for (i = 0; i < 8; i++)
4671                                 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4672         }
4673         i915_gem_detect_bit_6_swizzle(dev);
4674         init_waitqueue_head(&dev_priv->pending_flip_queue);
4675 }
4676
4677 /*
4678  * Create a physically contiguous memory object for this object,
4679  * e.g. for cursor and overlay registers.
4680  */
4681 int i915_gem_init_phys_object(struct drm_device *dev,
4682                               int id, int size, int align)
4683 {
4684         drm_i915_private_t *dev_priv = dev->dev_private;
4685         struct drm_i915_gem_phys_object *phys_obj;
4686         int ret;
4687
4688         if (dev_priv->mm.phys_objs[id - 1] || !size)
4689                 return 0;
4690
4691         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4692         if (!phys_obj)
4693                 return -ENOMEM;
4694
4695         phys_obj->id = id;
4696
4697         phys_obj->handle = drm_pci_alloc(dev, size, align);
4698         if (!phys_obj->handle) {
4699                 ret = -ENOMEM;
4700                 goto kfree_obj;
4701         }
4702 #ifdef CONFIG_X86
4703         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4704 #endif
4705
4706         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4707
4708         return 0;
4709 kfree_obj:
4710         kfree(phys_obj);
4711         return ret;
4712 }
4713
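     /* Release one physically contiguous slot, detaching any object still
      * bound to it and restoring write-back caching on its pages.
      */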
4714 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4715 {
4716         drm_i915_private_t *dev_priv = dev->dev_private;
4717         struct drm_i915_gem_phys_object *phys_obj;
4718
4719         if (!dev_priv->mm.phys_objs[id - 1])
4720                 return;
4721
4722         phys_obj = dev_priv->mm.phys_objs[id - 1];
4723         if (phys_obj->cur_obj)
4724                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4726
4727 #ifdef CONFIG_X86
4728         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4729 #endif
4730         drm_pci_free(dev, phys_obj->handle);
4731         kfree(phys_obj);
4732         dev_priv->mm.phys_objs[id - 1] = NULL;
4733 }
4734
4735 void i915_gem_free_all_phys_object(struct drm_device *dev)
4736 {
4737         int i;
4738
4739         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4740                 i915_gem_free_phys_object(dev, i);
4741 }
4742
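     /* Copy the contents of the contiguous phys allocation back into the
      * object's shmem pages and sever the object's link to the slot.
      */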
4743 void i915_gem_detach_phys_object(struct drm_device *dev,
4744                                  struct drm_gem_object *obj)
4745 {
4746         struct drm_i915_gem_object *obj_priv;
4747         int i;
4748         int ret;
4749         int page_count;
4750
4751         obj_priv = to_intel_bo(obj);
4752         if (!obj_priv->phys_obj)
4753                 return;
4754
4755         ret = i915_gem_object_get_pages(obj, 0);
4756         if (ret)
4757                 goto out;
4758
4759         page_count = obj->size / PAGE_SIZE;
4760
4761         for (i = 0; i < page_count; i++) {
4762                 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4763                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4764
4765                 memcpy(dst, src, PAGE_SIZE);
4766                 kunmap_atomic(dst, KM_USER0);
4767         }
4768         drm_clflush_pages(obj_priv->pages, page_count);
4769         drm_agp_chipset_flush(dev);
4770
4771         i915_gem_object_put_pages(obj);
4772 out:
4773         obj_priv->phys_obj->cur_obj = NULL;
4774         obj_priv->phys_obj = NULL;
4775 }
4776
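     /* Bind a GEM object to one of the physically contiguous slots,
      * allocating the slot on first use and copying the object's current
      * contents into it.
      */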
4777 int
4778 i915_gem_attach_phys_object(struct drm_device *dev,
4779                             struct drm_gem_object *obj,
4780                             int id,
4781                             int align)
4782 {
4783         drm_i915_private_t *dev_priv = dev->dev_private;
4784         struct drm_i915_gem_object *obj_priv;
4785         int ret = 0;
4786         int page_count;
4787         int i;
4788
4789         if (id > I915_MAX_PHYS_OBJECT)
4790                 return -EINVAL;
4791
4792         obj_priv = to_intel_bo(obj);
4793
4794         if (obj_priv->phys_obj) {
4795                 if (obj_priv->phys_obj->id == id)
4796                         return 0;
4797                 i915_gem_detach_phys_object(dev, obj);
4798         }
4799
4800         /* create a new object */
4801         if (!dev_priv->mm.phys_objs[id - 1]) {
4802                 ret = i915_gem_init_phys_object(dev, id,
4803                                                 obj->size, align);
4804                 if (ret) {
4805                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4806                         goto out;
4807                 }
4808         }
4809
4810         /* bind to the object */
4811         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4812         obj_priv->phys_obj->cur_obj = obj;
4813
4814         ret = i915_gem_object_get_pages(obj, 0);
4815         if (ret) {
4816                 DRM_ERROR("failed to get page list\n");
4817                 goto out;
4818         }
4819
4820         page_count = obj->size / PAGE_SIZE;
4821
4822         for (i = 0; i < page_count; i++) {
4823                 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4824                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4825
4826                 memcpy(dst, src, PAGE_SIZE);
4827                 kunmap_atomic(src, KM_USER0);
4828         }
4829
4830         i915_gem_object_put_pages(obj);
4831
4832         return 0;
4833 out:
4834         return ret;
4835 }
4836
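     /* pwrite fast path for objects backed by a contiguous phys
      * allocation: copy straight from userspace into the kernel mapping
      * and flush the chipset write buffers.
      */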
4837 static int
4838 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4839                      struct drm_i915_gem_pwrite *args,
4840                      struct drm_file *file_priv)
4841 {
4842         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4843         void *obj_addr;
4844         int ret;
4845         char __user *user_data;
4846
4847         user_data = (char __user *) (uintptr_t) args->data_ptr;
4848         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4849
4850         DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
4851         ret = copy_from_user(obj_addr, user_data, args->size);
4852         if (ret)
4853                 return -EFAULT;
4854
4855         drm_agp_chipset_flush(dev);
4856         return 0;
4857 }
4858
4859 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4860 {
4861         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4862
4863         /* Clean up our request list when the client is going away, so that
4864          * later retire_requests won't dereference our soon-to-be-gone
4865          * file_priv.
4866          */
4867         mutex_lock(&dev->struct_mutex);
4868         while (!list_empty(&i915_file_priv->mm.request_list))
4869                 list_del_init(i915_file_priv->mm.request_list.next);
4870         mutex_unlock(&dev->struct_mutex);
4871 }
4872
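     /* Returns true while any ring still has objects in flight. */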
4873 static int
4874 i915_gpu_is_active(struct drm_device *dev)
4875 {
4876         drm_i915_private_t *dev_priv = dev->dev_private;
4877         int lists_empty;
4878
4879         spin_lock(&dev_priv->mm.active_list_lock);
4880         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4881                       list_empty(&dev_priv->render_ring.active_list);
4882         if (HAS_BSD(dev))
4883                 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
4884         spin_unlock(&dev_priv->mm.active_list_lock);
4885
4886         return !lists_empty;
4887 }
4888
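     /* Memory shrinker callback.  With nr_to_scan == 0 it only counts the
      * inactive objects that could be freed.  Otherwise it works in
      * passes: first unbind clean, purgeable buffers, then any remaining
      * inactive buffers, and as a last resort idle the GPU and rescan.
      */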
4889 static int
4890 i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
4891 {
4892         drm_i915_private_t *dev_priv, *next_dev;
4893         struct drm_i915_gem_object *obj_priv, *next_obj;
4894         int cnt = 0;
4895         int would_deadlock = 1;
4896
4897         /* "fast-path" to count number of available objects */
4898         if (nr_to_scan == 0) {
4899                 spin_lock(&shrink_list_lock);
4900                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4901                         struct drm_device *dev = dev_priv->dev;
4902
4903                         if (mutex_trylock(&dev->struct_mutex)) {
4904                                 list_for_each_entry(obj_priv,
4905                                                     &dev_priv->mm.inactive_list,
4906                                                     list)
4907                                         cnt++;
4908                                 mutex_unlock(&dev->struct_mutex);
4909                         }
4910                 }
4911                 spin_unlock(&shrink_list_lock);
4912
4913                 return (cnt / 100) * sysctl_vfs_cache_pressure;
4914         }
4915
4916         spin_lock(&shrink_list_lock);
4917
4918 rescan:
4919         /* first scan for clean buffers */
4920         list_for_each_entry_safe(dev_priv, next_dev,
4921                                  &shrink_list, mm.shrink_list) {
4922                 struct drm_device *dev = dev_priv->dev;
4923
4924                 if (!mutex_trylock(&dev->struct_mutex))
4925                         continue;
4926
4927                 spin_unlock(&shrink_list_lock);
4928                 i915_gem_retire_requests(dev);
4929
4930                 list_for_each_entry_safe(obj_priv, next_obj,
4931                                          &dev_priv->mm.inactive_list,
4932                                          list) {
4933                         if (i915_gem_object_is_purgeable(obj_priv)) {
4934                                 i915_gem_object_unbind(&obj_priv->base);
4935                                 if (--nr_to_scan <= 0)
4936                                         break;
4937                         }
4938                 }
4939
4940                 spin_lock(&shrink_list_lock);
4941                 mutex_unlock(&dev->struct_mutex);
4942
4943                 would_deadlock = 0;
4944
4945                 if (nr_to_scan <= 0)
4946                         break;
4947         }
4948
4949         /* second pass, evict/count anything still on the inactive list */
4950         list_for_each_entry_safe(dev_priv, next_dev,
4951                                  &shrink_list, mm.shrink_list) {
4952                 struct drm_device *dev = dev_priv->dev;
4953
4954                 if (!mutex_trylock(&dev->struct_mutex))
4955                         continue;
4956
4957                 spin_unlock(&shrink_list_lock);
4958
4959                 list_for_each_entry_safe(obj_priv, next_obj,
4960                                          &dev_priv->mm.inactive_list,
4961                                          list) {
4962                         if (nr_to_scan > 0) {
4963                                 i915_gem_object_unbind(&obj_priv->base);
4964                                 nr_to_scan--;
4965                         } else
4966                                 cnt++;
4967                 }
4968
4969                 spin_lock(&shrink_list_lock);
4970                 mutex_unlock(&dev->struct_mutex);
4971
4972                 would_deadlock = 0;
4973         }
4974
4975         if (nr_to_scan) {
4976                 int active = 0;
4977
4978                 /*
4979                  * We are desperate for pages, so as a last resort, wait
4980                  * for the GPU to finish and discard whatever we can.
4981                  * This dramatically reduces the number of OOM-killer
4982                  * events whilst running the GPU aggressively.
4983                  */
4984                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4985                         struct drm_device *dev = dev_priv->dev;
4986
4987                         if (!mutex_trylock(&dev->struct_mutex))
4988                                 continue;
4989
4990                         spin_unlock(&shrink_list_lock);
4991
4992                         if (i915_gpu_is_active(dev)) {
4993                                 i915_gpu_idle(dev);
4994                                 active++;
4995                         }
4996
4997                         spin_lock(&shrink_list_lock);
4998                         mutex_unlock(&dev->struct_mutex);
4999                 }
5000
5001                 if (active)
5002                         goto rescan;
5003         }
5004
5005         spin_unlock(&shrink_list_lock);
5006
5007         if (would_deadlock)
5008                 return -1;
5009         else if (cnt > 0)
5010                 return (cnt / 100) * sysctl_vfs_cache_pressure;
5011         else
5012                 return 0;
5013 }
5014
5015 static struct shrinker shrinker = {
5016         .shrink = i915_gem_shrink,
5017         .seeks = DEFAULT_SEEKS,
5018 };
5019
5020 __init void
5021 i915_gem_shrinker_init(void)
5022 {
5023         register_shrinker(&shrinker);
5024 }
5025
5026 __exit void
5027         unregister_shrinker(&shrinker);
5028 {
5029     unregister_shrinker(&shrinker);
5030 }