/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct change_domains {
        uint32_t invalidate_domains;
        uint32_t flush_domains;
        uint32_t flush_rings;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Mapped to GTT
 *      4. Read by GPU
 *      5. Unmapped from GTT
 *      6. Freed
 *
 *      Let's take these a step at a time
 *
 *      1. Allocated
 *              Pages allocated from the kernel may still have
 *              cache contents, so we set them to (CPU, CPU) always.
 *      2. Written by CPU (using pwrite)
 *              The pwrite function calls set_domain (CPU, CPU) and
 *              this function does nothing (as nothing changes)
 *      3. Mapped to GTT
 *              This function asserts that the object is not
 *              currently in any GPU-based read or write domains
 *      4. Read by GPU
 *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *              As write_domain is zero, this function adds in the
 *              current read domains (CPU+COMMAND, 0).
 *              flush_domains is set to CPU.
 *              invalidate_domains is set to COMMAND
 *              clflush is run to get data out of the CPU caches
 *              then i915_dev_set_domain calls i915_gem_flush to
 *              emit an MI_FLUSH and drm_agp_chipset_flush
 *      5. Unmapped from GTT
 *              i915_gem_object_unbind calls set_domain (CPU, CPU)
 *              flush_domains and invalidate_domains end up both zero
 *              so no flushing/invalidating happens
 *      6. Freed
 *              yay, done
 *
 * Case 2: The shared render buffer
 *
 *      1. Allocated
 *      2. Mapped to GTT
 *      3. Read/written by GPU
 *      4. set_domain to (CPU,CPU)
 *      5. Read/written by CPU
 *      6. Read/written by GPU
 *
 *      1. Allocated
 *              Same as last example, (CPU, CPU)
 *      2. Mapped to GTT
 *              Nothing changes (assertions find that it is not in the GPU)
 *      3. Read/written by GPU
 *              execbuffer calls set_domain (RENDER, RENDER)
 *              flush_domains gets CPU
 *              invalidate_domains gets GPU
 *              clflush (obj)
 *              MI_FLUSH and drm_agp_chipset_flush
 *      4. set_domain (CPU, CPU)
 *              flush_domains gets GPU
 *              invalidate_domains gets CPU
 *              wait_rendering (obj) to make sure all drawing is complete.
 *              This will include an MI_FLUSH to get the data from GPU
 *              to memory
 *              clflush (obj) to invalidate the CPU cache
 *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *      5. Read/written by CPU
 *              cache lines are loaded and dirtied
 *      6. Read/written by GPU
 *              Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Read by GPU
 *      4. Updated (written) by CPU again
 *      5. Read by GPU
 *
 *      1. Allocated
 *              (CPU, CPU)
 *      2. Written by CPU
 *              (CPU, CPU)
 *      3. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 *      4. Updated (written) by CPU again
 *              (CPU, CPU)
 *              flush_domains = 0 (no previous write domain)
 *              invalidate_domains = 0 (no new read domains)
 *      5. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
                                  struct intel_ring_buffer *ring,
                                  struct change_domains *cd)
{
        uint32_t invalidate_domains = 0, flush_domains = 0;

        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (obj->base.pending_write_domain == 0)
                obj->base.pending_read_domains |= obj->base.read_domains;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         * write domain
         */
        if (obj->base.write_domain &&
            (((obj->base.write_domain != obj->base.pending_read_domains ||
               obj->ring != ring)) ||
             (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
                flush_domains |= obj->base.write_domain;
                invalidate_domains |=
                        obj->base.pending_read_domains & ~obj->base.write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);

        /* blow away mappings if mapped through GTT */
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
                i915_gem_release_mmap(obj);

        /* The actual obj->write_domain will be updated with
         * pending_write_domain after we emit the accumulated flush for all
         * of our domain changes in execbuffers (which clears objects'
         * write_domains).  So if we have a current write domain that we
         * aren't changing, set pending_write_domain to that.
         */
        if (flush_domains == 0 && obj->base.pending_write_domain == 0)
                obj->base.pending_write_domain = obj->base.write_domain;

        cd->invalidate_domains |= invalidate_domains;
        cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= obj->ring->id;
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= ring->id;
}
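
/*
 * Illustrative sketch (not compiled; hypothetical mask values): a worked
 * instance of "Case 1, step 4" from the comment above, showing how the
 * flush and invalidate masks fall out of the rules implemented in
 * i915_gem_object_set_to_gpu_domain().
 */
#if 0
static void example_domain_transition(void)
{
        /* Batch buffer currently in (CPU, CPU); execbuffer asks for
         * (COMMAND, 0). */
        uint32_t read_domains = I915_GEM_DOMAIN_CPU;
        uint32_t write_domain = I915_GEM_DOMAIN_CPU;
        uint32_t pending_read = I915_GEM_DOMAIN_COMMAND;
        uint32_t pending_write = 0;
        uint32_t invalidate = 0, flush = 0;

        /* No new write domain, so the old read domains are kept:
         * pending_read becomes CPU | COMMAND. */
        if (pending_write == 0)
                pending_read |= read_domains;

        /* The old CPU write domain no longer matches the readers, so it
         * is flushed (clflush) and the new COMMAND read domain is
         * invalidated (MI_FLUSH), exactly as the walk-through says. */
        if (write_domain && write_domain != pending_read) {
                flush |= write_domain;                      /* CPU */
                invalidate |= pending_read & ~write_domain; /* COMMAND */
        }
}
#endif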

struct eb_objects {
        int and;
        struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
        struct eb_objects *eb;
        int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
        while (count > size)
                count >>= 1;
        eb = kzalloc(count*sizeof(struct hlist_head) +
                     sizeof(struct eb_objects),
                     GFP_KERNEL);
        if (eb == NULL)
                return eb;

        eb->and = count - 1;
        return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
        memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
        hlist_add_head(&obj->exec_node,
                       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct drm_i915_gem_object *obj;

        head = &eb->buckets[handle & eb->and];
        hlist_for_each(node, head) {
                obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
                if (obj->exec_handle == handle)
                        return obj;
        }

        return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
        kfree(eb);
}
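
/*
 * Illustrative sketch (not compiled; figures assume 4096-byte pages and
 * 8-byte hlist_heads): eb_create() keeps the bucket count a power of two
 * so the lookups above can mask the handle instead of dividing.
 */
#if 0
static void example_eb_buckets(void)
{
        /* For an execbuffer with 100 objects, count starts at
         * PAGE_SIZE / sizeof(struct hlist_head) / 2 = 256 and is halved
         * while above 100, ending at 64 buckets, so eb->and == 63. */
        unsigned long handle = 0x12345;
        int mask = 63;

        /* 0x12345 & 63 == 5: eb_add_object() chains the object into
         * bucket 5 and eb_get_object() walks only that chain. */
        (void)(handle & mask);
}
#endif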

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_objects *eb,
                                   struct drm_i915_gem_exec_object2 *entry,
                                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        uint32_t target_offset;
        int ret = -EINVAL;

        /* we already hold a reference to all valid objects */
        target_obj = &eb_get_object(eb, reloc->target_handle)->base;
        if (unlikely(target_obj == NULL))
                return -ENOENT;

        target_offset = to_intel_bo(target_obj)->gtt_offset;

#if WATCH_RELOC
        DRM_INFO("%s: obj %p offset %08x target %d "
                 "read %08x write %08x gtt %08x "
                 "presumed %08x delta %08x\n",
                 __func__,
                 obj,
                 (int) reloc->offset,
                 (int) reloc->target_handle,
                 (int) reloc->read_domains,
                 (int) reloc->write_domain,
                 (int) target_offset,
                 (int) reloc->presumed_offset,
                 reloc->delta);
#endif

        /* The target buffer should have appeared before us in the
         * exec_object list, so it should have a GTT space bound by now.
         */
        if (unlikely(target_offset == 0)) {
                DRM_ERROR("No GTT space found for object %d\n",
                          reloc->target_handle);
                return ret;
        }

        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
                DRM_ERROR("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
                DRM_ERROR("reloc with read/write CPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
                     reloc->write_domain != target_obj->pending_write_domain)) {
                DRM_ERROR("Write domain conflict: "
                          "obj %p target %d offset %d "
                          "new %08x old %08x\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->write_domain,
                          target_obj->pending_write_domain);
                return ret;
        }

        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;

        /* If the relocation already has the right value in it, no
         * more work needs to be done.
         */
        if (target_offset == reloc->presumed_offset)
                return 0;

        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset > obj->base.size - 4)) {
                DRM_ERROR("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
                return ret;
        }
        if (unlikely(reloc->offset & 3)) {
                DRM_ERROR("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
                return ret;
        }

        /* and points to somewhere within the target object. */
        if (unlikely(reloc->delta >= target_obj->size)) {
                DRM_ERROR("Relocation beyond target object bounds: "
                          "obj %p target %d delta %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->delta,
                          (int) target_obj->size);
                return ret;
        }

        reloc->delta += target_offset;
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
                uint32_t page_offset = reloc->offset & ~PAGE_MASK;
                char *vaddr;

                vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
                kunmap_atomic(vaddr);
        } else {
                struct drm_i915_private *dev_priv = dev->dev_private;
                uint32_t __iomem *reloc_entry;
                void __iomem *reloc_page;

                ret = i915_gem_object_set_to_gtt_domain(obj, 1);
                if (ret)
                        return ret;

                /* Map the page containing the relocation we're going to perform.  */
                reloc->offset += obj->gtt_offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      reloc->offset & PAGE_MASK);
                reloc_entry = (uint32_t __iomem *)
                        (reloc_page + (reloc->offset & ~PAGE_MASK));
                iowrite32(reloc->delta, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);
        }

        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;

        return 0;
}
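
/*
 * Illustrative sketch of the userspace side (not compiled; hypothetical
 * handles and offsets): a relocation asks the kernel to patch the 32-bit
 * word at "offset" inside this object with the target's GTT address plus
 * "delta".  If presumed_offset already matches, the fast path above
 * returns before touching the page at all.
 */
#if 0
static void example_fill_reloc(struct drm_i915_gem_relocation_entry *reloc)
{
        reloc->target_handle = 42;        /* GEM handle of the target bo */
        reloc->delta = 0x100;             /* offset within the target */
        reloc->offset = 0x40;             /* word to patch in this object */
        reloc->presumed_offset = 0x10000; /* last GTT address we saw */
        reloc->read_domains = I915_GEM_DOMAIN_RENDER;
        reloc->write_domain = 0;          /* read-only reference */
}
#endif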

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                                    struct eb_objects *eb,
                                    struct drm_i915_gem_exec_object2 *entry)
{
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        int i, ret;

        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
        for (i = 0; i < entry->relocation_count; i++) {
                struct drm_i915_gem_relocation_entry reloc;

                if (__copy_from_user_inatomic(&reloc,
                                              user_relocs+i,
                                              sizeof(reloc)))
                        return -EFAULT;

                ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
                if (ret)
                        return ret;

                if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
                                            &reloc.presumed_offset,
                                            sizeof(reloc.presumed_offset)))
                        return -EFAULT;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
                                         struct eb_objects *eb,
                                         struct drm_i915_gem_exec_object2 *entry,
                                         struct drm_i915_gem_relocation_entry *relocs)
{
        int i, ret;

        for (i = 0; i < entry->relocation_count; i++) {
                ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
                             struct eb_objects *eb,
                             struct list_head *objects,
                             struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_object *obj;
        int ret;

        list_for_each_entry(obj, objects, exec_list) {
                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
                ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct drm_file *file,
                            struct list_head *objects,
                            struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_object *obj;
        struct drm_i915_gem_exec_object2 *entry;
        int ret, retry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;

        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
         * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
         * This avoids unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
        do {
                ret = 0;

                /* Unbind any ill-fitting objects or pin. */
                entry = exec;
                list_for_each_entry(obj, objects, exec_list) {
                        bool need_fence, need_mappable;

                        if (!obj->gtt_space) {
                                entry++;
                                continue;
                        }

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
                        need_mappable =
                                entry->relocation_count ? true : need_fence;

                        if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_gem_object_unbind(obj);
                        else
                                ret = i915_gem_object_pin(obj,
                                                          entry->alignment,
                                                          need_mappable);
                        if (ret)
                                goto err;

                        entry++;
                }

                /* Bind fresh objects */
                entry = exec;
                list_for_each_entry(obj, objects, exec_list) {
                        bool need_fence;

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;

                        if (!obj->gtt_space) {
                                bool need_mappable =
                                        entry->relocation_count ? true : need_fence;

                                ret = i915_gem_object_pin(obj,
                                                          entry->alignment,
                                                          need_mappable);
                                if (ret)
                                        break;
                        }

                        if (has_fenced_gpu_access) {
                                if (need_fence) {
                                        ret = i915_gem_object_get_fence(obj, ring, 1);
                                        if (ret)
                                                break;
                                } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                           obj->tiling_mode == I915_TILING_NONE) {
                                        /* XXX pipelined! */
                                        ret = i915_gem_object_put_fence(obj);
                                        if (ret)
                                                break;
                                }
                                obj->pending_fenced_gpu_access = need_fence;
                        }

                        entry->offset = obj->gtt_offset;
                        entry++;
                }

                /* Decrement pin count for bound objects */
                list_for_each_entry(obj, objects, exec_list) {
                        if (obj->gtt_space)
                                i915_gem_object_unpin(obj);
                }

                if (ret != -ENOSPC || retry > 1)
                        return ret;

                /* First attempt, just clear anything that is purgeable.
                 * Second attempt, clear the entire GTT.
                 */
                ret = i915_gem_evict_everything(ring->dev, retry == 0);
                if (ret)
                        return ret;

                retry++;
        } while (1);

err:
        obj = list_entry(obj->exec_list.prev,
                         struct drm_i915_gem_object,
                         exec_list);
        while (objects != &obj->exec_list) {
                if (obj->gtt_space)
                        i915_gem_object_unpin(obj);

                obj = list_entry(obj->exec_list.prev,
                                 struct drm_i915_gem_object,
                                 exec_list);
        }

        return ret;
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
                                  struct list_head *objects,
                                  struct eb_objects *eb,
                                  struct drm_i915_gem_exec_object2 *exec,
                                  int count)
{
        struct drm_i915_gem_relocation_entry *reloc;
        struct drm_i915_gem_object *obj;
        int i, total, ret;

        /* We may process another execbuffer during the unlock... */
        while (!list_empty(objects)) {
                obj = list_first_entry(objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

        total = 0;
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;

        reloc = drm_malloc_ab(total, sizeof(*reloc));
        if (reloc == NULL) {
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }

        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;

                user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
                        ret = -EFAULT;
                        mutex_lock(&dev->struct_mutex);
                        goto err;
                }

                total += exec[i].relocation_count;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                mutex_lock(&dev->struct_mutex);
                goto err;
        }

        /* reacquire the objects */
        eb_reset(eb);
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_object *obj;

                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (obj == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                  exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
                }

                list_add_tail(&obj->exec_list, objects);
                obj->exec_handle = exec[i].handle;
                eb_add_object(eb, obj);
        }

        ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
        if (ret)
                goto err;

        total = 0;
        list_for_each_entry(obj, objects, exec_list) {
                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
                ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
                                                               exec,
                                                               reloc + total);
                if (ret)
                        goto err;

                total += exec->relocation_count;
                exec++;
        }

        /* Leave the user relocations as they are; this is the painfully slow
         * path, and we want to avoid the complication of dropping the lock
         * whilst having buffers reserved in the aperture and so causing
         * spurious ENOSPC for random operations.
         */

err:
        drm_free_large(reloc);
        return ret;
}

static int
i915_gem_execbuffer_flush(struct drm_device *dev,
                          uint32_t invalidate_domains,
                          uint32_t flush_domains,
                          uint32_t flush_rings)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                intel_gtt_chipset_flush();

        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                for (i = 0; i < I915_NUM_RINGS; i++)
                        if (flush_rings & (1 << i)) {
                                ret = i915_gem_flush_ring(dev,
                                                          &dev_priv->ring[i],
                                                          invalidate_domains,
                                                          flush_domains);
                                if (ret)
                                        return ret;
                        }
        }

        return 0;
}

static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *to)
{
        struct intel_ring_buffer *from = obj->ring;
        u32 seqno;
        int ret, idx;

        if (from == NULL || to == from)
                return 0;

        if (INTEL_INFO(obj->base.dev)->gen < 6)
                return i915_gem_object_wait_rendering(obj, true);

        idx = intel_ring_sync_index(from, to);

        seqno = obj->last_rendering_seqno;
        if (seqno <= from->sync_seqno[idx])
                return 0;

        if (seqno == from->outstanding_lazy_request) {
                struct drm_i915_gem_request *request;

                request = kzalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;

                ret = i915_add_request(obj->base.dev, NULL, request, from);
                if (ret) {
                        kfree(request);
                        return ret;
                }

                seqno = request->seqno;
        }

        from->sync_seqno[idx] = seqno;
        return intel_ring_sync(to, from, seqno - 1);
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        struct change_domains cd;
        int ret;

        cd.invalidate_domains = 0;
        cd.flush_domains = 0;
        cd.flush_rings = 0;
        list_for_each_entry(obj, objects, exec_list)
                i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

        if (cd.invalidate_domains | cd.flush_domains) {
#if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                         __func__,
                         cd.invalidate_domains,
                         cd.flush_domains);
#endif
                ret = i915_gem_execbuffer_flush(ring->dev,
                                                cd.invalidate_domains,
                                                cd.flush_domains,
                                                cd.flush_rings);
                if (ret)
                        return ret;
        }

        list_for_each_entry(obj, objects, exec_list) {
                ret = i915_gem_execbuffer_sync_rings(obj, ring);
                if (ret)
                        return ret;
        }

        return 0;
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
        int i;

        for (i = 0; i < count; i++) {
                char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
                int length; /* limited by fault_in_pages_readable() */

                /* First check for malicious input causing overflow */
                if (exec[i].relocation_count >
                    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
                        return -EINVAL;

                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
                if (!access_ok(VERIFY_READ, ptr, length))
                        return -EFAULT;

                /* we may also need to update the presumed offsets */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;

                if (fault_in_pages_readable(ptr, length))
                        return -EFAULT;
        }

        return 0;
}
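
/*
 * Illustrative sketch (not compiled): why validate_exec_list() divides
 * INT_MAX by the entry size instead of multiplying first.  The relocation
 * entry is 32 bytes on this ABI, so a hostile relocation_count of
 * 0x08000000 would make the product wrap to 0 in 32-bit arithmetic and
 * sail past access_ok(); comparing against INT_MAX / size rejects it
 * without ever overflowing.
 */
#if 0
static void example_overflow_check(void)
{
        unsigned int count = 0x08000000;  /* hostile input */
        unsigned int size = 32;           /* sizeof(relocation entry) */

        /* Broken: count * size == 0x100000000, truncated to 0. */
        /* Safe:   count > INT_MAX / size, so -EINVAL is returned. */
        (void)count;
        (void)size;
}
#endif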

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
                                   struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        int flips;

        /* Check for any pending flips. As we only maintain a flip queue depth
         * of 1, we can simply insert a WAIT for the next display flip prior
         * to executing the batch and avoid stalling the CPU.
         */
        flips = 0;
        list_for_each_entry(obj, objects, exec_list) {
                if (obj->base.write_domain)
                        flips |= atomic_read(&obj->pending_flip);
        }
        if (flips) {
                int plane, flip_mask, ret;

                for (plane = 0; flips >> plane; plane++) {
                        if (((flips >> plane) & 1) == 0)
                                continue;

                        if (plane)
                                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
                        else
                                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

                        ret = intel_ring_begin(ring, 2);
                        if (ret)
                                return ret;

                        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
                        intel_ring_emit(ring, MI_NOOP);
                        intel_ring_advance(ring);
                }
        }

        return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
                                   struct intel_ring_buffer *ring,
                                   u32 seqno)
{
        struct drm_i915_gem_object *obj;

        list_for_each_entry(obj, objects, exec_list) {
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->base.write_domain = obj->base.pending_write_domain;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_gem_object_move_to_active(obj, ring, seqno);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->pending_gpu_write = true;
                        list_move_tail(&obj->gpu_write_list,
                                       &ring->gpu_write_list);
                        intel_mark_busy(ring->dev, obj);
                }

                trace_i915_gem_object_change_domain(obj,
                                                    obj->base.read_domains,
                                                    obj->base.write_domain);
        }
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_request *request;
        u32 invalidate;

        /*
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires.
         *
         * The sampler always gets flushed on i965 (sigh).
         */
        invalidate = I915_GEM_DOMAIN_COMMAND;
        if (INTEL_INFO(dev)->gen >= 4)
                invalidate |= I915_GEM_DOMAIN_SAMPLER;
        if (ring->flush(ring, invalidate, 0)) {
                i915_gem_next_request_seqno(dev, ring);
                return;
        }

        /* Add a breadcrumb for the completion of the batch buffer */
        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL || i915_add_request(dev, file, request, ring)) {
                i915_gem_next_request_seqno(dev, ring);
                kfree(request);
        }
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head objects;
        struct eb_objects *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
        u32 exec_start, exec_len;
        u32 seqno;
        int ret, mode, i;

        if (!i915_gem_check_execbuffer(args)) {
                DRM_ERROR("execbuf with invalid offset/length\n");
                return -EINVAL;
        }

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

#if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
                ring = &dev_priv->ring[RCS];
                break;
        case I915_EXEC_BSD:
                if (!HAS_BSD(dev)) {
                        DRM_ERROR("execbuf with invalid ring (BSD)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[VCS];
                break;
        case I915_EXEC_BLT:
                if (!HAS_BLT(dev)) {
                        DRM_ERROR("execbuf with invalid ring (BLT)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[BCS];
                break;
        default:
                DRM_ERROR("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (ring == &dev_priv->ring[RCS] &&
                    mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4)
                                return -EINVAL;

                        if (INTEL_INFO(dev)->gen > 5 &&
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;

                        ret = intel_ring_begin(ring, 4);
                        if (ret)
                                return ret;

                        intel_ring_emit(ring, MI_NOOP);
                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit(ring, INSTPM);
                        intel_ring_emit(ring,
                                        I915_EXEC_CONSTANTS_MASK << 16 | mode);
                        intel_ring_advance(ring);

                        dev_priv->relative_constants_mode = mode;
                }
                break;
        default:
                DRM_ERROR("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_ERROR("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto pre_mutex_err;
                }

                if (copy_from_user(cliprects,
                                   (struct drm_clip_rect __user *)(uintptr_t)
                                   args->cliprects_ptr,
                                   sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

        eb = eb_create(args->buffer_count);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        INIT_LIST_HEAD(&objects);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_i915_gem_object *obj;

                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (obj == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                  exec[i].handle, i);
                        /* prevent error path from reading uninitialized data */
                        ret = -ENOENT;
                        goto err;
                }

                if (!list_empty(&obj->exec_list)) {
                        DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
                                  obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
                }

                list_add_tail(&obj->exec_list, &objects);
                obj->exec_handle = exec[i].handle;
                eb_add_object(eb, obj);
        }

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
                                                                &objects, eb,
                                                                exec,
                                                                args->buffer_count);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        batch_obj = list_entry(objects.prev,
                               struct drm_i915_gem_object,
                               exec_list);
        if (batch_obj->base.pending_write_domain) {
                DRM_ERROR("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
                goto err;

        ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
        if (ret)
                goto err;

        seqno = i915_gem_next_request_seqno(dev, ring);
        for (i = 0; i < I915_NUM_RINGS-1; i++) {
                if (seqno < ring->sync_seqno[i]) {
                        /* The GPU cannot handle its semaphore value wrapping,
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
                        ret = i915_gpu_idle(dev);
                        if (ret)
                                goto err;

                        BUG_ON(ring->sync_seqno[i]);
                }
        }

        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto err;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len);
                        if (ret)
                                goto err;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
                if (ret)
                        goto err;
        }

        i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
        i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
        eb_destroy(eb);
        while (!list_empty(&objects)) {
                struct drm_i915_gem_object *obj;

                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        kfree(cliprects);
        return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

#if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_ERROR("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_ERROR("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
                if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
                        exec_list[i].offset = exec2_list[i].offset;
                /* ... and back out to userspace */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                                   (uintptr_t) args->buffers_ptr,
                                   exec_list,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_ERROR("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

#if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec2_list == NULL) {
                DRM_ERROR("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_ERROR("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                                   (uintptr_t) args->buffers_ptr,
                                   exec2_list,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_ERROR("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec2_list);
        return ret;
}
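
/*
 * Illustrative userspace sketch (not compiled; hypothetical handles and
 * lengths): the minimal sequence a client follows to reach
 * i915_gem_do_execbuffer() through the execbuffer2 ioctl.  Error handling
 * is omitted for brevity.
 */
#if 0
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void example_submit_batch(int drm_fd, uint32_t batch_handle,
                                 uint32_t batch_len)
{
        struct drm_i915_gem_exec_object2 obj;
        struct drm_i915_gem_execbuffer2 execbuf;

        /* The batch buffer must be the last entry in the list; see the
         * list_entry(objects.prev, ...) lookup above. */
        memset(&obj, 0, sizeof(obj));
        obj.handle = batch_handle;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (uintptr_t)&obj;
        execbuf.buffer_count = 1;
        execbuf.batch_len = batch_len;  /* start/len must be 8-byte aligned,
                                         * see i915_gem_check_execbuffer() */
        execbuf.flags = I915_EXEC_RENDER;

        ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif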