drm/i915: Use batch length instead of object size in command parser
drivers/gpu/drm/i915/i915_gem_execbuffer.c (pandora-kernel.git)
1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35
36 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
37 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
38 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
39 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
40
41 #define BATCH_OFFSET_BIAS (256*1024)
42
43 struct eb_vmas {
44         struct list_head vmas;
45         int and;
46         union {
47                 struct i915_vma *lut[0];
48                 struct hlist_head buckets[0];
49         };
50 };
51
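/*
 * Allocate the lookup structure used to map execbuffer handles to vmas.
 * With I915_EXEC_HANDLE_LUT userspace promises to use buffer indices, so a
 * flat array is tried first (eb->and is set to the negative buffer count as
 * a marker); otherwise, or if that allocation fails, fall back to a
 * power-of-two hash table with eb->and holding the bucket mask.
 */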
52 static struct eb_vmas *
53 eb_create(struct drm_i915_gem_execbuffer2 *args)
54 {
55         struct eb_vmas *eb = NULL;
56
57         if (args->flags & I915_EXEC_HANDLE_LUT) {
58                 unsigned size = args->buffer_count;
59                 size *= sizeof(struct i915_vma *);
60                 size += sizeof(struct eb_vmas);
61                 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
62         }
63
64         if (eb == NULL) {
65                 unsigned size = args->buffer_count;
66                 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
67                 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
68                 while (count > 2*size)
69                         count >>= 1;
70                 eb = kzalloc(count*sizeof(struct hlist_head) +
71                              sizeof(struct eb_vmas),
72                              GFP_TEMPORARY);
73                 if (eb == NULL)
74                         return eb;
75
76                 eb->and = count - 1;
77         } else
78                 eb->and = -args->buffer_count;
79
80         INIT_LIST_HEAD(&eb->vmas);
81         return eb;
82 }
83
84 static void
85 eb_reset(struct eb_vmas *eb)
86 {
87         if (eb->and >= 0)
88                 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
89 }
90
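/*
 * Resolve the user handles into vmas: look up each GEM object under the
 * file's table_lock (taking a reference and rejecting duplicates), then,
 * with the lock dropped, look up or create a vma for each object in the
 * target address space and add it to the eb lookup structure.
 */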
91 static int
92 eb_lookup_vmas(struct eb_vmas *eb,
93                struct drm_i915_gem_exec_object2 *exec,
94                const struct drm_i915_gem_execbuffer2 *args,
95                struct i915_address_space *vm,
96                struct drm_file *file)
97 {
98         struct drm_i915_gem_object *obj;
99         struct list_head objects;
100         int i, ret;
101
102         INIT_LIST_HEAD(&objects);
103         spin_lock(&file->table_lock);
104         /* Grab a reference to the object and release the lock so we can look up
105          * or create the VMA without using GFP_ATOMIC */
106         for (i = 0; i < args->buffer_count; i++) {
107                 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
108                 if (obj == NULL) {
109                         spin_unlock(&file->table_lock);
110                         DRM_DEBUG("Invalid object handle %d at index %d\n",
111                                    exec[i].handle, i);
112                         ret = -ENOENT;
113                         goto err;
114                 }
115
116                 if (!list_empty(&obj->obj_exec_link)) {
117                         spin_unlock(&file->table_lock);
118                         DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
119                                    obj, exec[i].handle, i);
120                         ret = -EINVAL;
121                         goto err;
122                 }
123
124                 WARN_ONCE(obj->base.dumb,
125                           "GPU use of dumb buffer is illegal.\n");
126
127                 drm_gem_object_reference(&obj->base);
128                 list_add_tail(&obj->obj_exec_link, &objects);
129         }
130         spin_unlock(&file->table_lock);
131
132         i = 0;
133         while (!list_empty(&objects)) {
134                 struct i915_vma *vma;
135
136                 obj = list_first_entry(&objects,
137                                        struct drm_i915_gem_object,
138                                        obj_exec_link);
139
140                 /*
141                  * NOTE: We can leak any vmas created here when something fails
142                  * later on. But that's no issue since vma_unbind can deal with
143                  * vmas which are not actually bound. And since only
144                  * lookup_or_create exists as an interface to get at the vma
145                  * from the (obj, vm) pair, we don't run the risk of creating
146                  * duplicated vmas for the same vm.
147                  */
148                 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
149                 if (IS_ERR(vma)) {
150                         DRM_DEBUG("Failed to lookup VMA\n");
151                         ret = PTR_ERR(vma);
152                         goto err;
153                 }
154
155                 /* Transfer ownership from the objects list to the vmas list. */
156                 list_add_tail(&vma->exec_list, &eb->vmas);
157                 list_del_init(&obj->obj_exec_link);
158
159                 vma->exec_entry = &exec[i];
160                 if (eb->and < 0) {
161                         eb->lut[i] = vma;
162                 } else {
163                         uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
164                         vma->exec_handle = handle;
165                         hlist_add_head(&vma->exec_node,
166                                        &eb->buckets[handle & eb->and]);
167                 }
168                 ++i;
169         }
170
171         return 0;
172
173
174 err:
175         while (!list_empty(&objects)) {
176                 obj = list_first_entry(&objects,
177                                        struct drm_i915_gem_object,
178                                        obj_exec_link);
179                 list_del_init(&obj->obj_exec_link);
180                 drm_gem_object_unreference(&obj->base);
181         }
182         /*
183          * Objects already transferred to the vmas list will be unreferenced by
184          * eb_destroy.
185          */
186
187         return ret;
188 }
189
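/*
 * Look up a vma by execbuffer handle: a negative eb->and means the flat
 * LUT is in use and the handle is a direct index, otherwise hash the
 * handle into the bucket array.
 */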
190 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
191 {
192         if (eb->and < 0) {
193                 if (handle >= -eb->and)
194                         return NULL;
195                 return eb->lut[handle];
196         } else {
197                 struct hlist_head *head;
198                 struct hlist_node *node;
199
200                 head = &eb->buckets[handle & eb->and];
201                 hlist_for_each(node, head) {
202                         struct i915_vma *vma;
203
204                         vma = hlist_entry(node, struct i915_vma, exec_node);
205                         if (vma->exec_handle == handle)
206                                 return vma;
207                 }
208                 return NULL;
209         }
210 }
211
212 static void
213 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
214 {
215         struct drm_i915_gem_exec_object2 *entry;
216         struct drm_i915_gem_object *obj = vma->obj;
217
218         if (!drm_mm_node_allocated(&vma->node))
219                 return;
220
221         entry = vma->exec_entry;
222
223         if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
224                 i915_gem_object_unpin_fence(obj);
225
226         if (entry->flags & __EXEC_OBJECT_HAS_PIN)
227                 vma->pin_count--;
228
229         entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
230 }
231
232 static void eb_destroy(struct eb_vmas *eb)
233 {
234         while (!list_empty(&eb->vmas)) {
235                 struct i915_vma *vma;
236
237                 vma = list_first_entry(&eb->vmas,
238                                        struct i915_vma,
239                                        exec_list);
240                 list_del_init(&vma->exec_list);
241                 i915_gem_execbuffer_unreserve_vma(vma);
242                 drm_gem_object_unreference(&vma->obj->base);
243         }
244         kfree(eb);
245 }
246
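/*
 * Decide whether a relocation should be written with the CPU (kmap) rather
 * than through the GTT: preferred when the platform has an LLC, the object
 * is already in the CPU write domain, it cannot be mapped and fenced in the
 * aperture, or it is not uncached.
 */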
247 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
248 {
249         return (HAS_LLC(obj->base.dev) ||
250                 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
251                 !obj->map_and_fenceable ||
252                 obj->cache_level != I915_CACHE_NONE);
253 }
254
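/*
 * Perform a relocation via a CPU kmapping: move the object to the CPU
 * domain, map the page containing the entry and write the low 32 bits of
 * the target address; on gen8+ also write the upper 32 bits, remapping if
 * the entry straddles a page boundary.
 */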
255 static int
256 relocate_entry_cpu(struct drm_i915_gem_object *obj,
257                    struct drm_i915_gem_relocation_entry *reloc,
258                    uint64_t target_offset)
259 {
260         struct drm_device *dev = obj->base.dev;
261         uint32_t page_offset = offset_in_page(reloc->offset);
262         uint64_t delta = reloc->delta + target_offset;
263         char *vaddr;
264         int ret;
265
266         ret = i915_gem_object_set_to_cpu_domain(obj, true);
267         if (ret)
268                 return ret;
269
270         vaddr = kmap_atomic(i915_gem_object_get_page(obj,
271                                 reloc->offset >> PAGE_SHIFT));
272         *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
273
274         if (INTEL_INFO(dev)->gen >= 8) {
275                 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
276
277                 if (page_offset == 0) {
278                         kunmap_atomic(vaddr);
279                         vaddr = kmap_atomic(i915_gem_object_get_page(obj,
280                             (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
281                 }
282
283                 *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
284         }
285
286         kunmap_atomic(vaddr);
287
288         return 0;
289 }
290
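/*
 * Perform a relocation through the GTT: move the object to the GTT domain,
 * drop any fence, then write the low (and on gen8+ the upper) 32 bits of
 * the target address via an atomic WC mapping of the aperture page.
 */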
291 static int
292 relocate_entry_gtt(struct drm_i915_gem_object *obj,
293                    struct drm_i915_gem_relocation_entry *reloc,
294                    uint64_t target_offset)
295 {
296         struct drm_device *dev = obj->base.dev;
297         struct drm_i915_private *dev_priv = dev->dev_private;
298         uint64_t delta = reloc->delta + target_offset;
299         uint64_t offset;
300         void __iomem *reloc_page;
301         int ret;
302
303         ret = i915_gem_object_set_to_gtt_domain(obj, true);
304         if (ret)
305                 return ret;
306
307         ret = i915_gem_object_put_fence(obj);
308         if (ret)
309                 return ret;
310
311         /* Map the page containing the relocation we're going to perform.  */
312         offset = i915_gem_obj_ggtt_offset(obj);
313         offset += reloc->offset;
314         reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
315                                               offset & PAGE_MASK);
316         iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
317
318         if (INTEL_INFO(dev)->gen >= 8) {
319                 offset += sizeof(uint32_t);
320
321                 if (offset_in_page(offset) == 0) {
322                         io_mapping_unmap_atomic(reloc_page);
323                         reloc_page =
324                                 io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
325                                                          offset);
326                 }
327
328                 iowrite32(upper_32_bits(delta),
329                           reloc_page + offset_in_page(offset));
330         }
331
332         io_mapping_unmap_atomic(reloc_page);
333
334         return 0;
335 }
336
337 static int
338 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
339                                    struct eb_vmas *eb,
340                                    struct drm_i915_gem_relocation_entry *reloc)
341 {
342         struct drm_device *dev = obj->base.dev;
343         struct drm_gem_object *target_obj;
344         struct drm_i915_gem_object *target_i915_obj;
345         struct i915_vma *target_vma;
346         uint64_t target_offset;
347         int ret;
348
349         /* we already hold a reference to all valid objects */
350         target_vma = eb_get_vma(eb, reloc->target_handle);
351         if (unlikely(target_vma == NULL))
352                 return -ENOENT;
353         target_i915_obj = target_vma->obj;
354         target_obj = &target_vma->obj->base;
355
356         target_offset = target_vma->node.start;
357
358         /* Sandybridge PPGTT erratum: We need a global gtt mapping for MI and
359          * pipe_control writes because the gpu doesn't properly redirect them
360          * through the ppgtt for non-secure batchbuffers. */
361         if (unlikely(IS_GEN6(dev) &&
362             reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
363             !(target_vma->bound & GLOBAL_BIND))) {
364                 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
365                                     GLOBAL_BIND);
366                 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
367                         return ret;
368         }
369
370         /* Validate that the target is in a valid r/w GPU domain */
371         if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
372                 DRM_DEBUG("reloc with multiple write domains: "
373                           "obj %p target %d offset %d "
374                           "read %08x write %08x",
375                           obj, reloc->target_handle,
376                           (int) reloc->offset,
377                           reloc->read_domains,
378                           reloc->write_domain);
379                 return -EINVAL;
380         }
381         if (unlikely((reloc->write_domain | reloc->read_domains)
382                      & ~I915_GEM_GPU_DOMAINS)) {
383                 DRM_DEBUG("reloc with read/write non-GPU domains: "
384                           "obj %p target %d offset %d "
385                           "read %08x write %08x",
386                           obj, reloc->target_handle,
387                           (int) reloc->offset,
388                           reloc->read_domains,
389                           reloc->write_domain);
390                 return -EINVAL;
391         }
392
393         target_obj->pending_read_domains |= reloc->read_domains;
394         target_obj->pending_write_domain |= reloc->write_domain;
395
396         /* If the relocation already has the right value in it, no
397          * more work needs to be done.
398          */
399         if (target_offset == reloc->presumed_offset)
400                 return 0;
401
402         /* Check that the relocation address is valid... */
403         if (unlikely(reloc->offset >
404                 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
405                 DRM_DEBUG("Relocation beyond object bounds: "
406                           "obj %p target %d offset %d size %d.\n",
407                           obj, reloc->target_handle,
408                           (int) reloc->offset,
409                           (int) obj->base.size);
410                 return -EINVAL;
411         }
412         if (unlikely(reloc->offset & 3)) {
413                 DRM_DEBUG("Relocation not 4-byte aligned: "
414                           "obj %p target %d offset %d.\n",
415                           obj, reloc->target_handle,
416                           (int) reloc->offset);
417                 return -EINVAL;
418         }
419
420         /* We can't wait for rendering with pagefaults disabled */
421         if (obj->active && in_atomic())
422                 return -EFAULT;
423
424         if (use_cpu_reloc(obj))
425                 ret = relocate_entry_cpu(obj, reloc, target_offset);
426         else
427                 ret = relocate_entry_gtt(obj, reloc, target_offset);
428
429         if (ret)
430                 return ret;
431
432         /* and update the user's relocation entry */
433         reloc->presumed_offset = target_offset;
434
435         return 0;
436 }
437
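/*
 * Fast relocation path: copy the user's relocation entries in chunks into a
 * stack buffer with the inatomic accessors (the caller has pagefaults
 * disabled), apply each entry and write back any presumed_offset that
 * changed.
 */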
438 static int
439 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
440                                  struct eb_vmas *eb)
441 {
442 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
443         struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
444         struct drm_i915_gem_relocation_entry __user *user_relocs;
445         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
446         int remain, ret;
447
448         user_relocs = to_user_ptr(entry->relocs_ptr);
449
450         remain = entry->relocation_count;
451         while (remain) {
452                 struct drm_i915_gem_relocation_entry *r = stack_reloc;
453                 int count = remain;
454                 if (count > ARRAY_SIZE(stack_reloc))
455                         count = ARRAY_SIZE(stack_reloc);
456                 remain -= count;
457
458                 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
459                         return -EFAULT;
460
461                 do {
462                         u64 offset = r->presumed_offset;
463
464                         ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
465                         if (ret)
466                                 return ret;
467
468                         if (r->presumed_offset != offset &&
469                             __copy_to_user_inatomic(&user_relocs->presumed_offset,
470                                                     &r->presumed_offset,
471                                                     sizeof(r->presumed_offset))) {
472                                 return -EFAULT;
473                         }
474
475                         user_relocs++;
476                         r++;
477                 } while (--count);
478         }
479
480         return 0;
481 #undef N_RELOC
482 }
483
484 static int
485 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
486                                       struct eb_vmas *eb,
487                                       struct drm_i915_gem_relocation_entry *relocs)
488 {
489         const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
490         int i, ret;
491
492         for (i = 0; i < entry->relocation_count; i++) {
493                 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
494                 if (ret)
495                         return ret;
496         }
497
498         return 0;
499 }
500
501 static int
502 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
503 {
504         struct i915_vma *vma;
505         int ret = 0;
506
507         /* This is the fast path and we cannot handle a pagefault whilst
508          * holding the struct mutex lest the user pass in the relocations
509          * contained within a mmapped bo. In such a case the page fault
510          * handler would call i915_gem_fault() and we would try to acquire
511          * the struct mutex again. Obviously this is bad and so lockdep
512          * complains vehemently.
513          */
514         pagefault_disable();
515         list_for_each_entry(vma, &eb->vmas, exec_list) {
516                 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
517                 if (ret)
518                         break;
519         }
520         pagefault_enable();
521
522         return ret;
523 }
524
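/*
 * Pin a single vma for execution: translate the exec-object flags into pin
 * flags (mappable/global binding, offset bias), grab a fence register if
 * requested, and request a relocation pass if the object did not end up at
 * its presumed offset.
 */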
525 static int
526 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
527                                 struct intel_engine_cs *ring,
528                                 bool *need_reloc)
529 {
530         struct drm_i915_gem_object *obj = vma->obj;
531         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
532         uint64_t flags;
533         int ret;
534
535         flags = 0;
536         if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
537                 flags |= PIN_GLOBAL | PIN_MAPPABLE;
538         if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
539                 flags |= PIN_GLOBAL;
540         if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
541                 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
542
543         ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
544         if (ret)
545                 return ret;
546
547         entry->flags |= __EXEC_OBJECT_HAS_PIN;
548
549         if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
550                 ret = i915_gem_object_get_fence(obj);
551                 if (ret)
552                         return ret;
553
554                 if (i915_gem_object_pin_fence(obj))
555                         entry->flags |= __EXEC_OBJECT_HAS_FENCE;
556         }
557
558         if (entry->offset != vma->node.start) {
559                 entry->offset = vma->node.start;
560                 *need_reloc = true;
561         }
562
563         if (entry->flags & EXEC_OBJECT_WRITE) {
564                 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
565                 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
566         }
567
568         return 0;
569 }
570
571 static bool
572 need_reloc_mappable(struct i915_vma *vma)
573 {
574         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
575
576         if (entry->relocation_count == 0)
577                 return false;
578
579         if (!i915_is_ggtt(vma->vm))
580                 return false;
581
582         /* See also use_cpu_reloc() */
583         if (HAS_LLC(vma->obj->base.dev))
584                 return false;
585
586         if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
587                 return false;
588
589         return true;
590 }
591
592 static bool
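/*
 * Check whether an already-bound vma violates its execbuffer constraints
 * (alignment, the need for a mappable/fenceable mapping, or the batch
 * offset bias) and therefore has to be unbound and rebound.
 */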
593 eb_vma_misplaced(struct i915_vma *vma)
594 {
595         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
596         struct drm_i915_gem_object *obj = vma->obj;
597
598         WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
599                !i915_is_ggtt(vma->vm));
600
601         if (entry->alignment &&
602             vma->node.start & (entry->alignment - 1))
603                 return true;
604
605         if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
606                 return true;
607
608         if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
609             vma->node.start < BATCH_OFFSET_BIAS)
610                 return true;
611
612         return false;
613 }
614
615 static int
616 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
617                             struct list_head *vmas,
618                             bool *need_relocs)
619 {
620         struct drm_i915_gem_object *obj;
621         struct i915_vma *vma;
622         struct i915_address_space *vm;
623         struct list_head ordered_vmas;
624         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
625         int retry;
626
627         i915_gem_retire_requests_ring(ring);
628
629         vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
630
631         INIT_LIST_HEAD(&ordered_vmas);
632         while (!list_empty(vmas)) {
633                 struct drm_i915_gem_exec_object2 *entry;
634                 bool need_fence, need_mappable;
635
636                 vma = list_first_entry(vmas, struct i915_vma, exec_list);
637                 obj = vma->obj;
638                 entry = vma->exec_entry;
639
640                 if (!has_fenced_gpu_access)
641                         entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
642                 need_fence =
643                         entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
644                         obj->tiling_mode != I915_TILING_NONE;
645                 need_mappable = need_fence || need_reloc_mappable(vma);
646
647                 if (need_mappable) {
648                         entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
649                         list_move(&vma->exec_list, &ordered_vmas);
650                 } else
651                         list_move_tail(&vma->exec_list, &ordered_vmas);
652
653                 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
654                 obj->base.pending_write_domain = 0;
655         }
656         list_splice(&ordered_vmas, vmas);
657
658         /* Attempt to pin all of the buffers into the GTT.
659          * This is done in 3 phases:
660          *
661          * 1a. Unbind all objects that do not match the GTT constraints for
662          *     the execbuffer (fenceable, mappable, alignment etc).
663          * 1b. Increment pin count for already bound objects.
664          * 2.  Bind new objects.
665          * 3.  Decrement pin count.
666          *
667          * This avoids unnecessary unbinding of later objects in order to make
668          * room for the earlier objects *unless* we need to defragment.
669          */
670         retry = 0;
671         do {
672                 int ret = 0;
673
674                 /* Unbind any ill-fitting objects or pin. */
675                 list_for_each_entry(vma, vmas, exec_list) {
676                         if (!drm_mm_node_allocated(&vma->node))
677                                 continue;
678
679                         if (eb_vma_misplaced(vma))
680                                 ret = i915_vma_unbind(vma);
681                         else
682                                 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
683                         if (ret)
684                                 goto err;
685                 }
686
687                 /* Bind fresh objects */
688                 list_for_each_entry(vma, vmas, exec_list) {
689                         if (drm_mm_node_allocated(&vma->node))
690                                 continue;
691
692                         ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
693                         if (ret)
694                                 goto err;
695                 }
696
697 err:
698                 if (ret != -ENOSPC || retry++)
699                         return ret;
700
701                 /* Decrement pin count for bound objects */
702                 list_for_each_entry(vma, vmas, exec_list)
703                         i915_gem_execbuffer_unreserve_vma(vma);
704
705                 ret = i915_gem_evict_vm(vm, true);
706                 if (ret)
707                         return ret;
708         } while (1);
709 }
710
711 static int
712 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
713                                   struct drm_i915_gem_execbuffer2 *args,
714                                   struct drm_file *file,
715                                   struct intel_engine_cs *ring,
716                                   struct eb_vmas *eb,
717                                   struct drm_i915_gem_exec_object2 *exec)
718 {
719         struct drm_i915_gem_relocation_entry *reloc;
720         struct i915_address_space *vm;
721         struct i915_vma *vma;
722         bool need_relocs;
723         int *reloc_offset;
724         int i, total, ret;
725         unsigned count = args->buffer_count;
726
727         vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
728
729         /* We may process another execbuffer during the unlock... */
730         while (!list_empty(&eb->vmas)) {
731                 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
732                 list_del_init(&vma->exec_list);
733                 i915_gem_execbuffer_unreserve_vma(vma);
734                 drm_gem_object_unreference(&vma->obj->base);
735         }
736
737         mutex_unlock(&dev->struct_mutex);
738
739         total = 0;
740         for (i = 0; i < count; i++)
741                 total += exec[i].relocation_count;
742
743         reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
744         reloc = drm_malloc_ab(total, sizeof(*reloc));
745         if (reloc == NULL || reloc_offset == NULL) {
746                 drm_free_large(reloc);
747                 drm_free_large(reloc_offset);
748                 mutex_lock(&dev->struct_mutex);
749                 return -ENOMEM;
750         }
751
752         total = 0;
753         for (i = 0; i < count; i++) {
754                 struct drm_i915_gem_relocation_entry __user *user_relocs;
755                 u64 invalid_offset = (u64)-1;
756                 int j;
757
758                 user_relocs = to_user_ptr(exec[i].relocs_ptr);
759
760                 if (copy_from_user(reloc+total, user_relocs,
761                                    exec[i].relocation_count * sizeof(*reloc))) {
762                         ret = -EFAULT;
763                         mutex_lock(&dev->struct_mutex);
764                         goto err;
765                 }
766
767                 /* As we do not update the known relocation offsets after
768                  * relocating (due to the complexities in lock handling),
769                  * we need to mark them as invalid now so that we force the
770                  * relocation processing next time. Just in case the target
771                  * object is evicted and then rebound into its old
772                  * presumed_offset before the next execbuffer - if that
773                  * happened we would make the mistake of assuming that the
774                  * relocations were valid.
775                  */
776                 for (j = 0; j < exec[i].relocation_count; j++) {
777                         if (__copy_to_user(&user_relocs[j].presumed_offset,
778                                            &invalid_offset,
779                                            sizeof(invalid_offset))) {
780                                 ret = -EFAULT;
781                                 mutex_lock(&dev->struct_mutex);
782                                 goto err;
783                         }
784                 }
785
786                 reloc_offset[i] = total;
787                 total += exec[i].relocation_count;
788         }
789
790         ret = i915_mutex_lock_interruptible(dev);
791         if (ret) {
792                 mutex_lock(&dev->struct_mutex);
793                 goto err;
794         }
795
796         /* reacquire the objects */
797         eb_reset(eb);
798         ret = eb_lookup_vmas(eb, exec, args, vm, file);
799         if (ret)
800                 goto err;
801
802         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
803         ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
804         if (ret)
805                 goto err;
806
807         list_for_each_entry(vma, &eb->vmas, exec_list) {
808                 int offset = vma->exec_entry - exec;
809                 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
810                                                             reloc + reloc_offset[offset]);
811                 if (ret)
812                         goto err;
813         }
814
815         /* Leave the user relocations as they are; this is the painfully slow path,
816          * and we want to avoid the complication of dropping the lock whilst
817          * having buffers reserved in the aperture and so causing spurious
818          * ENOSPC for random operations.
819          */
820
821 err:
822         drm_free_large(reloc);
823         drm_free_large(reloc_offset);
824         return ret;
825 }
826
827 static int
828 i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
829                                 struct list_head *vmas)
830 {
831         struct i915_vma *vma;
832         uint32_t flush_domains = 0;
833         bool flush_chipset = false;
834         int ret;
835
836         list_for_each_entry(vma, vmas, exec_list) {
837                 struct drm_i915_gem_object *obj = vma->obj;
838                 ret = i915_gem_object_sync(obj, ring);
839                 if (ret)
840                         return ret;
841
842                 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
843                         flush_chipset |= i915_gem_clflush_object(obj, false);
844
845                 flush_domains |= obj->base.write_domain;
846         }
847
848         if (flush_chipset)
849                 i915_gem_chipset_flush(ring->dev);
850
851         if (flush_domains & I915_GEM_DOMAIN_GTT)
852                 wmb();
853
854         /* Unconditionally invalidate gpu caches and ensure that we do flush
855          * any residual writes from the previous batch.
856          */
857         return intel_ring_invalidate_all_caches(ring);
858 }
859
860 static bool
861 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
862 {
863         if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
864                 return false;
865
866         return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
867 }
868
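/*
 * Sanity-check the user's exec object list before taking any locks: reject
 * unknown flags (and NEEDS_GTT with full PPGTT), guard against overflow of
 * the combined relocation count, and verify that each relocation array is
 * writable, prefaulting it unless prefaulting is disabled.
 */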
869 static int
870 validate_exec_list(struct drm_device *dev,
871                    struct drm_i915_gem_exec_object2 *exec,
872                    int count)
873 {
874         unsigned relocs_total = 0;
875         unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
876         unsigned invalid_flags;
877         int i;
878
879         invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
880         if (USES_FULL_PPGTT(dev))
881                 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
882
883         for (i = 0; i < count; i++) {
884                 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
885                 int length; /* limited by fault_in_pages_readable() */
886
887                 if (exec[i].flags & invalid_flags)
888                         return -EINVAL;
889
890                 /* First check for malicious input causing overflow in
891                  * the worst case where we need to allocate the entire
892                  * relocation tree as a single array.
893                  */
894                 if (exec[i].relocation_count > relocs_max - relocs_total)
895                         return -EINVAL;
896                 relocs_total += exec[i].relocation_count;
897
898                 length = exec[i].relocation_count *
899                         sizeof(struct drm_i915_gem_relocation_entry);
900                 /*
901                  * We must check that the entire relocation array is safe
902                  * to read, but since we may need to update the presumed
903                  * offsets during execution, check for full write access.
904                  */
905                 if (!access_ok(VERIFY_WRITE, ptr, length))
906                         return -EFAULT;
907
908                 if (likely(!i915.prefault_disable)) {
909                         if (fault_in_multipages_readable(ptr, length))
910                                 return -EFAULT;
911                 }
912         }
913
914         return 0;
915 }
916
917 static struct intel_context *
918 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
919                           struct intel_engine_cs *ring, const u32 ctx_id)
920 {
921         struct intel_context *ctx = NULL;
922         struct i915_ctx_hang_stats *hs;
923
924         if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
925                 return ERR_PTR(-EINVAL);
926
927         ctx = i915_gem_context_get(file->driver_priv, ctx_id);
928         if (IS_ERR(ctx))
929                 return ctx;
930
931         hs = &ctx->hang_stats;
932         if (hs->banned) {
933                 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
934                 return ERR_PTR(-EIO);
935         }
936
937         if (i915.enable_execlists && !ctx->engine[ring->id].state) {
938                 int ret = intel_lr_context_deferred_create(ctx, ring);
939                 if (ret) {
940                         DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
941                         return ERR_PTR(ret);
942                 }
943         }
944
945         return ctx;
946 }
947
948 void
949 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
950                                    struct intel_engine_cs *ring)
951 {
952         struct drm_i915_gem_request *req = intel_ring_get_request(ring);
953         struct i915_vma *vma;
954
955         list_for_each_entry(vma, vmas, exec_list) {
956                 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
957                 struct drm_i915_gem_object *obj = vma->obj;
958                 u32 old_read = obj->base.read_domains;
959                 u32 old_write = obj->base.write_domain;
960
961                 obj->base.write_domain = obj->base.pending_write_domain;
962                 if (obj->base.write_domain == 0)
963                         obj->base.pending_read_domains |= obj->base.read_domains;
964                 obj->base.read_domains = obj->base.pending_read_domains;
965
966                 i915_vma_move_to_active(vma, ring);
967                 if (obj->base.write_domain) {
968                         obj->dirty = 1;
969                         i915_gem_request_assign(&obj->last_write_req, req);
970
971                         intel_fb_obj_invalidate(obj, ring);
972
973                         /* update for the implicit flush after a batch */
974                         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
975                 }
976                 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
977                         i915_gem_request_assign(&obj->last_fenced_req, req);
978                         if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
979                                 struct drm_i915_private *dev_priv = to_i915(ring->dev);
980                                 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
981                                                &dev_priv->mm.fence_list);
982                         }
983                 }
984
985                 trace_i915_gem_object_change_domain(obj, old_read, old_write);
986         }
987 }
988
989 void
990 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
991                                     struct drm_file *file,
992                                     struct intel_engine_cs *ring,
993                                     struct drm_i915_gem_object *obj)
994 {
995         /* Unconditionally force add_request to emit a full flush. */
996         ring->gpu_caches_dirty = true;
997
998         /* Add a breadcrumb for the completion of the batch buffer */
999         (void)__i915_add_request(ring, file, obj);
1000 }
1001
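/*
 * Reset the gen7 streamout write offsets: valid only on the render ring of
 * gen7, this emits MI_LOAD_REGISTER_IMM writes of zero to each of the four
 * GEN7_SO_WRITE_OFFSET registers.
 */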
1002 static int
1003 i915_reset_gen7_sol_offsets(struct drm_device *dev,
1004                             struct intel_engine_cs *ring)
1005 {
1006         struct drm_i915_private *dev_priv = dev->dev_private;
1007         int ret, i;
1008
1009         if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1010                 DRM_DEBUG("sol reset is gen7/rcs only\n");
1011                 return -EINVAL;
1012         }
1013
1014         ret = intel_ring_begin(ring, 4 * 3);
1015         if (ret)
1016                 return ret;
1017
1018         for (i = 0; i < 4; i++) {
1019                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1020                 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1021                 intel_ring_emit(ring, 0);
1022         }
1023
1024         intel_ring_advance(ring);
1025
1026         return 0;
1027 }
1028
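/*
 * Emit a DRAWRECT clip rectangle for a single cliprect, using the gen4+ or
 * the legacy GFX_OP_DRAWRECT_INFO layout; degenerate boxes are rejected.
 */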
1029 static int
1030 i915_emit_box(struct intel_engine_cs *ring,
1031               struct drm_clip_rect *box,
1032               int DR1, int DR4)
1033 {
1034         int ret;
1035
1036         if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
1037             box->y2 <= 0 || box->x2 <= 0) {
1038                 DRM_ERROR("Bad box %d,%d..%d,%d\n",
1039                           box->x1, box->y1, box->x2, box->y2);
1040                 return -EINVAL;
1041         }
1042
1043         if (INTEL_INFO(ring->dev)->gen >= 4) {
1044                 ret = intel_ring_begin(ring, 4);
1045                 if (ret)
1046                         return ret;
1047
1048                 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
1049                 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1050                 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1051                 intel_ring_emit(ring, DR4);
1052         } else {
1053                 ret = intel_ring_begin(ring, 6);
1054                 if (ret)
1055                         return ret;
1056
1057                 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
1058                 intel_ring_emit(ring, DR1);
1059                 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1060                 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1061                 intel_ring_emit(ring, DR4);
1062                 intel_ring_emit(ring, 0);
1063         }
1064         intel_ring_advance(ring);
1065
1066         return 0;
1067 }
1068
1069
1070 int
1071 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1072                                struct intel_engine_cs *ring,
1073                                struct intel_context *ctx,
1074                                struct drm_i915_gem_execbuffer2 *args,
1075                                struct list_head *vmas,
1076                                struct drm_i915_gem_object *batch_obj,
1077                                u64 exec_start, u32 flags)
1078 {
1079         struct drm_clip_rect *cliprects = NULL;
1080         struct drm_i915_private *dev_priv = dev->dev_private;
1081         u64 exec_len;
1082         int instp_mode;
1083         u32 instp_mask;
1084         int i, ret = 0;
1085
1086         if (args->num_cliprects != 0) {
1087                 if (ring != &dev_priv->ring[RCS]) {
1088                         DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1089                         return -EINVAL;
1090                 }
1091
1092                 if (INTEL_INFO(dev)->gen >= 5) {
1093                         DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1094                         return -EINVAL;
1095                 }
1096
1097                 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1098                         DRM_DEBUG("execbuf with %u cliprects\n",
1099                                   args->num_cliprects);
1100                         return -EINVAL;
1101                 }
1102
1103                 cliprects = kcalloc(args->num_cliprects,
1104                                     sizeof(*cliprects),
1105                                     GFP_KERNEL);
1106                 if (cliprects == NULL) {
1107                         ret = -ENOMEM;
1108                         goto error;
1109                 }
1110
1111                 if (copy_from_user(cliprects,
1112                                    to_user_ptr(args->cliprects_ptr),
1113                                    sizeof(*cliprects)*args->num_cliprects)) {
1114                         ret = -EFAULT;
1115                         goto error;
1116                 }
1117         } else {
1118                 if (args->DR4 == 0xffffffff) {
1119                         DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1120                         args->DR4 = 0;
1121                 }
1122
1123                 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1124                         DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1125                         return -EINVAL;
1126                 }
1127         }
1128
1129         ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
1130         if (ret)
1131                 goto error;
1132
1133         ret = i915_switch_context(ring, ctx);
1134         if (ret)
1135                 goto error;
1136
1137         instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1138         instp_mask = I915_EXEC_CONSTANTS_MASK;
1139         switch (instp_mode) {
1140         case I915_EXEC_CONSTANTS_REL_GENERAL:
1141         case I915_EXEC_CONSTANTS_ABSOLUTE:
1142         case I915_EXEC_CONSTANTS_REL_SURFACE:
1143                 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1144                         DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1145                         ret = -EINVAL;
1146                         goto error;
1147                 }
1148
1149                 if (instp_mode != dev_priv->relative_constants_mode) {
1150                         if (INTEL_INFO(dev)->gen < 4) {
1151                                 DRM_DEBUG("no rel constants on pre-gen4\n");
1152                                 ret = -EINVAL;
1153                                 goto error;
1154                         }
1155
1156                         if (INTEL_INFO(dev)->gen > 5 &&
1157                             instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1158                                 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1159                                 ret = -EINVAL;
1160                                 goto error;
1161                         }
1162
1163                         /* The HW changed the meaning on this bit on gen6 */
1164                         if (INTEL_INFO(dev)->gen >= 6)
1165                                 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1166                 }
1167                 break;
1168         default:
1169                 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1170                 ret = -EINVAL;
1171                 goto error;
1172         }
1173
1174         if (ring == &dev_priv->ring[RCS] &&
1175                         instp_mode != dev_priv->relative_constants_mode) {
1176                 ret = intel_ring_begin(ring, 4);
1177                 if (ret)
1178                         goto error;
1179
1180                 intel_ring_emit(ring, MI_NOOP);
1181                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1182                 intel_ring_emit(ring, INSTPM);
1183                 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1184                 intel_ring_advance(ring);
1185
1186                 dev_priv->relative_constants_mode = instp_mode;
1187         }
1188
1189         if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1190                 ret = i915_reset_gen7_sol_offsets(dev, ring);
1191                 if (ret)
1192                         goto error;
1193         }
1194
1195         exec_len = args->batch_len;
1196         if (cliprects) {
1197                 for (i = 0; i < args->num_cliprects; i++) {
1198                         ret = i915_emit_box(ring, &cliprects[i],
1199                                             args->DR1, args->DR4);
1200                         if (ret)
1201                                 goto error;
1202
1203                         ret = ring->dispatch_execbuffer(ring,
1204                                                         exec_start, exec_len,
1205                                                         flags);
1206                         if (ret)
1207                                 goto error;
1208                 }
1209         } else {
1210                 ret = ring->dispatch_execbuffer(ring,
1211                                                 exec_start, exec_len,
1212                                                 flags);
1213                 if (ret)
1214                         return ret;
1215         }
1216
1217         trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
1218
1219         i915_gem_execbuffer_move_to_active(vmas, ring);
1220         i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1221
1222 error:
1223         kfree(cliprects);
1224         return ret;
1225 }
1226
1227 /**
1228  * Find a BSD ring on which to dispatch the BSD command.
1229  * The ring ID is returned.
1230  */
1231 static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1232                                   struct drm_file *file)
1233 {
1234         struct drm_i915_private *dev_priv = dev->dev_private;
1235         struct drm_i915_file_private *file_priv = file->driver_priv;
1236
1237         /* Check whether the file_priv is already using a BSD ring */
1238         if (file_priv->bsd_ring)
1239                 return file_priv->bsd_ring->id;
1240         else {
1241                 /* If not, use the ping-pong mechanism to select one */
1242                 int ring_id;
1243
1244                 mutex_lock(&dev->struct_mutex);
1245                 if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1246                         ring_id = VCS;
1247                         dev_priv->mm.bsd_ring_dispatch_index = 1;
1248                 } else {
1249                         ring_id = VCS2;
1250                         dev_priv->mm.bsd_ring_dispatch_index = 0;
1251                 }
1252                 file_priv->bsd_ring = &dev_priv->ring[ring_id];
1253                 mutex_unlock(&dev->struct_mutex);
1254                 return ring_id;
1255         }
1256 }
1257
1258 static struct drm_i915_gem_object *
1259 eb_get_batch(struct eb_vmas *eb)
1260 {
1261         struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1262
1263         /*
1264          * SNA is doing fancy tricks with compressing batch buffers, which leads
1265          * to negative relocation deltas. Usually that works out ok since the
1266          * relocate address is still positive, except when the batch is placed
1267          * very low in the GTT. Ensure this doesn't happen.
1268          *
1269          * Note that actual hangs have only been observed on gen7, but for
1270          * paranoia do it everywhere.
1271          */
1272         vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1273
1274         return vma->obj;
1275 }
1276
1277 static int
1278 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1279                        struct drm_file *file,
1280                        struct drm_i915_gem_execbuffer2 *args,
1281                        struct drm_i915_gem_exec_object2 *exec)
1282 {
1283         struct drm_i915_private *dev_priv = dev->dev_private;
1284         struct eb_vmas *eb;
1285         struct drm_i915_gem_object *batch_obj;
1286         struct drm_i915_gem_object *shadow_batch_obj = NULL;
1287         struct drm_i915_gem_exec_object2 shadow_exec_entry;
1288         struct intel_engine_cs *ring;
1289         struct intel_context *ctx;
1290         struct i915_address_space *vm;
1291         const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1292         u64 exec_start = args->batch_start_offset;
1293         u32 flags;
1294         int ret;
1295         bool need_relocs;
1296
1297         if (!i915_gem_check_execbuffer(args))
1298                 return -EINVAL;
1299
1300         ret = validate_exec_list(dev, exec, args->buffer_count);
1301         if (ret)
1302                 return ret;
1303
1304         flags = 0;
1305         if (args->flags & I915_EXEC_SECURE) {
1306                 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1307                     return -EPERM;
1308
1309                 flags |= I915_DISPATCH_SECURE;
1310         }
1311         if (args->flags & I915_EXEC_IS_PINNED)
1312                 flags |= I915_DISPATCH_PINNED;
1313
1314         if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1315                 DRM_DEBUG("execbuf with unknown ring: %d\n",
1316                           (int)(args->flags & I915_EXEC_RING_MASK));
1317                 return -EINVAL;
1318         }
1319
1320         if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1321                 ring = &dev_priv->ring[RCS];
1322         else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1323                 if (HAS_BSD2(dev)) {
1324                         int ring_id;
1325                         ring_id = gen8_dispatch_bsd_ring(dev, file);
1326                         ring = &dev_priv->ring[ring_id];
1327                 } else
1328                         ring = &dev_priv->ring[VCS];
1329         } else
1330                 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1331
1332         if (!intel_ring_initialized(ring)) {
1333                 DRM_DEBUG("execbuf with invalid ring: %d\n",
1334                           (int)(args->flags & I915_EXEC_RING_MASK));
1335                 return -EINVAL;
1336         }
1337
1338         if (args->buffer_count < 1) {
1339                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1340                 return -EINVAL;
1341         }
1342
1343         intel_runtime_pm_get(dev_priv);
1344
1345         ret = i915_mutex_lock_interruptible(dev);
1346         if (ret)
1347                 goto pre_mutex_err;
1348
1349         ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1350         if (IS_ERR(ctx)) {
1351                 mutex_unlock(&dev->struct_mutex);
1352                 ret = PTR_ERR(ctx);
1353                 goto pre_mutex_err;
1354         }
1355
1356         i915_gem_context_reference(ctx);
1357
1358         if (ctx->ppgtt)
1359                 vm = &ctx->ppgtt->base;
1360         else
1361                 vm = &dev_priv->gtt.base;
1362
1363         eb = eb_create(args);
1364         if (eb == NULL) {
1365                 i915_gem_context_unreference(ctx);
1366                 mutex_unlock(&dev->struct_mutex);
1367                 ret = -ENOMEM;
1368                 goto pre_mutex_err;
1369         }
1370
1371         /* Look up object handles */
1372         ret = eb_lookup_vmas(eb, exec, args, vm, file);
1373         if (ret)
1374                 goto err;
1375
1376         /* take note of the batch buffer before we might reorder the lists */
1377         batch_obj = eb_get_batch(eb);
1378
1379         /* Move the objects en-masse into the GTT, evicting if necessary. */
1380         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1381         ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1382         if (ret)
1383                 goto err;
1384
1385         /* The objects are in their final locations, apply the relocations. */
1386         if (need_relocs)
1387                 ret = i915_gem_execbuffer_relocate(eb);
1388         if (ret) {
1389                 if (ret == -EFAULT) {
1390                         ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1391                                                                 eb, exec);
1392                         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1393                 }
1394                 if (ret)
1395                         goto err;
1396         }
1397
1398         /* Set the pending read domains for the batch buffer to COMMAND */
1399         if (batch_obj->base.pending_write_domain) {
1400                 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1401                 ret = -EINVAL;
1402                 goto err;
1403         }
1404
1405         if (i915_needs_cmd_parser(ring)) {
1406                 shadow_batch_obj =
1407                         i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
1408                                                 batch_obj->base.size);
1409                 if (IS_ERR(shadow_batch_obj)) {
1410                         ret = PTR_ERR(shadow_batch_obj);
1411                         /* Don't try to clean up the obj in the error path */
1412                         shadow_batch_obj = NULL;
1413                         goto err;
1414                 }
1415
1416                 ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
1417                 if (ret)
1418                         goto err;
1419
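                /*
                 * i915_parse_cmds is expected to copy and validate only
                 * args->batch_len bytes starting at args->batch_start_offset,
                 * rather than the full size of the backing object.
                 */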
1420                 ret = i915_parse_cmds(ring,
1421                                       batch_obj,
1422                                       shadow_batch_obj,
1423                                       args->batch_start_offset,
1424                                       args->batch_len,
1425                                       file->is_master);
1426                 i915_gem_object_ggtt_unpin(shadow_batch_obj);
1427
1428                 if (ret) {
1429                         if (ret != -EACCES)
1430                                 goto err;
1431                 } else {
1432                         struct i915_vma *vma;
1433
1434                         memset(&shadow_exec_entry, 0,
1435                                sizeof(shadow_exec_entry));
1436
1437                         vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1438                         vma->exec_entry = &shadow_exec_entry;
1439                         drm_gem_object_reference(&shadow_batch_obj->base);
1440                         list_add_tail(&vma->exec_list, &eb->vmas);
1441
1442                         shadow_batch_obj->base.pending_read_domains =
1443                                 batch_obj->base.pending_read_domains;
1444
1445                         batch_obj = shadow_batch_obj;
1446
1447                         /*
1448                          * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1449                          * bit from MI_BATCH_BUFFER_START commands issued in the
1450                          * dispatch_execbuffer implementations. We specifically
1451                          * don't want that set when the command parser is
1452                          * enabled.
1453                          *
1454                          * FIXME: with aliasing ppgtt, buffers that should only
1455                          * be in ggtt still end up in the aliasing ppgtt. remove
1456                          * this check when that is fixed.
1457                          */
1458                         if (USES_FULL_PPGTT(dev))
1459                                 flags |= I915_DISPATCH_SECURE;
1460                 }
1461         }
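        /*
         * For reference, a rough sketch (not code from this file) of how a
         * gen4+ dispatch_execbuffer implementation consumes the
         * I915_DISPATCH_SECURE flag when emitting the batch start:
         *
         *      intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT |
         *                      (flags & I915_DISPATCH_SECURE ?
         *                       0 : MI_BATCH_NON_SECURE_I965));
         *      intel_ring_emit(ring, exec_start);
         *
         * i.e. the secure flag suppresses the NON_SECURE bit, which is why the
         * parser path above sets it under full ppgtt.
         */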
1462
1463         batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1464
1465         /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1466          * batch" bit. Hence we need to pin secure batches into the global gtt.
1467          * hsw should have this fixed, but bdw mucks it up again. */
1468         if (flags & I915_DISPATCH_SECURE) {
1469                 /*
1470                  * So on first glance it looks freaky that we pin the batch here
1471                  * outside of the reservation loop. But:
1472                  * - The batch is already pinned into the relevant ppgtt, so we
1473                  *   already have the backing storage fully allocated.
1474                  * - No other BO uses the global gtt (well contexts, but meh),
1475                  *   so we don't really have issues with multiple objects not
1476                  *   fitting due to fragmentation.
1477                  * So this is actually safe.
1478                  */
1479                 ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1480                 if (ret)
1481                         goto err;
1482
1483                 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1484         } else
1485                 exec_start += i915_gem_obj_offset(batch_obj, vm);
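        /*
         * exec_start now holds the GPU virtual address at which the engine
         * will start executing: the batch object's offset in the selected
         * address space plus the start offset within the batch.
         */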
1486
1487         ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
1488                                       &eb->vmas, batch_obj, exec_start, flags);
1489
1490         /*
1491          * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1492          * batch vma for correctness. To make this less ugly and less fragile,
1493          * this needs to be adjusted to also track the ggtt batch vma properly
1494          * as active.
1495          */
1496         if (flags & I915_DISPATCH_SECURE)
1497                 i915_gem_object_ggtt_unpin(batch_obj);
1498 err:
1499         /* the request owns the ref now */
1500         i915_gem_context_unreference(ctx);
1501         eb_destroy(eb);
1502
1503         mutex_unlock(&dev->struct_mutex);
1504
1505 pre_mutex_err:
1506         /* intel_gpu_busy should also take a runtime pm reference, so the device
1507          * is only powered down when it is really idle. */
1508         intel_runtime_pm_put(dev_priv);
1509         return ret;
1510 }
1511
1512 /*
1513  * Legacy execbuffer just creates an exec2 list from the original exec object
1514  * list array and passes it to the real function.
1515  */
1516 int
1517 i915_gem_execbuffer(struct drm_device *dev, void *data,
1518                     struct drm_file *file)
1519 {
1520         struct drm_i915_gem_execbuffer *args = data;
1521         struct drm_i915_gem_execbuffer2 exec2;
1522         struct drm_i915_gem_exec_object *exec_list = NULL;
1523         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1524         int ret, i;
1525
1526         if (args->buffer_count < 1) {
1527                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1528                 return -EINVAL;
1529         }
1530
1531         /* Copy in the exec list from userland */
1532         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1533         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1534         if (exec_list == NULL || exec2_list == NULL) {
1535                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1536                           args->buffer_count);
1537                 drm_free_large(exec_list);
1538                 drm_free_large(exec2_list);
1539                 return -ENOMEM;
1540         }
1541         ret = copy_from_user(exec_list,
1542                              to_user_ptr(args->buffers_ptr),
1543                              sizeof(*exec_list) * args->buffer_count);
1544         if (ret != 0) {
1545                 DRM_DEBUG("copy %d exec entries failed %d\n",
1546                           args->buffer_count, ret);
1547                 drm_free_large(exec_list);
1548                 drm_free_large(exec2_list);
1549                 return -EFAULT;
1550         }
1551
1552         for (i = 0; i < args->buffer_count; i++) {
1553                 exec2_list[i].handle = exec_list[i].handle;
1554                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1555                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1556                 exec2_list[i].alignment = exec_list[i].alignment;
1557                 exec2_list[i].offset = exec_list[i].offset;
1558                 if (INTEL_INFO(dev)->gen < 4)
1559                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1560                 else
1561                         exec2_list[i].flags = 0;
1562         }
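        /*
         * The legacy drm_i915_gem_exec_object has no flags field; on pre-gen4
         * hardware every object is conservatively marked as needing a fence
         * register (presumably to preserve the behaviour old userspace relied
         * on), while newer generations start with no flags set.
         */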
1563
1564         exec2.buffers_ptr = args->buffers_ptr;
1565         exec2.buffer_count = args->buffer_count;
1566         exec2.batch_start_offset = args->batch_start_offset;
1567         exec2.batch_len = args->batch_len;
1568         exec2.DR1 = args->DR1;
1569         exec2.DR4 = args->DR4;
1570         exec2.num_cliprects = args->num_cliprects;
1571         exec2.cliprects_ptr = args->cliprects_ptr;
1572         exec2.flags = I915_EXEC_RENDER;
1573         i915_execbuffer2_set_context_id(exec2, 0);
1574
1575         ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1576         if (!ret) {
1577                 struct drm_i915_gem_exec_object __user *user_exec_list =
1578                         to_user_ptr(args->buffers_ptr);
1579
1580                 /* Copy the new buffer offsets back to the user's exec list. */
1581                 for (i = 0; i < args->buffer_count; i++) {
1582                         ret = __copy_to_user(&user_exec_list[i].offset,
1583                                              &exec2_list[i].offset,
1584                                              sizeof(user_exec_list[i].offset));
1585                         if (ret) {
1586                                 ret = -EFAULT;
1587                                 DRM_DEBUG("failed to copy %d exec entries "
1588                                           "back to user (%d)\n",
1589                                           args->buffer_count, ret);
1590                                 break;
1591                         }
1592                 }
1593         }
1594
1595         drm_free_large(exec_list);
1596         drm_free_large(exec2_list);
1597         return ret;
1598 }
1599
1600 int
1601 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1602                      struct drm_file *file)
1603 {
1604         struct drm_i915_gem_execbuffer2 *args = data;
1605         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1606         int ret;
1607
1608         if (args->buffer_count < 1 ||
1609             args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1610                 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1611                 return -EINVAL;
1612         }
1613
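        /*
         * rsvd2 is required to be zero so that the field can later be
         * reclaimed for new execbuffer2 uAPI without breaking existing
         * userspace.
         */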
1614         if (args->rsvd2 != 0) {
1615                 DRM_DEBUG("dirty rsvd2 field\n");
1616                 return -EINVAL;
1617         }
1618
1619         exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1620                              GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1621         if (exec2_list == NULL)
1622                 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1623                                            args->buffer_count);
1624         if (exec2_list == NULL) {
1625                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1626                           args->buffer_count);
1627                 return -ENOMEM;
1628         }
1629         ret = copy_from_user(exec2_list,
1630                              to_user_ptr(args->buffers_ptr),
1631                              sizeof(*exec2_list) * args->buffer_count);
1632         if (ret != 0) {
1633                 DRM_DEBUG("copy %d exec entries failed %d\n",
1634                           args->buffer_count, ret);
1635                 drm_free_large(exec2_list);
1636                 return -EFAULT;
1637         }
1638
1639         ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1640         if (!ret) {
1641                 /* Copy the new buffer offsets back to the user's exec list. */
1642                 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1643                                    to_user_ptr(args->buffers_ptr);
1644                 int i;
1645
1646                 for (i = 0; i < args->buffer_count; i++) {
1647                         ret = __copy_to_user(&user_exec_list[i].offset,
1648                                              &exec2_list[i].offset,
1649                                              sizeof(user_exec_list[i].offset));
1650                         if (ret) {
1651                                 ret = -EFAULT;
1652                                 DRM_DEBUG("failed to copy %d exec entries "
1653                                           "back to user\n",
1654                                           args->buffer_count);
1655                                 break;
1656                         }
1657                 }
1658         }
1659
1660         drm_free_large(exec2_list);
1661         return ret;
1662 }
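
/*
 * A minimal sketch (illustrative only, with hypothetical names) of how
 * userspace might drive the execbuffer2 ioctl handled above.  It assumes a
 * valid DRM file descriptor fd, a GEM handle batch_handle whose backing pages
 * already contain batch_len bytes of commands terminated by
 * MI_BATCH_BUFFER_END, and that the batch is the only (and therefore last)
 * object in the exec list:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
 *	{
 *		struct drm_i915_gem_exec_object2 obj;
 *		struct drm_i915_gem_execbuffer2 execbuf;
 *
 *		memset(&obj, 0, sizeof(obj));
 *		obj.handle = batch_handle;		// batch must be last in the list
 *
 *		memset(&execbuf, 0, sizeof(execbuf));
 *		execbuf.buffers_ptr = (uintptr_t)&obj;	// array of exec objects
 *		execbuf.buffer_count = 1;
 *		execbuf.batch_start_offset = 0;		// commands start at offset 0
 *		execbuf.batch_len = batch_len;
 *		execbuf.flags = I915_EXEC_RENDER;	// submit to the render ring
 *		i915_execbuffer2_set_context_id(execbuf, 0);	// default context
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *	}
 */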