Merge branch 'drm-intel-fixes' into drm-intel-next
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 23 Nov 2010 20:13:13 +0000 (20:13 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Tue, 23 Nov 2010 20:13:13 +0000 (20:13 +0000)
1  2 
drivers/gpu/drm/i915/i915_gem.c

@@@ -3611,20 -3525,101 +3649,101 @@@ i915_gem_execbuffer_reserve(struct drm_
                while (i--)
                        i915_gem_object_unpin(object_list[i]);
  
 -              if (ret == 0)
 -                      break;
 -
 -              if (ret != -ENOSPC || retry)
 +              if (ret != -ENOSPC || retry > 1)
                        return ret;
  
 -              ret = i915_gem_evict_everything(dev);
 +              /* First attempt, just clear anything that is purgeable.
 +               * Second attempt, clear the entire GTT.
 +               */
 +              ret = i915_gem_evict_everything(dev, retry == 0);
                if (ret)
                        return ret;
 -      }
  
 -      return 0;
 +              retry++;
 +      } while (1);
  }
  
+ static int
+ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+                                 struct drm_file *file,
+                                 struct drm_gem_object **object_list,
+                                 struct drm_i915_gem_exec_object2 *exec_list,
+                                 int count)
+ {
+       struct drm_i915_gem_relocation_entry *reloc;
+       int i, total, ret;
+       for (i = 0; i < count; i++) {
+               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+               obj->in_execbuffer = false;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       total = 0;
+       for (i = 0; i < count; i++)
+               total += exec_list[i].relocation_count;
+       reloc = drm_malloc_ab(total, sizeof(*reloc));
+       if (reloc == NULL) {
+               mutex_lock(&dev->struct_mutex);
+               return -ENOMEM;
+       }
+       total = 0;
+       for (i = 0; i < count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+               if (copy_from_user(reloc+total, user_relocs,
+                                  exec_list[i].relocation_count *
+                                  sizeof(*reloc))) {
+                       ret = -EFAULT;
+                       mutex_lock(&dev->struct_mutex);
+                       goto err;
+               }
+               total += exec_list[i].relocation_count;
+       }
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               mutex_lock(&dev->struct_mutex);
+               goto err;
+       }
+       ret = i915_gem_execbuffer_reserve(dev, file,
+                                         object_list, exec_list,
+                                         count);
+       if (ret)
+               goto err;
+       total = 0;
+       for (i = 0; i < count; i++) {
+               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+               obj->base.pending_read_domains = 0;
+               obj->base.pending_write_domain = 0;
+               ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+                                                              &exec_list[i],
+                                                              reloc + total);
+               if (ret)
+                       goto err;
+               total += exec_list[i].relocation_count;
+       }
+       /* Leave the user relocations as are, this is the painfully slow path,
+        * and we want to avoid the complication of dropping the lock whilst
+        * having buffers reserved in the aperture and so causing spurious
+        * ENOSPC for random operations.
+        */
+ err:
+       drm_free_large(reloc);
+       return ret;
+ }
  static int
  i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
                                struct drm_file *file,