drm/i915: Convert the file mutex into a spinlock
[pandora-kernel.git] drivers/gpu/drm/i915/i915_gem.c
index 3fd69ad..78282ed 100644
@@ -61,6 +61,57 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct completion *x = &dev_priv->error_completion;
+       unsigned long flags;
+       int ret;
+
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       ret = wait_for_completion_interruptible(x);
+       if (ret)
+               return ret;
+
+       /* Success, we reset the GPU! */
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       /* GPU is hung, bump the completion count to account for
+        * the token we just consumed so that we never hit zero and
+        * end up waiting upon a subsequent completion event that
+        * will never happen.
+        */
+       spin_lock_irqsave(&x->wait.lock, flags);
+       x->done++;
+       spin_unlock_irqrestore(&x->wait.lock, flags);
+       return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
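These two helpers establish the pattern the rest of the patch rolls out to every ioctl: resolve any in-flight GPU reset first (blocking interruptibly on error_completion), take struct_mutex interruptibly, then re-check the wedged flag under the lock so a reset that slips in between is not missed. A minimal sketch of the resulting calling convention; the ioctl body here is hypothetical:

	static int example_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
	{
		int ret;

		/* GEM object lookup no longer requires struct_mutex,
		 * so do it before taking the lock */

		ret = i915_mutex_lock_interruptible(dev);
		if (ret)
			return ret; /* -ERESTARTSYS/-EINTR, -EAGAIN or -EIO */

		/* ... touch GEM/GPU state under dev->struct_mutex ... */

		mutex_unlock(&dev->struct_mutex);
		return 0;
	}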
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 {
@@ -268,7 +319,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@ -387,7 +440,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto fail_put_user_pages;
 
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@ -586,8 +641,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
-       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
@@ -682,7 +739,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                goto out_unpin_pages;
        }
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto out_unpin_pages;
+
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;
@@ -756,7 +816,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@ -852,7 +914,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto fail_put_user_pages;
 
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@ -1020,7 +1084,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        intel_mark_busy(dev, obj);
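Note the error path above: the interruptible lock attempt failed, so struct_mutex is not held and the reference taken by the lookup has to be dropped via the _unlocked variant. The contrast, as it recurs in every converted ioctl below:

	/* lock acquisition failed -- struct_mutex is NOT held */
	drm_gem_object_unreference_unlocked(obj);

	/* error paths after a successful lock run under struct_mutex */
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);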
 
@@ -1075,11 +1143,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL) {
-               mutex_unlock(&dev->struct_mutex);
+       if (obj == NULL)
                return -ENOENT;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
        }
 
 #if WATCH_BUF
@@ -1394,7 +1465,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        obj_priv = to_intel_bo(obj);
 
@@ -1592,17 +1667,17 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 
 uint32_t
 i915_add_request(struct drm_device *dev,
-                struct drm_file *file_priv,
+                struct drm_file *file,
                 struct drm_i915_gem_request *request,
                 struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *i915_file_priv = NULL;
+       struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
 
-       if (file_priv != NULL)
-               i915_file_priv = file_priv->driver_priv;
+       if (file != NULL)
+               file_priv = file->driver_priv;
 
        if (request == NULL) {
                request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -1610,7 +1685,7 @@ i915_add_request(struct drm_device *dev,
                        return 0;
        }
 
-       seqno = ring->add_request(dev, ring, file_priv, 0);
+       seqno = ring->add_request(dev, ring, 0);
 
        request->seqno = seqno;
        request->ring = ring;
@@ -1618,11 +1693,12 @@ i915_add_request(struct drm_device *dev,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (i915_file_priv) {
+       if (file_priv) {
+               spin_lock(&file_priv->mm.lock);
+               request->file_priv = file_priv;
                list_add_tail(&request->client_list,
-                             &i915_file_priv->mm.request_list);
-       } else {
-               INIT_LIST_HEAD(&request->client_list);
+                             &file_priv->mm.request_list);
+               spin_unlock(&file_priv->mm.lock);
        }
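From this point request->client_list is guarded only by the new per-file spinlock, and every request records its owning file so it can later be unlinked without holding struct_mutex. A condensed sketch of the layout this assumes; the actual struct change lives in i915_drv.h, outside this diff:

	struct drm_i915_file_private {
		struct {
			spinlock_t lock;		/* guards request_list */
			struct list_head request_list;	/* oldest first */
		} mm;
	};

	/* every touch of request->client_list takes the same shape */
	spin_lock(&file_priv->mm.lock);
	request->file_priv = file_priv;		/* NULL when unlinking */
	list_add_tail(&request->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);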
 
        if (!dev_priv->mm.suspended) {
@@ -1654,20 +1730,18 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
                        I915_GEM_DOMAIN_COMMAND, flush_domains);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-       return (int32_t)(seq1 - seq2) >= 0;
-}
+       struct drm_i915_file_private *file_priv = request->file_priv;
 
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-                  struct intel_ring_buffer *ring)
-{
-       return ring->get_gem_seqno(dev, ring);
+       if (!file_priv)
+               return;
+
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
 }
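The deleted helpers are not lost: i915_seqno_passed() presumably moves to a static inline in i915_drv.h (the hunks below still call it), and i915_get_gem_seqno() is replaced by calling ring->get_seqno() directly. The signed-difference trick in i915_seqno_passed() is what keeps the lock-free waits below correct across a 32-bit seqno wraparound; a worked example with illustrative values:

	/* i915_seqno_passed(seq1, seq2): "seq1 is at or after seq2",
	 * computed as (int32_t)(seq1 - seq2) >= 0, i.e. modulo 2^32 */
	i915_seqno_passed(0x00000005, 0x00000001); /* diff  4 -> true  */
	i915_seqno_passed(0x00000001, 0x00000005); /* diff -4 -> false */
	i915_seqno_passed(0x00000002, 0xfffffffe); /* wrapped: diff 4 -> true */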
 
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
@@ -1681,7 +1755,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                           list);
 
                list_del(&request->list);
-               list_del(&request->client_list);
+               i915_gem_request_remove_from_client(request);
                kfree(request);
        }
 
@@ -1746,7 +1820,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
            list_empty(&ring->request_list))
                return;
 
-       seqno = i915_get_gem_seqno(dev, ring);
+       seqno = ring->get_seqno(dev, ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -1760,7 +1834,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                trace_i915_gem_request_retire(dev, request->seqno);
 
                list_del(&request->list);
-               list_del(&request->client_list);
+               i915_gem_request_remove_from_client(request);
                kfree(request);
        }
 
@@ -1853,16 +1927,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EAGAIN;
+
        if (seqno == dev_priv->next_seqno) {
                seqno = i915_add_request(dev, NULL, NULL, ring);
                if (seqno == 0)
                        return -ENOMEM;
        }
 
-       if (atomic_read(&dev_priv->mm.wedged))
-               return -EIO;
-
-       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1881,12 +1955,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
 
                ring->user_irq_put(dev, ring);
@@ -1895,11 +1969,11 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                trace_i915_gem_request_wait_end(dev, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
-               ret = -EIO;
+               ret = -EAGAIN;
 
        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
+                         __func__, ret, seqno, ring->get_seqno(dev, ring),
                          dev_priv->next_seqno);
 
        /* Directly dispatch request retiring.  While we have the work queue
@@ -1924,7 +1998,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
        return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
-void
+static void
 i915_gem_flush_ring(struct drm_device *dev,
                    struct drm_file *file_priv,
                    struct intel_ring_buffer *ring,
@@ -3384,28 +3458,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  * relatively low latency when blocking on a particular request to finish.
  */
 static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+       struct drm_i915_gem_request *request;
+       struct intel_ring_buffer *ring = NULL;
+       u32 seqno = 0;
+       int ret;
 
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&i915_file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
-
+       spin_lock(&file_priv->mm.lock);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
-               ret = i915_wait_request(dev, request->seqno, request->ring);
-               if (ret != 0)
-                       break;
+               ring = request->ring;
+               seqno = request->seqno;
        }
-       mutex_unlock(&dev->struct_mutex);
+       spin_unlock(&file_priv->mm.lock);
+
+       if (seqno == 0)
+               return 0;
+
+       ret = 0;
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+               /* And wait for the seqno passing without holding any locks and
+                * causing extra latency for others. This is safe as the irq
+                * generation is designed to be run atomically and so is
+                * lockless.
+                */
+               ring->user_irq_get(dev, ring);
+               ret = wait_event_interruptible(ring->irq_queue,
+                                              i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+                                              || atomic_read(&dev_priv->mm.wedged));
+               ring->user_irq_put(dev, ring);
+
+               if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+                       ret = -EIO;
+       }
+
+       if (ret == 0)
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
        return ret;
 }
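The rewritten throttle never sleeps while holding a lock: under the spinlock it only copies out the seqno and ring of the newest request older than 20 ms, then waits on the ring's irq queue with no locks held. The one rule that makes this safe, reduced to a sketch: a retired request may be kfree()d the instant the spinlock drops, so only plain values (plus the device-lifetime ring pointer) may escape the critical section:

	spin_lock(&file_priv->mm.lock);
	/* never let 'request' itself escape this region */
	ring = request->ring;	/* rings outlive all requests */
	seqno = request->seqno;	/* plain u32, safe to copy */
	spin_unlock(&file_priv->mm.lock);
	/* 'request' is now unsafe; wait on (ring, seqno) only */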
@@ -3506,7 +3600,7 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
        return 0;
 }
 
-int
+static int
 i915_gem_wait_for_pending_flip(struct drm_device *dev,
                               struct drm_gem_object **object_list,
                               int count)
@@ -3554,13 +3648,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_relocation_entry *relocs = NULL;
        struct drm_i915_gem_request *request = NULL;
-       int ret = 0, ret2, i, pinned = 0;
+       int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t reloc_index;
        int pin_tries, flips;
 
        struct intel_ring_buffer *ring = NULL;
 
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3618,16 +3716,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret != 0)
                goto pre_mutex_err;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto pre_mutex_err;
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EIO;
-               goto pre_mutex_err;
-       }
-
        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
@@ -4111,17 +4205,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
-
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
        obj_priv = to_intel_bo(obj);
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
                drm_gem_object_unreference(obj);
@@ -4166,18 +4263,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-
-       mutex_lock(&dev->struct_mutex);
+       int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
 
        obj_priv = to_intel_bo(obj);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -4203,6 +4305,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4211,7 +4314,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        }
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
@@ -4260,6 +4367,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_madvise *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        switch (args->madv) {
        case I915_MADV_DONTNEED:
@@ -4275,10 +4383,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          args->handle);
                return -ENOENT;
        }
-
-       mutex_lock(&dev->struct_mutex);
        obj_priv = to_intel_bo(obj);
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->pin_count) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
@@ -4628,6 +4740,7 @@ i915_gem_load(struct drm_device *dev)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       init_completion(&dev_priv->error_completion);
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
@@ -4857,18 +4970,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list))
-               list_del_init(i915_file_priv->mm.request_list.next);
-       mutex_unlock(&dev->struct_mutex);
+       spin_lock(&file_priv->mm.lock);
+       while (!list_empty(&file_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&file_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          client_list);
+               list_del(&request->client_list);
+               request->file_priv = NULL;
+       }
+       spin_unlock(&file_priv->mm.lock);
 }
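The lock and list drained here need a counterpart at file-open time. That initialization is not in this file; in the original commit it lands in the driver's open hook (i915_driver_open() in i915_dma.c), roughly:

	/* at open, after allocating file->driver_priv (elided): */
	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

Note that i915_gem_release() only unlinks each request and clears request->file_priv: the request itself still sits on its ring's request_list and is freed during normal retirement, which is exactly why i915_gem_request_remove_from_client() tolerates a NULL file_priv.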
 
 static int