drm/i915: Convert the file mutex into a spinlock
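The per-client list of outstanding GPU requests moves out from under dev->struct_mutex and onto a new per-file spinlock, file_priv->mm.lock. Ioctls now enter GEM through i915_mutex_lock_interruptible(): it first calls i915_gem_check_is_wedged(), which waits on the new error_completion while a GPU reset is in progress (returning -EIO if the reset fails), and it returns -EAGAIN if the GPU becomes wedged after the mutex is taken. The reset path gains i915_gem_reset_lists() to drop stale requests and move lost objects back onto the inactive list, alongside related cleanups (a pinned_list for pinned objects, the get_gem_seqno -> get_seqno rename, and removal of the lazy-request bookkeeping).

A minimal sketch of the per-file locking pattern follows, for illustration only: the struct layout, names and the init helper are simplified stand-ins, not the driver's own types; the real logic is in i915_add_request(), i915_gem_request_remove_from_client() and i915_gem_release() in the diff below.

#include <linux/list.h>
#include <linux/spinlock.h>

struct client_private {
        struct {
                spinlock_t lock;                /* protects request_list */
                struct list_head request_list;  /* requests emitted by this client */
        } mm;
};

struct client_request {
        struct client_private *file_priv;       /* NULL once the client is gone */
        struct list_head client_list;
};

/* one-time init when the client's file is opened (not part of this file's diff) */
static void client_init(struct client_private *file_priv)
{
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
}

/* mirrors the i915_add_request() side: queue on the client under its lock */
static void client_add_request(struct client_private *file_priv,
                               struct client_request *request)
{
        spin_lock(&file_priv->mm.lock);
        request->file_priv = file_priv;
        list_add_tail(&request->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);
}

/* mirrors i915_gem_request_remove_from_client(): safe if the client is gone */
static void client_remove_request(struct client_request *request)
{
        struct client_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}

Keeping this list under its own lock is what lets the retire, throttle and file-close paths unlink a client's requests without ever taking dev->struct_mutex.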
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4e978e4..78282ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -61,6 +61,57 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct completion *x = &dev_priv->error_completion;
+       unsigned long flags;
+       int ret;
+
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       ret = wait_for_completion_interruptible(x);
+       if (ret)
+               return ret;
+
+       /* Success, we reset the GPU! */
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       /* GPU is hung, bump the completion count to account for
+        * the token we just consumed so that we never hit zero and
+        * end up waiting upon a subsequent completion event that
+        * will never happen.
+        */
+       spin_lock_irqsave(&x->wait.lock, flags);
+       x->done++;
+       spin_unlock_irqrestore(&x->wait.lock, flags);
+       return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 {
@@ -268,7 +319,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@ -387,7 +440,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto fail_put_user_pages;
 
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@ -586,8 +641,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
-       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
@@ -682,7 +739,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                goto out_unpin_pages;
        }
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto out_unpin_pages;
+
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;
@@ -756,7 +816,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@ -852,7 +914,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto fail_put_user_pages;
 
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@ -1020,7 +1084,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        intel_mark_busy(dev, obj);
 
@@ -1051,7 +1119,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
 
-       
        /* Maintain LRU order of "inactive" objects */
        if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@ -1076,11 +1143,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL) {
-               mutex_unlock(&dev->struct_mutex);
+       if (obj == NULL)
                return -ENOENT;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
        }
 
 #if WATCH_BUF
@@ -1247,7 +1317,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOMEM;
+               ret = -ENOSPC;
                goto out_free_list;
        }
 
@@ -1259,9 +1329,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
        }
 
        list->hash.key = list->file_offset_node->start;
-       if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+       if (ret) {
                DRM_ERROR("failed to add to map hash\n");
-               ret = -ENOMEM;
                goto out_free_mm;
        }
 
@@ -1395,7 +1465,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        obj_priv = to_intel_bo(obj);
 
@@ -1471,24 +1545,12 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
        obj_priv->pages = NULL;
 }
 
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
-                           struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       ring->outstanding_lazy_request = true;
-
-       return dev_priv->next_seqno;
-}
-
 static void
 i915_gem_object_move_to_active(struct drm_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = obj->dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
        BUG_ON(ring == NULL);
        obj_priv->ring = ring;
@@ -1501,7 +1563,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 
        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list, &ring->active_list);
-       obj_priv->last_rendering_seqno = seqno;
+       obj_priv->last_rendering_seqno = dev_priv->next_seqno;
 }
 
 static void
@@ -1552,7 +1614,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
-               list_del_init(&obj_priv->list);
+               list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
@@ -1605,17 +1667,17 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 
 uint32_t
 i915_add_request(struct drm_device *dev,
-                struct drm_file *file_priv,
+                struct drm_file *file,
                 struct drm_i915_gem_request *request,
                 struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *i915_file_priv = NULL;
+       struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
 
-       if (file_priv != NULL)
-               i915_file_priv = file_priv->driver_priv;
+       if (file != NULL)
+               file_priv = file->driver_priv;
 
        if (request == NULL) {
                request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -1623,7 +1685,7 @@ i915_add_request(struct drm_device *dev,
                        return 0;
        }
 
-       seqno = ring->add_request(dev, ring, file_priv, 0);
+       seqno = ring->add_request(dev, ring, 0);
 
        request->seqno = seqno;
        request->ring = ring;
@@ -1631,11 +1693,12 @@ i915_add_request(struct drm_device *dev,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (i915_file_priv) {
+       if (file_priv) {
+               spin_lock(&file_priv->mm.lock);
+               request->file_priv = file_priv;
                list_add_tail(&request->client_list,
-                             &i915_file_priv->mm.request_list);
-       } else {
-               INIT_LIST_HEAD(&request->client_list);
+                             &file_priv->mm.request_list);
+               spin_unlock(&file_priv->mm.lock);
        }
 
        if (!dev_priv->mm.suspended) {
@@ -1667,36 +1730,80 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
                        I915_GEM_DOMAIN_COMMAND, flush_domains);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-       return (int32_t)(seq1 - seq2) >= 0;
+       struct drm_i915_file_private *file_priv = request->file_priv;
+
+       if (!file_priv)
+               return;
+
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
 }
 
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-                  struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+                                     struct intel_ring_buffer *ring)
 {
-       return ring->get_gem_seqno(dev, ring);
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
+
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj_priv;
+
+               obj_priv = list_first_entry(&ring->active_list,
+                                           struct drm_i915_gem_object,
+                                           list);
+
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
+       }
 }
 
-void i915_gem_reset_flushing_list(struct drm_device *dev)
+void i915_gem_reset_lists(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
 
-       while (!list_empty(&dev_priv->mm.flushing_list)) {
-               struct drm_i915_gem_object *obj_priv;
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+       if (HAS_BSD(dev))
+               i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
 
+       /* Remove anything from the flushing lists. The GPU cache is likely
+        * to be lost on reset along with the data, so simply move the
+        * lost bo to the inactive list.
+        */
+       while (!list_empty(&dev_priv->mm.flushing_list)) {
                obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                            struct drm_i915_gem_object,
                                            list);
 
                obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
                i915_gem_object_move_to_inactive(&obj_priv->base);
        }
+
+       /* Move everything out of the GPU domains to ensure we do any
+        * necessary invalidation upon reuse.
+        */
+       list_for_each_entry(obj_priv,
+                           &dev_priv->mm.inactive_list,
+                           list)
+       {
+               obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+       }
 }
 
 /**
@@ -1708,15 +1815,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
-       bool wedged;
 
        if (!ring->status_page.page_addr ||
            list_empty(&ring->request_list))
                return;
 
-       seqno = i915_get_gem_seqno(dev, ring);
-       wedged = atomic_read(&dev_priv->mm.wedged);
-
+       seqno = ring->get_seqno(dev, ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -1724,13 +1828,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (!wedged && !i915_seqno_passed(seqno, request->seqno))
+               if (!i915_seqno_passed(seqno, request->seqno))
                        break;
 
                trace_i915_gem_request_retire(dev, request->seqno);
 
                list_del(&request->list);
-               list_del(&request->client_list);
+               i915_gem_request_remove_from_client(request);
                kfree(request);
        }
 
@@ -1745,8 +1849,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                                            struct drm_i915_gem_object,
                                            list);
 
-               if (!wedged &&
-                   !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+               if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
                        break;
 
                obj = &obj_priv->base;
@@ -1824,16 +1927,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EAGAIN;
+
        if (seqno == dev_priv->next_seqno) {
                seqno = i915_add_request(dev, NULL, NULL, ring);
                if (seqno == 0)
                        return -ENOMEM;
        }
 
-       if (atomic_read(&dev_priv->mm.wedged))
-               return -EIO;
-
-       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1852,12 +1955,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
 
                ring->user_irq_put(dev, ring);
@@ -1866,11 +1969,11 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                trace_i915_gem_request_wait_end(dev, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
-               ret = -EIO;
+               ret = -EAGAIN;
 
        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
+                         __func__, ret, seqno, ring->get_seqno(dev, ring),
                          dev_priv->next_seqno);
 
        /* Directly dispatch request retiring.  While we have the work queue
@@ -1897,6 +2000,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
 
 static void
 i915_gem_flush_ring(struct drm_device *dev,
+                   struct drm_file *file_priv,
                    struct intel_ring_buffer *ring,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
@@ -1907,6 +2011,7 @@ i915_gem_flush_ring(struct drm_device *dev,
 
 static void
 i915_gem_flush(struct drm_device *dev,
+              struct drm_file *file_priv,
               uint32_t invalidate_domains,
               uint32_t flush_domains,
               uint32_t flush_rings)
@@ -1918,11 +2023,11 @@ i915_gem_flush(struct drm_device *dev,
 
        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                if (flush_rings & RING_RENDER)
-                       i915_gem_flush_ring(dev,
+                       i915_gem_flush_ring(dev, file_priv,
                                            &dev_priv->render_ring,
                                            invalidate_domains, flush_domains);
                if (flush_rings & RING_BSD)
-                       i915_gem_flush_ring(dev,
+                       i915_gem_flush_ring(dev, file_priv,
                                            &dev_priv->bsd_ring,
                                            invalidate_domains, flush_domains);
        }
@@ -2024,9 +2129,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                obj_priv->gtt_space = NULL;
        }
 
-       /* Remove ourselves from the LRU list if present. */
-       if (!list_empty(&obj_priv->list))
-               list_del_init(&obj_priv->list);
+       list_del_init(&obj_priv->list);
 
        if (i915_gem_object_is_purgeable(obj_priv))
                i915_gem_object_truncate(obj);
@@ -2041,6 +2144,7 @@ i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
+       u32 seqno;
        int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
@@ -2051,24 +2155,18 @@ i915_gpu_idle(struct drm_device *dev)
                return 0;
 
        /* Flush everything onto the inactive list. */
-       i915_gem_flush_ring(dev,
-                           &dev_priv->render_ring,
+       seqno = dev_priv->next_seqno;
+       i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
                            I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-
-       ret = i915_wait_request(dev,
-                               i915_gem_next_request_seqno(dev, &dev_priv->render_ring),
-                               &dev_priv->render_ring);
+       ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
        if (ret)
                return ret;
 
        if (HAS_BSD(dev)) {
-               i915_gem_flush_ring(dev,
-                                   &dev_priv->bsd_ring,
+               seqno = dev_priv->next_seqno;
+               i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
                                    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-
-               ret = i915_wait_request(dev,
-                                       i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring),
-                                       &dev_priv->bsd_ring);
+               ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
                if (ret)
                        return ret;
        }
@@ -2448,7 +2546,9 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
                              bool interruptible)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_fence_reg *reg;
 
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
@@ -2463,7 +2563,8 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
-       if (INTEL_INFO(dev)->gen < 4) {
+       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       if (reg->gpu) {
                int ret;
 
                ret = i915_gem_object_flush_gpu_write_domain(obj, true);
@@ -2473,6 +2574,8 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
                ret = i915_gem_object_wait_rendering(obj, interruptible);
                if (ret)
                        return ret;
+
+               reg->gpu = false;
        }
 
        i915_gem_object_flush_gtt_write_domain(obj);
@@ -2633,7 +2736,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
-       i915_gem_flush_ring(dev,
+       i915_gem_flush_ring(dev, NULL,
                            to_intel_bo(obj)->ring,
                            0, obj->write_domain);
        BUG_ON(obj->write_domain);
@@ -2762,7 +2865,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
        i915_gem_object_flush_cpu_write_domain(obj);
 
        old_read_domains = obj->read_domains;
-       obj->read_domains = I915_GEM_DOMAIN_GTT;
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -2819,7 +2922,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         * need to be invalidated at next use.
         */
        if (write) {
-               obj->read_domains &= I915_GEM_DOMAIN_CPU;
+               obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -3167,11 +3270,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
         * properly handle blits to/from tiled surfaces.
         */
        if (need_fence) {
-               ret = i915_gem_object_get_fence_reg(obj, false);
+               ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret != 0) {
                        i915_gem_object_unpin(obj);
                        return ret;
                }
+
+               dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
        }
 
        entry->offset = obj_priv->gtt_offset;
@@ -3353,28 +3458,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  * relatively low latency when blocking on a particular request to finish.
  */
 static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+       struct drm_i915_gem_request *request;
+       struct intel_ring_buffer *ring = NULL;
+       u32 seqno = 0;
+       int ret;
 
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&i915_file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
-
+       spin_lock(&file_priv->mm.lock);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
-               ret = i915_wait_request(dev, request->seqno, request->ring);
-               if (ret != 0)
-                       break;
+               ring = request->ring;
+               seqno = request->seqno;
        }
-       mutex_unlock(&dev->struct_mutex);
+       spin_unlock(&file_priv->mm.lock);
+
+       if (seqno == 0)
+               return 0;
+
+       ret = 0;
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+               /* And wait for the seqno passing without holding any locks and
+                * causing extra latency for others. This is safe as the irq
+                * generation is designed to be run atomically and so is
+                * lockless.
+                */
+               ring->user_irq_get(dev, ring);
+               ret = wait_event_interruptible(ring->irq_queue,
+                                              i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+                                              || atomic_read(&dev_priv->mm.wedged));
+               ring->user_irq_put(dev, ring);
+
+               if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+                       ret = -EIO;
+       }
+
+       if (ret == 0)
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
        return ret;
 }
@@ -3523,13 +3648,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_relocation_entry *relocs = NULL;
        struct drm_i915_gem_request *request = NULL;
-       int ret = 0, ret2, i, pinned = 0;
+       int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
-       uint32_t seqno, reloc_index;
+       uint32_t reloc_index;
        int pin_tries, flips;
 
        struct intel_ring_buffer *ring = NULL;
 
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3587,16 +3716,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret != 0)
                goto pre_mutex_err;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto pre_mutex_err;
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EIO;
-               goto pre_mutex_err;
-       }
-
        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
@@ -3742,21 +3867,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         dev->invalidate_domains,
                         dev->flush_domains);
 #endif
-               i915_gem_flush(dev,
+               i915_gem_flush(dev, file_priv,
                               dev->invalidate_domains,
                               dev->flush_domains,
                               dev_priv->mm.flush_rings);
        }
 
-       if (dev_priv->render_ring.outstanding_lazy_request) {
-               (void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring);
-               dev_priv->render_ring.outstanding_lazy_request = false;
-       }
-       if (dev_priv->bsd_ring.outstanding_lazy_request) {
-               (void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring);
-               dev_priv->bsd_ring.outstanding_lazy_request = false;
-       }
-
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -3815,15 +3931,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
        }
-
-       /*
-        * Get a seqno representing the execution of the current buffer,
-        * which we can wait on.  We would like to mitigate these interrupts,
-        * likely by only creating seqnos occasionally (so that we have
-        * *some* interrupts representing completion of buffers that we can
-        * wait on when trying to clear up gtt space).
-        */
-       seqno = i915_add_request(dev, file_priv, request, ring);
+       i915_add_request(dev, file_priv, request, ring);
        request = NULL;
 
 #if WATCH_LRU
@@ -4017,6 +4125,7 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
@@ -4052,9 +4161,9 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        if (obj_priv->pin_count == 1) {
                atomic_inc(&dev->pin_count);
                atomic_add(obj->size, &dev->pin_memory);
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                       list_del_init(&obj_priv->list);
+               if (!obj_priv->active)
+                       list_move_tail(&obj_priv->list,
+                                      &dev_priv->mm.pinned_list);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -4078,8 +4187,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
         * the inactive list
         */
        if (obj_priv->pin_count == 0) {
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+               if (!obj_priv->active)
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.inactive_list);
                atomic_dec(&dev->pin_count);
@@ -4097,17 +4205,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
-
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
        obj_priv = to_intel_bo(obj);
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
                drm_gem_object_unreference(obj);
@@ -4152,18 +4263,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-
-       mutex_lock(&dev->struct_mutex);
+       int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
 
        obj_priv = to_intel_bo(obj);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -4189,6 +4305,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4197,7 +4314,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        }
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
@@ -4212,12 +4333,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * use this buffer rather sooner than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
-                       i915_gem_flush_ring(dev,
+               if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+                       i915_gem_flush_ring(dev, file_priv,
                                            obj_priv->ring,
                                            0, obj->write_domain);
-                       (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring);
-               }
 
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
@@ -4248,6 +4367,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_madvise *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        switch (args->madv) {
        case I915_MADV_DONTNEED:
@@ -4263,10 +4383,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          args->handle);
                return -ENOENT;
        }
-
-       mutex_lock(&dev->struct_mutex);
        obj_priv = to_intel_bo(obj);
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->pin_count) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
@@ -4483,28 +4607,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       dev_priv->render_ring = render_ring;
-
-       if (!I915_NEED_GFX_HWS(dev)) {
-               dev_priv->render_ring.status_page.page_addr
-                       = dev_priv->status_page_dmah->vaddr;
-               memset(dev_priv->render_ring.status_page.page_addr,
-                               0, PAGE_SIZE);
-       }
-
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
 
-       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       ret = intel_init_render_ring_buffer(dev);
        if (ret)
                goto cleanup_pipe_control;
 
        if (HAS_BSD(dev)) {
-               dev_priv->bsd_ring = bsd_ring;
-               ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }
@@ -4613,6 +4727,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
@@ -4625,6 +4740,7 @@ i915_gem_load(struct drm_device *dev)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       init_completion(&dev_priv->error_completion);
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
@@ -4854,18 +4970,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list))
-               list_del_init(i915_file_priv->mm.request_list.next);
-       mutex_unlock(&dev->struct_mutex);
+       spin_lock(&file_priv->mm.lock);
+       while (!list_empty(&file_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&file_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          client_list);
+               list_del(&request->client_list);
+               request->file_priv = NULL;
+       }
+       spin_unlock(&file_priv->mm.lock);
 }
 
 static int