drm/i915: Convert the file mutex into a spinlock
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eea8232..78282ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,9 @@
 #include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+                                                 bool pipelined);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -59,6 +61,57 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct completion *x = &dev_priv->error_completion;
+       unsigned long flags;
+       int ret;
+
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       ret = wait_for_completion_interruptible(x);
+       if (ret)
+               return ret;
+
+       /* Success, we reset the GPU! */
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       /* GPU is hung, bump the completion count to account for
+        * the token we just consumed so that we never hit zero and
+        * end up waiting upon a subsequent completion event that
+        * will never happen.
+        */
+       spin_lock_irqsave(&x->wait.lock, flags);
+       x->done++;
+       spin_unlock_irqrestore(&x->wait.lock, flags);
+       return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
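
Together these two helpers define the locking idiom adopted by the ioctl conversions throughout the rest of the patch: look up the object first, take struct_mutex interruptibly, and drop the reference via the _unlocked variant if the lock is never acquired. A minimal sketch of the pattern (hypothetical ioctl body, not part of this patch):

static int example_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                /* struct_mutex was never acquired, so use the unlocked put. */
                drm_gem_object_unreference_unlocked(obj);
                return ret;
        }

        /* ... manipulate GEM state under dev->struct_mutex ... */

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return 0;
}
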
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 {
@@ -266,7 +319,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@ -385,7 +440,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto fail_put_user_pages;
 
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@ -584,8 +641,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
-       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
@@ -680,7 +739,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                goto out_unpin_pages;
        }
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto out_unpin_pages;
+
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;
@@ -754,7 +816,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@ -850,7 +914,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto fail_put_user_pages;
 
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@ -1018,7 +1084,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        intel_mark_busy(dev, obj);
 
@@ -1049,7 +1119,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
 
-       
        /* Maintain LRU order of "inactive" objects */
        if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@ -1074,11 +1143,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL) {
-               mutex_unlock(&dev->struct_mutex);
+       if (obj == NULL)
                return -ENOENT;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
        }
 
 #if WATCH_BUF
@@ -1180,7 +1252,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj);
+               ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret)
                        goto unlock;
        }
@@ -1245,7 +1317,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOMEM;
+               ret = -ENOSPC;
                goto out_free_list;
        }
 
@@ -1257,9 +1329,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
        }
 
        list->hash.key = list->file_offset_node->start;
-       if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+       if (ret) {
                DRM_ERROR("failed to add to map hash\n");
-               ret = -ENOMEM;
                goto out_free_mm;
        }
 
@@ -1344,14 +1416,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+       if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       if (IS_I9XX(dev))
+       if (INTEL_INFO(dev)->gen == 3)
                start = 1024*1024;
        else
                start = 512*1024;
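
Past the end of this hunk the function rounds the object size up to the next power of two at or above start, since pre-gen4 fence regions must be power-of-two sized and aligned. A sketch of that computation, assuming the loop matches the in-tree one:

/* Smallest power-of-two region, at least start bytes, that covers the
 * object; the fence requires the same value as its alignment. */
static uint32_t fence_region_size(uint32_t obj_size, uint32_t start)
{
        uint32_t size;

        for (size = start; size < obj_size; size <<= 1)
                ;
        return size;
}
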
@@ -1393,7 +1465,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        obj_priv = to_intel_bo(obj);
 
@@ -1469,21 +1545,13 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
        obj_priv->pages = NULL;
 }
 
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       return dev_priv->next_seqno;
-}
-
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = obj->dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
        BUG_ON(ring == NULL);
        obj_priv->ring = ring;
 
@@ -1493,15 +1561,9 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
                obj_priv->active = 1;
        }
 
-       /* Take the seqno of the next request if none is given */
-       if (seqno == 0)
-               seqno = i915_gem_next_request_seqno(dev);
-
        /* Move from whatever list we were on to the tail of execution. */
-       spin_lock(&dev_priv->mm.active_list_lock);
        list_move_tail(&obj_priv->list, &ring->active_list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
-       obj_priv->last_rendering_seqno = seqno;
+       obj_priv->last_rendering_seqno = dev_priv->next_seqno;
 }
 
 static void
@@ -1552,7 +1614,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
-               list_del_init(&obj_priv->list);
+               list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
@@ -1569,7 +1631,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains, uint32_t seqno,
+                              uint32_t flush_domains,
                               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1580,14 +1642,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                                 gpu_write_list) {
                struct drm_gem_object *obj = &obj_priv->base;
 
-               if ((obj->write_domain & flush_domains) ==
-                   obj->write_domain &&
-                   obj_priv->ring->ring_flag == ring->ring_flag) {
+               if (obj->write_domain & flush_domains &&
+                   obj_priv->ring == ring) {
                        uint32_t old_write_domain = obj->write_domain;
 
                        obj->write_domain = 0;
                        list_del_init(&obj_priv->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, seqno, ring);
+                       i915_gem_object_move_to_active(obj, ring);
 
                        /* update the fence lru list */
                        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1605,23 +1666,26 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+                struct drm_file *file,
+                struct drm_i915_gem_request *request,
+                struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *i915_file_priv = NULL;
-       struct drm_i915_gem_request *request;
+       struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
 
-       if (file_priv != NULL)
-               i915_file_priv = file_priv->driver_priv;
+       if (file != NULL)
+               file_priv = file->driver_priv;
 
-       request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
-               return 0;
+       if (request == NULL) {
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return 0;
+       }
 
-       seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+       seqno = ring->add_request(dev, ring, 0);
 
        request->seqno = seqno;
        request->ring = ring;
@@ -1629,23 +1693,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (i915_file_priv) {
+       if (file_priv) {
+               spin_lock(&file_priv->mm.lock);
+               request->file_priv = file_priv;
                list_add_tail(&request->client_list,
-                             &i915_file_priv->mm.request_list);
-       } else {
-               INIT_LIST_HEAD(&request->client_list);
+                             &file_priv->mm.request_list);
+               spin_unlock(&file_priv->mm.lock);
        }
 
-       /* Associate any objects on the flushing list matching the write
-        * domain we're flushing with our request.
-        */
-       if (flush_domains != 0) 
-               i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
        if (!dev_priv->mm.suspended) {
-               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               mod_timer(&dev_priv->hangcheck_timer,
+                         jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
                if (was_empty)
-                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+                       queue_delayed_work(dev_priv->wq,
+                                          &dev_priv->mm.retire_work, HZ);
        }
        return seqno;
 }
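
The hunk above is the change named in the commit title: the per-client request list is now guarded by file_priv->mm.lock, a spinlock, and request->file_priv records the owner so a request can later be unlinked without the file mutex. A sketch of the file-release side this enables (a hypothetical helper, mirroring what the release path outside this hunk can now do under the spinlock alone):

static void example_release_requests(struct drm_i915_file_private *file_priv)
{
        spin_lock(&file_priv->mm.lock);
        while (!list_empty(&file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);
                /* Detach from the client; the request itself stays on
                 * its ring list and is freed by normal retirement. */
                list_del(&request->client_list);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
}
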
@@ -1656,91 +1717,93 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * Ensures that all commands in the ring are finished
  * before signalling the CPU
  */
-static uint32_t
+static void
 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
        uint32_t flush_domains = 0;
 
        /* The sampler always gets flushed on i965 (sigh) */
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
        ring->flush(dev, ring,
                        I915_GEM_DOMAIN_COMMAND, flush_domains);
-       return flush_domains;
 }
 
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
-                       struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = request->file_priv;
 
-       trace_i915_gem_request_retire(dev, request->seqno);
+       if (!file_priv)
+               return;
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       while (!list_empty(&request->ring->active_list)) {
-               struct drm_gem_object *obj;
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+                                     struct intel_ring_buffer *ring)
+{
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
+
+       while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj_priv;
 
-               obj_priv = list_first_entry(&request->ring->active_list,
+               obj_priv = list_first_entry(&ring->active_list,
                                            struct drm_i915_gem_object,
                                            list);
-               obj = &obj_priv->base;
-
-               /* If the seqno being retired doesn't match the oldest in the
-                * list, then the oldest in the list must still be newer than
-                * this seqno.
-                */
-               if (obj_priv->last_rendering_seqno != request->seqno)
-                       goto out;
-
-#if WATCH_LRU
-               DRM_INFO("%s: retire %d moves to inactive list %p\n",
-                        __func__, request->seqno, obj);
-#endif
 
-               if (obj->write_domain != 0)
-                       i915_gem_object_move_to_flushing(obj);
-               else {
-                       /* Take a reference on the object so it won't be
-                        * freed while the spinlock is held.  The list
-                        * protection for this spinlock is safe when breaking
-                        * the lock like this since the next thing we do
-                        * is just get the head of the list again.
-                        */
-                       drm_gem_object_reference(obj);
-                       i915_gem_object_move_to_inactive(obj);
-                       spin_unlock(&dev_priv->mm.active_list_lock);
-                       drm_gem_object_unreference(obj);
-                       spin_lock(&dev_priv->mm.active_list_lock);
-               }
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
        }
-out:
-       spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset_lists(struct drm_device *dev)
 {
-       return (int32_t)(seq1 - seq2) >= 0;
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
 
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-                  struct intel_ring_buffer *ring)
-{
-       return ring->get_gem_seqno(dev, ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+       if (HAS_BSD(dev))
+               i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+       /* Remove anything from the flushing lists. The GPU cache is likely
+        * to be lost on reset along with the data, so simply move the
+        * lost bo to the inactive list.
+        */
+       while (!list_empty(&dev_priv->mm.flushing_list)) {
+               obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+                                           struct drm_i915_gem_object,
+                                           list);
+
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
+       }
+
+       /* Move everything out of the GPU domains to ensure we do any
+        * necessary invalidation upon reuse.
+        */
+       list_for_each_entry(obj_priv,
+                           &dev_priv->mm.inactive_list,
+                           list)
+       {
+               obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+       }
 }
 
 /**
@@ -1753,35 +1816,57 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!ring->status_page.page_addr
-                       || list_empty(&ring->request_list))
+       if (!ring->status_page.page_addr ||
+           list_empty(&ring->request_list))
                return;
 
-       seqno = i915_get_gem_seqno(dev, ring);
-
+       seqno = ring->get_seqno(dev, ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
-               uint32_t retiring_seqno;
 
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
-               retiring_seqno = request->seqno;
 
-               if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   atomic_read(&dev_priv->mm.wedged)) {
-                       i915_gem_retire_request(dev, request);
+               if (!i915_seqno_passed(seqno, request->seqno))
+                       break;
+
+               trace_i915_gem_request_retire(dev, request->seqno);
+
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
+
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_gem_object *obj;
+               struct drm_i915_gem_object *obj_priv;
+
+               obj_priv = list_first_entry(&ring->active_list,
+                                           struct drm_i915_gem_object,
+                                           list);
 
-                       list_del(&request->list);
-                       list_del(&request->client_list);
-                       kfree(request);
-               } else
+               if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
                        break;
+
+               obj = &obj_priv->base;
+
+#if WATCH_LRU
+               DRM_INFO("%s: retire %d moves to inactive list %p\n",
+                        __func__, obj_priv->last_rendering_seqno, obj);
+#endif
+
+               if (obj->write_domain != 0)
+                       i915_gem_object_move_to_flushing(obj);
+               else
+                       i915_gem_object_move_to_inactive(obj);
        }
 
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
                ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
@@ -1834,7 +1919,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
 int
 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-               int interruptible, struct intel_ring_buffer *ring)
+                    bool interruptible, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
@@ -1842,16 +1927,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
        BUG_ON(seqno == 0);
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EAGAIN;
+
        if (seqno == dev_priv->next_seqno) {
-               seqno = i915_add_request(dev, NULL, 0, ring);
+               seqno = i915_add_request(dev, NULL, NULL, ring);
                if (seqno == 0)
                        return -ENOMEM;
        }
 
-       if (atomic_read(&dev_priv->mm.wedged))
-               return -EIO;
-
-       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1870,12 +1955,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
 
                ring->user_irq_put(dev, ring);
@@ -1884,11 +1969,11 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                trace_i915_gem_request_wait_end(dev, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
-               ret = -EIO;
+               ret = -EAGAIN;
 
        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
+                         __func__, ret, seqno, ring->get_seqno(dev, ring),
                          dev_priv->next_seqno);
 
        /* Directly dispatch request retiring.  While we have the work queue
@@ -1913,34 +1998,38 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
        return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+                   struct drm_file *file_priv,
+                   struct intel_ring_buffer *ring,
+                   uint32_t invalidate_domains,
+                   uint32_t flush_domains)
+{
+       ring->flush(dev, ring, invalidate_domains, flush_domains);
+       i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
+              struct drm_file *file_priv,
               uint32_t invalidate_domains,
-              uint32_t flush_domains)
+              uint32_t flush_domains,
+              uint32_t flush_rings)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
 
-       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
-                       invalidate_domains,
-                       flush_domains);
-
-       if (HAS_BSD(dev))
-               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-                               invalidate_domains,
-                               flush_domains);
-
-       /* Associate any objects on the flushing list matching the write
-        * domain we're flushing with the next request.
-        */
-       if (flush_domains != 0)  {
-               i915_gem_process_flushing_list(dev, flush_domains, 0,
-                                              &dev_priv->render_ring);
-               if (HAS_BSD(dev))
-                       i915_gem_process_flushing_list(dev, flush_domains, 0,
-                                                      &dev_priv->bsd_ring);
+       if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+               if (flush_rings & RING_RENDER)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->render_ring,
+                                           invalidate_domains, flush_domains);
+               if (flush_rings & RING_BSD)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->bsd_ring,
+                                           invalidate_domains, flush_domains);
        }
 }
 
@@ -1973,7 +2062,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
                                           obj_priv->last_rendering_seqno,
                                           interruptible,
                                           obj_priv->ring);
-               if (ret != 0)
+               if (ret)
                        return ret;
        }
 
@@ -1987,7 +2076,6 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
 
@@ -2041,11 +2129,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                obj_priv->gtt_space = NULL;
        }
 
-       /* Remove ourselves from the LRU list if present. */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       if (!list_empty(&obj_priv->list))
-               list_del_init(&obj_priv->list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       list_del_init(&obj_priv->list);
 
        if (i915_gem_object_is_purgeable(obj_priv))
                i915_gem_object_truncate(obj);
@@ -2060,39 +2144,34 @@ i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
-       uint32_t seqno1, seqno2;
+       u32 seqno;
        int ret;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev) ||
                        list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
        if (lists_empty)
                return 0;
 
        /* Flush everything onto the inactive list. */
-       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno1 = i915_add_request(dev, NULL, 0,
-                       &dev_priv->render_ring);
-       if (seqno1 == 0)
-               return -ENOMEM;
-       ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+       seqno = dev_priv->next_seqno;
+       i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
+                           I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
+       if (ret)
+               return ret;
 
        if (HAS_BSD(dev)) {
-               seqno2 = i915_add_request(dev, NULL, 0,
-                               &dev_priv->bsd_ring);
-               if (seqno2 == 0)
-                       return -ENOMEM;
-
-               ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+               seqno = dev_priv->next_seqno;
+               i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
+                                   I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
                if (ret)
                        return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 int
@@ -2275,7 +2354,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+                              bool interruptible)
 {
        struct drm_i915_fence_reg *reg = NULL;
        struct drm_i915_gem_object *obj_priv = NULL;
@@ -2320,7 +2400,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
        drm_gem_object_reference(obj);
-       ret = i915_gem_object_put_fence_reg(obj);
+       ret = i915_gem_object_put_fence_reg(obj, interruptible);
        drm_gem_object_unreference(obj);
        if (ret != 0)
                return ret;
@@ -2342,7 +2422,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+                             bool interruptible)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2377,7 +2458,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
                break;
        }
 
-       ret = i915_find_fence_reg(dev);
+       ret = i915_find_fence_reg(dev, interruptible);
        if (ret < 0)
                return ret;
 
@@ -2387,14 +2468,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        reg->obj = obj;
 
-       if (IS_GEN6(dev))
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
                sandybridge_write_fence_reg(reg);
-       else if (IS_I965G(dev))
+               break;
+       case 5:
+       case 4:
                i965_write_fence_reg(reg);
-       else if (IS_I9XX(dev))
+               break;
+       case 3:
                i915_write_fence_reg(reg);
-       else
+               break;
+       case 2:
                i830_write_fence_reg(reg);
+               break;
+       }
 
        trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
                        obj_priv->tiling_mode);
@@ -2417,22 +2505,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg =
                &dev_priv->fence_regs[obj_priv->fence_reg];
+       uint32_t fence_reg;
 
-       if (IS_GEN6(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
                             (obj_priv->fence_reg * 8), 0);
-       } else if (IS_I965G(dev)) {
+               break;
+       case 5:
+       case 4:
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-       } else {
-               uint32_t fence_reg;
-
-               if (obj_priv->fence_reg < 8)
-                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+               break;
+       case 3:
+       if (obj_priv->fence_reg >= 8)
+                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
                else
-                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
-                                                      8) * 4;
+       case 2:
+                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
 
                I915_WRITE(fence_reg, 0);
+               break;
        }
 
        reg->obj = NULL;
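
The case 2: label above sits inside the else branch of the gen3 case. That is legal C, since a case label may attach to any statement within the switch body, but it is easy to misread; an equivalent flattened sketch of the gen2/gen3 register selection:

static uint32_t low_fence_reg(struct drm_device *dev, int fence_idx)
{
        if (INTEL_INFO(dev)->gen == 3 && fence_idx >= 8)
                return FENCE_REG_945_8 + (fence_idx - 8) * 4;
        /* gen2, or gen3 fence registers 0-7 */
        return FENCE_REG_830_0 + fence_idx * 4;
}
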
@@ -2444,15 +2536,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  * to the buffer to finish, and then resets the fence register.
  * @obj: tiled object holding a fence register.
+ * @interruptible: whether the wait upon the fence is interruptible
  *
  * Zeroes out the fence register itself and clears out the associated
  * data structures in dev_priv and obj_priv.
  */
 int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+                             bool interruptible)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_fence_reg *reg;
 
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
@@ -2467,20 +2563,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
-       if (!IS_I965G(dev)) {
+       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       if (reg->gpu) {
                int ret;
 
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
-               if (ret != 0)
+               ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+               if (ret)
                        return ret;
 
-               ret = i915_gem_object_wait_rendering(obj, true);
-               if (ret != 0)
+               ret = i915_gem_object_wait_rendering(obj, interruptible);
+               if (ret)
                        return ret;
+
+               reg->gpu = false;
        }
 
        i915_gem_object_flush_gtt_write_domain(obj);
-       i915_gem_clear_fence_reg (obj);
+       i915_gem_clear_fence_reg(obj);
 
        return 0;
 }
@@ -2626,25 +2725,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+                                      bool pipelined)
 {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
-       i915_gem_flush(dev, 0, obj->write_domain);
-       if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
-               return -ENOMEM;
+       i915_gem_flush_ring(dev, NULL,
+                           to_intel_bo(obj)->ring,
+                           0, obj->write_domain);
+       BUG_ON(obj->write_domain);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
-       return 0;
+
+       if (pipelined)
+               return 0;
+
+       return i915_gem_object_wait_rendering(obj, true);
 }
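
The new pipelined flag lets callers queue the GPU flush without stalling: when true, the function returns as soon as the flush is emitted to the ring; when false, it also waits for the rendering to complete. Sketched call-site intent (an illustrative fragment, not from this patch):

/* Blocking: make the object coherent before CPU or GTT access. */
ret = i915_gem_object_flush_gpu_write_domain(obj, false);

/* Pipelined: e.g. preparing a scanout buffer, where the display
 * engine consumes the buffer only after the flush retires. */
ret = i915_gem_object_flush_gpu_write_domain(obj, true);
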
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2688,26 +2792,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
                                            old_write_domain);
 }
 
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
-       int ret = 0;
-
-       switch (obj->write_domain) {
-       case I915_GEM_DOMAIN_GTT:
-               i915_gem_object_flush_gtt_write_domain(obj);
-               break;
-       case I915_GEM_DOMAIN_CPU:
-               i915_gem_object_flush_cpu_write_domain(obj);
-               break;
-       default:
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
-               break;
-       }
-
-       return ret;
-}
-
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -2725,32 +2809,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
 
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj, true);
-       if (ret != 0)
-               return ret;
+       i915_gem_object_flush_cpu_write_domain(obj);
+
+       if (write) {
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
 
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
 
-       /* If we're writing through the GTT domain, then CPU and GPU caches
-        * will need to be invalidated at next use.
-        */
-       if (write)
-               obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
-       i915_gem_object_flush_cpu_write_domain(obj);
-
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
+               obj->read_domains = I915_GEM_DOMAIN_GTT;
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj_priv->dirty = 1;
        }
@@ -2767,41 +2847,29 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+                                    bool pipelined)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       uint32_t old_write_domain, old_read_domains;
+       uint32_t old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
        if (ret)
                return ret;
 
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj, false);
-       if (ret != 0)
-               return ret;
-
        i915_gem_object_flush_cpu_write_domain(obj);
 
-       old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
-
-       /* It should now be out of any other write domains, and we can update
-        * the domain values for our changes.
-        */
-       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-       obj->read_domains = I915_GEM_DOMAIN_GTT;
-       obj->write_domain = I915_GEM_DOMAIN_GTT;
-       obj_priv->dirty = 1;
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
-                                           old_write_domain);
+                                           obj->write_domain);
 
        return 0;
 }
@@ -2818,12 +2886,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj, true);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
 
@@ -2834,6 +2897,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+       if (write) {
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
+
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
 
@@ -2853,7 +2922,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         * need to be invalidated at next use.
         */
        if (write) {
-               obj->read_domains &= I915_GEM_DOMAIN_CPU;
+               obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -2979,7 +3048,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
        struct drm_device               *dev = obj->dev;
-       drm_i915_private_t              *dev_priv = dev->dev_private;
+       struct drm_i915_private         *dev_priv = dev->dev_private;
        struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
@@ -3042,15 +3111,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
                obj->pending_write_domain = obj->write_domain;
        obj->read_domains = obj->pending_read_domains;
 
-       if (flush_domains & I915_GEM_GPU_DOMAINS) {
-               if (obj_priv->ring == &dev_priv->render_ring)
-                       dev_priv->flush_rings |= FLUSH_RENDER_RING;
-               else if (obj_priv->ring == &dev_priv->bsd_ring)
-                       dev_priv->flush_rings |= FLUSH_BSD_RING;
-       }
-
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
+       if (obj_priv->ring)
+               dev_priv->mm.flush_rings |= obj_priv->ring->id;
 #if WATCH_BUF
        DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
                 __func__,
@@ -3119,12 +3183,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
        if (offset == 0 && size == obj->size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj, true);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
        i915_gem_object_flush_gtt_write_domain(obj);
@@ -3211,11 +3270,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
         * properly handle blits to/from tiled surfaces.
         */
        if (need_fence) {
-               ret = i915_gem_object_get_fence_reg(obj);
+               ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret != 0) {
                        i915_gem_object_unpin(obj);
                        return ret;
                }
+
+               dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
        }
 
        entry->offset = obj_priv->gtt_offset;
@@ -3397,28 +3458,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  * relatively low latency when blocking on a particular request to finish.
  */
 static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+       struct drm_i915_gem_request *request;
+       struct intel_ring_buffer *ring = NULL;
+       u32 seqno = 0;
+       int ret;
 
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&i915_file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
-
+       spin_lock(&file_priv->mm.lock);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
-               ret = i915_wait_request(dev, request->seqno, request->ring);
-               if (ret != 0)
-                       break;
+               ring = request->ring;
+               seqno = request->seqno;
        }
-       mutex_unlock(&dev->struct_mutex);
+       spin_unlock(&file_priv->mm.lock);
+
+       if (seqno == 0)
+               return 0;
+
+       ret = 0;
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+               /* And wait for the seqno passing without holding any locks and
+                * causing extra latency for others. This is safe as the irq
+                * generation is designed to be run atomically and so is
+                * lockless.
+                */
+               ring->user_irq_get(dev, ring);
+               ret = wait_event_interruptible(ring->irq_queue,
+                                              i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+                                              || atomic_read(&dev_priv->mm.wedged));
+               ring->user_irq_put(dev, ring);
+
+               if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+                       ret = -EIO;
+       }
+
+       if (ret == 0)
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
        return ret;
 }
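
Because the candidate request was sampled under the per-file spinlock and the seqno interrupt is reference-counted, the wait itself runs with no locks held. A minimal sketch of that lock-free wait, assuming only that get_seqno() reads the coherent hardware status page:

static int example_wait_seqno_lockless(struct drm_device *dev,
                                       drm_i915_private_t *dev_priv,
                                       struct intel_ring_buffer *ring,
                                       u32 seqno)
{
        int ret;

        ring->user_irq_get(dev, ring);
        ret = wait_event_interruptible(ring->irq_queue,
                        i915_seqno_passed(ring->get_seqno(dev, ring), seqno) ||
                        atomic_read(&dev_priv->mm.wedged));
        ring->user_irq_put(dev, ring);

        if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
                ret = -EIO;

        return ret;
}
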
@@ -3554,8 +3635,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
        return ret;
 }
 
-
-int
+static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file_priv,
                       struct drm_i915_gem_execbuffer2 *args,
@@ -3567,13 +3647,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_relocation_entry *relocs = NULL;
-       int ret = 0, ret2, i, pinned = 0;
+       struct drm_i915_gem_request *request = NULL;
+       int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
-       uint32_t seqno, flush_domains, reloc_index;
+       uint32_t reloc_index;
        int pin_tries, flips;
 
        struct intel_ring_buffer *ring = NULL;
 
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3620,21 +3705,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL) {
+               ret = -ENOMEM;
+               goto pre_mutex_err;
+       }
+
        ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
                                            &relocs);
        if (ret != 0)
                goto pre_mutex_err;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto pre_mutex_err;
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EIO;
-               goto pre_mutex_err;
-       }
-
        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
@@ -3762,7 +3849,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         */
        dev->invalidate_domains = 0;
        dev->flush_domains = 0;
-       dev_priv->flush_rings = 0;
+       dev_priv->mm.flush_rings = 0;
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
@@ -3780,15 +3867,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         dev->invalidate_domains,
                         dev->flush_domains);
 #endif
-               i915_gem_flush(dev,
+               i915_gem_flush(dev, file_priv,
                               dev->invalidate_domains,
-                              dev->flush_domains);
-               if (dev_priv->flush_rings & FLUSH_RENDER_RING)
-                       (void)i915_add_request(dev, file_priv, 0,
-                                              &dev_priv->render_ring);
-               if (dev_priv->flush_rings & FLUSH_BSD_RING)
-                       (void)i915_add_request(dev, file_priv, 0,
-                                              &dev_priv->bsd_ring);
+                              dev->flush_domains,
+                              dev_priv->mm.flush_rings);
        }
 
        for (i = 0; i < args->buffer_count; i++) {
@@ -3836,28 +3918,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
-       flush_domains = i915_retire_commands(dev, ring);
+       i915_retire_commands(dev, ring);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       /*
-        * Get a seqno representing the execution of the current buffer,
-        * which we can wait on.  We would like to mitigate these interrupts,
-        * likely by only creating seqnos occasionally (so that we have
-        * *some* interrupts representing completion of buffers that we can
-        * wait on when trying to clear up gtt space).
-        */
-       seqno = i915_add_request(dev, file_priv, flush_domains, ring);
-       BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                obj_priv = to_intel_bo(obj);
 
-               i915_gem_object_move_to_active(obj, seqno, ring);
+               i915_gem_object_move_to_active(obj, ring);
 #if WATCH_LRU
                DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
        }
+       i915_add_request(dev, file_priv, request, ring);
+       request = NULL;
+
 #if WATCH_LRU
        i915_dump_lru(dev, __func__);
 #endif
@@ -3895,6 +3971,7 @@ pre_mutex_err:
 
        drm_free_large(object_list);
        kfree(cliprects);
+       kfree(request);
 
        return ret;
 }
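
Note the allocation discipline in the function above: the request is allocated with GFP_KERNEL before struct_mutex is taken, ownership passes to i915_add_request() on success (after which request is set to NULL), and the single kfree(request) at the common exit covers every error path because kfree(NULL) is a no-op. The same shape in miniature (do_work_locked is a hypothetical stand-in):

request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
        return -ENOMEM;

ret = do_work_locked(dev);              /* hypothetical */
if (ret == 0) {
        i915_add_request(dev, file_priv, request, ring);
        request = NULL;                 /* ownership handed off */
}

kfree(request);                         /* no-op once handed off */
return ret;
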
@@ -3951,7 +4028,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
-               if (!IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
@@ -4048,6 +4125,7 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
@@ -4083,9 +4161,9 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        if (obj_priv->pin_count == 1) {
                atomic_inc(&dev->pin_count);
                atomic_add(obj->size, &dev->pin_memory);
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                       list_del_init(&obj_priv->list);
+               if (!obj_priv->active)
+                       list_move_tail(&obj_priv->list,
+                                      &dev_priv->mm.pinned_list);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -4109,8 +4187,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
         * the inactive list
         */
        if (obj_priv->pin_count == 0) {
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+               if (!obj_priv->active)
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.inactive_list);
                atomic_dec(&dev->pin_count);
@@ -4128,17 +4205,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
-
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
        obj_priv = to_intel_bo(obj);
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
                drm_gem_object_unreference(obj);
@@ -4183,18 +4263,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-
-       mutex_lock(&dev->struct_mutex);
+       int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
                          args->handle);
-               mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
 
        obj_priv = to_intel_bo(obj);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -4220,6 +4305,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4228,7 +4314,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        }
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
@@ -4243,10 +4333,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * use this buffer sooner rather than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->write_domain) {
-                       i915_gem_flush(dev, 0, obj->write_domain);
-                       (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
-               }
+               if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           obj_priv->ring,
+                                           0, obj->write_domain);
 
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
@@ -4277,6 +4367,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_madvise *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        switch (args->madv) {
        case I915_MADV_DONTNEED:
@@ -4292,10 +4383,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          args->handle);
                return -ENOENT;
        }
-
-       mutex_lock(&dev->struct_mutex);
        obj_priv = to_intel_bo(obj);
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        if (obj_priv->pin_count) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
@@ -4512,28 +4607,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       dev_priv->render_ring = render_ring;
-
-       if (!I915_NEED_GFX_HWS(dev)) {
-               dev_priv->render_ring.status_page.page_addr
-                       = dev_priv->status_page_dmah->vaddr;
-               memset(dev_priv->render_ring.status_page.page_addr,
-                               0, PAGE_SIZE);
-       }
-
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
 
-       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
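+       /* Ring-specific setup, including the hardware status page that was
+        * previously initialised here, now lives in the per-ring init
+        * functions.
+        */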
+       ret = intel_init_render_ring_buffer(dev);
        if (ret)
                goto cleanup_pipe_control;
 
        if (HAS_BSD(dev)) {
-               dev_priv->bsd_ring = bsd_ring;
-               ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }
@@ -4586,11 +4671,8 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                return ret;
        }
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
        BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
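+       /* struct_mutex, held by this ioctl, is now the sole protection for
+        * these lists; the old active_list_lock has been removed.
+        */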
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
@@ -4642,10 +4724,10 @@ i915_gem_load(struct drm_device *dev)
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       spin_lock_init(&dev_priv->mm.active_list_lock);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
@@ -4658,6 +4740,7 @@ i915_gem_load(struct drm_device *dev)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       init_completion(&dev_priv->error_completion);
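+       /* error_completion is completed once a wedged GPU has been reset,
+        * releasing anyone blocked in i915_gem_check_is_wedged().
+        */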
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
@@ -4676,21 +4759,30 @@ i915_gem_load(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;
 
-       if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+       if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) ||
+           IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;
 
        /* Initialize fence registers to zero */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
-       } else {
-               for (i = 0; i < 8; i++)
-                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               break;
+       case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
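+               /* fall through: gen3 also needs the 830 fence registers */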
+       case 2:
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               break;
        }
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4700,8 +4792,8 @@ i915_gem_load(struct drm_device *dev)
  * Create a physically contiguous memory object for this object
  * e.g. for cursor + overlay regs
  */
-int i915_gem_init_phys_object(struct drm_device *dev,
-                             int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+                                    int id, int size, int align)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@ -4733,7 +4825,7 @@ kfree_obj:
        return ret;
 }
 
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@ -4878,18 +4970,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list))
-               list_del_init(i915_file_priv->mm.request_list.next);
-       mutex_unlock(&dev->struct_mutex);
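+       /* The per-file request list is now guarded by its own spinlock
+        * instead of struct_mutex; detach each request and clear its
+        * file_priv so the retire path skips the per-client bookkeeping.
+        */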
+       spin_lock(&file_priv->mm.lock);
+       while (!list_empty(&file_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&file_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          client_list);
+               list_del(&request->client_list);
+               request->file_priv = NULL;
+       }
+       spin_unlock(&file_priv->mm.lock);
 }
 
 static int
@@ -4898,12 +4997,10 @@ i915_gpu_is_active(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int lists_empty;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
                      list_empty(&dev_priv->render_ring.active_list);
        if (HAS_BSD(dev))
                lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
 
        return !lists_empty;
 }