drm/i915: Use a device flag for non-interruptible phases
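The patch replaces the per-call "interruptible" boolean threaded through the wait and fence paths with a single device-wide flag, dev_priv->mm.interruptible, initialised to true in i915_gem_load() and consulted in i915_wait_request(). Callers that must not be interrupted are then expected to clear the flag around the critical phase. A minimal sketch of that usage, with a hypothetical helper name and assuming struct_mutex is held (the real call sites live outside this file):

/* Illustrative only -- not part of this diff.  Bracket a phase that
 * must not be interrupted by signals; i915_wait_request() now checks
 * this flag instead of taking an 'interruptible' argument.  Assumes
 * dev->struct_mutex is held so the flag cannot change concurrently.
 */
static int example_uninterruptible_wait(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_wait_rendering(obj);
	dev_priv->mm.interruptible = true;

	return ret;
}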
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848..ac23dcf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
        dev_priv->mm.object_memory -= size;
 }
 
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
        if (ret)
                return ret;
 
-       /* Success, we reset the GPU! */
-       if (!atomic_read(&dev_priv->mm.wedged))
-               return 0;
-
-       /* GPU is hung, bump the completion count to account for
-        * the token we just consumed so that we never hit zero and
-        * end up waiting upon a subsequent completion event that
-        * will never happen.
-        */
-       spin_lock_irqsave(&x->wait.lock, flags);
-       x->done++;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-       return -EIO;
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               /* GPU is hung, bump the completion count to account for
+                * the token we just consumed so that we never hit zero and
+                * end up waiting upon a subsequent completion event that
+                * will never happen.
+                */
+               spin_lock_irqsave(&x->wait.lock, flags);
+               x->done++;
+               spin_unlock_irqrestore(&x->wait.lock, flags);
+       }
+       return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_gem_check_is_wedged(dev);
+       ret = i915_gem_wait_for_error(dev);
        if (ret)
                return ret;
 
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
        if (ret)
                return ret;
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               return -EAGAIN;
-       }
-
        WARN_ON(i915_verify_lists(dev));
        return 0;
 }
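For reference, the accounting in i915_gem_wait_for_error() above works because wait_for_completion_interruptible() consumes one token from x->done once the completion is signalled; if the GPU is still wedged after a failed reset, no further signal is guaranteed, so the token is handed back to keep later waiters from blocking forever. A generic illustration of that pattern (not driver code, hypothetical function name):

#include <linux/completion.h>

/* Generic sketch of the token accounting used above: the interruptible
 * wait decrements x->done on success; if the producer may never signal
 * again, the consumer restores the token so later waiters are not
 * stranded on a completion that will never arrive.
 */
static int wait_and_maybe_restore_token(struct completion *x, bool producer_dead)
{
	unsigned long flags;
	int ret;

	ret = wait_for_completion_interruptible(x);	/* consumes one token */
	if (ret)
		return ret;

	if (producer_dead) {
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;				/* hand the token back */
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}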
@@ -140,12 +132,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       drm_mm_init(&dev_priv->mm.gtt_space, start,
-                   end - start);
+       drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+       dev_priv->mm.gtt_start = start;
+       dev_priv->mm.gtt_mappable_end = mappable_end;
+       dev_priv->mm.gtt_end = end;
        dev_priv->mm.gtt_total = end - start;
        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-       dev_priv->mm.gtt_mappable_end = mappable_end;
+
+       /* Take over this portion of the GTT */
+       intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
@@ -510,7 +506,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -522,6 +518,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
+       trace_i915_gem_object_pread(obj, args->offset, args->size);
+
        ret = i915_gem_object_set_cpu_read_domain_range(obj,
                                                        args->offset,
                                                        args->size);
@@ -951,7 +949,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -963,6 +961,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
+       trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
@@ -1045,7 +1045,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1088,7 +1088,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1117,7 +1117,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
-       loff_t offset;
        unsigned long addr;
 
        if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1132,8 +1131,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                return -E2BIG;
        }
 
-       offset = args->offset;
-
        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1178,9 +1175,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;
 
-       /* Now bind it into the GTT if needed */
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto out;
 
+       trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+       /* Now bind it into the GTT if needed */
        if (!obj->map_and_fenceable) {
                ret = i915_gem_object_unbind(obj);
                if (ret)
@@ -1199,7 +1200,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (obj->tiling_mode == I915_TILING_NONE)
                ret = i915_gem_object_put_fence(obj);
        else
-               ret = i915_gem_object_get_fence(obj, NULL, true);
+               ret = i915_gem_object_get_fence(obj, NULL);
        if (ret)
                goto unlock;
 
@@ -1215,12 +1216,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unlock:
        mutex_unlock(&dev->struct_mutex);
-
+out:
        switch (ret) {
+       case -EIO:
        case -EAGAIN:
+               /* Give the error handler a chance to run and move the
+                * objects off the GPU active list. Next time we service the
+                * fault, we should be able to transition the page into the
+                * GTT without touching the GPU (and so avoid further
+                * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+                * with coherency, just lost writes.
+                */
                set_need_resched();
        case 0:
        case -ERESTARTSYS:
+       case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
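The -EINTR case is new in this path: the fault handler now takes struct_mutex through i915_mutex_lock_interruptible() (see the hunk above), and an interruptible mutex acquisition presumably fails with -EINTR when a signal is pending. Returning VM_FAULT_NOPAGE simply makes the kernel retry the fault once the signal has been handled, mirroring the existing -ERESTARTSYS treatment. A rough sketch of where that error would originate (illustrative; the unchanged body of the lock helper is elided in the hunks above):

/* Illustrative sketch, not patch text: the elided body of
 * i915_mutex_lock_interruptible() presumably takes the lock with
 * mutex_lock_interruptible(), which returns -EINTR on a signal;
 * i915_gem_fault() then funnels that value into the switch above
 * and reports VM_FAULT_NOPAGE so the faulting access is retried
 * after signal delivery.
 */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
	return ret;	/* -EINTR on signal -> VM_FAULT_NOPAGE in the caller */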
@@ -1453,7 +1463,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1665,9 +1675,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains,
-                              struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+                              uint32_t flush_domains)
 {
        struct drm_i915_gem_object *obj, *next;
 
@@ -1680,7 +1689,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                        obj->base.write_domain = 0;
                        list_del_init(&obj->gpu_write_list);
                        i915_gem_object_move_to_active(obj, ring,
-                                                      i915_gem_next_request_seqno(dev, ring));
+                                                      i915_gem_next_request_seqno(ring));
 
                        trace_i915_gem_object_change_domain(obj,
                                                            obj->base.read_domains,
@@ -1690,27 +1699,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
                 struct drm_file *file,
-                struct drm_i915_gem_request *request,
-                struct intel_ring_buffer *ring)
+                struct drm_i915_gem_request *request)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *file_priv = NULL;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
        uint32_t seqno;
        int was_empty;
        int ret;
 
        BUG_ON(request == NULL);
 
-       if (file != NULL)
-               file_priv = file->driver_priv;
-
        ret = ring->add_request(ring, &seqno);
        if (ret)
            return ret;
 
-       ring->outstanding_lazy_request = false;
+       trace_i915_gem_request_add(ring, seqno);
 
        request->seqno = seqno;
        request->ring = ring;
@@ -1718,7 +1722,9 @@ i915_add_request(struct drm_device *dev,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (file_priv) {
+       if (file) {
+               struct drm_i915_file_private *file_priv = file->driver_priv;
+
                spin_lock(&file_priv->mm.lock);
                request->file_priv = file_priv;
                list_add_tail(&request->client_list,
@@ -1726,6 +1732,8 @@ i915_add_request(struct drm_device *dev,
                spin_unlock(&file_priv->mm.lock);
        }
 
+       ring->outstanding_lazy_request = false;
+
        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer,
                          jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1842,22 +1850,19 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
-                             struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
        int i;
 
-       if (!ring->status_page.page_addr ||
-           list_empty(&ring->request_list))
+       if (list_empty(&ring->request_list))
                return;
 
-       WARN_ON(i915_verify_lists(dev));
+       WARN_ON(i915_verify_lists(ring->dev));
 
        seqno = ring->get_seqno(ring);
 
-       for (i = 0; i < I915_NUM_RINGS; i++)
+       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
                if (seqno >= ring->sync_seqno[i])
                        ring->sync_seqno[i] = 0;
 
@@ -1871,7 +1876,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                if (!i915_seqno_passed(seqno, request->seqno))
                        break;
 
-               trace_i915_gem_request_retire(dev, request->seqno);
+               trace_i915_gem_request_retire(ring, request->seqno);
 
                list_del(&request->list);
                i915_gem_request_remove_from_client(request);
@@ -1897,13 +1902,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                        i915_gem_object_move_to_inactive(obj);
        }
 
-       if (unlikely (dev_priv->trace_irq_seqno &&
-                     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+       if (unlikely(ring->trace_irq_seqno &&
+                    i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
-               dev_priv->trace_irq_seqno = 0;
+               ring->trace_irq_seqno = 0;
        }
 
-       WARN_ON(i915_verify_lists(dev));
+       WARN_ON(i915_verify_lists(ring->dev));
 }
 
 void
@@ -1927,7 +1932,7 @@ i915_gem_retire_requests(struct drm_device *dev)
        }
 
        for (i = 0; i < I915_NUM_RINGS; i++)
-               i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+               i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
 
 static void
@@ -1961,11 +1966,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
                        struct drm_i915_gem_request *request;
                        int ret;
 
-                       ret = i915_gem_flush_ring(dev, ring, 0,
-                                                 I915_GEM_GPU_DOMAINS);
+                       ret = i915_gem_flush_ring(ring,
+                                                 0, I915_GEM_GPU_DOMAINS);
                        request = kzalloc(sizeof(*request), GFP_KERNEL);
                        if (ret || request == NULL ||
-                           i915_add_request(dev, NULL, request, ring))
+                           i915_add_request(ring, NULL, request))
                            kfree(request);
                }
 
@@ -1978,18 +1983,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-                    bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+                 uint32_t seqno)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 ier;
        int ret = 0;
 
        BUG_ON(seqno == 0);
 
-       if (atomic_read(&dev_priv->mm.wedged))
-               return -EAGAIN;
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               struct completion *x = &dev_priv->error_completion;
+               bool recovery_complete;
+               unsigned long flags;
+
+               /* Give the error handler a chance to run. */
+               spin_lock_irqsave(&x->wait.lock, flags);
+               recovery_complete = x->done > 0;
+               spin_unlock_irqrestore(&x->wait.lock, flags);
+
+               return recovery_complete ? -EIO : -EAGAIN;
+       }
 
        if (seqno == ring->outstanding_lazy_request) {
                struct drm_i915_gem_request *request;
@@ -1998,7 +2017,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                if (request == NULL)
                        return -ENOMEM;
 
-               ret = i915_add_request(dev, NULL, request, ring);
+               ret = i915_add_request(ring, NULL, request);
                if (ret) {
                        kfree(request);
                        return ret;
@@ -2008,22 +2027,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
        }
 
        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-               if (HAS_PCH_SPLIT(dev))
+               if (HAS_PCH_SPLIT(ring->dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                        ier = I915_READ(IER);
                if (!ier) {
                        DRM_ERROR("something (likely vbetool) disabled "
                                  "interrupts, re-enabling\n");
-                       i915_driver_irq_preinstall(dev);
-                       i915_driver_irq_postinstall(dev);
+                       i915_driver_irq_preinstall(ring->dev);
+                       i915_driver_irq_postinstall(ring->dev);
                }
 
-               trace_i915_gem_request_wait_begin(dev, seqno);
+               trace_i915_gem_request_wait_begin(ring, seqno);
 
                ring->waiting_seqno = seqno;
                if (ring->irq_get(ring)) {
-                       if (interruptible)
+                       if (dev_priv->mm.interruptible)
                                ret = wait_event_interruptible(ring->irq_queue,
                                                               i915_seqno_passed(ring->get_seqno(ring), seqno)
                                                               || atomic_read(&dev_priv->mm.wedged));
@@ -2039,7 +2058,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                        ret = -EBUSY;
                ring->waiting_seqno = 0;
 
-               trace_i915_gem_request_wait_end(dev, seqno);
+               trace_i915_gem_request_wait_end(ring, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
                ret = -EAGAIN;
@@ -2055,31 +2074,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
         * a separate wait queue to handle that.
         */
        if (ret == 0)
-               i915_gem_retire_requests_ring(dev, ring);
+               i915_gem_retire_requests_ring(ring);
 
        return ret;
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
-                 struct intel_ring_buffer *ring)
-{
-       return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
 int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                              bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
        int ret;
 
        /* This function only exists to support waiting for existing rendering,
@@ -2091,10 +2097,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
         * it.
         */
        if (obj->active) {
-               ret = i915_do_wait_request(dev,
-                                          obj->last_rendering_seqno,
-                                          interruptible,
-                                          obj->ring);
+               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
                if (ret)
                        return ret;
        }
@@ -2144,6 +2147,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (ret == -ERESTARTSYS)
                return ret;
 
+       trace_i915_gem_object_unbind(obj);
+
        i915_gem_gtt_unbind_object(obj);
        i915_gem_object_put_pages_gtt(obj);
 
@@ -2159,29 +2164,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
-       trace_i915_gem_object_unbind(obj);
-
        return ret;
 }
 
 int
-i915_gem_flush_ring(struct drm_device *dev,
-                   struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
 {
        int ret;
 
+       trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
        ret = ring->flush(ring, invalidate_domains, flush_domains);
        if (ret)
                return ret;
 
-       i915_gem_process_flushing_list(dev, flush_domains, ring);
+       i915_gem_process_flushing_list(ring, flush_domains);
        return 0;
 }
 
-static int i915_ring_idle(struct drm_device *dev,
-                         struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
        int ret;
 
@@ -2189,15 +2192,13 @@ static int i915_ring_idle(struct drm_device *dev,
                return 0;
 
        if (!list_empty(&ring->gpu_write_list)) {
-               ret = i915_gem_flush_ring(dev, ring,
+               ret = i915_gem_flush_ring(ring,
                                    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
 
-       return i915_wait_request(dev,
-                                i915_gem_next_request_seqno(dev, ring),
-                                ring);
+       return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
 int
@@ -2214,7 +2215,7 @@ i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+               ret = i915_ring_idle(&dev_priv->ring[i]);
                if (ret)
                        return ret;
        }
@@ -2398,15 +2399,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-                           struct intel_ring_buffer *pipelined,
-                           bool interruptible)
+                           struct intel_ring_buffer *pipelined)
 {
        int ret;
 
        if (obj->fenced_gpu_access) {
                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                       ret = i915_gem_flush_ring(obj->base.dev,
-                                                 obj->last_fenced_ring,
+                       ret = i915_gem_flush_ring(obj->last_fenced_ring,
                                                  0, obj->base.write_domain);
                        if (ret)
                                return ret;
@@ -2418,10 +2417,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
        if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
                if (!ring_passed_seqno(obj->last_fenced_ring,
                                       obj->last_fenced_seqno)) {
-                       ret = i915_do_wait_request(obj->base.dev,
-                                                  obj->last_fenced_seqno,
-                                                  interruptible,
-                                                  obj->last_fenced_ring);
+                       ret = i915_wait_request(obj->last_fenced_ring,
+                                               obj->last_fenced_seqno);
                        if (ret)
                                return ret;
                }
@@ -2447,7 +2444,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);
 
-       ret = i915_gem_object_flush_fence(obj, NULL, true);
+       ret = i915_gem_object_flush_fence(obj, NULL);
        if (ret)
                return ret;
 
@@ -2524,8 +2521,7 @@ i915_find_fence_reg(struct drm_device *dev,
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-                         struct intel_ring_buffer *pipelined,
-                         bool interruptible)
+                         struct intel_ring_buffer *pipelined)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2547,10 +2543,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                        if (reg->setup_seqno) {
                                if (!ring_passed_seqno(obj->last_fenced_ring,
                                                       reg->setup_seqno)) {
-                                       ret = i915_do_wait_request(obj->base.dev,
-                                                                  reg->setup_seqno,
-                                                                  interruptible,
-                                                                  obj->last_fenced_ring);
+                                       ret = i915_wait_request(obj->last_fenced_ring,
+                                                               reg->setup_seqno);
                                        if (ret)
                                                return ret;
                                }
@@ -2559,15 +2553,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                        }
                } else if (obj->last_fenced_ring &&
                           obj->last_fenced_ring != pipelined) {
-                       ret = i915_gem_object_flush_fence(obj,
-                                                         pipelined,
-                                                         interruptible);
+                       ret = i915_gem_object_flush_fence(obj, pipelined);
                        if (ret)
                                return ret;
                } else if (obj->tiling_changed) {
                        if (obj->fenced_gpu_access) {
                                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                                       ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+                                       ret = i915_gem_flush_ring(obj->ring,
                                                                  0, obj->base.write_domain);
                                        if (ret)
                                                return ret;
@@ -2584,7 +2576,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                if (obj->tiling_changed) {
                        if (pipelined) {
                                reg->setup_seqno =
-                                       i915_gem_next_request_seqno(dev, pipelined);
+                                       i915_gem_next_request_seqno(pipelined);
                                obj->last_fenced_seqno = reg->setup_seqno;
                                obj->last_fenced_ring = pipelined;
                        }
@@ -2598,7 +2590,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
        if (reg == NULL)
                return -ENOSPC;
 
-       ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+       ret = i915_gem_object_flush_fence(obj, pipelined);
        if (ret)
                return ret;
 
@@ -2610,9 +2602,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                if (old->tiling_mode)
                        i915_gem_release_mmap(old);
 
-               ret = i915_gem_object_flush_fence(old,
-                                                 pipelined,
-                                                 interruptible);
+               ret = i915_gem_object_flush_fence(old, pipelined);
                if (ret) {
                        drm_gem_object_unreference(&old->base);
                        return ret;
@@ -2624,7 +2614,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                old->fence_reg = I915_FENCE_REG_NONE;
                old->last_fenced_ring = pipelined;
                old->last_fenced_seqno =
-                       pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+                       pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 
                drm_gem_object_unreference(&old->base);
        } else if (obj->last_fenced_seqno == 0)
@@ -2636,7 +2626,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
        obj->last_fenced_ring = pipelined;
 
        reg->setup_seqno =
-               pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+               pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
        obj->last_fenced_seqno = reg->setup_seqno;
 
 update:
@@ -2833,7 +2823,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        obj->map_and_fenceable = mappable && fenceable;
 
-       trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+       trace_i915_gem_object_bind(obj, map_and_fenceable);
        return 0;
 }
 
@@ -2856,13 +2846,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-
        if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        /* Queue the GPU write cache flushing we need. */
-       return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+       return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2929,12 +2917,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (obj->gtt_space == NULL)
                return -EINVAL;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+               return 0;
+
        ret = i915_gem_object_flush_gpu_write_domain(obj);
        if (ret)
                return ret;
 
        if (obj->pending_gpu_write || write) {
-               ret = i915_gem_object_wait_rendering(obj, true);
+               ret = i915_gem_object_wait_rendering(obj);
                if (ret)
                        return ret;
        }
@@ -2984,7 +2975,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 
        /* Currently, we are always called from a non-interruptible context. */
        if (pipelined != obj->ring) {
-               ret = i915_gem_object_wait_rendering(obj, false);
+               ret = i915_gem_object_wait_rendering(obj);
                if (ret)
                        return ret;
        }
@@ -3002,8 +2993,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-                         bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 {
        int ret;
 
@@ -3011,13 +3001,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
                return 0;
 
        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-               ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
-                                         0, obj->base.write_domain);
+               ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
                if (ret)
                        return ret;
        }
 
-       return i915_gem_object_wait_rendering(obj, interruptible);
+       return i915_gem_object_wait_rendering(obj);
 }
 
 /**
@@ -3032,11 +3021,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return 0;
+
        ret = i915_gem_object_flush_gpu_write_domain(obj);
        if (ret)
                return ret;
 
-       ret = i915_gem_object_wait_rendering(obj, true);
+       ret = i915_gem_object_wait_rendering(obj);
        if (ret)
                return ret;
 
@@ -3134,7 +3126,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       ret = i915_gem_object_wait_rendering(obj, true);
+       ret = i915_gem_object_wait_rendering(obj);
        if (ret)
                return ret;
 
@@ -3205,6 +3197,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        u32 seqno = 0;
        int ret;
 
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EIO;
+
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3320,7 +3315,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3371,7 +3366,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3408,7 +3403,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3426,7 +3421,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * flush earlier is beneficial.
                 */
                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                       ret = i915_gem_flush_ring(dev, obj->ring,
+                       ret = i915_gem_flush_ring(obj->ring,
                                                  0, obj->base.write_domain);
                } else if (obj->ring->outstanding_lazy_request ==
                           obj->last_rendering_seqno) {
@@ -3437,9 +3432,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                         */
                        request = kzalloc(sizeof(*request), GFP_KERNEL);
                        if (request)
-                               ret = i915_add_request(dev,
-                                                      NULL, request,
-                                                      obj->ring);
+                               ret = i915_add_request(obj->ring, NULL, request);
                        else
                                ret = -ENOMEM;
                }
@@ -3449,7 +3442,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * are actually unmasked, and our working set ends up being
                 * larger than required.
                 */
-               i915_gem_retire_requests_ring(dev, obj->ring);
+               i915_gem_retire_requests_ring(obj->ring);
 
                args->busy = obj->active;
        }
@@ -3488,7 +3481,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3579,6 +3572,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
        kfree(obj->page_cpu_valid);
        kfree(obj->bit_17);
        kfree(obj);
+
+       trace_i915_gem_object_destroy(obj);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3586,8 +3581,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
 
-       trace_i915_gem_object_destroy(obj);
-
        while (obj->pin_count > 0)
                i915_gem_object_unpin(obj);
 
@@ -3833,6 +3826,8 @@ i915_gem_load(struct drm_device *dev)
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
+       dev_priv->mm.interruptible = true;
+
        dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.inactive_shrinker);