Merge branch 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 36e66cc..c4c2855 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
        dev_priv->mm.object_memory -= size;
 }
 
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
        if (ret)
                return ret;
 
-       /* Success, we reset the GPU! */
-       if (!atomic_read(&dev_priv->mm.wedged))
-               return 0;
-
-       /* GPU is hung, bump the completion count to account for
-        * the token we just consumed so that we never hit zero and
-        * end up waiting upon a subsequent completion event that
-        * will never happen.
-        */
-       spin_lock_irqsave(&x->wait.lock, flags);
-       x->done++;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-       return -EIO;
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               /* GPU is hung, bump the completion count to account for
+                * the token we just consumed so that we never hit zero and
+                * end up waiting upon a subsequent completion event that
+                * will never happen.
+                */
+               spin_lock_irqsave(&x->wait.lock, flags);
+               x->done++;
+               spin_unlock_irqrestore(&x->wait.lock, flags);
+       }
+       return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_gem_check_is_wedged(dev);
+       ret = i915_gem_wait_for_error(dev);
        if (ret)
                return ret;
 
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
        if (ret)
                return ret;
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               return -EAGAIN;
-       }
-
        WARN_ON(i915_verify_lists(dev));
        return 0;
 }
@@ -193,22 +185,20 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
-                     struct drm_file *file)
+static int
+i915_gem_create(struct drm_file *file,
+               struct drm_device *dev,
+               uint64_t size,
+               uint32_t *handle_p)
 {
-       struct drm_i915_gem_create *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;
 
-       args->size = roundup(args->size, PAGE_SIZE);
+       size = roundup(size, PAGE_SIZE);
 
        /* Allocate the new object */
-       obj = i915_gem_alloc_object(dev, args->size);
+       obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;
 
@@ -224,10 +214,41 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        drm_gem_object_unreference(&obj->base);
        trace_i915_gem_object_create(obj);
 
-       args->handle = handle;
+       *handle_p = handle;
        return 0;
 }
 
+int
+i915_gem_dumb_create(struct drm_file *file,
+                    struct drm_device *dev,
+                    struct drm_mode_create_dumb *args)
+{
+       /* have to work out size/pitch and return them */
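+       /* pitch is width * whole bytes per pixel, 64-byte aligned */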
+       args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+       args->size = args->pitch * args->height;
+       return i915_gem_create(file, dev,
+                              args->size, &args->handle);
+}
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+                         struct drm_device *dev,
+                         uint32_t handle)
+{
+       return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+                     struct drm_file *file)
+{
+       struct drm_i915_gem_create *args = data;
+       return i915_gem_create(file, dev,
+                              args->size, &args->handle);
+}
+
 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -514,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -526,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
+       trace_i915_gem_object_pread(obj, args->offset, args->size);
+
        ret = i915_gem_object_set_cpu_read_domain_range(obj,
                                                        args->offset,
                                                        args->size);
@@ -955,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -967,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
+       trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
@@ -1049,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1092,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1121,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
-       loff_t offset;
        unsigned long addr;
 
        if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1136,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                return -E2BIG;
        }
 
-       offset = args->offset;
-
        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1182,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;
 
-       /* Now bind it into the GTT if needed */
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto out;
 
+       trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+       /* Now bind it into the GTT if needed */
        if (!obj->map_and_fenceable) {
                ret = i915_gem_object_unbind(obj);
                if (ret)
@@ -1203,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (obj->tiling_mode == I915_TILING_NONE)
                ret = i915_gem_object_put_fence(obj);
        else
-               ret = i915_gem_object_get_fence(obj, NULL, true);
+               ret = i915_gem_object_get_fence(obj, NULL);
        if (ret)
                goto unlock;
 
@@ -1219,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unlock:
        mutex_unlock(&dev->struct_mutex);
-
+out:
        switch (ret) {
+       case -EIO:
        case -EAGAIN:
+               /* Give the error handler a chance to run and move the
+                * objects off the GPU active list. Next time we service the
+                * fault, we should be able to transition the page into the
+                * GTT without touching the GPU (and so avoid further
+                * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+                * with coherency, just lost writes.
+                */
                set_need_resched();
        case 0:
        case -ERESTARTSYS:
+       case -EINTR:
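+               /* Either the PTE was installed or the wait for the lock was
+                * interrupted by a signal; in both cases the fault can simply
+                * be retried, so report NOPAGE.
+                */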
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
@@ -1425,27 +1460,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
        return tile_height * obj->stride * 2;
 }
 
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
 int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file)
+i915_gem_mmap_gtt(struct drm_file *file,
+                 struct drm_device *dev,
+                 uint32_t handle,
+                 uint64_t *offset)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_mmap_gtt *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
 
@@ -1456,8 +1477,8 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1479,7 +1500,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        goto out;
        }
 
-       args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -1488,6 +1509,34 @@ unlock:
        return ret;
 }
 
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file)
+{
+       struct drm_i915_gem_mmap_gtt *args = data;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
                              gfp_t gfpmask)
@@ -1669,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains,
-                              struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+                              uint32_t flush_domains)
 {
        struct drm_i915_gem_object *obj, *next;
 
@@ -1684,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                        obj->base.write_domain = 0;
                        list_del_init(&obj->gpu_write_list);
                        i915_gem_object_move_to_active(obj, ring,
-                                                      i915_gem_next_request_seqno(dev, ring));
+                                                      i915_gem_next_request_seqno(ring));
 
                        trace_i915_gem_object_change_domain(obj,
                                                            obj->base.read_domains,
@@ -1694,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
                 struct drm_file *file,
-                struct drm_i915_gem_request *request,
-                struct intel_ring_buffer *ring)
+                struct drm_i915_gem_request *request)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *file_priv = NULL;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
        uint32_t seqno;
        int was_empty;
        int ret;
 
        BUG_ON(request == NULL);
 
-       if (file != NULL)
-               file_priv = file->driver_priv;
-
        ret = ring->add_request(ring, &seqno);
        if (ret)
            return ret;
 
-       ring->outstanding_lazy_request = false;
+       trace_i915_gem_request_add(ring, seqno);
 
        request->seqno = seqno;
        request->ring = ring;
@@ -1722,7 +1765,9 @@ i915_add_request(struct drm_device *dev,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (file_priv) {
+       if (file) {
+               struct drm_i915_file_private *file_priv = file->driver_priv;
+
                spin_lock(&file_priv->mm.lock);
                request->file_priv = file_priv;
                list_add_tail(&request->client_list,
@@ -1730,6 +1775,8 @@ i915_add_request(struct drm_device *dev,
                spin_unlock(&file_priv->mm.lock);
        }
 
+       ring->outstanding_lazy_request = false;
+
        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer,
                          jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1846,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
-                             struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
        int i;
 
-       if (!ring->status_page.page_addr ||
-           list_empty(&ring->request_list))
+       if (list_empty(&ring->request_list))
                return;
 
-       WARN_ON(i915_verify_lists(dev));
+       WARN_ON(i915_verify_lists(ring->dev));
 
        seqno = ring->get_seqno(ring);
 
@@ -1875,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                if (!i915_seqno_passed(seqno, request->seqno))
                        break;
 
-               trace_i915_gem_request_retire(dev, request->seqno);
+               trace_i915_gem_request_retire(ring, request->seqno);
 
                list_del(&request->list);
                i915_gem_request_remove_from_client(request);
@@ -1901,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
                        i915_gem_object_move_to_inactive(obj);
        }
 
-       if (unlikely (dev_priv->trace_irq_seqno &&
-                     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+       if (unlikely(ring->trace_irq_seqno &&
+                    i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
-               dev_priv->trace_irq_seqno = 0;
+               ring->trace_irq_seqno = 0;
        }
 
-       WARN_ON(i915_verify_lists(dev));
+       WARN_ON(i915_verify_lists(ring->dev));
 }
 
 void
@@ -1931,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev)
        }
 
        for (i = 0; i < I915_NUM_RINGS; i++)
-               i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+               i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
 
 static void
@@ -1965,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
                        struct drm_i915_gem_request *request;
                        int ret;
 
-                       ret = i915_gem_flush_ring(dev, ring, 0,
-                                                 I915_GEM_GPU_DOMAINS);
+                       ret = i915_gem_flush_ring(ring,
+                                                 0, I915_GEM_GPU_DOMAINS);
                        request = kzalloc(sizeof(*request), GFP_KERNEL);
                        if (ret || request == NULL ||
-                           i915_add_request(dev, NULL, request, ring))
+                           i915_add_request(ring, NULL, request))
                            kfree(request);
                }
 
@@ -1982,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-                    bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+                 uint32_t seqno)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 ier;
        int ret = 0;
 
        BUG_ON(seqno == 0);
 
-       if (atomic_read(&dev_priv->mm.wedged))
-               return -EAGAIN;
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               struct completion *x = &dev_priv->error_completion;
+               bool recovery_complete;
+               unsigned long flags;
+
+               /* Give the error handler a chance to run. */
+               spin_lock_irqsave(&x->wait.lock, flags);
+               recovery_complete = x->done > 0;
+               spin_unlock_irqrestore(&x->wait.lock, flags);
+
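+               /* While the reset is still in progress the caller should
+                * back off and retry (-EAGAIN); once recovery has completed
+                * and we are still wedged, the error is permanent (-EIO).
+                */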
+               return recovery_complete ? -EIO : -EAGAIN;
+       }
 
        if (seqno == ring->outstanding_lazy_request) {
                struct drm_i915_gem_request *request;
@@ -2002,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                if (request == NULL)
                        return -ENOMEM;
 
-               ret = i915_add_request(dev, NULL, request, ring);
+               ret = i915_add_request(ring, NULL, request);
                if (ret) {
                        kfree(request);
                        return ret;
@@ -2012,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
        }
 
        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-               if (HAS_PCH_SPLIT(dev))
+               if (HAS_PCH_SPLIT(ring->dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                        ier = I915_READ(IER);
                if (!ier) {
                        DRM_ERROR("something (likely vbetool) disabled "
                                  "interrupts, re-enabling\n");
-                       i915_driver_irq_preinstall(dev);
-                       i915_driver_irq_postinstall(dev);
+                       i915_driver_irq_preinstall(ring->dev);
+                       i915_driver_irq_postinstall(ring->dev);
                }
 
-               trace_i915_gem_request_wait_begin(dev, seqno);
+               trace_i915_gem_request_wait_begin(ring, seqno);
 
                ring->waiting_seqno = seqno;
                if (ring->irq_get(ring)) {
-                       if (interruptible)
+                       if (dev_priv->mm.interruptible)
                                ret = wait_event_interruptible(ring->irq_queue,
                                                               i915_seqno_passed(ring->get_seqno(ring), seqno)
                                                               || atomic_read(&dev_priv->mm.wedged));
@@ -2043,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                        ret = -EBUSY;
                ring->waiting_seqno = 0;
 
-               trace_i915_gem_request_wait_end(dev, seqno);
+               trace_i915_gem_request_wait_end(ring, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
                ret = -EAGAIN;
@@ -2059,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
         * a separate wait queue to handle that.
         */
        if (ret == 0)
-               i915_gem_retire_requests_ring(dev, ring);
+               i915_gem_retire_requests_ring(ring);
 
        return ret;
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
-                 struct intel_ring_buffer *ring)
-{
-       return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
 int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                              bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
        int ret;
 
        /* This function only exists to support waiting for existing rendering,
@@ -2095,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
         * it.
         */
        if (obj->active) {
-               ret = i915_do_wait_request(dev,
-                                          obj->last_rendering_seqno,
-                                          interruptible,
-                                          obj->ring);
+               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
                if (ret)
                        return ret;
        }
@@ -2148,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (ret == -ERESTARTSYS)
                return ret;
 
+       trace_i915_gem_object_unbind(obj);
+
        i915_gem_gtt_unbind_object(obj);
        i915_gem_object_put_pages_gtt(obj);
 
@@ -2163,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
-       trace_i915_gem_object_unbind(obj);
-
        return ret;
 }
 
 int
-i915_gem_flush_ring(struct drm_device *dev,
-                   struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
 {
        int ret;
 
+       trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
        ret = ring->flush(ring, invalidate_domains, flush_domains);
        if (ret)
                return ret;
 
-       i915_gem_process_flushing_list(dev, flush_domains, ring);
+       i915_gem_process_flushing_list(ring, flush_domains);
        return 0;
 }
 
-static int i915_ring_idle(struct drm_device *dev,
-                         struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
        int ret;
 
@@ -2193,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev,
                return 0;
 
        if (!list_empty(&ring->gpu_write_list)) {
-               ret = i915_gem_flush_ring(dev, ring,
+               ret = i915_gem_flush_ring(ring,
                                    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
 
-       return i915_wait_request(dev,
-                                i915_gem_next_request_seqno(dev, ring),
-                                ring);
+       return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
 int
@@ -2218,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+               ret = i915_ring_idle(&dev_priv->ring[i]);
                if (ret)
                        return ret;
        }
@@ -2402,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-                           struct intel_ring_buffer *pipelined,
-                           bool interruptible)
+                           struct intel_ring_buffer *pipelined)
 {
        int ret;
 
        if (obj->fenced_gpu_access) {
                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                       ret = i915_gem_flush_ring(obj->base.dev,
-                                                 obj->last_fenced_ring,
+                       ret = i915_gem_flush_ring(obj->last_fenced_ring,
                                                  0, obj->base.write_domain);
                        if (ret)
                                return ret;
@@ -2422,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
        if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
                if (!ring_passed_seqno(obj->last_fenced_ring,
                                       obj->last_fenced_seqno)) {
-                       ret = i915_do_wait_request(obj->base.dev,
-                                                  obj->last_fenced_seqno,
-                                                  interruptible,
-                                                  obj->last_fenced_ring);
+                       ret = i915_wait_request(obj->last_fenced_ring,
+                                               obj->last_fenced_seqno);
                        if (ret)
                                return ret;
                }
@@ -2451,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);
 
-       ret = i915_gem_object_flush_fence(obj, NULL, true);
+       ret = i915_gem_object_flush_fence(obj, NULL);
        if (ret)
                return ret;
 
@@ -2528,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev,
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-                         struct intel_ring_buffer *pipelined,
-                         bool interruptible)
+                         struct intel_ring_buffer *pipelined)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2551,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                        if (reg->setup_seqno) {
                                if (!ring_passed_seqno(obj->last_fenced_ring,
                                                       reg->setup_seqno)) {
-                                       ret = i915_do_wait_request(obj->base.dev,
-                                                                  reg->setup_seqno,
-                                                                  interruptible,
-                                                                  obj->last_fenced_ring);
+                                       ret = i915_wait_request(obj->last_fenced_ring,
+                                                               reg->setup_seqno);
                                        if (ret)
                                                return ret;
                                }
@@ -2563,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                        }
                } else if (obj->last_fenced_ring &&
                           obj->last_fenced_ring != pipelined) {
-                       ret = i915_gem_object_flush_fence(obj,
-                                                         pipelined,
-                                                         interruptible);
+                       ret = i915_gem_object_flush_fence(obj, pipelined);
                        if (ret)
                                return ret;
                } else if (obj->tiling_changed) {
                        if (obj->fenced_gpu_access) {
                                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                                       ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+                                       ret = i915_gem_flush_ring(obj->ring,
                                                                  0, obj->base.write_domain);
                                        if (ret)
                                                return ret;
@@ -2588,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                if (obj->tiling_changed) {
                        if (pipelined) {
                                reg->setup_seqno =
-                                       i915_gem_next_request_seqno(dev, pipelined);
+                                       i915_gem_next_request_seqno(pipelined);
                                obj->last_fenced_seqno = reg->setup_seqno;
                                obj->last_fenced_ring = pipelined;
                        }
@@ -2602,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
        if (reg == NULL)
                return -ENOSPC;
 
-       ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+       ret = i915_gem_object_flush_fence(obj, pipelined);
        if (ret)
                return ret;
 
@@ -2614,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                if (old->tiling_mode)
                        i915_gem_release_mmap(old);
 
-               ret = i915_gem_object_flush_fence(old,
-                                                 pipelined,
-                                                 interruptible);
+               ret = i915_gem_object_flush_fence(old, pipelined);
                if (ret) {
                        drm_gem_object_unreference(&old->base);
                        return ret;
@@ -2628,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                old->fence_reg = I915_FENCE_REG_NONE;
                old->last_fenced_ring = pipelined;
                old->last_fenced_seqno =
-                       pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+                       pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 
                drm_gem_object_unreference(&old->base);
        } else if (obj->last_fenced_seqno == 0)
@@ -2640,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
        obj->last_fenced_ring = pipelined;
 
        reg->setup_seqno =
-               pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+               pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
        obj->last_fenced_seqno = reg->setup_seqno;
 
 update:
@@ -2837,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        obj->map_and_fenceable = mappable && fenceable;
 
-       trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+       trace_i915_gem_object_bind(obj, map_and_fenceable);
        return 0;
 }
 
@@ -2860,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-
        if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        /* Queue the GPU write cache flushing we need. */
-       return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+       return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2933,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (obj->gtt_space == NULL)
                return -EINVAL;
 
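+       /* Nothing to do if the object is already in the GTT write domain. */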
+       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+               return 0;
+
        ret = i915_gem_object_flush_gpu_write_domain(obj);
        if (ret)
                return ret;
 
        if (obj->pending_gpu_write || write) {
-               ret = i915_gem_object_wait_rendering(obj, true);
+               ret = i915_gem_object_wait_rendering(obj);
                if (ret)
                        return ret;
        }
@@ -2988,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 
        /* Currently, we are always called from a non-interruptible context. */
        if (pipelined != obj->ring) {
-               ret = i915_gem_object_wait_rendering(obj, false);
+               ret = i915_gem_object_wait_rendering(obj);
                if (ret)
                        return ret;
        }
@@ -3006,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-                         bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 {
        int ret;
 
@@ -3015,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
                return 0;
 
        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-               ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
-                                         0, obj->base.write_domain);
+               ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
                if (ret)
                        return ret;
        }
 
-       return i915_gem_object_wait_rendering(obj, interruptible);
+       return i915_gem_object_wait_rendering(obj);
 }
 
 /**
@@ -3036,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return 0;
+
        ret = i915_gem_object_flush_gpu_write_domain(obj);
        if (ret)
                return ret;
 
-       ret = i915_gem_object_wait_rendering(obj, true);
+       ret = i915_gem_object_wait_rendering(obj);
        if (ret)
                return ret;
 
@@ -3138,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       ret = i915_gem_object_wait_rendering(obj, true);
+       ret = i915_gem_object_wait_rendering(obj);
        if (ret)
                return ret;
 
@@ -3209,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        u32 seqno = 0;
        int ret;
 
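+       /* A wedged GPU will never retire the outstanding requests, so there
+        * is nothing to throttle against; report the error instead.
+        */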
+       if (atomic_read(&dev_priv->mm.wedged))
+               return -EIO;
+
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3324,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3375,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3412,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3430,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * flush earlier is beneficial.
                 */
                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                       ret = i915_gem_flush_ring(dev, obj->ring,
+                       ret = i915_gem_flush_ring(obj->ring,
                                                  0, obj->base.write_domain);
                } else if (obj->ring->outstanding_lazy_request ==
                           obj->last_rendering_seqno) {
@@ -3441,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                         */
                        request = kzalloc(sizeof(*request), GFP_KERNEL);
                        if (request)
-                               ret = i915_add_request(dev,
-                                                      NULL, request,
-                                                      obj->ring);
+                               ret = i915_add_request(obj->ring, NULL, request);
                        else
                                ret = -ENOMEM;
                }
@@ -3453,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * are actually unmasked, and our working set ends up being
                 * larger than required.
                 */
-               i915_gem_retire_requests_ring(dev, obj->ring);
+               i915_gem_retire_requests_ring(obj->ring);
 
                args->busy = obj->active;
        }
@@ -3492,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                return ret;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
-       if (obj == NULL) {
+       if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -3583,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
        kfree(obj->page_cpu_valid);
        kfree(obj->bit_17);
        kfree(obj);
+
+       trace_i915_gem_object_destroy(obj);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3590,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
 
-       trace_i915_gem_object_destroy(obj);
-
        while (obj->pin_count > 0)
                i915_gem_object_unpin(obj);
 
@@ -3837,6 +3869,8 @@ i915_gem_load(struct drm_device *dev)
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
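+       /* GEM waits default to being interruptible; see i915_wait_request(). */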
+       dev_priv->mm.interruptible = true;
+
        dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.inactive_shrinker);