Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt...
[pandora-kernel.git] / drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7..80871c6 100644
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
 
@@ -128,9 +129,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       mutex_lock(&dev->struct_mutex);
-       drm_gem_object_handle_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference_unlocked(obj);
 
        if (ret)
                return ret;
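
The three-line lock/unreference/unlock dance becomes a single call to the DRM core's _unlocked helper, which takes care of struct_mutex itself. A simplified sketch of what that helper does, for reference; the real definition lives in the DRM core (include/drm/drm_gem.h of this era), not in this patch:

	static inline void
	drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
	{
		if (obj == NULL)
			return;

		/* Drop the handle count, then the object reference; the
		 * release callbacks acquire dev->struct_mutex themselves
		 * as needed. */
		kref_put(&obj->handlecount, drm_gem_object_handle_free_unlocked);
		drm_gem_object_unreference_unlocked(obj);
	}
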
@@ -164,7 +163,7 @@ fast_shmem_read(struct page **pages,
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj_priv->tiling_mode != I915_TILING_NONE;
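
to_intel_bo() replaces the open-coded obj->driver_private casts that recur throughout this file. At this point in the driver's history it is purely a cast wrapper; its definition in i915_drv.h is along these lines:

	#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)

All of the mechanical obj->driver_private conversions below are instances of this one substitution.
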
@@ -265,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -286,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
        while (remain > 0) {
@@ -355,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
@@ -404,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
        while (remain > 0) {
@@ -480,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        /* Bounds check source.
         *
@@ -488,7 +487,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
-               drm_gem_object_unreference(obj);
+               drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }
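
The three comparisons above look redundant, but the first two protect the third against unsigned wraparound:

	/* Hypothetical operands (64-bit fields):
	 *   args->offset = 0xfffffffffffff000  -> rejected by the first check
	 *   args->size   = 0x2000
	 * offset + size wraps around to 0x1000, so the third comparison
	 * alone could pass this out-of-bounds request. */
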
 
@@ -501,7 +500,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                                                        file_priv);
        }
 
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference_unlocked(obj);
 
        return ret;
 }
@@ -582,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
@@ -606,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret)
                goto fail;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
 
        while (remain > 0) {
@@ -656,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
@@ -700,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret)
                goto out_unpin_object;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
 
        while (remain > 0) {
@@ -762,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -782,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;
 
@@ -830,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
@@ -878,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;
 
@@ -953,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        /* Bounds check destination.
         *
@@ -961,7 +960,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
-               drm_gem_object_unreference(obj);
+               drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }
 
@@ -995,7 +994,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference_unlocked(obj);
 
        return ret;
 }
@@ -1035,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -1097,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
 #endif
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
@@ -1138,9 +1137,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
-       mutex_lock(&dev->struct_mutex);
-       drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;
 
@@ -1170,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
@@ -1237,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret = 0;
@@ -1308,7 +1305,7 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (dev->dev_mapping)
                unmap_mapping_range(dev->dev_mapping,
@@ -1319,7 +1316,7 @@ static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;
 
@@ -1350,7 +1347,7 @@ static uint32_t
 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int start, i;
 
        /*
@@ -1409,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->struct_mutex);
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1453,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size / PAGE_SIZE;
        int i;
 
@@ -1470,9 +1467,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
                obj_priv->dirty = 0;
 
        for (i = 0; i < page_count; i++) {
-               if (obj_priv->pages[i] == NULL)
-                       break;
-
                if (obj_priv->dirty)
                        set_page_dirty(obj_priv->pages[i]);
 
@@ -1492,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
@@ -1512,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        BUG_ON(!obj_priv->active);
        list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1523,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 static void
 i915_gem_object_truncate(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct inode *inode;
 
        inode = obj->filp->f_path.dentry->d_inode;
@@ -1544,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
@@ -1562,6 +1556,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        i915_verify_inactive(dev, __FILE__, __LINE__);
 }
 
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+                              uint32_t flush_domains, uint32_t seqno)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv, *next;
+
+       list_for_each_entry_safe(obj_priv, next,
+                                &dev_priv->mm.gpu_write_list,
+                                gpu_write_list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+
+               if ((obj->write_domain & flush_domains) ==
+                   obj->write_domain) {
+                       uint32_t old_write_domain = obj->write_domain;
+
+                       obj->write_domain = 0;
+                       list_del_init(&obj_priv->gpu_write_list);
+                       i915_gem_object_move_to_active(obj, seqno);
+
+                       /* update the fence lru list */
+                       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+                               list_move_tail(&obj_priv->fence_list,
+                                               &dev_priv->mm.fence_list);
+
+                       trace_i915_gem_object_change_domain(obj,
+                                                           obj->read_domains,
+                                                           old_write_domain);
+               }
+       }
+}
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1620,29 +1646,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        /* Associate any objects on the flushing list matching the write
         * domain we're flushing with our flush.
         */
-       if (flush_domains != 0) {
-               struct drm_i915_gem_object *obj_priv, *next;
-
-               list_for_each_entry_safe(obj_priv, next,
-                                        &dev_priv->mm.gpu_write_list,
-                                        gpu_write_list) {
-                       struct drm_gem_object *obj = obj_priv->obj;
-
-                       if ((obj->write_domain & flush_domains) ==
-                           obj->write_domain) {
-                               uint32_t old_write_domain = obj->write_domain;
-
-                               obj->write_domain = 0;
-                               list_del_init(&obj_priv->gpu_write_list);
-                               i915_gem_object_move_to_active(obj, seqno);
-
-                               trace_i915_gem_object_change_domain(obj,
-                                                                   obj->read_domains,
-                                                                   old_write_domain);
-                       }
-               }
-
-       }
+       if (flush_domains != 0)
+               i915_gem_process_flushing_list(dev, flush_domains, seqno);
 
        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1822,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
                return -EIO;
 
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-               if (IS_IRONLAKE(dev))
+               if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                        ier = I915_READ(IER);
@@ -1960,7 +1965,7 @@ static int
 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        /* This function only exists to support waiting for existing rendering,
@@ -1991,7 +1996,8 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
 
 #if WATCH_BUF
@@ -2046,8 +2052,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        }
 
        /* Remove ourselves from the LRU list if present. */
+       spin_lock(&dev_priv->mm.active_list_lock);
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);
+       spin_unlock(&dev_priv->mm.active_list_lock);
 
        if (i915_gem_object_is_purgeable(obj_priv))
                i915_gem_object_truncate(obj);
@@ -2084,12 +2092,35 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
        return best ? best : first;
 }
 
+static int
+i915_gpu_idle(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       bool lists_empty;
+       uint32_t seqno;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+                     list_empty(&dev_priv->mm.active_list);
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
+       if (lists_empty)
+               return 0;
+
+       /* Flush everything onto the inactive list. */
+       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+       if (seqno == 0)
+               return -ENOMEM;
+
+       return i915_wait_request(dev, seqno);
+}
+
 static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       uint32_t seqno;
        bool lists_empty;
 
        spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2133,7 @@ i915_gem_evict_everything(struct drm_device *dev)
                return -ENOSPC;
 
        /* Flush everything (on to the inactive lists) and evict */
-       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-       if (seqno == 0)
-               return -ENOMEM;
-
-       ret = i915_wait_request(dev, seqno);
+       ret = i915_gpu_idle(dev);
        if (ret)
                return ret;
 
@@ -2147,7 +2173,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 #if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
-                       obj_priv = obj->driver_private;
+                       obj_priv = to_intel_bo(obj);
                        BUG_ON(obj_priv->pin_count != 0);
                        BUG_ON(obj_priv->active);
 
@@ -2199,11 +2225,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                                seqno = i915_add_request(dev, NULL, obj->write_domain);
                                if (seqno == 0)
                                        return -ENOMEM;
-
-                               ret = i915_wait_request(dev, seqno);
-                               if (ret)
-                                       return ret;
-
                                continue;
                        }
                }
@@ -2223,12 +2244,11 @@ int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
-       int ret;
 
        if (obj_priv->pages_refcount++ != 0)
                return 0;
@@ -2251,11 +2271,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
                                           mapping_gfp_mask (mapping) |
                                           __GFP_COLD |
                                           gfpmask);
-               if (IS_ERR(page)) {
-                       ret = PTR_ERR(page);
-                       i915_gem_object_put_pages(obj);
-                       return ret;
-               }
+               if (IS_ERR(page))
+                       goto err_pages;
+
                obj_priv->pages[i] = page;
        }
 
@@ -2263,6 +2281,37 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
                i915_gem_object_do_bit_17_swizzle(obj);
 
        return 0;
+
+err_pages:
+       while (i--)
+               page_cache_release(obj_priv->pages[i]);
+
+       drm_free_large(obj_priv->pages);
+       obj_priv->pages = NULL;
+       obj_priv->pages_refcount--;
+       return PTR_ERR(page);
+}
+
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+       struct drm_gem_object *obj = reg->obj;
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       int regnum = obj_priv->fence_reg;
+       uint64_t val;
+
+       val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+                   0xfffff000) << 32;
+       val |= obj_priv->gtt_offset & 0xfffff000;
+       val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+               SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+       if (obj_priv->tiling_mode == I915_TILING_Y)
+               val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+       val |= I965_FENCE_REG_VALID;
+
+       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
 }
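
For illustration, a hypothetical fence value computed the way sandybridge_write_fence_reg() does above (object size, offset, and stride are invented, and this assumes the pitch field sits at bit 32, i.e. SANDYBRIDGE_FENCE_PITCH_SHIFT == 32):

	/* Hypothetical 1 MiB X-tiled object at GTT offset 1 MiB, stride 4096: */
	uint64_t gtt_offset = 0x100000, size = 0x100000, stride = 4096;
	uint64_t val;

	val  = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
							/* end page:  0x1ff000 */
	val |= gtt_offset & 0xfffff000;			/* start page: 0x100000 */
	val |= (uint64_t)((stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT;
							/* 128-byte units: 31 */
	val |= I965_FENCE_REG_VALID;
	/* -> val == 0x001ff01f00100001 under these assumptions */
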
 
 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2270,7 +2319,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint64_t val;
 
@@ -2290,7 +2339,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        int tile_width;
        uint32_t fence_reg, val;
@@ -2332,7 +2381,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint32_t val;
        uint32_t pitch_val;
@@ -2361,6 +2410,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+       struct drm_i915_fence_reg *reg = NULL;
+       struct drm_i915_gem_object *obj_priv = NULL;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj = NULL;
+       int i, avail, ret;
+
+       /* First try to find a free reg */
+       avail = 0;
+       for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+               reg = &dev_priv->fence_regs[i];
+               if (!reg->obj)
+                       return i;
+
+               obj_priv = to_intel_bo(reg->obj);
+               if (!obj_priv->pin_count)
+                       avail++;
+       }
+
+       if (avail == 0)
+               return -ENOSPC;
+
+       /* None available, try to steal one or wait for a user to finish */
+       i = I915_FENCE_REG_NONE;
+       list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
+                           fence_list) {
+               obj = obj_priv->obj;
+
+               if (obj_priv->pin_count)
+                       continue;
+
+               /* found one! */
+               i = obj_priv->fence_reg;
+               break;
+       }
+
+       BUG_ON(i == I915_FENCE_REG_NONE);
+
+       /* We only have a reference on obj from the active list. put_fence_reg
+        * might drop that one, causing a use-after-free in it. So hold a
+        * private reference to obj like the other callers of put_fence_reg
+        * (set_tiling ioctl) do. */
+       drm_gem_object_reference(obj);
+       ret = i915_gem_object_put_fence_reg(obj);
+       drm_gem_object_unreference(obj);
+       if (ret != 0)
+               return ret;
+
+       return i;
+}
+
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
@@ -2379,10 +2480,9 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg = NULL;
-       struct drm_i915_gem_object *old_obj_priv = NULL;
-       int i, ret, avail;
+       int ret;
 
        /* Just update our place in the LRU if our fence is getting used. */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2510,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
                break;
        }
 
-       /* First try to find a free reg */
-       avail = 0;
-       for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-               reg = &dev_priv->fence_regs[i];
-               if (!reg->obj)
-                       break;
-
-               old_obj_priv = reg->obj->driver_private;
-               if (!old_obj_priv->pin_count)
-                   avail++;
-       }
-
-       /* None available, try to steal one or wait for a user to finish */
-       if (i == dev_priv->num_fence_regs) {
-               struct drm_gem_object *old_obj = NULL;
-
-               if (avail == 0)
-                       return -ENOSPC;
-
-               list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
-                                   fence_list) {
-                       old_obj = old_obj_priv->obj;
-
-                       if (old_obj_priv->pin_count)
-                               continue;
-
-                       /* Take a reference, as otherwise the wait_rendering
-                        * below may cause the object to get freed out from
-                        * under us.
-                        */
-                       drm_gem_object_reference(old_obj);
-
-                       /* i915 uses fences for GPU access to tiled buffers */
-                       if (IS_I965G(dev) || !old_obj_priv->active)
-                               break;
-
-                       /* This brings the object to the head of the LRU if it
-                        * had been written to.  The only way this should
-                        * result in us waiting longer than the expected
-                        * optimal amount of time is if there was a
-                        * fence-using buffer later that was read-only.
-                        */
-                       i915_gem_object_flush_gpu_write_domain(old_obj);
-                       ret = i915_gem_object_wait_rendering(old_obj);
-                       if (ret != 0) {
-                               drm_gem_object_unreference(old_obj);
-                               return ret;
-                       }
-
-                       break;
-               }
-
-               /*
-                * Zap this virtual mapping so we can set up a fence again
-                * for this object next time we need it.
-                */
-               i915_gem_release_mmap(old_obj);
-
-               i = old_obj_priv->fence_reg;
-               reg = &dev_priv->fence_regs[i];
-
-               old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
-               list_del_init(&old_obj_priv->fence_list);
-
-               drm_gem_object_unreference(old_obj);
-       }
+       ret = i915_find_fence_reg(dev);
+       if (ret < 0)
+               return ret;
 
-       obj_priv->fence_reg = i;
+       obj_priv->fence_reg = ret;
+       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
        list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
 
        reg->obj = obj;
 
-       if (IS_I965G(dev))
+       if (IS_GEN6(dev))
+               sandybridge_write_fence_reg(reg);
+       else if (IS_I965G(dev))
                i965_write_fence_reg(reg);
        else if (IS_I9XX(dev))
                i915_write_fence_reg(reg);
        else
                i830_write_fence_reg(reg);
 
-       trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+       trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+                       obj_priv->tiling_mode);
 
        return 0;
 }
@@ -2506,11 +2547,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       if (IS_I965G(dev))
+       if (IS_GEN6(dev)) {
+               I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+                            (obj_priv->fence_reg * 8), 0);
+       } else if (IS_I965G(dev)) {
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-       else {
+       } else {
                uint32_t fence_reg;
 
                if (obj_priv->fence_reg < 8)
@@ -2539,11 +2583,17 @@ int
 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
 
+       /* If we've changed tiling, GTT-mappings of the object
+        * need to re-fault to ensure that the correct fence register
+        * setup is in place.
+        */
+       i915_gem_release_mmap(obj);
+
        /* On the i915, GPU access to tiled buffers is via a fence,
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
@@ -2552,12 +2602,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
                int ret;
 
                i915_gem_object_flush_gpu_write_domain(obj);
-               i915_gem_object_flush_gtt_write_domain(obj);
                ret = i915_gem_object_wait_rendering(obj);
                if (ret != 0)
                        return ret;
        }
 
+       i915_gem_object_flush_gtt_write_domain(obj);
        i915_gem_clear_fence_reg (obj);
 
        return 0;
@@ -2571,7 +2621,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_mm_node *free_space;
        gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
        int ret;
@@ -2678,7 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 void
 i915_gem_clflush_object(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object      *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
 
        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
@@ -2697,7 +2747,6 @@ static void
 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       uint32_t seqno;
        uint32_t old_write_domain;
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2755,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
-       seqno = i915_add_request(dev, NULL, obj->write_domain);
+       (void) i915_add_request(dev, NULL, obj->write_domain);
        BUG_ON(obj->write_domain);
-       i915_gem_object_move_to_active(obj, seqno);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
@@ -2781,7 +2829,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
@@ -2831,7 +2879,7 @@ int
 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
@@ -3044,7 +3092,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
        struct drm_device               *dev = obj->dev;
-       struct drm_i915_gem_object      *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
        uint32_t                        old_read_domains;
@@ -3129,7 +3177,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (!obj_priv->page_cpu_valid)
                return;
@@ -3169,7 +3217,7 @@ static int
 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                          uint64_t offset, uint64_t size)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_read_domains;
        int i, ret;
 
@@ -3238,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int i, ret;
        void __iomem *reloc_page;
        bool need_fence;
@@ -3247,7 +3295,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                     obj_priv->tiling_mode != I915_TILING_NONE;
 
        /* Check fence reg constraints and rebind if necessary */
-       if (need_fence && !i915_obj_fenceable(dev, obj))
+       if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+           obj_priv->tiling_mode))
                i915_gem_object_unbind(obj);
 
        /* Choose the GTT offset for our buffer and put it there. */
@@ -3288,7 +3337,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                        i915_gem_object_unpin(obj);
                        return -EBADF;
                }
-               target_obj_priv = target_obj->driver_private;
+               target_obj_priv = to_intel_bo(target_obj);
 
 #if WATCH_RELOC
                DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3317,6 +3366,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                }
 
                /* Validate that the target is in a valid r/w GPU domain */
+               if (reloc->write_domain & (reloc->write_domain - 1)) {
+                       DRM_ERROR("reloc with multiple write domains: "
+                                 "obj %p target %d offset %d "
+                                 "read %08x write %08x",
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->read_domains,
+                                 reloc->write_domain);
+                       return -EINVAL;
+               }
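
The check just added is the usual x & (x - 1) idiom: clearing the lowest set bit of a value that is zero or a power of two leaves zero, while any value with two or more bits set leaves a remainder. For example:

	/* write_domain = 0x2 (one domain):  0x2 & 0x1 == 0x0 -> accepted */
	/* write_domain = 0x6 (two domains): 0x6 & 0x5 == 0x4 -> rejected */
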
                if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
                    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
@@ -3630,7 +3689,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
                prepare_to_wait(&dev_priv->pending_flip_queue,
                                &wait, TASK_INTERRUPTIBLE);
                for (i = 0; i < count; i++) {
-                       obj_priv = object_list[i]->driver_private;
+                       obj_priv = to_intel_bo(object_list[i]);
                        if (atomic_read(&obj_priv->pending_flip) > 0)
                                break;
                }
@@ -3739,7 +3798,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
                }
 
-               obj_priv = object_list[i]->driver_private;
+               obj_priv = to_intel_bo(object_list[i]);
                if (obj_priv->in_execbuffer) {
                        DRM_ERROR("Object %p appears more than once in object list\n",
                                   object_list[i]);
@@ -3865,7 +3924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                uint32_t old_write_domain = obj->write_domain;
 
                obj->write_domain = obj->pending_write_domain;
@@ -3940,7 +3999,7 @@ err:
 
        for (i = 0; i < args->buffer_count; i++) {
                if (object_list[i]) {
-                       obj_priv = object_list[i]->driver_private;
+                       obj_priv = to_intel_bo(object_list[i]);
                        obj_priv->in_execbuffer = false;
                }
                drm_gem_object_unreference(object_list[i]);
@@ -4118,7 +4177,7 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4151,7 +4210,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
        obj_priv->pin_count--;
@@ -4191,7 +4250,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4248,7 +4307,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                return -EBADF;
        }
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -4290,7 +4349,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         */
        i915_gem_retire_requests(dev);
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        /* Don't count being on the flushing list against the object being
         * done.  Otherwise, a buffer left on the flushing list but not getting
         * flushed (because nobody's flushing that domain) won't ever return
@@ -4336,7 +4395,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->pin_count) {
                drm_gem_object_unreference(obj);
@@ -4397,7 +4456,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        trace_i915_gem_object_destroy(obj);
 
@@ -4445,8 +4504,7 @@ int
 i915_gem_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t seqno, cur_seqno, last_seqno;
-       int stuck, ret;
+       int ret;
 
        mutex_lock(&dev->struct_mutex);
 
@@ -4455,115 +4513,36 @@ i915_gem_idle(struct drm_device *dev)
                return 0;
        }
 
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        */
-       dev_priv->mm.suspended = 1;
-       del_timer(&dev_priv->hangcheck_timer);
-
-       /* Cancel the retire work handler, wait for it to finish if running
-        */
-       mutex_unlock(&dev->struct_mutex);
-       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-       mutex_lock(&dev->struct_mutex);
-
-       i915_kernel_lost_context(dev);
-
-       /* Flush the GPU along with all non-CPU write domains
-        */
-       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
-       if (seqno == 0) {
+       ret = i915_gpu_idle(dev);
+       if (ret) {
                mutex_unlock(&dev->struct_mutex);
-               return -ENOMEM;
+               return ret;
        }
 
-       dev_priv->mm.waiting_gem_seqno = seqno;
-       last_seqno = 0;
-       stuck = 0;
-       for (;;) {
-               cur_seqno = i915_get_gem_seqno(dev);
-               if (i915_seqno_passed(cur_seqno, seqno))
-                       break;
-               if (last_seqno == cur_seqno) {
-                       if (stuck++ > 100) {
-                               DRM_ERROR("hardware wedged\n");
-                               atomic_set(&dev_priv->mm.wedged, 1);
-                               DRM_WAKEUP(&dev_priv->irq_queue);
-                               break;
-                       }
+       /* Under UMS, be paranoid and evict. */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = i915_gem_evict_from_inactive_list(dev);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return ret;
                }
-               msleep(10);
-               last_seqno = cur_seqno;
        }
-       dev_priv->mm.waiting_gem_seqno = 0;
-
-       i915_gem_retire_requests(dev);
 
-       spin_lock(&dev_priv->mm.active_list_lock);
-       if (!atomic_read(&dev_priv->mm.wedged)) {
-               /* Active and flushing should now be empty as we've
-                * waited for a sequence higher than any pending execbuffer
-                */
-               WARN_ON(!list_empty(&dev_priv->mm.active_list));
-               WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
-               /* Request should now be empty as we've also waited
-                * for the last request in the list
-                */
-               WARN_ON(!list_empty(&dev_priv->mm.request_list));
-       }
-
-       /* Empty the active and flushing lists to inactive.  If there's
-        * anything left at this point, it means that we're wedged and
-        * nothing good's going to happen by leaving them there.  So strip
-        * the GPU domains and just stuff them onto inactive.
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        * And not confound mm.suspended!
         */
-       while (!list_empty(&dev_priv->mm.active_list)) {
-               struct drm_gem_object *obj;
-               uint32_t old_write_domain;
-
-               obj = list_first_entry(&dev_priv->mm.active_list,
-                                      struct drm_i915_gem_object,
-                                      list)->obj;
-               old_write_domain = obj->write_domain;
-               obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-               i915_gem_object_move_to_inactive(obj);
-
-               trace_i915_gem_object_change_domain(obj,
-                                                   obj->read_domains,
-                                                   old_write_domain);
-       }
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
-       while (!list_empty(&dev_priv->mm.flushing_list)) {
-               struct drm_gem_object *obj;
-               uint32_t old_write_domain;
-
-               obj = list_first_entry(&dev_priv->mm.flushing_list,
-                                      struct drm_i915_gem_object,
-                                      list)->obj;
-               old_write_domain = obj->write_domain;
-               obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-               i915_gem_object_move_to_inactive(obj);
-
-               trace_i915_gem_object_change_domain(obj,
-                                                   obj->read_domains,
-                                                   old_write_domain);
-       }
-
-
-       /* Move all inactive buffers out of the GTT. */
-       ret = i915_gem_evict_from_inactive_list(dev);
-       WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       dev_priv->mm.suspended = 1;
+       del_timer(&dev_priv->hangcheck_timer);
 
+       i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
+
        mutex_unlock(&dev->struct_mutex);
 
+       /* Cancel the retire work handler, which should be idle now. */
+       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
        return 0;
 }
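
Note the ordering at the tail of the rewritten i915_gem_idle(): struct_mutex is released before cancel_delayed_work_sync(). That matters because the retire handler takes the same lock; a simplified sketch of the handler's shape (not part of this patch) shows why cancelling it synchronously under the mutex could deadlock:

	static void i915_gem_retire_work_handler(struct work_struct *work)
	{
		drm_i915_private_t *dev_priv =
			container_of(work, drm_i915_private_t,
				     mm.retire_work.work);
		struct drm_device *dev = dev_priv->dev;

		mutex_lock(&dev->struct_mutex);	/* would block on the caller */
		i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);
	}
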
 
@@ -4586,7 +4565,7 @@ i915_gem_init_hws(struct drm_device *dev)
                DRM_ERROR("Failed to allocate status page\n");
                return -ENOMEM;
        }
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
        ret = i915_gem_object_pin(obj, 4096);
@@ -4607,8 +4586,13 @@ i915_gem_init_hws(struct drm_device *dev)
        }
        dev_priv->hws_obj = obj;
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-       I915_READ(HWS_PGA); /* posting read */
+       if (IS_GEN6(dev)) {
+               I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
+               I915_READ(HWS_PGA_GEN6); /* posting read */
+       } else {
+               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+               I915_READ(HWS_PGA); /* posting read */
+       }
        DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
        return 0;
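
The read-back of HWS_PGA(_GEN6) right after each write is a posting read: the MMIO write may be buffered, and reading the same register back forces it out to the hardware before setup continues. The idiom in its general form (register and value names here are just placeholders):

	I915_WRITE(reg, val);
	I915_READ(reg);		/* posting read: flush the buffered write */
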
@@ -4625,7 +4609,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
                return;
 
        obj = dev_priv->hws_obj;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
@@ -4659,7 +4643,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
                i915_gem_cleanup_hws(dev);
                return -ENOMEM;
        }
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
@@ -4745,6 +4729,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
                        ring->space += ring->Size;
        }
 
+       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+               I915_WRITE(MI_MODE,
+                          (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       }
+
        return 0;
 }
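
MI_MODE is one of the hardware's masked registers: the high 16 bits of a write select which of the low 16 bits take effect, so bit << 16 | bit sets a bit and bit << 16 alone clears it, leaving the other bits untouched. A sketch of the idiom with helper names invented for illustration (later kernels grew similar macros):

	#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
	#define MASKED_BIT_DISABLE(a)	((a) << 16)

	I915_WRITE(MI_MODE, MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
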
 
@@ -4850,7 +4839,8 @@ i915_gem_load(struct drm_device *dev)
        spin_unlock(&shrink_list_lock);
 
        /* Old X drivers will take 0-2 for front, back, depth buffers */
-       dev_priv->fence_reg_start = 3;
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               dev_priv->fence_reg_start = 3;
 
        if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
@@ -4946,7 +4936,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        int ret;
        int page_count;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        if (!obj_priv->phys_obj)
                return;
 
@@ -4985,7 +4975,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        if (id > I915_MAX_PHYS_OBJECT)
                return -EINVAL;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->phys_obj) {
                if (obj_priv->phys_obj->id == id)
@@ -5036,7 +5026,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        void *obj_addr;
        int ret;
        char __user *user_data;