Merge branch 'drm-intel-fixes' into drm-intel-next
drivers/gpu/drm/i915/i915_gem.c
index 781c26c..54fc1e7 100644
 #include <linux/pci.h>
 #include <linux/intel-gtt.h>
 
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+struct change_domains {
+       uint32_t invalidate_domains;
+       uint32_t flush_domains;
+       uint32_t flush_rings;
+};
+
+static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv);
+static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv);
 
 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
                                                  bool pipelined);
@@ -51,22 +58,18 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
                                          bool interruptible);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
-                                          unsigned alignment);
+                                      unsigned alignment,
+                                      bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-                         gfp_t gfpmask);
-
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+                                   int nr_to_scan,
+                                   gfp_t gfp_mask);
 
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -84,31 +87,75 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 }
 
 static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
-                                 size_t size)
+                                 struct drm_i915_gem_object *obj)
 {
        dev_priv->mm.gtt_count++;
-       dev_priv->mm.gtt_memory += size;
+       dev_priv->mm.gtt_memory += obj->gtt_space->size;
+       if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
+               dev_priv->mm.mappable_gtt_used +=
+                       min_t(size_t, obj->gtt_space->size,
+                             dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
+       }
 }
 
 static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
-                                    size_t size)
+                                    struct drm_i915_gem_object *obj)
 {
        dev_priv->mm.gtt_count--;
-       dev_priv->mm.gtt_memory -= size;
+       dev_priv->mm.gtt_memory -= obj->gtt_space->size;
+       if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
+               dev_priv->mm.mappable_gtt_used -=
+                       min_t(size_t, obj->gtt_space->size,
+                             dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
+       }
+}
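
The mappable accounting above only charges the part of an object that lies below gtt_mappable_end, which matters for objects straddling the aperture boundary. A rough standalone sketch of the min_t() arithmetic, with hypothetical numbers (256 MiB mappable boundary, 1 MiB object bound 512 KiB below it):

        size_t gtt_mappable_end = 256ul << 20;                  /* 256 MiB aperture */
        size_t gtt_offset       = (256ul << 20) - (512 << 10);  /* 512 KiB below it */
        size_t obj_size         = 1ul << 20;                    /* 1 MiB object     */

        size_t used = 0;
        if (gtt_offset < gtt_mappable_end) {
                size_t room = gtt_mappable_end - gtt_offset;    /* 512 KiB */
                used += obj_size < room ? obj_size : room;      /* min_t() */
        }
        /* used == 512 KiB: only the mappable half of the object is counted */
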
+
+/**
+ * Update the mappable working set counters. Call _only_ when there is a change
+ * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
+ * @mappable: new state of the changed mappable flag (either pin_ or fault_).
+ */
+static void
+i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
+                             struct drm_i915_gem_object *obj,
+                             bool mappable)
+{
+       if (mappable) {
+               if (obj->pin_mappable && obj->fault_mappable)
+                       /* Combined state was already mappable. */
+                       return;
+               dev_priv->mm.gtt_mappable_count++;
+               dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
+       } else {
+               if (obj->pin_mappable || obj->fault_mappable)
+                       /* Combined state still mappable. */
+                       return;
+               dev_priv->mm.gtt_mappable_count--;
+               dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
+       }
 }
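
i915_gem_info_update_mappable() counts the logical OR of pin_mappable and fault_mappable, so an object that is both pinned into and faulted through the aperture is charged only once. Per the comment, callers flip their own flag first and then notify the accounting; the fault and unpin paths later in this patch follow this shape:

        if (!obj_priv->fault_mappable) {
                obj_priv->fault_mappable = true;
                i915_gem_info_update_mappable(dev_priv, obj_priv, true);
        }

        /* ... and when the mapping goes away ... */
        if (obj_priv->fault_mappable) {
                obj_priv->fault_mappable = false;
                i915_gem_info_update_mappable(dev_priv, obj_priv, false);
        }
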
 
 static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
-                                 size_t size)
+                                 struct drm_i915_gem_object *obj,
+                                 bool mappable)
 {
        dev_priv->mm.pin_count++;
-       dev_priv->mm.pin_memory += size;
+       dev_priv->mm.pin_memory += obj->gtt_space->size;
+       if (mappable) {
+               obj->pin_mappable = true;
+               i915_gem_info_update_mappable(dev_priv, obj, true);
+       }
 }
 
 static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
-                                    size_t size)
+                                    struct drm_i915_gem_object *obj)
 {
        dev_priv->mm.pin_count--;
-       dev_priv->mm.pin_memory -= size;
+       dev_priv->mm.pin_memory -= obj->gtt_space->size;
+       if (obj->pin_mappable) {
+               obj->pin_mappable = false;
+               i915_gem_info_update_mappable(dev_priv, obj, false);
+       }
 }
 
 int
@@ -173,6 +220,7 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 
 int i915_gem_do_init(struct drm_device *dev,
                     unsigned long start,
+                    unsigned long mappable_end,
                     unsigned long end)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -187,6 +235,8 @@ int i915_gem_do_init(struct drm_device *dev,
                    end - start);
 
        dev_priv->mm.gtt_total = end - start;
+       dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+       dev_priv->mm.gtt_mappable_end = mappable_end;
 
        return 0;
 }
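
With the new mappable_end argument the two totals can diverge on chips where only part of the GTT is visible through the CPU aperture; the legacy init ioctl below simply passes gtt_end twice, treating everything as mappable. A quick sketch with hypothetical sizes (512 MiB GTT, 256 MiB aperture, start at 0):

        unsigned long start        = 0;
        unsigned long end          = 512ul << 20;   /* total GTT    */
        unsigned long mappable_end = 256ul << 20;   /* CPU aperture */

        unsigned long gtt_total = end - start;                        /* 512 MiB */
        unsigned long mappable_total =
                (end < mappable_end ? end : mappable_end) - start;    /* 256 MiB */
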
@@ -199,7 +249,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
        int ret;
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+       ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -259,22 +309,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static inline int
-fast_shmem_read(struct page **pages,
-               loff_t page_base, int page_offset,
-               char __user *data,
-               int length)
-{
-       char *vaddr;
-       int ret;
-
-       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-       kunmap_atomic(vaddr);
-
-       return ret;
-}
-
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
@@ -362,8 +396,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_file *file_priv)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
        ssize_t remain;
-       loff_t offset, page_base;
+       loff_t offset;
        char __user *user_data;
        int page_offset, page_length;
 
@@ -374,21 +409,34 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        offset = args->offset;
 
        while (remain > 0) {
+               struct page *page;
+               char *vaddr;
+               int ret;
+
                /* Operation in this page
                 *
-                * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               if (fast_shmem_read(obj_priv->pages,
-                                   page_base, page_offset,
-                                   user_data, page_length))
+               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               vaddr = kmap_atomic(page);
+               ret = __copy_to_user_inatomic(user_data,
+                                             vaddr + page_offset,
+                                             page_length);
+               kunmap_atomic(vaddr);
+
+               mark_page_accessed(page);
+               page_cache_release(page);
+               if (ret)
                        return -EFAULT;
 
                remain -= page_length;
@@ -399,30 +447,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        return 0;
 }
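
The fast pread path now pins each backing page with read_cache_page_gfp() only for the duration of one atomic copy, instead of relying on a long-lived obj_priv->pages array. The per-page chunking itself is unchanged; a standalone userspace sketch (PAGE_SIZE assumed to be 4096) of how one request is split:

        #include <stdio.h>

        int main(void)
        {
                const unsigned long page_size = 4096;
                unsigned long offset = 4000, remain = 6000;

                while (remain > 0) {
                        unsigned long page_offset = offset & (page_size - 1);
                        unsigned long page_length = remain;
                        if (page_offset + remain > page_size)
                                page_length = page_size - page_offset;

                        /* prints chunks of 96, 4096 and 1808 bytes */
                        printf("page %lu: %lu bytes at offset %lu\n",
                               offset / page_size, page_length, page_offset);

                        remain -= page_length;
                        offset += page_length;
                }
                return 0;
        }
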
 
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
-       int ret;
-
-       ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
-       /* If we've insufficient memory to map in the pages, attempt
-        * to make some space by throwing out some old buffers.
-        */
-       if (ret == -ENOMEM) {
-               struct drm_device *dev = obj->dev;
-
-               ret = i915_gem_evict_something(dev, obj->size,
-                                              i915_gem_get_gtt_alignment(obj));
-               if (ret)
-                       return ret;
-
-               ret = i915_gem_object_get_pages(obj, 0);
-       }
-
-       return ret;
-}
-
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -434,14 +458,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
 {
+       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
-       int shmem_page_index, shmem_page_offset;
-       int data_page_index,  data_page_offset;
+       int shmem_page_offset;
+       int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
@@ -484,15 +509,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
        offset = args->offset;
 
        while (remain > 0) {
+               struct page *page;
+
                /* Operation in this page
                 *
-                * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
-               shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;
@@ -503,8 +528,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
+               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
                if (do_bit17_swizzling) {
-                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(page,
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
@@ -513,11 +543,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                } else {
                        slow_shmem_copy(user_pages[data_page_index],
                                        data_page_offset,
-                                       obj_priv->pages[shmem_page_index],
+                                       page,
                                        shmem_page_offset,
                                        page_length);
                }
 
+               mark_page_accessed(page);
+               page_cache_release(page);
+
                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
@@ -526,6 +559,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 out:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
+               mark_page_accessed(user_pages[i]);
                page_cache_release(user_pages[i]);
        }
        drm_free_large(user_pages);
@@ -581,15 +615,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       ret = i915_gem_object_get_pages_or_evict(obj);
-       if (ret)
-               goto out;
-
        ret = i915_gem_object_set_cpu_read_domain_range(obj,
                                                        args->offset,
                                                        args->size);
        if (ret)
-               goto out_put;
+               goto out;
 
        ret = -EFAULT;
        if (!i915_gem_object_needs_bit17_swizzle(obj))
@@ -597,8 +627,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        if (ret == -EFAULT)
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 
-out_put:
-       i915_gem_object_put_pages(obj);
 out:
        drm_gem_object_unreference(obj);
 unlock:
@@ -650,22 +678,6 @@ slow_kernel_write(struct io_mapping *mapping,
        io_mapping_unmap(dst_vaddr);
 }
 
-static inline int
-fast_shmem_write(struct page **pages,
-                loff_t page_base, int page_offset,
-                char __user *data,
-                int length)
-{
-       char *vaddr;
-       int ret;
-
-       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-       ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-       kunmap_atomic(vaddr);
-
-       return ret;
-}
-
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
@@ -822,9 +834,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
 {
+       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
-       loff_t offset, page_base;
+       loff_t offset;
        char __user *user_data;
        int page_offset, page_length;
 
@@ -836,21 +849,40 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        obj_priv->dirty = 1;
 
        while (remain > 0) {
+               struct page *page;
+               char *vaddr;
+               int ret;
+
                /* Operation in this page
                 *
-                * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               if (fast_shmem_write(obj_priv->pages,
-                                      page_base, page_offset,
-                                      user_data, page_length))
+               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               vaddr = kmap_atomic(page, KM_USER0);
+               ret = __copy_from_user_inatomic(vaddr + page_offset,
+                                               user_data,
+                                               page_length);
+               kunmap_atomic(vaddr, KM_USER0);
+
+               set_page_dirty(page);
+               mark_page_accessed(page);
+               page_cache_release(page);
+
+               /* If we get a fault while copying data, then (presumably) our
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
+                */
+               if (ret)
                        return -EFAULT;
 
                remain -= page_length;
@@ -873,13 +905,14 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
 {
+       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
-       int shmem_page_index, shmem_page_offset;
+       int shmem_page_offset;
        int data_page_index,  data_page_offset;
        int page_length;
        int ret;
@@ -922,15 +955,15 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        obj_priv->dirty = 1;
 
        while (remain > 0) {
+               struct page *page;
+
                /* Operation in this page
                 *
-                * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
-               shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;
@@ -941,21 +974,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
+               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       goto out;
+               }
+
                if (do_bit17_swizzling) {
-                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(page,
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              0);
                } else {
-                       slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                       slow_shmem_copy(page,
                                        shmem_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);
                }
 
+               set_page_dirty(page);
+               mark_page_accessed(page);
+               page_cache_release(page);
+
                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
@@ -1029,7 +1073,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 obj_priv->gtt_space &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
-               ret = i915_gem_object_pin(obj, 0);
+               ret = i915_gem_object_pin(obj, 0, true);
                if (ret)
                        goto out;
 
@@ -1044,22 +1088,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 out_unpin:
                i915_gem_object_unpin(obj);
        } else {
-               ret = i915_gem_object_get_pages_or_evict(obj);
-               if (ret)
-                       goto out;
-
                ret = i915_gem_object_set_to_cpu_domain(obj, 1);
                if (ret)
-                       goto out_put;
+                       goto out;
 
                ret = -EFAULT;
                if (!i915_gem_object_needs_bit17_swizzle(obj))
                        ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
                if (ret == -EFAULT)
                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
-               i915_gem_object_put_pages(obj);
        }
 
 out:
@@ -1192,6 +1229,7 @@ int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
@@ -1204,6 +1242,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
+       if (obj->size > dev_priv->mm.gtt_mappable_end) {
+               drm_gem_object_unreference_unlocked(obj);
+               return -E2BIG;
+       }
+
        offset = args->offset;
 
        down_write(&current->mm->mmap_sem);
@@ -1253,14 +1296,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
+       BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
+
+       if (obj_priv->gtt_space) {
+               if (!obj_priv->map_and_fenceable) {
+                       ret = i915_gem_object_unbind(obj);
+                       if (ret)
+                               goto unlock;
+               }
+       }
+
        if (!obj_priv->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, 0);
+               ret = i915_gem_object_bind_to_gtt(obj, 0, true);
                if (ret)
                        goto unlock;
+       }
 
-               ret = i915_gem_object_set_to_gtt_domain(obj, write);
-               if (ret)
-                       goto unlock;
+       ret = i915_gem_object_set_to_gtt_domain(obj, write);
+       if (ret)
+               goto unlock;
+
+       if (!obj_priv->fault_mappable) {
+               obj_priv->fault_mappable = true;
+               i915_gem_info_update_mappable(dev_priv, obj_priv, true);
        }
 
        /* Need a new fence register? */
@@ -1282,11 +1340,12 @@ unlock:
        mutex_unlock(&dev->struct_mutex);
 
        switch (ret) {
+       case -EAGAIN:
+               set_need_resched();
        case 0:
        case -ERESTARTSYS:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
-       case -EAGAIN:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
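
-EAGAIN (e.g. the GPU is wedged, as i915_do_wait_request() below reports) is now grouped with the success case rather than with -ENOMEM: set_need_resched() is called and, assuming the missing break is deliberate, control falls through to VM_FAULT_NOPAGE so the faulting task yields and retries the fault instead of being OOM-killed. Sketch of that reading:

        case -EAGAIN:
                set_need_resched();
                /* no break: return VM_FAULT_NOPAGE and let the faulting
                 * task retry once the GPU has recovered */
        case 0:
        case -ERESTARTSYS:
                return VM_FAULT_NOPAGE;
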
@@ -1309,7 +1368,6 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret = 0;
@@ -1348,16 +1406,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
                goto out_free_mm;
        }
 
-       /* By now we should be all set, any drm_mmap request on the offset
-        * below will get to our mmap & fault handler */
-       obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
        return 0;
 
 out_free_mm:
        drm_mm_put_block(list->file_offset_node);
 out_free_list:
        kfree(list->map);
+       list->map = NULL;
 
        return ret;
 }
@@ -1380,35 +1435,31 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       if (dev->dev_mapping)
+       if (unlikely(obj->map_list.map && dev->dev_mapping))
                unmap_mapping_range(dev->dev_mapping,
-                                   obj_priv->mmap_offset, obj->size, 1);
+                                   (loff_t)obj->map_list.hash.key<<PAGE_SHIFT,
+                                   obj->size, 1);
+
+       if (obj_priv->fault_mappable) {
+               obj_priv->fault_mappable = false;
+               i915_gem_info_update_mappable(dev_priv, obj_priv, false);
+       }
 }
 
 static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
+       struct drm_map_list *list = &obj->map_list;
 
-       list = &obj->map_list;
        drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-       if (list->file_offset_node) {
-               drm_mm_put_block(list->file_offset_node);
-               list->file_offset_node = NULL;
-       }
-
-       if (list->map) {
-               kfree(list->map);
-               list->map = NULL;
-       }
-
-       obj_priv->mmap_offset = 0;
+       drm_mm_put_block(list->file_offset_node);
+       kfree(list->map);
+       list->map = NULL;
 }
 
 /**
@@ -1416,35 +1467,89 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
  * @obj: object to check
  *
  * Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
 {
-       struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int start, i;
+       struct drm_device *dev = obj_priv->base.dev;
 
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+       if (INTEL_INFO(dev)->gen >= 4 ||
+           obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;
 
+       /*
+        * Previous chips need to be aligned to the size of the smallest
+        * fence register that can contain the object.
+        */
+       return i915_gem_get_gtt_size(obj_priv);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ *                                      unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv)
+{
+       struct drm_device *dev = obj_priv->base.dev;
+       int tile_height;
+
+       /*
+        * Minimum alignment is 4k (GTT page size) for sane hw.
+        */
+       if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+           obj_priv->tiling_mode == I915_TILING_NONE)
+               return 4096;
+
+       /*
+        * Older chips need unfenced tiled buffers to be aligned to the left
+        * edge of an even tile row (where tile rows are counted as if the bo is
+        * placed in a fenced gtt region).
+        */
+       if (IS_GEN2(dev) ||
+           (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+               tile_height = 32;
+       else
+               tile_height = 8;
+
+       return tile_height * obj_priv->stride * 2;
+}
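
For pre-965 chips the unfenced alignment works out to two tile rows' worth of bytes. A worked example with a hypothetical 2048-byte stride: an X-tiled gen3 buffer uses 8-row tiles, while gen2 (or 128-byte Y tiles) uses 32-row tiles:

        uint32_t stride = 2048;                     /* hypothetical */

        uint32_t gen3_x_tiled = 8  * stride * 2;    /*  32 KiB */
        uint32_t gen2_tiled   = 32 * stride * 2;    /* 128 KiB */
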
+
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
+{
+       struct drm_device *dev = obj_priv->base.dev;
+       uint32_t size;
+
+       /*
+        * Minimum alignment is 4k (GTT page size), but might be greater
+        * if a fence register is needed for the object.
+        */
+       if (INTEL_INFO(dev)->gen >= 4)
+               return obj_priv->base.size;
+
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
        if (INTEL_INFO(dev)->gen == 3)
-               start = 1024*1024;
+               size = 1024*1024;
        else
-               start = 512*1024;
+               size = 512*1024;
 
-       for (i = start; i < obj->size; i <<= 1)
-               ;
+       while (size < obj_priv->base.size)
+               size <<= 1;
 
-       return i;
+       return size;
 }
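
i915_gem_get_gtt_size() replaces the old open-coded for-loop in the alignment helper with a power-of-two round-up from the minimum fence size (1 MiB on gen3, 512 KiB before that); on gen4+ the object size is returned as-is since those fences need no such rounding. Example with a hypothetical 1.5 MiB object on gen3:

        uint32_t size = 1024 * 1024;            /* gen3 minimum fence size */
        uint32_t obj_size = 3 * 512 * 1024;     /* 1.5 MiB object          */

        while (size < obj_size)
                size <<= 1;                     /* size == 2 MiB           */
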
 
 /**
@@ -1466,6 +1571,7 @@ int
 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap_gtt *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
@@ -1485,29 +1591,24 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        }
        obj_priv = to_intel_bo(obj);
 
+       if (obj->size > dev_priv->mm.gtt_mappable_end) {
+               ret = -E2BIG;
+               goto unlock;
+       }
+
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
                ret = -EINVAL;
                goto out;
        }
 
-       if (!obj_priv->mmap_offset) {
+       if (!obj->map_list.map) {
                ret = i915_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }
 
-       args->offset = obj_priv->mmap_offset;
-
-       /*
-        * Pull it into the GTT so that we have a page list (makes the
-        * initial fault faster and any subsequent flushing possible).
-        */
-       if (!obj_priv->agp_mem) {
-               ret = i915_gem_object_bind_to_gtt(obj, 0);
-               if (ret)
-                       goto out;
-       }
+       args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 
 out:
        drm_gem_object_unreference(obj);
@@ -1516,19 +1617,62 @@ unlock:
        return ret;
 }
 
+static int
+i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
+                             gfp_t gfpmask)
+{
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       int page_count, i;
+       struct address_space *mapping;
+       struct inode *inode;
+       struct page *page;
+
+       /* Get the list of pages out of our struct file.  They'll be pinned
+        * at this point until we release them.
+        */
+       page_count = obj->size / PAGE_SIZE;
+       BUG_ON(obj_priv->pages != NULL);
+       obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+       if (obj_priv->pages == NULL)
+               return -ENOMEM;
+
+       inode = obj->filp->f_path.dentry->d_inode;
+       mapping = inode->i_mapping;
+       for (i = 0; i < page_count; i++) {
+               page = read_cache_page_gfp(mapping, i,
+                                          GFP_HIGHUSER |
+                                          __GFP_COLD |
+                                          __GFP_RECLAIMABLE |
+                                          gfpmask);
+               if (IS_ERR(page))
+                       goto err_pages;
+
+               obj_priv->pages[i] = page;
+       }
+
+       if (obj_priv->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_do_bit_17_swizzle(obj);
+
+       return 0;
+
+err_pages:
+       while (i--)
+               page_cache_release(obj_priv->pages[i]);
+
+       drm_free_large(obj_priv->pages);
+       obj_priv->pages = NULL;
+       return PTR_ERR(page);
+}
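
The err_pages unwind relies on i holding the index of the page that failed, so "while (i--)" releases exactly the pages already obtained, in reverse order, without touching slot i itself. A generic sketch of the idiom (acquire/release, res and n are placeholders, not functions from this file):

        for (i = 0; i < n; i++) {
                res[i] = acquire(i);
                if (res[i] == NULL)
                        goto err;
        }
        return 0;

err:
        while (i--)              /* drops res[i-1] .. res[0] */
                release(res[i]);
        return -ENOMEM;
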
+
 static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size / PAGE_SIZE;
        int i;
 
-       BUG_ON(obj_priv->pages_refcount == 0);
        BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
 
-       if (--obj_priv->pages_refcount != 0)
-               return;
-
        if (obj_priv->tiling_mode != I915_TILING_NONE)
                i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -1555,9 +1699,7 @@ i915_gem_next_request_seqno(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-
-       ring->outstanding_lazy_request = true;
-       return dev_priv->next_seqno;
+       return ring->outstanding_lazy_request = dev_priv->next_seqno;
 }
 
 static void
@@ -1683,7 +1825,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        }
 }
 
-uint32_t
+int
 i915_add_request(struct drm_device *dev,
                 struct drm_file *file,
                 struct drm_i915_gem_request *request,
@@ -1693,17 +1835,17 @@ i915_add_request(struct drm_device *dev,
        struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
+       int ret;
+
+       BUG_ON(request == NULL);
 
        if (file != NULL)
                file_priv = file->driver_priv;
 
-       if (request == NULL) {
-               request = kzalloc(sizeof(*request), GFP_KERNEL);
-               if (request == NULL)
-                       return 0;
-       }
+       ret = ring->add_request(ring, &seqno);
+       if (ret)
+               return ret;
 
-       seqno = ring->add_request(dev, ring, 0);
        ring->outstanding_lazy_request = false;
 
        request->seqno = seqno;
@@ -1727,7 +1869,7 @@ i915_add_request(struct drm_device *dev,
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work, HZ);
        }
-       return seqno;
+       return 0;
 }
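
i915_add_request() now returns an errno instead of a seqno (where 0 previously meant failure) and no longer allocates the request itself, so callers allocate up front, free on error, and read request->seqno on success. The i915_do_wait_request() hunk below adopts exactly this pattern:

        struct drm_i915_gem_request *request;

        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
                return -ENOMEM;

        ret = i915_add_request(dev, NULL, request, ring);
        if (ret) {
                kfree(request);      /* on error the caller still owns it */
                return ret;
        }
        seqno = request->seqno;
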
 
 /**
@@ -1745,8 +1887,7 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
        if (INTEL_INFO(dev)->gen >= 4)
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
-       ring->flush(dev, ring,
-                       I915_GEM_DOMAIN_COMMAND, flush_domains);
+       ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
 }
 
 static inline void
@@ -1853,7 +1994,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
        WARN_ON(i915_verify_lists(dev));
 
-       seqno = ring->get_seqno(dev, ring);
+       seqno = ring->get_seqno(ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -1894,7 +2035,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-               ring->user_irq_put(dev, ring);
+               ring->user_irq_put(ring);
                dev_priv->trace_irq_seqno = 0;
        }
 
@@ -1964,14 +2105,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
        if (atomic_read(&dev_priv->mm.wedged))
                return -EAGAIN;
 
-       if (ring->outstanding_lazy_request) {
-               seqno = i915_add_request(dev, NULL, NULL, ring);
-               if (seqno == 0)
+       if (seqno == ring->outstanding_lazy_request) {
+               struct drm_i915_gem_request *request;
+
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
                        return -ENOMEM;
+
+               ret = i915_add_request(dev, NULL, request, ring);
+               if (ret) {
+                       kfree(request);
+                       return ret;
+               }
+
+               seqno = request->seqno;
        }
-       BUG_ON(seqno == dev_priv->next_seqno);
 
-       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1985,21 +2135,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
                trace_i915_gem_request_wait_begin(dev, seqno);
 
-               ring->waiting_gem_seqno = seqno;
-               ring->user_irq_get(dev, ring);
+               ring->waiting_seqno = seqno;
+               ring->user_irq_get(ring);
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
-                               i915_seqno_passed(
-                                       ring->get_seqno(dev, ring), seqno)
+                               i915_seqno_passed(ring->get_seqno(ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
-                               i915_seqno_passed(
-                                       ring->get_seqno(dev, ring), seqno)
+                               i915_seqno_passed(ring->get_seqno(ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
 
-               ring->user_irq_put(dev, ring);
-               ring->waiting_gem_seqno = 0;
+               ring->user_irq_put(ring);
+               ring->waiting_seqno = 0;
 
                trace_i915_gem_request_wait_end(dev, seqno);
        }
@@ -2008,7 +2156,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-                         __func__, ret, seqno, ring->get_seqno(dev, ring),
+                         __func__, ret, seqno, ring->get_seqno(ring),
                          dev_priv->next_seqno);
 
        /* Directly dispatch request retiring.  While we have the work queue
@@ -2040,7 +2188,7 @@ i915_gem_flush_ring(struct drm_device *dev,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
 {
-       ring->flush(dev, ring, invalidate_domains, flush_domains);
+       ring->flush(ring, invalidate_domains, flush_domains);
        i915_gem_process_flushing_list(dev, flush_domains, ring);
 }
 
@@ -2151,11 +2299,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        drm_unbind_agp(obj_priv->agp_mem);
        drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 
-       i915_gem_object_put_pages(obj);
-       BUG_ON(obj_priv->pages_refcount);
+       i915_gem_object_put_pages_gtt(obj);
 
-       i915_gem_info_remove_gtt(dev_priv, obj->size);
+       i915_gem_info_remove_gtt(dev_priv, obj_priv);
        list_del_init(&obj_priv->mm_list);
+       /* Avoid an unnecessary call to unbind on rebind. */
+       obj_priv->map_and_fenceable = true;
 
        drm_mm_put_block(obj_priv->gtt_space);
        obj_priv->gtt_space = NULL;
@@ -2210,72 +2359,16 @@ i915_gpu_idle(struct drm_device *dev)
        return 0;
 }
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-                         gfp_t gfpmask)
+static void sandybridge_write_fence_reg(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int page_count, i;
-       struct address_space *mapping;
-       struct inode *inode;
-       struct page *page;
-
-       BUG_ON(obj_priv->pages_refcount
-                       == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
-       if (obj_priv->pages_refcount++ != 0)
-               return 0;
-
-       /* Get the list of pages out of our struct file.  They'll be pinned
-        * at this point until we release them.
-        */
-       page_count = obj->size / PAGE_SIZE;
-       BUG_ON(obj_priv->pages != NULL);
-       obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
-       if (obj_priv->pages == NULL) {
-               obj_priv->pages_refcount--;
-               return -ENOMEM;
-       }
-
-       inode = obj->filp->f_path.dentry->d_inode;
-       mapping = inode->i_mapping;
-       for (i = 0; i < page_count; i++) {
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER |
-                                          __GFP_COLD |
-                                          __GFP_RECLAIMABLE |
-                                          gfpmask);
-               if (IS_ERR(page))
-                       goto err_pages;
-
-               obj_priv->pages[i] = page;
-       }
-
-       if (obj_priv->tiling_mode != I915_TILING_NONE)
-               i915_gem_object_do_bit_17_swizzle(obj);
-
-       return 0;
-
-err_pages:
-       while (i--)
-               page_cache_release(obj_priv->pages[i]);
-
-       drm_free_large(obj_priv->pages);
-       obj_priv->pages = NULL;
-       obj_priv->pages_refcount--;
-       return PTR_ERR(page);
-}
-
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
-{
-       struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       u32 size = i915_gem_get_gtt_size(obj_priv);
        int regnum = obj_priv->fence_reg;
        uint64_t val;
 
-       val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+       val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
                    0xfffff000) << 32;
        val |= obj_priv->gtt_offset & 0xfffff000;
        val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
@@ -2288,16 +2381,16 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
 }
 
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static void i965_write_fence_reg(struct drm_gem_object *obj)
 {
-       struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       u32 size = i915_gem_get_gtt_size(obj_priv);
        int regnum = obj_priv->fence_reg;
        uint64_t val;
 
-       val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+       val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
                    0xfffff000) << 32;
        val |= obj_priv->gtt_offset & 0xfffff000;
        val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
@@ -2308,21 +2401,20 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
        I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
 }
 
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static void i915_write_fence_reg(struct drm_gem_object *obj)
 {
-       struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int regnum = obj_priv->fence_reg;
+       u32 size = i915_gem_get_gtt_size(obj_priv);
+       uint32_t fence_reg, val, pitch_val;
        int tile_width;
-       uint32_t fence_reg, val;
-       uint32_t pitch_val;
 
        if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
-           (obj_priv->gtt_offset & (obj->size - 1))) {
-               WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
-                    __func__, obj_priv->gtt_offset, obj->size);
+           (obj_priv->gtt_offset & (size - 1))) {
+               WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
+                    __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size,
+                    obj_priv->gtt_space->start, obj_priv->gtt_space->size);
                return;
        }
 
@@ -2345,23 +2437,24 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-       val |= I915_FENCE_SIZE_BITS(obj->size);
+       val |= I915_FENCE_SIZE_BITS(size);
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;
 
-       if (regnum < 8)
-               fence_reg = FENCE_REG_830_0 + (regnum * 4);
+       fence_reg = obj_priv->fence_reg;
+       if (fence_reg < 8)
+               fence_reg = FENCE_REG_830_0 + fence_reg * 4;
        else
-               fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
+               fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
        I915_WRITE(fence_reg, val);
 }
 
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static void i830_write_fence_reg(struct drm_gem_object *obj)
 {
-       struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       u32 size = i915_gem_get_gtt_size(obj_priv);
        int regnum = obj_priv->fence_reg;
        uint32_t val;
        uint32_t pitch_val;
@@ -2381,7 +2474,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-       fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
+       fence_size_bits = I830_FENCE_SIZE_BITS(size);
        WARN_ON(fence_size_bits & ~0x00000f00);
        val |= fence_size_bits;
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
@@ -2393,10 +2486,9 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 static int i915_find_fence_reg(struct drm_device *dev,
                               bool interruptible)
 {
-       struct drm_i915_fence_reg *reg = NULL;
-       struct drm_i915_gem_object *obj_priv = NULL;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj = NULL;
+       struct drm_i915_fence_reg *reg;
+       struct drm_i915_gem_object *obj_priv = NULL;
        int i, avail, ret;
 
        /* First try to find a free reg */
@@ -2415,33 +2507,31 @@ static int i915_find_fence_reg(struct drm_device *dev,
                return -ENOSPC;
 
        /* None available, try to steal one or wait for a user to finish */
-       i = I915_FENCE_REG_NONE;
+       avail = I915_FENCE_REG_NONE;
        list_for_each_entry(reg, &dev_priv->mm.fence_list,
                            lru_list) {
-               obj = reg->obj;
-               obj_priv = to_intel_bo(obj);
-
+               obj_priv = to_intel_bo(reg->obj);
                if (obj_priv->pin_count)
                        continue;
 
                /* found one! */
-               i = obj_priv->fence_reg;
+               avail = obj_priv->fence_reg;
                break;
        }
 
-       BUG_ON(i == I915_FENCE_REG_NONE);
+       BUG_ON(avail == I915_FENCE_REG_NONE);
 
        /* We only have a reference on obj from the active list. put_fence_reg
         * might drop that one, causing a use-after-free in it. So hold a
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
-       drm_gem_object_reference(obj);
-       ret = i915_gem_object_put_fence_reg(obj, interruptible);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_reference(&obj_priv->base);
+       ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible);
+       drm_gem_object_unreference(&obj_priv->base);
        if (ret != 0)
                return ret;
 
-       return i;
+       return avail;
 }
 
 /**
@@ -2506,22 +2596,23 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
 
        switch (INTEL_INFO(dev)->gen) {
        case 6:
-               sandybridge_write_fence_reg(reg);
+               sandybridge_write_fence_reg(obj);
                break;
        case 5:
        case 4:
-               i965_write_fence_reg(reg);
+               i965_write_fence_reg(obj);
                break;
        case 3:
-               i915_write_fence_reg(reg);
+               i915_write_fence_reg(obj);
                break;
        case 2:
-               i830_write_fence_reg(reg);
+               i830_write_fence_reg(obj);
                break;
        }
 
-       trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
-                       obj_priv->tiling_mode);
+       trace_i915_gem_object_get_fence(obj,
+                                       obj_priv->fence_reg,
+                                       obj_priv->tiling_mode);
 
        return 0;
 }
@@ -2624,13 +2715,17 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+                           unsigned alignment,
+                           bool map_and_fenceable)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_mm_node *free_space;
-       gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
+       gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+       u32 size, fence_size, fence_alignment, unfenced_alignment;
+       bool mappable, fenceable;
        int ret;
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2638,47 +2733,73 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                return -EINVAL;
        }
 
+       fence_size = i915_gem_get_gtt_size(obj_priv);
+       fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
+       unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv);
+
        if (alignment == 0)
-               alignment = i915_gem_get_gtt_alignment(obj);
-       if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+               alignment = map_and_fenceable ? fence_alignment :
+                                               unfenced_alignment;
+       if (map_and_fenceable && alignment & (fence_alignment - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }
 
+       size = map_and_fenceable ? fence_size : obj->size;
+
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->size > dev_priv->mm.gtt_total) {
+       if (obj->size >
+           (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
        }
 
  search_free:
-       free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-                                       obj->size, alignment, 0);
-       if (free_space != NULL)
-               obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
-                                                      alignment);
+       if (map_and_fenceable)
+               free_space =
+                       drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+                                                   size, alignment, 0,
+                                                   dev_priv->mm.gtt_mappable_end,
+                                                   0);
+       else
+               free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+                                               size, alignment, 0);
+
+       if (free_space != NULL) {
+               if (map_and_fenceable)
+                       obj_priv->gtt_space =
+                               drm_mm_get_block_range_generic(free_space,
+                                                              size, alignment, 0,
+                                                              dev_priv->mm.gtt_mappable_end,
+                                                              0);
+               else
+                       obj_priv->gtt_space =
+                               drm_mm_get_block(free_space, size, alignment);
+       }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
-               ret = i915_gem_evict_something(dev, obj->size, alignment);
+               ret = i915_gem_evict_something(dev, size, alignment,
+                                              map_and_fenceable);
                if (ret)
                        return ret;
 
                goto search_free;
        }
 
-       ret = i915_gem_object_get_pages(obj, gfpmask);
+       ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
 
                if (ret == -ENOMEM) {
                        /* first try to clear up some space from the GTT */
-                       ret = i915_gem_evict_something(dev, obj->size,
-                                                      alignment);
+                       ret = i915_gem_evict_something(dev, size,
+                                                      alignment,
+                                                      map_and_fenceable);
                        if (ret) {
                                /* now try to shrink everyone else */
                                if (gfpmask) {
@@ -2704,20 +2825,23 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                                               obj_priv->gtt_space->start,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
-               i915_gem_object_put_pages(obj);
+               i915_gem_object_put_pages_gtt(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
 
-               ret = i915_gem_evict_something(dev, obj->size, alignment);
+               ret = i915_gem_evict_something(dev, size,
+                                              alignment, map_and_fenceable);
                if (ret)
                        return ret;
 
                goto search_free;
        }
 
+       obj_priv->gtt_offset = obj_priv->gtt_space->start;
+
        /* keep track of bounds object by adding it to the inactive list */
        list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-       i915_gem_info_add_gtt(dev_priv, obj->size);
+       i915_gem_info_add_gtt(dev_priv, obj_priv);
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2726,8 +2850,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
-       obj_priv->gtt_offset = obj_priv->gtt_space->start;
-       trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+       trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable);
+
+       fenceable =
+               obj_priv->gtt_space->size == fence_size &&
+               (obj_priv->gtt_space->start & (fence_alignment - 1)) == 0;
+
+       mappable =
+               obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
+
+       obj_priv->map_and_fenceable = mappable && fenceable;
 
        return 0;
 }
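
The tail of i915_gem_object_bind_to_gtt() above now caches, per object, whether the node it just allocated can be reached through the mappable aperture and used by a fence register. A rough standalone sketch of that predicate is given below; the function and parameter names are invented for illustration, and it assumes fence_alignment is a power of two.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical sketch, not the driver's API: a node is "fenceable" when it
     * is exactly fence-sized and fence-aligned, and "mappable" when the object
     * ends before the mappable aperture limit. */
    static bool node_is_map_and_fenceable(uint64_t start, uint64_t node_size,
                                          uint64_t obj_size, uint64_t fence_size,
                                          uint64_t fence_alignment,
                                          uint64_t gtt_mappable_end)
    {
            bool fenceable = node_size == fence_size &&
                             (start & (fence_alignment - 1)) == 0;
            bool mappable = start + obj_size <= gtt_mappable_end;

            return fenceable && mappable;
    }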
@@ -2755,22 +2887,16 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
                                       bool pipelined)
 {
        struct drm_device *dev = obj->dev;
-       uint32_t old_write_domain;
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        /* Queue the GPU write cache flushing we need. */
-       old_write_domain = obj->write_domain;
        i915_gem_flush_ring(dev, NULL,
                            to_intel_bo(obj)->ring,
                            0, obj->write_domain);
        BUG_ON(obj->write_domain);
 
-       trace_i915_gem_object_change_domain(obj,
-                                           obj->read_domains,
-                                           old_write_domain);
-
        if (pipelined)
                return 0;
 
@@ -2790,6 +2916,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
         * to it immediately go to main memory as far as we know, so there's
         * no chipset flush.  It also doesn't land in render cache.
         */
+       i915_gem_release_mmap(obj);
+
        old_write_domain = obj->write_domain;
        obj->write_domain = 0;
 
@@ -3093,16 +3221,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  */
 static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                 struct intel_ring_buffer *ring)
+                                 struct intel_ring_buffer *ring,
+                                 struct change_domains *cd)
 {
-       struct drm_device               *dev = obj->dev;
-       struct drm_i915_private         *dev_priv = dev->dev_private;
        struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
-       uint32_t                        old_read_domains;
-
-       intel_mark_busy(dev, obj);
 
        /*
         * If the object isn't moving to a new write domain,
@@ -3110,8 +3234,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         */
        if (obj->pending_write_domain == 0)
                obj->pending_read_domains |= obj->read_domains;
-       else
-               obj_priv->dirty = 1;
 
        /*
         * Flush the current write domain if
@@ -3134,7 +3256,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
 
-       old_read_domains = obj->read_domains;
+       /* blow away mappings if mapped through GTT */
+       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+               i915_gem_release_mmap(obj);
 
        /* The actual obj->write_domain will be updated with
         * pending_write_domain after we emit the accumulated flush for all
@@ -3144,18 +3268,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         */
        if (flush_domains == 0 && obj->pending_write_domain == 0)
                obj->pending_write_domain = obj->write_domain;
-       obj->read_domains = obj->pending_read_domains;
 
-       dev->invalidate_domains |= invalidate_domains;
-       dev->flush_domains |= flush_domains;
+       cd->invalidate_domains |= invalidate_domains;
+       cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
-               dev_priv->mm.flush_rings |= obj_priv->ring->id;
+               cd->flush_rings |= obj_priv->ring->id;
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-               dev_priv->mm.flush_rings |= ring->id;
-
-       trace_i915_gem_object_change_domain(obj,
-                                           old_read_domains,
-                                           obj->write_domain);
+               cd->flush_rings |= ring->id;
 }
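
Each object now folds its required invalidations, flushes, and affected rings into a caller-owned struct change_domains rather than into device-global fields, so the bookkeeping stays local to a single execbuffer call. A minimal sketch of that accumulation pattern follows; the struct and field names are simplified stand-ins and the domain rules are reduced, so this is illustrative only, not the driver's exact logic.

    #include <stdint.h>

    struct change_domains_sketch {
            uint32_t invalidate_domains;
            uint32_t flush_domains;
            uint32_t flush_rings;
    };

    struct gpu_object_sketch {
            uint32_t read_domains;
            uint32_t write_domain;
            uint32_t pending_read_domains;
            uint32_t ring_mask;
    };

    /* Fold one object's domain transition into the on-stack accumulator. */
    static void fold_object_domains(const struct gpu_object_sketch *obj,
                                    uint32_t target_ring_mask,
                                    struct change_domains_sketch *cd)
    {
            /* Read domains the object is not yet coherent with need an
             * invalidation; reading from the current write domain forces
             * that write domain to be flushed first. */
            uint32_t invalidate = obj->pending_read_domains & ~obj->read_domains;
            uint32_t flush = obj->write_domain & obj->pending_read_domains;

            cd->invalidate_domains |= invalidate;
            cd->flush_domains |= flush;
            if (flush)
                    cd->flush_rings |= obj->ring_mask;
            if (invalidate)
                    cd->flush_rings |= target_ring_mask;
    }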
 
 /**
@@ -3454,25 +3573,30 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
        int ret, i, retry;
 
        /* attempt to pin all of the buffers into the GTT */
-       for (retry = 0; retry < 2; retry++) {
+       retry = 0;
+       do {
                ret = 0;
                for (i = 0; i < count; i++) {
                        struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
-                       struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
+                       struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
                        bool need_fence =
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
 
+                       /* g33/pnv can't fence buffers in the unmappable part */
+                       bool need_mappable =
+                               entry->relocation_count ? true : need_fence;
+
                        /* Check fence reg constraints and rebind if necessary */
-                       if (need_fence &&
-                           !i915_gem_object_fence_offset_ok(&obj->base,
-                                                            obj->tiling_mode)) {
+                       if (need_mappable && !obj->map_and_fenceable) {
                                ret = i915_gem_object_unbind(&obj->base);
                                if (ret)
                                        break;
                        }
 
-                       ret = i915_gem_object_pin(&obj->base, entry->alignment);
+                       ret = i915_gem_object_pin(&obj->base,
+                                                 entry->alignment,
+                                                 need_mappable);
                        if (ret)
                                break;
 
@@ -3496,18 +3620,18 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
                while (i--)
                        i915_gem_object_unpin(object_list[i]);
 
-               if (ret == 0)
-                       break;
-
-               if (ret != -ENOSPC || retry)
+               if (ret != -ENOSPC || retry > 1)
                        return ret;
 
-               ret = i915_gem_evict_everything(dev);
+               /* First attempt, just clear anything that is purgeable.
+                * Second attempt, clear the entire GTT.
+                */
+               ret = i915_gem_evict_everything(dev, retry == 0);
                if (ret)
                        return ret;
-       }
 
-       return 0;
+               retry++;
+       } while (1);
 }
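
The pinning loop above retries after eviction at most twice: the first eviction drops only purgeable objects, the second clears the whole GTT. The same retry shape, reduced to a hypothetical standalone helper with invented callback names, looks like this:

    #include <errno.h>
    #include <stdbool.h>

    /* try_pin_all() returns 0, -ENOSPC, or another error; evict() frees
     * space, optionally restricted to purgeable objects. Both callbacks are
     * assumptions made for this sketch. */
    static int pin_with_retries(int (*try_pin_all)(void *ctx),
                                int (*evict)(void *ctx, bool purgeable_only),
                                void *ctx)
    {
            int retry = 0;

            do {
                    int ret = try_pin_all(ctx);

                    if (ret != -ENOSPC || retry > 1)
                            return ret;

                    /* First attempt: drop only what is purgeable.
                     * Second attempt: clear the entire address space. */
                    ret = evict(ctx, retry == 0);
                    if (ret)
                            return ret;

                    retry++;
            } while (1);
    }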
 
 static int
@@ -3517,30 +3641,26 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
                                struct drm_gem_object **objects,
                                int count)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct change_domains cd;
        int ret, i;
 
-       /* Zero the global flush/invalidate flags. These
-        * will be modified as new domains are computed
-        * for each object
-        */
-       dev->invalidate_domains = 0;
-       dev->flush_domains = 0;
-       dev_priv->mm.flush_rings = 0;
+       cd.invalidate_domains = 0;
+       cd.flush_domains = 0;
+       cd.flush_rings = 0;
        for (i = 0; i < count; i++)
-               i915_gem_object_set_to_gpu_domain(objects[i], ring);
+               i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd);
 
-       if (dev->invalidate_domains | dev->flush_domains) {
+       if (cd.invalidate_domains | cd.flush_domains) {
 #if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                          __func__,
-                        dev->invalidate_domains,
-                        dev->flush_domains);
+                        cd.invalidate_domains,
+                        cd.flush_domains);
 #endif
                i915_gem_flush(dev, file,
-                              dev->invalidate_domains,
-                              dev->flush_domains,
-                              dev_priv->mm.flush_rings);
+                              cd.invalidate_domains,
+                              cd.flush_domains,
+                              cd.flush_rings);
        }
 
        for (i = 0; i < count; i++) {
@@ -3591,17 +3711,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                return 0;
 
        ret = 0;
-       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
                /* And wait for the seqno passing without holding any locks and
                 * causing extra latency for others. This is safe as the irq
                 * generation is designed to be run atomically and so is
                 * lockless.
                 */
-               ring->user_irq_get(dev, ring);
+               ring->user_irq_get(ring);
                ret = wait_event_interruptible(ring->irq_queue,
-                                              i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+                                              i915_seqno_passed(ring->get_seqno(ring), seqno)
                                               || atomic_read(&dev_priv->mm.wedged));
-               ring->user_irq_put(dev, ring);
+               ring->user_irq_put(ring);
 
                if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
                        ret = -EIO;
@@ -3664,7 +3784,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
-       struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_request *request = NULL;
        int ret, i, flips;
@@ -3759,6 +3878,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Look up object handles */
        for (i = 0; i < args->buffer_count; i++) {
+               struct drm_i915_gem_object *obj_priv;
+
                object_list[i] = drm_gem_object_lookup(dev, file,
                                                       exec_list[i].handle);
                if (object_list[i] == NULL) {
@@ -3821,15 +3942,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto err;
 
-       for (i = 0; i < args->buffer_count; i++) {
-               struct drm_gem_object *obj = object_list[i];
-               uint32_t old_write_domain = obj->write_domain;
-               obj->write_domain = obj->pending_write_domain;
-               trace_i915_gem_object_change_domain(obj,
-                                                   obj->read_domains,
-                                                   old_write_domain);
-       }
-
 #if WATCH_COHERENCY
        for (i = 0; i < args->buffer_count; i++) {
                i915_gem_object_check_coherency(object_list[i],
@@ -3865,46 +3977,60 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        else
                                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 
-                       intel_ring_begin(dev, ring, 2);
-                       intel_ring_emit(dev, ring,
-                                       MI_WAIT_FOR_EVENT | flip_mask);
-                       intel_ring_emit(dev, ring, MI_NOOP);
-                       intel_ring_advance(dev, ring);
+                       ret = intel_ring_begin(ring, 2);
+                       if (ret)
+                               goto err;
+
+                       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+                       intel_ring_emit(ring, MI_NOOP);
+                       intel_ring_advance(ring);
                }
        }
 
        /* Exec the batchbuffer */
-       ret = ring->dispatch_gem_execbuffer(dev, ring, args,
-                                           cliprects, exec_offset);
+       ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
        }
 
-       /*
-        * Ensure that the commands in the batch buffer are
-        * finished before the interrupt fires
-        */
-       i915_retire_commands(dev, ring);
-
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
 
+               obj->read_domains = obj->pending_read_domains;
+               obj->write_domain = obj->pending_write_domain;
+
                i915_gem_object_move_to_active(obj, ring);
-               if (obj->write_domain)
-                       list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+               if (obj->write_domain) {
+                       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+                       obj_priv->dirty = 1;
+                       list_move_tail(&obj_priv->gpu_write_list,
                                       &ring->gpu_write_list);
+                       intel_mark_busy(dev, obj);
+               }
+
+               trace_i915_gem_object_change_domain(obj,
+                                                   obj->read_domains,
+                                                   obj->write_domain);
        }
 
-       i915_add_request(dev, file, request, ring);
-       request = NULL;
+       /*
+        * Ensure that the commands in the batch buffer are
+        * finished before the interrupt fires
+        */
+       i915_retire_commands(dev, ring);
+
+       if (i915_add_request(dev, file, request, ring))
+               i915_gem_next_request_seqno(dev, ring);
+       else
+               request = NULL;
 
 err:
        for (i = 0; i < args->buffer_count; i++) {
-               if (object_list[i]) {
-                       obj_priv = to_intel_bo(object_list[i]);
-                       obj_priv->in_execbuffer = false;
-               }
+               if (object_list[i] == NULL)
+                       break;
+
+               to_intel_bo(object_list[i])->in_execbuffer = false;
                drm_gem_object_unreference(object_list[i]);
        }
 
@@ -4064,7 +4190,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 }
 
 int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+                   bool map_and_fenceable)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4072,15 +4199,19 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        int ret;
 
        BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+       BUG_ON(map_and_fenceable && !map_and_fenceable);
        WARN_ON(i915_verify_lists(dev));
 
        if (obj_priv->gtt_space != NULL) {
-               if (alignment == 0)
-                       alignment = i915_gem_get_gtt_alignment(obj);
-               if (obj_priv->gtt_offset & (alignment - 1)) {
+               if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
+                   (map_and_fenceable && !obj_priv->map_and_fenceable)) {
                        WARN(obj_priv->pin_count,
-                            "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
-                            obj_priv->gtt_offset, alignment);
+                            "bo is already pinned with incorrect alignment:"
+                            " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+                            " obj->map_and_fenceable=%d\n",
+                            obj_priv->gtt_offset, alignment,
+                            map_and_fenceable,
+                            obj_priv->map_and_fenceable);
                        ret = i915_gem_object_unbind(obj);
                        if (ret)
                                return ret;
@@ -4088,22 +4219,19 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        }
 
        if (obj_priv->gtt_space == NULL) {
-               ret = i915_gem_object_bind_to_gtt(obj, alignment);
+               ret = i915_gem_object_bind_to_gtt(obj, alignment,
+                                                 map_and_fenceable);
                if (ret)
                        return ret;
        }
 
-       obj_priv->pin_count++;
-
-       /* If the object is not active and not pending a flush,
-        * remove it from the inactive list
-        */
-       if (obj_priv->pin_count == 1) {
-               i915_gem_info_add_pin(dev_priv, obj->size);
+       if (obj_priv->pin_count++ == 0) {
+               i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable);
                if (!obj_priv->active)
                        list_move_tail(&obj_priv->mm_list,
                                       &dev_priv->mm.pinned_list);
        }
+       BUG_ON(!obj_priv->pin_mappable && map_and_fenceable);
 
        WARN_ON(i915_verify_lists(dev));
        return 0;
@@ -4117,19 +4245,14 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        WARN_ON(i915_verify_lists(dev));
-       obj_priv->pin_count--;
-       BUG_ON(obj_priv->pin_count < 0);
+       BUG_ON(obj_priv->pin_count == 0);
        BUG_ON(obj_priv->gtt_space == NULL);
 
-       /* If the object is no longer pinned, and is
-        * neither active nor being flushed, then stick it on
-        * the inactive list
-        */
-       if (obj_priv->pin_count == 0) {
+       if (--obj_priv->pin_count == 0) {
                if (!obj_priv->active)
                        list_move_tail(&obj_priv->mm_list,
                                       &dev_priv->mm.inactive_list);
-               i915_gem_info_remove_pin(dev_priv, obj->size);
+               i915_gem_info_remove_pin(dev_priv, obj_priv);
        }
        WARN_ON(i915_verify_lists(dev));
 }
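
Pinning has been reduced to a plain reference count with list moves only at the edges: the first pin takes an inactive object off the inactive list, the last unpin puts it back. A stripped-down sketch of that pattern, with the list membership replaced by a flag for brevity (names are illustrative):

    #include <assert.h>
    #include <stdbool.h>

    struct pinned_object_sketch {
            int pin_count;
            bool active;
            bool on_pinned_list;    /* stands in for the mm list membership */
    };

    static void sketch_pin(struct pinned_object_sketch *obj)
    {
            /* First pin of an inactive object: move it to the pinned list. */
            if (obj->pin_count++ == 0 && !obj->active)
                    obj->on_pinned_list = true;
    }

    static void sketch_unpin(struct pinned_object_sketch *obj)
    {
            assert(obj->pin_count > 0);
            /* Last unpin of an inactive object: back to the inactive list. */
            if (--obj->pin_count == 0 && !obj->active)
                    obj->on_pinned_list = false;
    }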
@@ -4170,7 +4293,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        obj_priv->user_pin_count++;
        obj_priv->pin_filp = file_priv;
        if (obj_priv->user_pin_count == 1) {
-               ret = i915_gem_object_pin(obj, args->alignment);
+               ret = i915_gem_object_pin(obj, args->alignment, true);
                if (ret)
                        goto out;
        }
@@ -4363,6 +4486,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->gpu_write_list);
        obj->madv = I915_MADV_WILLNEED;
+       /* Avoid an unnecessary call to unbind on the first bind. */
+       obj->map_and_fenceable = true;
 
        return &obj->base;
 }
@@ -4388,7 +4513,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
                return;
        }
 
-       if (obj_priv->mmap_offset)
+       if (obj->map_list.map)
                i915_gem_free_mmap_offset(obj);
 
        drm_gem_object_release(obj);
@@ -4436,7 +4561,7 @@ i915_gem_idle(struct drm_device *dev)
 
        /* Under UMS, be paranoid and evict. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = i915_gem_evict_inactive(dev);
+               ret = i915_gem_evict_inactive(dev, false);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
@@ -4482,7 +4607,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
-       ret = i915_gem_object_pin(obj, 4096);
+       ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;
 
@@ -4555,9 +4680,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        return 0;
 
 cleanup_bsd_ring:
-       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
 cleanup_render_ring:
-       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+       intel_cleanup_ring_buffer(&dev_priv->render_ring);
 cleanup_pipe_control:
        if (HAS_PIPE_CONTROL(dev))
                i915_gem_cleanup_pipe_control(dev);
@@ -4569,9 +4694,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-       intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+       intel_cleanup_ring_buffer(&dev_priv->render_ring);
+       intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(&dev_priv->blt_ring);
        if (HAS_PIPE_CONTROL(dev))
                i915_gem_cleanup_pipe_control(dev);
 }
@@ -4678,9 +4803,6 @@ i915_gem_load(struct drm_device *dev)
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        init_completion(&dev_priv->error_completion);
-       spin_lock(&shrink_list_lock);
-       list_add(&dev_priv->mm.shrink_list, &shrink_list);
-       spin_unlock(&shrink_list_lock);
 
        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (IS_GEN3(dev)) {
@@ -4723,6 +4845,10 @@ i915_gem_load(struct drm_device *dev)
        }
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+       dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+       dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+       register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
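
Embedding the shrinker inside dev_priv (rather than keeping a global list of devices) is what lets the callback later recover its per-device state with container_of(). A self-contained illustration of that embedding pattern, using made-up struct names and a local copy of the offsetof-based macro:

    #include <stddef.h>

    #define container_of_sketch(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct shrinker_sketch {
            int (*shrink)(struct shrinker_sketch *s, int nr_to_scan);
    };

    struct device_private_sketch {
            int inactive_objects;
            struct shrinker_sketch inactive_shrinker;  /* embedded member */
    };

    static int sketch_shrink_cb(struct shrinker_sketch *s, int nr_to_scan)
    {
            /* Recover the enclosing per-device state from the embedded member. */
            struct device_private_sketch *priv =
                    container_of_sketch(s, struct device_private_sketch,
                                        inactive_shrinker);

            (void)nr_to_scan;
            return priv->inactive_objects;
    }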
 
 /*
@@ -4794,33 +4920,35 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv;
+       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       char *vaddr;
        int i;
-       int ret;
        int page_count;
 
-       obj_priv = to_intel_bo(obj);
        if (!obj_priv->phys_obj)
                return;
-
-       ret = i915_gem_object_get_pages(obj, 0);
-       if (ret)
-               goto out;
+       vaddr = obj_priv->phys_obj->handle->vaddr;
 
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *dst = kmap_atomic(obj_priv->pages[i]);
-               char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-
-               memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(dst);
+               struct page *page = read_cache_page_gfp(mapping, i,
+                                                       GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               if (!IS_ERR(page)) {
+                       char *dst = kmap_atomic(page);
+                       memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+                       kunmap_atomic(dst);
+
+                       drm_clflush_pages(&page, 1);
+
+                       set_page_dirty(page);
+                       mark_page_accessed(page);
+                       page_cache_release(page);
+               }
        }
-       drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
 
-       i915_gem_object_put_pages(obj);
-out:
        obj_priv->phys_obj->cur_obj = NULL;
        obj_priv->phys_obj = NULL;
 }
@@ -4831,6 +4959,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                            int id,
                            int align)
 {
+       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;
@@ -4854,7 +4983,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                                                obj->size, align);
                if (ret) {
                        DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
-                       goto out;
+                       return ret;
                }
        }
 
@@ -4862,27 +4991,27 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;
 
-       ret = i915_gem_object_get_pages(obj, 0);
-       if (ret) {
-               DRM_ERROR("failed to get page list\n");
-               goto out;
-       }
-
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *src = kmap_atomic(obj_priv->pages[i]);
-               char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+               struct page *page;
+               char *dst, *src;
 
+               page = read_cache_page_gfp(mapping, i,
+                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               src = kmap_atomic(page);
+               dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(src);
-       }
 
-       i915_gem_object_put_pages(obj);
+               mark_page_accessed(page);
+               page_cache_release(page);
+       }
 
        return 0;
-out:
-       return ret;
 }
 
 static int
@@ -4948,144 +5077,68 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+                        int nr_to_scan,
+                        gfp_t gfp_mask)
 {
-       drm_i915_private_t *dev_priv, *next_dev;
-       struct drm_i915_gem_object *obj_priv, *next_obj;
-       int cnt = 0;
-       int would_deadlock = 1;
+       struct drm_i915_private *dev_priv =
+               container_of(shrinker,
+                            struct drm_i915_private,
+                            mm.inactive_shrinker);
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_i915_gem_object *obj, *next;
+       int cnt;
+
+       if (!mutex_trylock(&dev->struct_mutex))
+               return 0;
 
        /* "fast-path" to count number of available objects */
        if (nr_to_scan == 0) {
-               spin_lock(&shrink_list_lock);
-               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
-                       struct drm_device *dev = dev_priv->dev;
-
-                       if (mutex_trylock(&dev->struct_mutex)) {
-                               list_for_each_entry(obj_priv,
-                                                   &dev_priv->mm.inactive_list,
-                                                   mm_list)
-                                       cnt++;
-                               mutex_unlock(&dev->struct_mutex);
-                       }
-               }
-               spin_unlock(&shrink_list_lock);
-
-               return (cnt / 100) * sysctl_vfs_cache_pressure;
+               cnt = 0;
+               list_for_each_entry(obj,
+                                   &dev_priv->mm.inactive_list,
+                                   mm_list)
+                       cnt++;
+               mutex_unlock(&dev->struct_mutex);
+               return cnt / 100 * sysctl_vfs_cache_pressure;
        }
 
-       spin_lock(&shrink_list_lock);
-
 rescan:
        /* first scan for clean buffers */
-       list_for_each_entry_safe(dev_priv, next_dev,
-                                &shrink_list, mm.shrink_list) {
-               struct drm_device *dev = dev_priv->dev;
-
-               if (! mutex_trylock(&dev->struct_mutex))
-                       continue;
-
-               spin_unlock(&shrink_list_lock);
-               i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev);
 
-               list_for_each_entry_safe(obj_priv, next_obj,
-                                        &dev_priv->mm.inactive_list,
-                                        mm_list) {
-                       if (i915_gem_object_is_purgeable(obj_priv)) {
-                               i915_gem_object_unbind(&obj_priv->base);
-                               if (--nr_to_scan <= 0)
-                                       break;
-                       }
+       list_for_each_entry_safe(obj, next,
+                                &dev_priv->mm.inactive_list,
+                                mm_list) {
+               if (i915_gem_object_is_purgeable(obj)) {
+                       i915_gem_object_unbind(&obj->base);
+                       if (--nr_to_scan == 0)
+                               break;
                }
-
-               spin_lock(&shrink_list_lock);
-               mutex_unlock(&dev->struct_mutex);
-
-               would_deadlock = 0;
-
-               if (nr_to_scan <= 0)
-                       break;
        }
 
        /* second pass, evict/count anything still on the inactive list */
-       list_for_each_entry_safe(dev_priv, next_dev,
-                                &shrink_list, mm.shrink_list) {
-               struct drm_device *dev = dev_priv->dev;
-
-               if (! mutex_trylock(&dev->struct_mutex))
-                       continue;
-
-               spin_unlock(&shrink_list_lock);
-
-               list_for_each_entry_safe(obj_priv, next_obj,
-                                        &dev_priv->mm.inactive_list,
-                                        mm_list) {
-                       if (nr_to_scan > 0) {
-                               i915_gem_object_unbind(&obj_priv->base);
-                               nr_to_scan--;
-                       } else
-                               cnt++;
-               }
-
-               spin_lock(&shrink_list_lock);
-               mutex_unlock(&dev->struct_mutex);
-
-               would_deadlock = 0;
-       }
-
-       if (nr_to_scan) {
-               int active = 0;
-
+       cnt = 0;
+       list_for_each_entry_safe(obj, next,
+                                &dev_priv->mm.inactive_list,
+                                mm_list) {
+               if (nr_to_scan) {
+                       i915_gem_object_unbind(&obj->base);
+                       nr_to_scan--;
+               } else
+                       cnt++;
+       }
+
+       if (nr_to_scan && i915_gpu_is_active(dev)) {
                /*
                 * We are desperate for pages, so as a last resort, wait
                 * for the GPU to finish and discard whatever we can.
                 * This dramatically reduces the number of OOM-killer
                 * events whilst running the GPU aggressively.
                 */
-               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
-                       struct drm_device *dev = dev_priv->dev;
-
-                       if (!mutex_trylock(&dev->struct_mutex))
-                               continue;
-
-                       spin_unlock(&shrink_list_lock);
-
-                       if (i915_gpu_is_active(dev)) {
-                               i915_gpu_idle(dev);
-                               active++;
-                       }
-
-                       spin_lock(&shrink_list_lock);
-                       mutex_unlock(&dev->struct_mutex);
-               }
-
-               if (active)
+               if (i915_gpu_idle(dev) == 0)
                        goto rescan;
        }
-
-       spin_unlock(&shrink_list_lock);
-
-       if (would_deadlock)
-               return -1;
-       else if (cnt > 0)
-               return (cnt / 100) * sysctl_vfs_cache_pressure;
-       else
-               return 0;
-}
-
-static struct shrinker shrinker = {
-       .shrink = i915_gem_shrink,
-       .seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
-    register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
-    unregister_shrinker(&shrinker);
+       mutex_unlock(&dev->struct_mutex);
+       return cnt / 100 * sysctl_vfs_cache_pressure;
 }
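
The rewritten shrinker follows the usual two-mode contract: when nr_to_scan is zero it only reports how much could be freed, otherwise it unbinds up to nr_to_scan inactive objects first; and it returns 0 immediately if it cannot take the lock, rather than risk deadlocking against an allocation made while the mutex is held. A standalone sketch of that shape, with the locking and object list reduced to callback stand-ins (all names here are invented):

    #include <stdbool.h>
    #include <stddef.h>

    struct shrink_ops_sketch {
            bool   (*trylock)(void *dev);
            void   (*unlock)(void *dev);
            size_t (*count_inactive)(void *dev);
            size_t (*unbind_inactive)(void *dev, size_t max);  /* returns freed */
    };

    /* Returns an estimate of freeable objects, or 0 when the lock is contended. */
    static long sketch_shrink(const struct shrink_ops_sketch *ops, void *dev,
                              size_t nr_to_scan)
    {
            size_t remaining;

            if (!ops->trylock(dev))
                    return 0;

            if (nr_to_scan)
                    ops->unbind_inactive(dev, nr_to_scan);

            remaining = ops->count_inactive(dev);
            ops->unlock(dev);

            return (long)remaining;
    }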