Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[pandora-kernel.git] / drivers/gpu/drm/i915/i915_gem.c
index 94c84d7..a087e1b 100644
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
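
The new <linux/shmem_fs.h> include provides the helpers the rest of this patch switches to. For reference, shmem_read_mapping_page() is a thin wrapper over the gfp-taking variant, reusing the gfp mask stored on the mapping; a sketch of the helper as exported alongside this series (see also the mapping_set_gfp_mask() hunk in i915_gem_alloc_object() below):

    /* Sketch of the wrapper adopted below: it defers to the gfp-taking
     * variant with whatever mask has been recorded on the mapping.
     */
    static inline struct page *shmem_read_mapping_page(
                    struct address_space *mapping, pgoff_t index)
    {
            return shmem_read_mapping_page_gfp(mapping, index,
                                               mapping_gfp_mask(mapping));
    }
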
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
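
The same substitution is applied to all four shmem copy paths (pread fast/slow and pwrite fast/slow). The calling convention is unchanged: like read_cache_page_gfp(), the helper returns the page with an elevated reference count, or an ERR_PTR() on failure, so the surrounding error handling stays as it was. A sketch of the common call-site pattern, including the release that follows later in these loops:

    /* Common pattern at the four call sites (sketch): look up the page,
     * copy to or from it, then drop the reference the helper took.
     */
    page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
    if (IS_ERR(page))
            return PTR_ERR(page);
    /* ... copy to/from the page ... */
    mark_page_accessed(page);
    page_cache_release(page);
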
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                ret = i915_gem_object_bind_to_gtt(obj, 0, true);
                if (ret)
                        goto unlock;
-       }
 
-       ret = i915_gem_object_set_to_gtt_domain(obj, write);
-       if (ret)
-               goto unlock;
+               ret = i915_gem_object_set_to_gtt_domain(obj, write);
+               if (ret)
+                       goto unlock;
+       }
 
        if (obj->tiling_mode == I915_TILING_NONE)
                ret = i915_gem_object_put_fence(obj);
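
This hunk moves the GTT-domain transition inside the bind branch of i915_gem_fault(), so it runs only when the object has just been bound rather than on every fault. Condensed, the resulting flow looks like the sketch below; the enclosing not-yet-bound check sits in context above the hunk (an assumption here), and locking and error paths are elided:

    /* Sketch of the fault path after this hunk: the bind and the domain
     * change now succeed or fail together for a fresh binding.
     */
    if (!obj->gtt_space) {          /* assumed enclosing condition */
            ret = i915_gem_object_bind_to_gtt(obj, 0, true);
            if (ret)
                    goto unlock;

            ret = i915_gem_object_set_to_gtt_domain(obj, write);
            if (ret)
                    goto unlock;
    }
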
@@ -1377,25 +1374,24 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 }
 
 static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
-       struct drm_device *dev = obj->base.dev;
-       uint32_t size;
+       uint32_t gtt_size;
 
        if (INTEL_INFO(dev)->gen >= 4 ||
-           obj->tiling_mode == I915_TILING_NONE)
-               return obj->base.size;
+           tiling_mode == I915_TILING_NONE)
+               return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
        if (INTEL_INFO(dev)->gen == 3)
-               size = 1024*1024;
+               gtt_size = 1024*1024;
        else
-               size = 512*1024;
+               gtt_size = 512*1024;
 
-       while (size < obj->base.size)
-               size <<= 1;
+       while (gtt_size < size)
+               gtt_size <<= 1;
 
-       return size;
+       return gtt_size;
 }
 
 /**
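
The sizing helper now takes an explicit device, size, and tiling mode instead of an object, presumably so it can be consulted before an object's tiling state is committed. A worked example of the pre-gen4 power-of-two branch above, as a sketch:

    /* Worked example (sketch) for a gen3 device: the fence region starts
     * at the 1MiB gen3 minimum and doubles until it covers the object.
     */
    uint32_t a = i915_gem_get_gtt_size(dev, 700 * 1024, I915_TILING_X);
    /* a == 1MiB: 700KiB already fits in the minimum region */
    uint32_t b = i915_gem_get_gtt_size(dev, 3 * 1024 * 1024, I915_TILING_X);
    /* b == 4MiB: 1MiB -> 2MiB -> 4MiB to cover 3MiB */
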
@@ -1406,59 +1402,52 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
  * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+                          uint32_t size,
+                          int tiling_mode)
 {
-       struct drm_device *dev = obj->base.dev;
-
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (INTEL_INFO(dev)->gen >= 4 ||
-           obj->tiling_mode == I915_TILING_NONE)
+           tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       return i915_gem_get_gtt_size(obj);
+       return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
 /**
  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
  *                                      unfenced object
- * @obj: object to check
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
  *
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+                                   uint32_t size,
+                                   int tiling_mode)
 {
-       struct drm_device *dev = obj->base.dev;
-       int tile_height;
-
        /*
         * Minimum alignment is 4k (GTT page size) for sane hw.
         */
        if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-           obj->tiling_mode == I915_TILING_NONE)
+           tiling_mode == I915_TILING_NONE)
                return 4096;
 
-       /*
-        * Older chips need unfenced tiled buffers to be aligned to the left
-        * edge of an even tile row (where tile rows are counted as if the bo is
-        * placed in a fenced gtt region).
+       /* Previous hardware however needs to be aligned to a power-of-two
+        * tile height. The simplest method for determining this is to reuse
+        * the power-of-two object size.
         */
-       if (IS_GEN2(dev))
-               tile_height = 16;
-       else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-               tile_height = 32;
-       else
-               tile_height = 8;
-
-       return tile_height * obj->stride * 2;
+       return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
 int
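
Note that the unfenced-alignment change is substantive, not just a signature change: the old code derived the alignment from twice the tile-row height (tile_height * stride * 2), while the new code reuses the power-of-two fence size. An illustrative comparison (sketch; the stride and size are made up for the example):

    /* Hypothetical gen2, X-tiled, stride 2048, 900KiB object:
     *   old: 16 (tile_height) * 2048 (stride) * 2 = 64KiB alignment
     *   new: power-of-two size covering 900KiB    =  1MiB alignment
     * The new value is stricter but no longer depends on the stride.
     */
    uint32_t align = i915_gem_get_unfenced_gtt_alignment(dev, 900 * 1024,
                                                         I915_TILING_X);
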
@@ -1558,12 +1547,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
        inode = obj->base.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
+       gfpmask |= mapping_gfp_mask(mapping);
+
        for (i = 0; i < page_count; i++) {
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER |
-                                          __GFP_COLD |
-                                          __GFP_RECLAIMABLE |
-                                          gfpmask);
+               page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
 
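
Folding mapping_gfp_mask() into gfpmask preserves the flags stored on the mapping at creation time while still letting callers add per-call flags on top. A sketch of the effective mask (the extra caller flags shown are illustrative, not taken from this patch):

    /* Sketch: the mask handed to shmem_read_mapping_page_gfp() is the
     * mapping's stored mask (GFP_HIGHUSER | __GFP_RECLAIMABLE, set in
     * i915_gem_alloc_object() below) OR'd with any caller additions,
     * e.g. a low-memory retry might add __GFP_NORETRY | __GFP_NOWARN.
     */
    gfp_t effective = mapping_gfp_mask(mapping) |
                      __GFP_NORETRY | __GFP_NOWARN;
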
@@ -1701,13 +1688,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
-        * backing pages, *now*. Here we mirror the actions taken
-        * when by shmem_delete_inode() to release the backing store.
+        * backing pages, *now*.
         */
        inode = obj->base.filp->f_path.dentry->d_inode;
-       truncate_inode_pages(inode->i_mapping, 0);
-       if (inode->i_op->truncate_range)
-               inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+       shmem_truncate_range(inode, 0, (loff_t)-1);
 
        obj->madv = __I915_MADV_PURGED;
 }
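
shmem_truncate_range() bundles the page-cache truncation and the shmem-specific range release that the removed lines open-coded via truncate_inode_pages() and the ->truncate_range() inode operation. For reference, a sketch of the interface as exported alongside this series:

    /* Releases the backing pages and any swap allocated to the byte
     * range; passing (loff_t)-1 as the end means "to end of file".
     */
    void shmem_truncate_range(struct inode *inode,
                              loff_t start, loff_t end);
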
@@ -2080,8 +2064,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
                if (!ier) {
                        DRM_ERROR("something (likely vbetool) disabled "
                                  "interrupts, re-enabling\n");
-                       i915_driver_irq_preinstall(ring->dev);
-                       i915_driver_irq_postinstall(ring->dev);
+                       ring->dev->driver->irq_preinstall(ring->dev);
+                       ring->dev->driver->irq_postinstall(ring->dev);
                }
 
                trace_i915_gem_request_wait_begin(ring, seqno);
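
Calling through the drm_driver vtable rather than the i915_driver_irq_* functions directly means the reinstall picks up whichever handlers the driver actually registered, which may differ by chip generation. The relevant hooks in struct drm_driver, sketched with other members elided:

    /* Relevant drm_driver hooks (sketch). irq_postinstall returns int,
     * though the call above ignores it, matching the old behaviour.
     */
    struct drm_driver {
            /* ... */
            void (*irq_preinstall)(struct drm_device *dev);
            int (*irq_postinstall)(struct drm_device *dev);
            /* ... */
    };
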
@@ -2752,9 +2736,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                return -EINVAL;
        }
 
-       fence_size = i915_gem_get_gtt_size(obj);
-       fence_alignment = i915_gem_get_gtt_alignment(obj);
-       unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+       fence_size = i915_gem_get_gtt_size(dev,
+                                          obj->base.size,
+                                          obj->tiling_mode);
+       fence_alignment = i915_gem_get_gtt_alignment(dev,
+                                                    obj->base.size,
+                                                    obj->tiling_mode);
+       unfenced_alignment =
+               i915_gem_get_unfenced_gtt_alignment(dev,
+                                                   obj->base.size,
+                                                   obj->tiling_mode);
 
        if (alignment == 0)
                alignment = map_and_fenceable ? fence_alignment :
@@ -2926,8 +2917,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
         */
        wmb();
 
-       i915_gem_release_mmap(obj);
-
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
@@ -3567,6 +3556,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
+       struct address_space *mapping;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
@@ -3577,6 +3567,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
        i915_gem_info_add_obj(dev_priv, size);
 
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
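
Recording the gfp mask on the mapping at creation time is what keeps the plain shmem_read_mapping_page() call sites above equivalent to the old explicit read_cache_page_gfp(..., GFP_HIGHUSER | __GFP_RECLAIMABLE) calls. A sketch of the equivalence:

    /* After this hunk, the two lookups below request the same page with
     * the same gfp flags.
     */
    page = shmem_read_mapping_page(mapping, n);
    page = shmem_read_mapping_page_gfp(mapping, n,
                                       GFP_HIGHUSER | __GFP_RECLAIMABLE);
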
@@ -3952,8 +3945,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
        page_count = obj->base.size / PAGE_SIZE;
        for (i = 0; i < page_count; i++) {
-               struct page *page = read_cache_page_gfp(mapping, i,
-                                                       GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               struct page *page = shmem_read_mapping_page(mapping, i);
                if (!IS_ERR(page)) {
                        char *dst = kmap_atomic(page);
                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4014,8 +4006,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                struct page *page;
                char *dst, *src;
 
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);
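
Both phys-object paths wrap the page returned by shmem_read_mapping_page() in the same kmap_atomic() copy loop. A sketch of the copy step that follows the lines above in the attach path (the map/unmap pairing and the final reference drop are the points to note; names follow the surrounding code):

    /* Map the object page, copy it into the phys buffer, unmap, and
     * drop the reference taken by shmem_read_mapping_page().
     */
    src = kmap_atomic(page);
    dst = obj->phys_obj->handle->vaddr + i * PAGE_SIZE;
    memcpy(dst, src, PAGE_SIZE);
    kunmap_atomic(src);
    page_cache_release(page);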