X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fi915_gem.c;h=85f713746a1fbc59908fe8aa17f9284dc27af207;hb=04b905942b482092a547798a2477f21e32a8f65d;hp=0b2e167d2bce856c9b9ef55ea73e22174abf7d2f;hpb=e6ece70732b905742ad91a7b5489e0ca1362c0cd;p=pandora-kernel.git

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b2e167d2bce..85f713746a1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -354,13 +355,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
-		page_offset = offset & (PAGE_SIZE-1);
+		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);

@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
-		shmem_page_offset = offset & ~PAGE_MASK;
+		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = data_ptr & ~PAGE_MASK;
+		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -463,10 +463,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto out;
+		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
@@ -638,8 +639,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
-		page_base = (offset & ~(PAGE_SIZE-1));
-		page_offset = offset & (PAGE_SIZE-1);
+		page_base = offset & PAGE_MASK;
+		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;
@@ -650,7 +651,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
-
			return -EFAULT;

		remain -= page_length;
@@ -730,9 +730,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
-		gtt_page_offset = offset & ~PAGE_MASK;
+		gtt_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = data_ptr & ~PAGE_MASK;
+		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -791,13 +791,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
-		page_offset = offset & (PAGE_SIZE-1);
+		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);

@@ -896,9 +895,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
-		shmem_page_offset = offset & ~PAGE_MASK;
+		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = data_ptr & ~PAGE_MASK;
+		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -906,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
@@ -1218,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;
-	}

-	ret = i915_gem_object_set_to_gtt_domain(obj, write);
-	if (ret)
-		goto unlock;
+		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+		if (ret)
+			goto unlock;
+	}

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
@@ -1450,8 +1448,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
	 * edge of an even tile row (where tile rows are counted as if the bo is
	 * placed in a fenced gtt region).
	 */
-	if (IS_GEN2(dev) ||
-	    (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+	if (IS_GEN2(dev))
+		tile_height = 16;
+	else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
		tile_height = 32;
	else
		tile_height = 8;
@@ -1556,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;

@@ -1699,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*. Here we mirror the actions taken
-	 * when by shmem_delete_inode() to release the backing store.
+	 * backing pages, *now*.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
-	truncate_inode_pages(inode->i_mapping, 0);
-	if (inode->i_op->truncate_range)
-		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+	shmem_truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
 }
@@ -2924,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
	 */
	wmb();

-	i915_gem_release_mmap(obj);
-
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
		return NULL;
	}

+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
-		struct page *page = read_cache_page_gfp(mapping, i,
-							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
		struct page *page;
		char *dst, *src;

-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);