By using the recently introduced pinning of pages, we can safely drop
the mutex in the knowledge that the pages are not going to disappear
beneath us, and so we can simplify the code for iterating over the pages.
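
As a rough sketch of the resulting flow (condensed from the hunks below,
not the complete function), the copy loop is now bracketed by a pin/unpin
pair, so obj->pages stays valid even while struct_mutex is dropped:

    ret = i915_gem_object_get_pages(obj);   /* ensure backing pages exist */
    if (ret)
            return ret;

    i915_gem_object_pin_pages(obj);         /* pages cannot be reaped from here on */
    /* ... per-page copy loop, which may drop dev->struct_mutex ... */
    i915_gem_object_unpin_pages(obj);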
Note: The old code needed such complicated page refcounting because it
used obj->pages as a micro-optimization when that array was available,
but (before this patch) the array could disappear as soon as
dev->struct_mutex was dropped. Hence the slow path required some manual
page refcounting, complicated by the fact that pages returned by
shmem_read_mapping_page already hold a reference, which needs to be
dropped again.
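
For reference, a condensed before/after of the per-page lookup (drawn
from the hunks below; the error handling is simplified here): the old
slow path had to juggle that extra reference, whereas the pinned version
is a plain array index:

    /* before: obj->pages could vanish once struct_mutex was dropped */
    if (obj->pages) {
            page = obj->pages[offset >> PAGE_SHIFT];
            release_page = 0;
    } else {
            /* shmem_read_mapping_page returns a page that already holds
             * a reference, which must be dropped again afterwards */
            page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
            if (IS_ERR(page))
                    return PTR_ERR(page);
            release_page = 1;
    }

    /* after: pinning keeps obj->pages stable, no local refcounting */
    page = obj->pages[offset >> PAGE_SHIFT];
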
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Added note to explain the question Ben raised in review.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
                                page_length);
         kunmap_atomic(vaddr);

-        return ret;
+        return ret ? -EFAULT : 0;
 }

 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
                              page_do_bit17_swizzling);
         kunmap(page);

-        return ret;
+        return ret ? -EFAULT : 0;
 }

 static int
 i915_gem_shmem_pwrite(struct drm_device *dev,
                       struct drm_i915_gem_object *obj,
                       struct drm_i915_gem_pwrite *args,
                       struct drm_file *file)
 {
-        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
         ssize_t remain;
         loff_t offset;
         char __user *user_data;
         int hit_slowpath = 0;
         int needs_clflush_after = 0;
         int needs_clflush_before = 0;
-        int release_page;
         user_data = (char __user *) (uintptr_t) args->data_ptr;
         remain = args->size;

             && obj->cache_level == I915_CACHE_NONE)
                 needs_clflush_before = 1;

+        ret = i915_gem_object_get_pages(obj);
+        if (ret)
+                return ret;
+
+        i915_gem_object_pin_pages(obj);
+
         offset = args->offset;
         obj->dirty = 1;

                         ((shmem_page_offset | page_length)
                                 & (boot_cpu_data.x86_clflush_size - 1));
-                if (obj->pages) {
-                        page = obj->pages[offset >> PAGE_SHIFT];
-                        release_page = 0;
-                } else {
-                        page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-                        if (IS_ERR(page)) {
-                                ret = PTR_ERR(page);
-                                goto out;
-                        }
-                        release_page = 1;
-                }
-
+                page = obj->pages[offset >> PAGE_SHIFT];
                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                         (page_to_phys(page) & (1 << 17)) != 0;
                         goto next_page;

                 hit_slowpath = 1;
-                page_cache_get(page);
                 mutex_unlock(&dev->struct_mutex);
                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                         user_data, page_do_bit17_swizzling,
                                         partial_cacheline_write,
                                         needs_clflush_after);

                 mutex_lock(&dev->struct_mutex);
-                page_cache_release(page);
+
 next_page:
                 set_page_dirty(page);
                 mark_page_accessed(page);

-                if (release_page)
-                        page_cache_release(page);
-
-                if (ret) {
-                        ret = -EFAULT;
+                if (ret)
                         goto out;
-                }

                 remain -= page_length;
                 user_data += page_length;
 out:
+        i915_gem_object_unpin_pages(obj);
+
         if (hit_slowpath) {
                 /* Fixup: Kill any reinstated backing storage pages */
                 if (obj->madv == __I915_MADV_PURGED)