* ->private_lock (__free_pte->__set_page_dirty_buffers)
*   ->swap_lock (exclusive_swap_page, others)
*     ->mapping->tree_lock
+ *       ->zone.lock
*
* ->i_mutex
*   ->i_mmap_lock (truncate->unmap_mapping_range)
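(The indentation above encodes the ranking: a lock may only be nested inside the locks listed above it. As an illustration only, using the 2.6-era i_mutex and i_mmap_lock fields, a rank-respecting acquisition looks like this:)

	mutex_lock(&inode->i_mutex);		/* outer rank */
	spin_lock(&mapping->i_mmap_lock);	/* inner rank */
	/* ... work on the mapping ... */
	spin_unlock(&mapping->i_mmap_lock);
	mutex_unlock(&inode->i_mutex);

Taking the two in the opposite order on another CPU would set up an AB-BA deadlock.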
return copied;
}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
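As the _atomic suffix implies, this copy must not sleep, so callers invoke it with pagefaults disabled and must tolerate a short copy when the source is not resident. A sketch of the expected calling pattern (using the 2.6-era pagefault_disable() API; error handling elided):

	pagefault_disable();
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	pagefault_enable();
	if (copied < bytes) {
		/* source not faulted in: drop the page lock, prefault, retry */
	}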
/*
* This has the same side effects and return value as
* iov_iter_copy_from_user_atomic(); the difference is that it attempts
* to resolve faults instead of returning a short copy.
kunmap(page);
return copied;
}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
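Unlike the atomic variant, this one maps the page with kmap() and may sleep to fault the source in, so it must not be called while holding a page lock that the fault path could need. A hedged sketch of how the two variants divide the work (page_is_locked is a stand-in condition, not a real predicate):

	if (page_is_locked) {
		/* atomic context: may return a short copy */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	} else {
		/* may sleep to resolve faults */
		copied = iov_iter_copy_from_user(page, i, offset, bytes);
	}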
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
__iov_iter_advance_iov(i, bytes);
i->count -= bytes;
}
+EXPORT_SYMBOL(iov_iter_advance);
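Together with iov_iter_init() this gives the basic consumption loop over a user iovec: copy some bytes, then advance the iterator by however many were actually copied. A minimal sketch, assuming the iov_iter_init() helper introduced alongside these (its final argument is bytes already written) and a hypothetical copy_one_chunk():

	struct iov_iter i;
	size_t copied;

	iov_iter_init(&i, iov, nr_segs, count, 0);
	while (iov_iter_count(&i)) {
		copied = copy_one_chunk(&i);	/* hypothetical helper */
		iov_iter_advance(&i, copied);
	}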
/*
* Fault in the first iovec of the given iov_iter, to a maximum length of
* bytes. Returns 0 on success, or non-zero if the memory could not be
* accessed (i.e. because it is an invalid address).
bytes = min(bytes, i->iov->iov_len - i->iov_offset);
return fault_in_pages_readable(buf, bytes);
}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
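The purpose of prefaulting is to make the later pagefault-disabled copy overwhelmingly likely to succeed while the destination page is locked. The pattern, roughly (a sketch, not the exact patch code):

	/* Bring the user source into memory before locking the page... */
	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
		status = -EFAULT;
		break;
	}
	/* ...so the locked, atomic copy rarely comes up short. */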
/*
* Return the count of just the current iov_iter segment.
else
return min(i->count, iov->iov_len - i->iov_offset);
}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
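One use for this is livelock avoidance: if the atomic copy returns 0 because the prefaulted segment was reclaimed again, the caller can shrink the next attempt to a single segment, which iov_iter_fault_in_readable() can fully prefault, guaranteeing forward progress. A sketch:

	if (unlikely(copied == 0)) {
		/* retry with no more than the current iovec segment */
		bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				iov_iter_single_seg_count(i));
	}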
/*
* Performs necessary checks before doing a write
ret = aops->prepare_write(file, page, offset, offset+len);
if (ret) {
- if (ret != AOP_TRUNCATED_PAGE)
- unlock_page(page);
+ unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
vmtruncate(inode, inode->i_size);
- if (ret == AOP_TRUNCATED_PAGE)
- goto again;
}
return ret;
}
unlock_page(page);
mark_page_accessed(page);
page_cache_release(page);
- BUG_ON(ret == AOP_TRUNCATED_PAGE); /* can't deal with */
if (ret < 0) {
if (pos + len > inode->i_size)
* non-uptodate pages cannot cope with short copies, and we
* cannot take a pagefault with the destination page locked.
* So pin the source page to copy it.
*/
- if (!PageUptodate(page)) {
+ if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
unlock_page(page);
src_page = alloc_page(GFP_KERNEL);
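The deadlock being dodged here: if the copy faults while the destination page is locked and the source address maps the very same page (e.g. write(2) from an mmap of the same file range), the fault handler would block on lock_page() against ourselves. "Pinning" means holding a reference and copying through a kernel mapping so the copy cannot fault at all. A sketch of that idea as a hypothetical helper (not the patch's code), assuming the 2.6-era get_user_pages() and kmap_atomic() interfaces and a source that fits in one page:

	static int copy_from_pinned_user(struct page *dst, unsigned offset,
				char __user *buf, size_t bytes)
	{
		struct page *src;
		char *ksrc, *kdst;
		int ret;

		/* fault in and pin the source while dst is unlocked */
		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(current, current->mm,
				(unsigned long)buf & PAGE_MASK, 1, 0, 0,
				&src, NULL);
		up_read(&current->mm->mmap_sem);
		if (ret != 1)
			return -EFAULT;

		lock_page(dst);
		/* both pages are pinned: this copy cannot take a pagefault */
		ksrc = kmap_atomic(src, KM_USER0);
		kdst = kmap_atomic(dst, KM_USER1);
		memcpy(kdst + offset,
			ksrc + ((unsigned long)buf & ~PAGE_MASK), bytes);
		kunmap_atomic(kdst, KM_USER1);
		kunmap_atomic(ksrc, KM_USER0);
		unlock_page(dst);
		page_cache_release(src);
		return 0;
	}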
flush_dcache_page(page);
status = a_ops->commit_write(file, page, offset, offset+bytes);
- if (unlikely(status < 0 || status == AOP_TRUNCATED_PAGE))
+ if (unlikely(status < 0))
goto fs_write_aop_error;
if (unlikely(status > 0)) /* filesystem did partial write */
copied = min_t(size_t, copied, status);
continue;
fs_write_aop_error:
- if (status != AOP_TRUNCATED_PAGE)
- unlock_page(page);
+ unlock_page(page);
page_cache_release(page);
if (src_page)
page_cache_release(src_page);
*/
if (pos + bytes > inode->i_size)
vmtruncate(inode, inode->i_size);
- if (status == AOP_TRUNCATED_PAGE)
- continue;
- else
- break;
+ break;
} while (iov_iter_count(i));
return written ? written : status;
const struct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
ssize_t written = 0;
+ unsigned int flags = 0;
+
+ /*
+ * Copies from kernel address space cannot fail (NFSD is a big user).
+ */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ flags |= AOP_FLAG_UNINTERRUPTIBLE;
do {
struct page *page;
break;
}
- status = a_ops->write_begin(file, mapping, pos, bytes, 0,
+ status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata);
if (unlikely(status))
break;
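For reference: AOP_FLAG_UNINTERRUPTIBLE tells ->write_begin that the copy between write_begin and write_end cannot be a short one, so the filesystem need not prepare for a partially-written page; with KERNEL_DS the source is kernel memory and cannot fault. After a successful write_begin, the loop typically continues along these lines (a sketch, not the exact patch code):

		offset = pos & (PAGE_CACHE_SIZE - 1);

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i,
							offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;	/* ->write_end returns bytes accepted */
		pos += copied;
		written += copied;
		iov_iter_advance(i, copied);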