ext4: fix data corruption in data=journal mode
authorJan Kara <jack@suse.cz>
Fri, 27 Jan 2017 19:35:38 +0000 (14:35 -0500)
committerBen Hutchings <ben@decadent.org.uk>
Mon, 5 Jun 2017 20:13:45 +0000 (21:13 +0100)
commit 3b136499e906460919f0d21a49db1aaccf0ae963 upstream.

ext4_journalled_write_end() did not properly handle all the cases when
generic_perform_write() did not copy all the data into the target page
and could mark buffers with uninitialized contents as uptodate and dirty
leading to possible data corruption (which would be quickly fixed by
generic_perform_write() retrying the write but still). Fix the problem
by carefully handling the case when the page that is written to is not
uptodate.

Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
fs/ext4/inode.c

index 7e22e76..f881e34 100644 (file)
@@ -1073,7 +1073,9 @@ static int ext4_writeback_write_end(struct file *file,
  * set the buffer to be dirty, since in data=journalled mode we need
  * to call ext4_handle_dirty_metadata() instead.
  */
-static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
+static void ext4_journalled_zero_new_buffers(handle_t *handle,
+                                           struct page *page,
+                                           unsigned from, unsigned to)
 {
        unsigned int block_start = 0, block_end;
        struct buffer_head *head, *bh;
@@ -1090,7 +1092,7 @@ static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
                                        size = min(to, block_end) - start;
 
                                        zero_user(page, start, size);
-                                       set_buffer_uptodate(bh);
+                                       write_end_fn(handle, bh);
                                }
                                clear_buffer_new(bh);
                        }
@@ -1118,16 +1120,19 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (copied < len) {
-               if (!PageUptodate(page))
-                       copied = 0;
-               zero_new_buffers(page, from+copied, to);
+       if (unlikely(copied < len) && !PageUptodate(page)) {
+               copied = 0;
+               ext4_journalled_zero_new_buffers(handle, page, from, to);
+       } else {
+               if (unlikely(copied < len))
+                       ext4_journalled_zero_new_buffers(handle, page,
+                                                        from + copied, to);
+               ret = walk_page_buffers(handle, page_buffers(page), from,
+                                       from + copied, &partial,
+                                       write_end_fn);
+               if (!partial)
+                       SetPageUptodate(page);
        }
-
-       ret = walk_page_buffers(handle, page_buffers(page), from,
-                               to, &partial, write_end_fn);
-       if (!partial)
-               SetPageUptodate(page);
        new_i_size = pos + copied;
        if (new_i_size > inode->i_size)
                i_size_write(inode, pos+copied);