vfs: Block mmapped writes while the fs is frozen
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76..b0675bf 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-       struct block_device *bd;
-       struct buffer_head *bh
-               = container_of(word, struct buffer_head, b_state);
-
-       smp_mb();
-       bd = bh->b_bdev;
-       if (bd)
-               blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-       wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+       wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-       wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
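The rename on both sides of these hunks is small but meaningful: with explicit plugging, the action callback passed to wait_on_bit()/wait_on_bit_lock() no longer has to kick the underlying device queue before sleeping, so sleep_on_buffer() now describes everything it does. A minimal sketch of the action contract assumed here (example_action is illustrative, not part of the patch):

	/*
	 * The action is called with a pointer to the word being waited on.
	 * Return 0 to keep waiting until the bit clears; return non-zero
	 * to abort the wait (e.g. when interrupted by a signal).
	 */
	static int example_action(void *word)
	{
		io_schedule();	/* sleep, accounted as I/O wait */
		return 0;
	}

	/* usage: block until BH_Lock clears in bh->b_state */
	wait_on_bit(&bh->b_state, BH_Lock, example_action, TASK_UNINTERRUPTIBLE);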
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
-       struct address_space *mapping, *prev_mapping = NULL;
+       struct address_space *mapping;
        int err = 0, err2;
+       struct blk_plug plug;
 
        INIT_LIST_HEAD(&tmp);
+       blk_start_plug(&plug);
 
        spin_lock(lock);
        while (!list_empty(list)) {
@@ -775,24 +769,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * still in flight on potentially older
                                 * contents.
                                 */
-                               write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+                               write_dirty_buffer(bh, WRITE_SYNC);
 
-                               /*
-                                * Kick off IO for the previous mapping. Note
-                                * that we will not run the very last mapping,
-                                * wait_on_buffer() will do that for us
-                                * through sync_buffer().
-                                */
-                               if (prev_mapping && prev_mapping != mapping)
-                                       blk_run_address_space(prev_mapping);
-                               prev_mapping = mapping;
-
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }
 
+       spin_unlock(lock);
+       blk_finish_plug(&plug);
+       spin_lock(lock);
+
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
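The removed prev_mapping bookkeeping existed only to kick each address_space's queue as the loop moved between mappings; with on-stack plugging the whole pass is bracketed by blk_start_plug()/blk_finish_plug() instead. A minimal sketch of the pattern, with hypothetical helpers for illustration:

	struct blk_plug plug;

	blk_start_plug(&plug);			/* batch bios on the task's plug list */
	while (have_dirty_buffers())		/* hypothetical loop condition */
		write_dirty_buffer(next_dirty_buffer(), WRITE_SYNC);
	blk_finish_plug(&plug);			/* hand the whole batch to the block layer */

Note why the patch drops and retakes the list spinlock around blk_finish_plug(): flushing the plug actually submits the queued I/O, which must not happen under a spinlock.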
@@ -1144,7 +1132,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * mapping->tree_lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
@@ -1614,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the queue before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
        int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-                       WRITE_SYNC_PLUG : WRITE);
+                       WRITE_SYNC : WRITE);
 
        BUG_ON(!PageLocked(page));
 
@@ -2343,24 +2325,26 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                  get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                        get_block_t get_block)
 {
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        unsigned long end;
        loff_t size;
-       int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+       int ret;
 
        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
-               /* page got truncated out from underneath us */
-               unlock_page(page);
-               goto out;
+               /* We overload EFAULT to mean page got truncated */
+               ret = -EFAULT;
+               goto out_unlock;
        }
 
        /* page is wholly or partially inside EOF */
@@ -2373,18 +2357,41 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (!ret)
                ret = block_commit_write(page, 0, end);
 
-       if (unlikely(ret)) {
-               unlock_page(page);
-               if (ret == -ENOMEM)
-                       ret = VM_FAULT_OOM;
-               else /* -ENOSPC, -EIO, etc */
-                       ret = VM_FAULT_SIGBUS;
-       } else
-               ret = VM_FAULT_LOCKED;
-
-out:
+       if (unlikely(ret < 0))
+               goto out_unlock;
+       /*
+        * Freezing in progress? We check after the page is marked dirty and
+        * with page lock held so if the test here fails, we are sure freezing
+        * code will wait during syncing until the page fault is done - at that
+        * point page will be dirty and unlocked so freezing code will write it
+        * and writeprotect it again.
+        */
+       set_page_dirty(page);
+       if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+       return 0;
+out_unlock:
+       unlock_page(page);
        return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                  get_block_t get_block)
+{
+       int ret;
+       struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+
+       /*
+        * This check is racy but catches the common case. The check in
+        * __block_page_mkwrite() is reliable.
+        */
+       vfs_check_frozen(sb, SB_FREEZE_WRITE);
+       ret = __block_page_mkwrite(vma, vmf, get_block);
+       return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
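__block_page_mkwrite() now reports trouble with error codes (-EFAULT for a truncated page, -EAGAIN for a frozen fs, -ENOMEM, -ENOSPC, -EIO, ...) and leaves the VM_FAULT_* translation to block_page_mkwrite_return(), while block_page_mkwrite() stays the drop-in ->page_mkwrite helper. A hypothetical filesystem caller, assuming a myfs_get_block() block-mapping callback (illustrative only, not from the patch):

	static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		int ret;

		/*
		 * Racy check so a fault against a frozen fs sleeps here
		 * instead of busylooping; the recheck under the page lock
		 * in __block_page_mkwrite() is the reliable one.
		 */
		vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
		ret = __block_page_mkwrite(vma, vmf, myfs_get_block);
		/*
		 * 0 -> VM_FAULT_LOCKED; -EFAULT/-EAGAIN -> retry the fault;
		 * -ENOMEM -> VM_FAULT_OOM; anything else -> VM_FAULT_SIGBUS.
		 */
		return block_page_mkwrite_return(ret);
	}

	static const struct vm_operations_struct myfs_file_vm_ops = {
		.fault		= filemap_fault,
		.page_mkwrite	= myfs_page_mkwrite,
	};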
@@ -3138,17 +3145,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-       struct address_space *mapping;
-
-       smp_mb();
-       mapping = page_mapping(page);
-       if (mapping)
-               blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.