Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 55f6e46..ab38fef 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,8 +52,6 @@ struct wb_writeback_work {
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
-#define inode_to_bdi(inode)    ((inode)->i_mapping->backing_dev_info)
-
 /*
  * We don't actually have pdflush, but this one is exported through /proc...
  */
@@ -68,7 +66,17 @@ int nr_pdflush_threads;
  */
 int writeback_in_progress(struct backing_dev_info *bdi)
 {
-       return !list_empty(&bdi->work_list);
+       return test_bit(BDI_writeback_running, &bdi->state);
+}
+
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       if (strcmp(sb->s_type->name, "bdev") == 0)
+               return inode->i_mapping->backing_dev_info;
+
+       return sb->s_bdi;
 }
 
 static void bdi_queue_work(struct backing_dev_info *bdi,
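
The new inode_to_bdi() helper resolves the backing_dev_info through sb->s_bdi for ordinary inodes, which is set once at mount time. Inodes on the internal "bdev" pseudo-filesystem are the exception: their superblock's s_bdi does not point at the underlying device, so they keep going through i_mapping->backing_dev_info to reach the per-device bdi. A hypothetical caller (illustrative only, not part of this patch; it assumes the 2.6.36-era bdi_start_writeback(bdi, nr_pages) entry point) would combine the two helpers like this:

	static void kick_writeback_for(struct inode *inode)
	{
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* Don't queue redundant work if the flusher is already busy. */
		if (!writeback_in_progress(bdi))
			bdi_start_writeback(bdi, 1024);	/* up to 1024 pages */
	}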
@@ -76,7 +84,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 {
        trace_writeback_queue(bdi, work);
 
-       spin_lock(&bdi->wb_lock);
+       spin_lock_bh(&bdi->wb_lock);
        list_add_tail(&work->list, &bdi->work_list);
        if (bdi->wb.task) {
                wake_up_process(bdi->wb.task);
@@ -88,7 +96,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
                trace_writeback_nothread(bdi, work);
                wake_up_process(default_backing_dev_info.wb.task);
        }
-       spin_unlock(&bdi->wb_lock);
+       spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
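
The wb_lock conversions to spin_lock_bh()/spin_unlock_bh() here and in get_next_work_item() below are needed because this series also takes wb_lock from softirq context: the new per-bdi wake-up timer fires in timer (BH) context and grabs the same lock. Roughly, the timer callback on the mm/backing-dev.c side (not shown in this diff) looks like:

	static void wakeup_timer_fn(unsigned long data)
	{
		struct backing_dev_info *bdi = (struct backing_dev_info *)data;

		spin_lock_bh(&bdi->wb_lock);
		if (bdi->wb.task) {
			wake_up_process(bdi->wb.task);
		} else {
			/* Inactive bdi threads get killed; wake the forker
			 * thread so it can recreate and run the bdi thread. */
			wake_up_process(default_backing_dev_info.wb.task);
		}
		spin_unlock_bh(&bdi->wb_lock);
	}

Without the _bh variants, the timer firing on a CPU that already holds wb_lock in process context would deadlock.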
@@ -249,10 +257,18 @@ static void move_expired_inodes(struct list_head *delaying_queue,
 
 /*
  * Queue all expired dirty inodes for io, eldest first.
+ * Before
+ *         newly dirtied     b_dirty    b_io    b_more_io
+ *         =============>    gf         edc     BA
+ * After
+ *         newly dirtied     b_dirty    b_io    b_more_io
+ *         =============>    g          fBAedc
+ *                                           |
+ *                                           +--> dequeue for IO
  */
 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
 {
-       list_splice_init(&wb->b_more_io, wb->b_io.prev);
+       list_splice_init(&wb->b_more_io, &wb->b_io);
        move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
 }
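
The splice target is the whole point of this hunk: list_splice_init(list, pos) inserts 'list' immediately after 'pos', so splicing at &wb->b_io puts the b_more_io inodes at the head of b_io, where the old wb->b_io.prev target appended them at the tail. Writeback dequeues from the tail, so head insertion (followed by move_expired_inodes() placing the newly expired b_dirty inodes in front) keeps the queue eldest-first, as the diagram shows. A minimal userspace sketch of the same list semantics (toy list implementation, illustrative names):

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };
	struct item { struct list_head link; char name; };

	static void init_head(struct list_head *h) { h->next = h->prev = h; }

	static void add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev; n->next = h;
		h->prev->next = n; h->prev = n;
	}

	/* list_splice_init(list, pos): move 'list' to just after 'pos'. */
	static void splice_init(struct list_head *list, struct list_head *pos)
	{
		if (list->next == list)
			return;
		list->next->prev = pos;
		list->prev->next = pos->next;
		pos->next->prev = list->prev;
		pos->next = list->next;
		init_head(list);
	}

	int main(void)
	{
		struct list_head b_io, b_more_io, *p;
		struct item e = { .name = 'e' }, d = { .name = 'd' },
			    c = { .name = 'c' }, B = { .name = 'B' },
			    A = { .name = 'A' };

		init_head(&b_io); init_head(&b_more_io);
		add_tail(&e.link, &b_io); add_tail(&d.link, &b_io);
		add_tail(&c.link, &b_io);
		add_tail(&B.link, &b_more_io); add_tail(&A.link, &b_more_io);

		splice_init(&b_more_io, &b_io);	/* new code: splice at head */
		for (p = b_io.next; p != &b_io; p = p->next)
			putchar(((struct item *)p)->name);
		putchar('\n');	/* prints "BAedc"; the old tail splice gave "edcBA" */
		return 0;
	}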
 
@@ -362,63 +378,36 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
-       if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
-               if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
-                       /*
-                        * More pages get dirtied by a fast dirtier.
-                        */
-                       goto select_queue;
-               } else if (inode->i_state & I_DIRTY) {
-                       /*
-                        * At least XFS will redirty the inode during the
-                        * writeback (delalloc) and on io completion (isize).
-                        */
-                       redirty_tail(inode);
-               } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+       if (!(inode->i_state & I_FREEING)) {
+               if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages.  nfs_writepages()
-                        * sometimes bales out without doing anything. Redirty
-                        * the inode; Move it from b_io onto b_more_io/b_dirty.
-                        */
-                       /*
-                        * akpm: if the caller was the kupdate function we put
-                        * this inode at the head of b_dirty so it gets first
-                        * consideration.  Otherwise, move it to the tail, for
-                        * the reasons described there.  I'm not really sure
-                        * how much sense this makes.  Presumably I had a good
-                        * reasons for doing it this way, and I'd rather not
-                        * muck with it at present.
+                        * sometimes bales out without doing anything.
                         */
-                       if (wbc->for_kupdate) {
+                       inode->i_state |= I_DIRTY_PAGES;
+                       if (wbc->nr_to_write <= 0) {
                                /*
-                                * For the kupdate function we move the inode
-                                * to b_more_io so it will get more writeout as
-                                * soon as the queue becomes uncongested.
+                                * slice used up: queue for next turn
                                 */
-                               inode->i_state |= I_DIRTY_PAGES;
-select_queue:
-                               if (wbc->nr_to_write <= 0) {
-                                       /*
-                                        * slice used up: queue for next turn
-                                        */
-                                       requeue_io(inode);
-                               } else {
-                                       /*
-                                        * somehow blocked: retry later
-                                        */
-                                       redirty_tail(inode);
-                               }
+                               requeue_io(inode);
                        } else {
                                /*
-                                * Otherwise fully redirty the inode so that
-                                * other inodes on this superblock will get some
-                                * writeout.  Otherwise heavy writing to one
-                                * file would indefinitely suspend writeout of
-                                * all the other files.
+                                * Writeback blocked by something other than
+                                * congestion. Delay the inode for some time to
+                                * avoid spinning on the CPU (100% iowait)
+                                * retrying writeback of the dirty page/inode
+                                * that cannot be performed immediately.
                                 */
-                               inode->i_state |= I_DIRTY_PAGES;
                                redirty_tail(inode);
                        }
+               } else if (inode->i_state & I_DIRTY) {
+                       /*
+                        * Filesystems can dirty the inode during writeback
+                        * operations, such as delayed allocation during
+                        * submission or metadata updates after data IO
+                        * completion.
+                        */
+                       redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /*
                         * The inode is clean, inuse
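
The restructured disposition logic checks for remaining dirty pages first and drops the kupdate special cases: wbc->nr_to_write <= 0 now distinguishes a merely exhausted slice (requeue to b_more_io for the next round) from writeback that blocked for some other reason (redirty_tail(), which delays the inode instead of letting the flusher spin at 100% iowait retrying it). For reference, the two helpers used above are defined earlier in this file and look approximately like:

	static void redirty_tail(struct inode *inode)
	{
		struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

		/* If this inode's dirtied_when is older than that of the
		 * most-recently-dirtied inode, refresh the stamp so the
		 * b_dirty list stays sorted. */
		if (!list_empty(&wb->b_dirty)) {
			struct inode *tail = list_entry(wb->b_dirty.next,
							struct inode, i_list);
			if (time_before(inode->dirtied_when, tail->dirtied_when))
				inode->dirtied_when = jiffies;
		}
		list_move(&inode->i_list, &wb->b_dirty);
	}

	static void requeue_io(struct inode *inode)
	{
		list_move(&inode->i_list, &inode_to_bdi(inode)->wb.b_more_io);
	}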
@@ -509,7 +498,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                if (inode_dirtied_after(inode, wbc->wb_start))
                        return 1;
 
-               BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
+               BUG_ON(inode->i_state & I_FREEING);
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                writeback_single_inode(inode, wbc);
@@ -540,7 +529,8 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
 {
        int ret = 0;
 
-       wbc->wb_start = jiffies; /* livelock avoidance */
+       if (!wbc->wb_start)
+               wbc->wb_start = jiffies; /* livelock avoidance */
        spin_lock(&inode_lock);
        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);
@@ -569,7 +559,6 @@ static void __writeback_inodes_sb(struct super_block *sb,
 {
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-       wbc->wb_start = jiffies; /* livelock avoidance */
        spin_lock(&inode_lock);
        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);
@@ -590,7 +579,7 @@ static inline bool over_bground_thresh(void)
 {
        unsigned long background_thresh, dirty_thresh;
 
-       get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+       global_dirty_limits(&background_thresh, &dirty_thresh);
 
        return (global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
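
get_dirty_limits() computed the global and the per-bdi thresholds in one call; the split into global_dirty_limits() and bdi_dirty_limit() lets callers like this one, which only compares against the global background threshold, skip the per-bdi arithmetic entirely. A caller that does need both would look roughly like this (function name hypothetical):

	static bool over_bdi_bground_thresh(struct backing_dev_info *bdi)
	{
		unsigned long background_thresh, dirty_thresh, bdi_thresh;

		global_dirty_limits(&background_thresh, &dirty_thresh);
		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

		return bdi_stat(bdi, BDI_RECLAIMABLE) >= bdi_thresh;
	}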
@@ -635,6 +624,7 @@ static long wb_writeback(struct bdi_writeback *wb,
                wbc.range_end = LLONG_MAX;
        }
 
+       wbc.wb_start = jiffies; /* livelock avoidance */
        for (;;) {
                /*
                 * Stop writeback when nr_pages has been consumed
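
Setting wbc.wb_start once per wb_writeback() run (instead of on every writeback_inodes_wb() call, see the hunks above) widens the livelock-avoidance window to the whole run: inodes dirtied after the run started are skipped until the next run, so a process continuously redirtying one inode cannot pin the flusher forever. The timestamp feeds inode_dirtied_after(), used by writeback_sb_inodes() above, which is roughly:

	static bool inode_dirtied_after(struct inode *inode, unsigned long t)
	{
		bool ret = time_after(inode->dirtied_when, t);
	#ifndef CONFIG_64BIT
		/* On 32-bit, dirtied_when may have wrapped and only appear
		 * to be in the future; treat such stamps as expired. */
		ret = ret && time_before_eq(inode->dirtied_when, jiffies);
	#endif
		return ret;
	}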
@@ -704,13 +694,13 @@ get_next_work_item(struct backing_dev_info *bdi)
 {
        struct wb_writeback_work *work = NULL;
 
-       spin_lock(&bdi->wb_lock);
+       spin_lock_bh(&bdi->wb_lock);
        if (!list_empty(&bdi->work_list)) {
                work = list_entry(bdi->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
-       spin_unlock(&bdi->wb_lock);
+       spin_unlock_bh(&bdi->wb_lock);
        return work;
 }
 
@@ -758,6 +748,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
        struct wb_writeback_work *work;
        long wrote = 0;
 
+       set_bit(BDI_writeback_running, &wb->bdi->state);
        while ((work = get_next_work_item(bdi)) != NULL) {
                /*
                 * Override sync mode, in case we must wait for completion
@@ -784,6 +775,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);
+       clear_bit(BDI_writeback_running, &wb->bdi->state);
 
        return wrote;
 }
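
Bracketing the work loop with set_bit()/clear_bit() on BDI_writeback_running is what backs the new writeback_in_progress() test at the top of the file: an empty work_list never meant idle, since the thread may still be processing the item it just dequeued, and the periodic kupdate-style pass queues no work item at all. A hypothetical caller (illustrative only) uses the predicate to avoid queueing redundant work:

	static void maybe_kick_flusher(struct backing_dev_info *bdi, long nr_pages)
	{
		if (!writeback_in_progress(bdi))
			bdi_start_writeback(bdi, nr_pages);
	}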
@@ -810,6 +802,12 @@ int bdi_writeback_thread(void *data)
        trace_writeback_thread_start(bdi);
 
        while (!kthread_should_stop()) {
+               /*
+                * Remove own delayed wake-up timer, since we are already awake
+                * and we'll take care of the periodic write-back.
+                */
+               del_timer(&wb->wakeup_timer);
+
                pages_written = wb_do_writeback(wb, 0);
 
                trace_writeback_pages_written(pages_written);
@@ -818,7 +816,7 @@ int bdi_writeback_thread(void *data)
                        wb->last_active = jiffies;
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (!list_empty(&bdi->work_list)) {
+               if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
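
Re-checking kthread_should_stop() after setting TASK_INTERRUPTIBLE closes a shutdown race: kthread_stop() may set the stop flag and issue its wake-up before the thread has set its state, in which case the wake-up is lost and the thread goes on to sleep, indefinitely on the plain schedule() path used when dirty_writeback_interval is zero. The hunk converges on the canonical kthread sleep pattern, sketched here with a placeholder have_work() condition:

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Re-check every wake-up condition after setting the state,
		 * so a wake-up racing with the check is never lost. */
		if (have_work() || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
	}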
@@ -868,26 +866,6 @@ void wakeup_flusher_threads(long nr_pages)
        rcu_read_unlock();
 }
 
-/*
- * This function is used when the first inode for this bdi is marked dirty. It
- * wakes-up the corresponding bdi thread which should then take care of the
- * periodic background write-out of dirty inodes.
- */
-static void wakeup_bdi_thread(struct backing_dev_info *bdi)
-{
-       spin_lock(&bdi->wb_lock);
-       if (bdi->wb.task)
-               wake_up_process(bdi->wb.task);
-       else
-               /*
-                * When bdi tasks are inactive for long time, they are killed.
-                * In this case we have to wake-up the forker thread which
-                * should create and run the bdi thread.
-                */
-               wake_up_process(default_backing_dev_info.wb.task);
-       spin_unlock(&bdi->wb_lock);
-}
-
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -987,7 +965,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                        if (hlist_unhashed(&inode->i_hash))
                                goto out;
                }
-               if (inode->i_state & (I_FREEING|I_CLEAR))
+               if (inode->i_state & I_FREEING)
                        goto out;
 
                /*
@@ -1019,7 +997,7 @@ out:
        spin_unlock(&inode_lock);
 
        if (wakeup_bdi)
-               wakeup_bdi_thread(bdi);
+               bdi_wakeup_thread_delayed(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
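
bdi_wakeup_thread_delayed() replaces the immediate wake-up: when the first inode for a bdi is dirtied there is no reason to run the flusher right away, only once the dirty data becomes eligible for periodic write-back. The mm/backing-dev.c side (not in this diff) is approximately:

	void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
	{
		unsigned long timeout;

		/* dirty_writeback_interval is in centiseconds. */
		timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
		mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
	}

When the timer fires, wakeup_timer_fn() (sketched earlier) wakes the bdi thread, or the forker thread if the bdi thread was killed for inactivity; the del_timer() added to bdi_writeback_thread() above cancels it again once the thread is awake anyway.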
 
@@ -1062,7 +1040,7 @@ static void wait_sb_inodes(struct super_block *sb)
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping;
 
-               if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+               if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
                        continue;
                mapping = inode->i_mapping;
                if (mapping->nrpages == 0)