Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cb01f6..31f6988 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -927,7 +927,7 @@ retry:
                                break;
                        }
 
-                       done_index = page->index + 1;
+                       done_index = page->index;
 
                        lock_page(page);
 
@@ -977,6 +977,7 @@ continue_unlock:
                                         * not be suitable for data integrity
                                         * writeout).
                                         */
+                                       done_index = page->index + 1;
                                        done = 1;
                                        break;
                                }
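
The two hunks above change the resume bookkeeping in write_cache_pages():
done_index now records the page currently being examined and is only
advanced past it once that page has actually been submitted, so an
interrupted pass restarts on the page that was skipped instead of jumping
over it.  A minimal sketch of that rule, using a hypothetical helper name
rather than the file's real code:

static pgoff_t sketch_resume_index(struct page *page, bool page_was_written)
{
	/*
	 * Before the change: the index was bumped unconditionally, so a pass
	 * that stopped early could silently skip an unwritten dirty page.
	 * After the change: the page is only skipped once it was written.
	 */
	return page_was_written ? page->index + 1 : page->index;
}
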
@@ -1039,11 +1040,17 @@ static int __writepage(struct page *page, struct writeback_control *wbc,
 int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
 {
+       struct blk_plug plug;
+       int ret;
+
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;
 
-       return write_cache_pages(mapping, wbc, __writepage, mapping);
+       blk_start_plug(&plug);
+       ret = write_cache_pages(mapping, wbc, __writepage, mapping);
+       blk_finish_plug(&plug);
+       return ret;
 }
 
 EXPORT_SYMBOL(generic_writepages);
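
The hunk above wraps the write_cache_pages() walk in an on-stack block plug,
so the many small requests generated by per-page ->writepage() calls are
queued on the task and flushed to the device in one batch at
blk_finish_plug().  A minimal sketch of the pattern, with a hypothetical
issue_lots_of_io() standing in for the page walk:

#include <linux/blkdev.h>

static void plugged_batch(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);	/* subsequent requests queue on the task's plug */
	issue_lots_of_io();	/* e.g. ->writepage() for each dirty page */
	blk_finish_plug(&plug);	/* the accumulated requests are flushed here */
}
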
@@ -1211,6 +1218,17 @@ int set_page_dirty(struct page *page)
 
        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+               /*
+                * readahead/lru_deactivate_page can leave PG_readahead or
+                * PG_reclaim set because of a race with end_page_writeback().
+                * For readahead: if the page is written, the flag is reset
+                * here, so there is no problem.
+                * For lru_deactivate_page: if the page is redirtied, the flag
+                * is likewise reset, so no problem either; but if the page is
+                * later used by readahead, the stale bit confuses readahead
+                * and makes it restart its window size ramp-up.  That is only
+                * a trivial problem, though.
+                */
+               ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
                if (!spd)
                        spd = __set_page_dirty_buffers;
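
The hunk above moves ClearPageReclaim() into set_page_dirty(): PG_reclaim
(which, as the comment hints, is the same bit readahead uses as
PG_readahead) is set by the reclaim path before writeout so that
end_page_writeback() rotates the page to the tail of the inactive list,
and a page that gets dirtied again should not keep that stale hint.
A hedged sketch of the two sides of that interaction, with illustrative
function names that are not kernel code:

static void sketch_mark_for_reclaim_writeout(struct page *page)
{
	SetPageReclaim(page);	/* ask end_page_writeback() to rotate the page */
	/* ... writeback of the page is started here ... */
}

static void sketch_redirty(struct page *page)
{
	ClearPageReclaim(page);	/* the hint is stale: the page is dirty again */
	/* ... mapping->a_ops->set_page_dirty(page) follows, as above ... */
}
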
@@ -1239,7 +1257,7 @@ int set_page_dirty_lock(struct page *page)
 {
        int ret;
 
-       lock_page_nosync(page);
+       lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
@@ -1266,7 +1284,6 @@ int clear_page_dirty_for_io(struct page *page)
 
        BUG_ON(!PageLocked(page));
 
-       ClearPageReclaim(page);
        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.