SL*B: drop kmem cache argument from constructor
diff --git a/fs/buffer.c b/fs/buffer.c
index 0f51c0f..5fd497c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -706,7 +706,7 @@ static int __set_page_dirty(struct page *page,
        if (TestSetPageDirty(page))
                return 0;
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
 
@@ -719,7 +719,7 @@ static int __set_page_dirty(struct page *page,
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
        return 1;
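
Both hunks above are part of converting mapping->tree_lock from an rwlock_t to a spinlock_t: once page-cache lookups went lockless via the RCU radix tree, the read side of the lock had no users left, so every remaining taker is exclusive and a plain spinlock is the cheaper primitive. A minimal sketch of the caller pattern after the conversion; tag_page_dirty() is a hypothetical helper invented for this illustration, not part of the patch:

	#include <linux/pagemap.h>

	/*
	 * Hypothetical helper, for illustration only: tag a page dirty in
	 * the page-cache radix tree.  The _irq variants are used because
	 * tree_lock is also taken from I/O completion (interrupt) context.
	 */
	static void tag_page_dirty(struct address_space *mapping,
				   struct page *page)
	{
		spin_lock_irq(&mapping->tree_lock);
		if (page->mapping)	/* not truncated away under us */
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page),
					   PAGECACHE_TAG_DIRTY);
		spin_unlock_irq(&mapping->tree_lock);
	}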
@@ -1464,7 +1464,7 @@ static void invalidate_bh_lru(void *arg)
        
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
+       on_each_cpu(invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
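This hunk follows the smp API cleanup that removed the never-used 'retry' argument from on_each_cpu(). For reference, the prototype after that change:

	/*
	 * Run func(info) on every online CPU; with 'wait' nonzero, return
	 * only after the function has completed on all of them.
	 */
	int on_each_cpu(void (*func)(void *info), void *info, int wait);
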
@@ -1691,11 +1691,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
-               } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+               } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+                          buffer_dirty(bh)) {
                        WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
+                       clear_buffer_delay(bh);
                        if (buffer_new(bh)) {
                                /* blockdev mappings never come here */
                                clear_buffer_new(bh);
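
This hunk teaches __block_write_full_page() about delayed allocation: a buffer can be dirty yet carry only the BH_Delay placeholder instead of a real on-disk mapping, so get_block() must still be called for it at write-out time. The test the patch adds reads as a predicate along these lines; buffer_needs_mapping() is a hypothetical name coined for this sketch:

	#include <linux/buffer_head.h>

	/*
	 * Hypothetical predicate, for illustration: a dirty buffer still
	 * needs get_block() if it was never mapped at all, or if it only
	 * holds a delayed-allocation reservation (BH_Delay).
	 */
	static inline int buffer_needs_mapping(struct buffer_head *bh)
	{
		return buffer_dirty(bh) &&
		       (!buffer_mapped(bh) || buffer_delay(bh));
	}

Once get_block() succeeds, the reservation has become a real mapping, which is why the added clear_buffer_delay() follows immediately.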
@@ -1774,7 +1776,8 @@ recover:
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
-               if (buffer_mapped(bh) && buffer_dirty(bh)) {
+               if (buffer_mapped(bh) && buffer_dirty(bh) &&
+                   !buffer_delay(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write(bh);
                } else {
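
The recovery path gets the matching change: if get_block() failed, a delayed buffer still has no valid block number, so submitting it would write to a meaningless location; it is skipped instead of being queued with mark_buffer_async_write(). The buffer_delay()/clear_buffer_delay() accessors used here come from the BUFFER_FNS() macro in include/linux/buffer_head.h, which expands to roughly:

	static inline void set_buffer_delay(struct buffer_head *bh)
	{
		set_bit(BH_Delay, &bh->b_state);
	}

	static inline void clear_buffer_delay(struct buffer_head *bh)
	{
		clear_bit(BH_Delay, &bh->b_state);
	}

	static inline int buffer_delay(const struct buffer_head *bh)
	{
		return test_bit(BH_Delay, &bh->b_state);
	}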
@@ -2061,6 +2064,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
                        struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
+       int i_size_changed = 0;
 
        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
@@ -2073,12 +2077,21 @@ int generic_write_end(struct file *file, struct address_space *mapping,
         */
        if (pos+copied > inode->i_size) {
                i_size_write(inode, pos+copied);
-               mark_inode_dirty(inode);
+               i_size_changed = 1;
        }
 
        unlock_page(page);
        page_cache_release(page);
 
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (i_size_changed)
+               mark_inode_dirty(inode);
+
        return copied;
 }
 EXPORT_SYMBOL(generic_write_end);
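
The generic_write_end() hunks are an instance of a common locking pattern: record a fact while a lock is held, act on it only after the lock is dropped. On journaling filesystems, mark_inode_dirty() can start a transaction, and calling it under the page lock would force the ordering "page lock, then transaction start", which conflicts with writeback paths that take them the other way around. A stripped-down sketch of the pattern; 'struct object', object_extend() and mark_object_dirty() are invented for this illustration:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct object {
		spinlock_t lock;
		loff_t size;
	};

	/* Stand-in for the expensive, lock-taking side effect. */
	static void mark_object_dirty(struct object *obj)
	{
	}

	static void object_extend(struct object *obj, loff_t new_size)
	{
		int size_changed = 0;

		spin_lock(&obj->lock);
		if (new_size > obj->size) {
			obj->size = new_size;
			size_changed = 1;	/* remember, act later */
		}
		spin_unlock(&obj->lock);

		/*
		 * The side effect runs with the lock dropped, keeping hold
		 * times short and the lock order unconstrained, just as
		 * mark_inode_dirty() now runs after unlock_page().
		 */
		if (size_changed)
			mark_object_dirty(obj);
	}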
@@ -3259,7 +3272,7 @@ int bh_submit_read(struct buffer_head *bh)
 EXPORT_SYMBOL(bh_submit_read);
 
 static void
-init_buffer_head(struct kmem_cache *cachep, void *data)
+init_buffer_head(void *data)
 {
        struct buffer_head *bh = data;
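
This final hunk is the one the page title describes: slab constructors lose their struct kmem_cache * argument, which no constructor actually used. The constructor is wired up when the cache is created; the buffer_head cache setup in this file should look roughly like this after the change (a sketch based on the 2.6.27-era buffer_init(), so treat the exact flag set as approximate):

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			(SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | SLAB_MEM_SPREAD),
			init_buffer_head);	/* ctor now takes void * only */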