[PATCH] Fix try_to_free_buffers() locking
author    Nick Piggin <nickpiggin@yahoo.com.au>
          Tue, 30 Jan 2007 03:36:27 +0000 (14:36 +1100)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Tue, 30 Jan 2007 04:20:42 +0000 (20:20 -0800)
Fix commit ecdfc9787fe527491baefc22dce8b2dbd5b2908d

Not to put too fine a point on it, but in a nutshell...

__set_page_dirty_buffers() | try_to_free_buffers()
---------------------------+---------------------------
                           | spin_lock(private_lock);
                           | drop_buffers()
                           | spin_unlock(private_lock);
spin_lock(private_lock)    |
!page_has_buffers()        |
spin_unlock(private_lock)  |
SetPageDirty()             |
                           | cancel_dirty_page()

                          oops!
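
For context, a heavily simplified sketch of the two paths is below (not the
real fs/buffer.c code; the buffer-ring walk, radix-tree dirty tagging and the
actual freeing of the detached buffer_heads are omitted).  With private_lock
held across drop_buffers() and cancel_dirty_page(), once
__set_page_dirty_buffers() has observed !page_has_buffers() under the lock the
cancel has already completed, so the subsequent SetPageDirty() sticks:

        /* Simplified sketch only -- see fs/buffer.c for the real code. */

        int __set_page_dirty_buffers(struct page *page)
        {
                struct address_space *mapping = page_mapping(page);

                spin_lock(&mapping->private_lock);
                if (page_has_buffers(page)) {
                        /* walk the buffer ring, set_buffer_dirty() on each bh */
                }
                spin_unlock(&mapping->private_lock);

                /*
                 * Before the fix, try_to_free_buffers() could have dropped
                 * the buffers already (so page_has_buffers() failed above)
                 * and then run cancel_dirty_page() after the bit is set
                 * here, silently losing the dirty bit.
                 */
                SetPageDirty(page);
                return 1;
        }

        int try_to_free_buffers(struct page *page)      /* as patched */
        {
                struct address_space *mapping = page->mapping;
                struct buffer_head *buffers_to_free = NULL;
                int ret;

                spin_lock(&mapping->private_lock);
                ret = drop_buffers(page, &buffers_to_free);
                if (ret)
                        cancel_dirty_page(page, PAGE_CACHE_SIZE);
                /* only drop private_lock once the dirty bit is dealt with */
                spin_unlock(&mapping->private_lock);

                /* ... free the buffer_heads on buffers_to_free ... */
                return ret;
        }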

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/buffer.c

index 460f1c4..1ad674f 100644
@@ -2844,7 +2844,6 @@ int try_to_free_buffers(struct page *page)
 
        spin_lock(&mapping->private_lock);
        ret = drop_buffers(page, &buffers_to_free);
-       spin_unlock(&mapping->private_lock);
 
        /*
         * If the filesystem writes its buffers by hand (eg ext3)
@@ -2855,9 +2854,14 @@ int try_to_free_buffers(struct page *page)
         * Also, during truncate, discard_buffer will have marked all
         * the page's buffers clean.  We discover that here and clean
         * the page also.
+        *
+        * private_lock must be held over this entire operation in order
+        * to synchronise against __set_page_dirty_buffers and prevent the
+        * dirty bit from being lost.
         */
        if (ret)
                cancel_dirty_page(page, PAGE_CACHE_SIZE);
+       spin_unlock(&mapping->private_lock);
 out:
        if (buffers_to_free) {
                struct buffer_head *bh = buffers_to_free;