docbook: make a networking book and fix a few errors
diff --git a/fs/buffer.c b/fs/buffer.c
index 1de9214..3ebccf4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
        return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
@@ -678,7 +678,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
-       if (list_empty(&bh->b_assoc_buffers)) {
+       if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
@@ -794,6 +794,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
+       struct address_space *mapping;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -801,9 +802,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
+               mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
+                       bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
@@ -822,8 +828,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
+               mapping = bh->b_assoc_map;
+               __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
+               if (buffer_dirty(bh)) {
+                       list_add(&bh->b_assoc_buffers,
+                                &bh->b_assoc_map->private_list);
+                       bh->b_assoc_map = mapping;
+               }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
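
The two fsync_buffers_list() hunks above pair with the lockless check in
mark_buffer_dirty_inode() (see the earlier hunk): one side sets the dirty bit
and then tests bh->b_assoc_map, the other clears the association and then,
after the new smp_mb(), tests the dirty bit, so at least one side must see the
other's store and a buffer can no longer end up dirty yet missing from the
private_list. As a rough userspace illustration of why a full barrier is
needed on both sides of this store/load pattern, here is a minimal C11 sketch
(hypothetical names, not kernel code):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

/* Stand-ins for the two fields raced on above (hypothetical names). */
static atomic_int dirty;		/* set by the "mark dirty" side       */
static atomic_int assoc;		/* cleared by the "fsync/remove" side */
static int saw_assoc, saw_dirty;

static void *marker(void *arg)
{
	atomic_store_explicit(&dirty, 1, memory_order_relaxed);
	/* pairing barrier; in the kernel it comes from the atomic
	 * test-and-set inside test_set_buffer_dirty() */
	atomic_thread_fence(memory_order_seq_cst);
	saw_assoc = atomic_load_explicit(&assoc, memory_order_relaxed);
	return NULL;
}

static void *syncer(void *arg)
{
	atomic_store_explicit(&assoc, 0, memory_order_relaxed);
	/* analogous to the smp_mb() added in the hunks above */
	atomic_thread_fence(memory_order_seq_cst);
	saw_dirty = atomic_load_explicit(&dirty, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_store(&assoc, 1);	/* buffer starts clean and associated */
	pthread_create(&a, NULL, marker, NULL);
	pthread_create(&b, NULL, syncer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With both fences the "both sides saw stale values" outcome is
	 * excluded, i.e. dirty-but-dropped-from-the-list cannot happen. */
	assert(saw_assoc == 0 || saw_dirty == 1);
	return 0;
}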
@@ -1164,7 +1179,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
        WARN_ON_ONCE(!buffer_uptodate(bh));
        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
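
The comment above documents that mark_buffer_dirty() takes the locks it needs
internally, so callers dirty a buffer without holding any of them. A hedged
usage sketch (hypothetical helper, not part of this patch) of a filesystem
zeroing a metadata block and leaving writeback to the VM:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/string.h>

static int zero_metadata_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);	/* no caller-held lock required */
	brelse(bh);
	return 0;
}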
@@ -1195,7 +1210,7 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
        clear_buffer_dirty(bh);
-       if (!list_empty(&bh->b_assoc_buffers)) {
+       if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;
 
                spin_lock(&buffer_mapping->private_lock);
@@ -1436,6 +1451,7 @@ void invalidate_bh_lrus(void)
 {
        on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
+EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
 void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
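
The new GPL export of invalidate_bh_lrus() lets modules flush the per-CPU
buffer_head LRUs themselves. A hedged, hypothetical sketch of the kind of
caller this enables (names invented, not part of this patch):

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Drop cached buffer_heads from the per-CPU LRUs before invalidating a
 * block device's page cache, so no stale bh references linger there. */
static void example_invalidate(struct block_device *bdev)
{
	invalidate_bh_lrus();
	invalidate_bdev(bdev);
}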
@@ -3021,7 +3037,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
        do {
                struct buffer_head *next = bh->b_this_page;
 
-               if (!list_empty(&bh->b_assoc_buffers))
+               if (bh->b_assoc_map)
                        __remove_assoc_queue(bh);
                bh = next;
        } while (bh != head);
@@ -3153,7 +3169,7 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
                                set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -3241,12 +3257,24 @@ int bh_submit_read(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(bh_submit_read);
 
+static void
+init_buffer_head(struct kmem_cache *cachep, void *data)
+{
+       struct buffer_head *bh = data;
+
+       memset(bh, 0, sizeof(*bh));
+       INIT_LIST_HEAD(&bh->b_assoc_buffers);
+}
+
 void __init buffer_init(void)
 {
        int nrpages;
 
-       bh_cachep = KMEM_CACHE(buffer_head,
-                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
+       bh_cachep = kmem_cache_create("buffer_head",
+                       sizeof(struct buffer_head), 0,
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                               SLAB_MEM_SPREAD),
+                               init_buffer_head);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
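
The final hunk moves buffer_head initialisation into a slab constructor: the
constructor runs when objects are first set up in a slab page rather than on
every allocation, so alloc_buffer_head() can drop the whole-object zeroing
that kmem_cache_zalloc() implied. A hedged sketch of the same pattern for a
hypothetical cache, using the same constructor signature as this kernel
version (names invented, not part of this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo {
	struct list_head link;
	unsigned long flags;
};

static struct kmem_cache *foo_cachep;

/* Runs once per object when its slab page is populated, not per alloc. */
static void init_foo(struct kmem_cache *cachep, void *data)
{
	struct foo *f = data;

	memset(f, 0, sizeof(*f));
	INIT_LIST_HEAD(&f->link);
}

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
					SLAB_RECLAIM_ACCOUNT, init_foo);
	return foo_cachep ? 0 : -ENOMEM;
}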