nilfs2: add free entries count only if clear bit operation succeeded
author     Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
           Tue, 22 Feb 2011 17:26:17 +0000 (02:26 +0900)
committer  Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
           Tue, 8 Mar 2011 05:58:04 +0000 (14:58 +0900)
Three functions of the current persistent object allocator,
nilfs_palloc_commit_free_entry, nilfs_palloc_abort_alloc_entry, and
nilfs_palloc_freev, unconditionally increment the free entries counter
after doing a clear bit operation on a bitmap block.

If the clear bit operation overlaps with another free and finds the bit
already cleared, the counter is still incremented, so the free entries
count no longer adds up.  Fix the issue by making the counter updates
conditional on a successful clear bit operation.
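For illustration only, here is a minimal userspace sketch of the pattern
being fixed (not the nilfs2 code itself; the single-word bitmap, the
clear_bit_checked() helper and the free_entry() wrapper are made up for
this example): the counter is bumped only when the bit was actually
cleared, so a double free leaves the count consistent with the bitmap.

    #include <stdio.h>

    static unsigned long bitmap;   /* one word stands in for a bitmap block */
    static unsigned long nfree;    /* free entries counter for the group */

    /* return nonzero if the bit was set and has now been cleared */
    static int clear_bit_checked(unsigned long *map, unsigned int nr)
    {
            unsigned long mask = 1UL << nr;
            int was_set = (*map & mask) != 0;

            *map &= ~mask;
            return was_set;
    }

    static void free_entry(unsigned int nr)
    {
            if (!clear_bit_checked(&bitmap, nr))
                    printf("entry %u already freed\n", nr);
            else
                    nfree++;        /* count only a successful clear */
    }

    int main(void)
    {
            bitmap = 1UL << 3;
            free_entry(3);          /* first free: counted */
            free_entry(3);          /* double free: warned, not counted */
            printf("nfree = %lu\n", nfree);
            return 0;
    }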

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
fs/nilfs2/alloc.c

index d7fd696..0a0a66d 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -521,8 +521,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
                                    group_offset, bitmap))
                printk(KERN_WARNING "%s: entry number %llu already freed\n",
                       __func__, (unsigned long long)req->pr_entry_nr);
-
-       nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+       else
+               nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
 
        kunmap(req->pr_bitmap_bh->b_page);
        kunmap(req->pr_desc_bh->b_page);
@@ -558,8 +558,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
                                    group_offset, bitmap))
                printk(KERN_WARNING "%s: entry number %llu already freed\n",
                       __func__, (unsigned long long)req->pr_entry_nr);
-
-       nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+       else
+               nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
 
        kunmap(req->pr_bitmap_bh->b_page);
        kunmap(req->pr_desc_bh->b_page);
@@ -665,7 +665,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                for (j = i, n = 0;
                     (j < nitems) && nilfs_palloc_group_is_in(inode, group,
                                                              entry_nrs[j]);
-                    j++, n++) {
+                    j++) {
                        nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
                        if (!nilfs_clear_bit_atomic(
                                    nilfs_mdt_bgl_lock(inode, group),
@@ -674,6 +674,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                                       "%s: entry number %llu already freed\n",
                                       __func__,
                                       (unsigned long long)entry_nrs[j]);
+                       } else {
+                               n++;
                        }
                }
                nilfs_palloc_group_desc_add_entries(inode, group, desc, n);