Btrfs: avoid taking the chunk_mutex in do_chunk_alloc
author    Josef Bacik <josef@redhat.com>
          Tue, 12 Apr 2011 00:20:11 +0000 (20:20 -0400)
committer Chris Mason <chris.mason@oracle.com>
          Sat, 16 Apr 2011 11:10:56 +0000 (07:10 -0400)
Every time we try to allocate disk space we check whether we can preemptively
allocate a chunk, but in the common case we don't allocate anything, so there
is no sense in taking the chunk_mutex at all.  Instead, if we are allocating a
chunk, mark it in the space_info so we don't get two people trying to allocate
at the same time.  Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
Reviewed-by: Liu Bo <liubo2009@cn.fujitsu.com>
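
As an illustration of the pattern above, here is a minimal userspace sketch
using pthreads; it is not btrfs code.  The member names (full, chunk_alloc,
chunk_mutex) mirror the patch, while should_alloc_chunk() and the
worker/main scaffolding are hypothetical stand-ins for the real btrfs logic.

/*
 * Minimal userspace sketch (not btrfs code) of the locking pattern this
 * patch introduces.  should_alloc_chunk() is a stand-in for the real
 * heuristic; the allocation body is simulated.
 */
#include <pthread.h>
#include <stdio.h>

struct space_info {
	pthread_spinlock_t lock;
	int full;		/* no more chunks can be allocated */
	int chunk_alloc;	/* set while someone is allocating */
};

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

static int should_alloc_chunk(struct space_info *info)
{
	return !info->full;	/* stand-in for the real heuristic */
}

static int do_chunk_alloc(struct space_info *info)
{
	int wait_for_alloc = 0;

again:
	pthread_spin_lock(&info->lock);
	if (info->full || !should_alloc_chunk(info)) {
		/* Common case: bail out without touching chunk_mutex. */
		pthread_spin_unlock(&info->lock);
		return 0;
	} else if (info->chunk_alloc) {
		wait_for_alloc = 1;	/* someone else is already at it */
	} else {
		info->chunk_alloc = 1;	/* we won; we do the allocation */
	}
	pthread_spin_unlock(&info->lock);

	pthread_mutex_lock(&chunk_mutex);

	/*
	 * chunk_mutex is held for the whole allocation, so once we get it
	 * the winner is done; drop the mutex and recheck from the top.
	 */
	if (wait_for_alloc) {
		pthread_mutex_unlock(&chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	/*
	 * The allocation proper would go here.  For the demo, pretend the
	 * new chunk filled the space, so retrying waiters bail out above.
	 */
	pthread_spin_lock(&info->lock);
	info->full = 1;
	info->chunk_alloc = 0;
	pthread_spin_unlock(&info->lock);

	pthread_mutex_unlock(&chunk_mutex);
	return 1;
}

static void *worker(void *arg)
{
	printf("allocated: %d\n", do_chunk_alloc(arg));
	return NULL;
}

int main(void)
{
	struct space_info info = { .full = 0, .chunk_alloc = 0 };
	pthread_t a, b;

	pthread_spin_init(&info.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, worker, &info);
	pthread_create(&b, NULL, worker, &info);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	pthread_spin_destroy(&info.lock);
	return 0;	/* build with: cc -pthread sketch.c */
}

The key point is the recheck after acquiring chunk_mutex: a waiter that lost
the race must re-evaluate whether an allocation is still needed, because the
winner may have filled the space in the meantime.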
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0d00a07..2e61fe1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -740,8 +740,10 @@ struct btrfs_space_info {
         */
        unsigned long reservation_progress;
 
-       int full;               /* indicates that we cannot allocate any more
+       int full:1;             /* indicates that we cannot allocate any more
                                   chunks for this space */
+       int chunk_alloc:1;      /* set if we are allocating a chunk */
+
        int force_alloc;        /* set if we need to force a chunk alloc for
                                   this space */
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2647948..31f33ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3039,6 +3039,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        found->bytes_may_use = 0;
        found->full = 0;
        found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       found->chunk_alloc = 0;
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
        atomic_set(&found->caching_threads, 0);
@@ -3318,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
        struct btrfs_space_info *space_info;
        struct btrfs_fs_info *fs_info = extent_root->fs_info;
+       int wait_for_alloc = 0;
        int ret = 0;
 
-       mutex_lock(&fs_info->chunk_mutex);
-
        flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
        space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3332,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        }
        BUG_ON(!space_info);
 
+again:
        spin_lock(&space_info->lock);
        if (space_info->force_alloc)
                force = space_info->force_alloc;
        if (space_info->full) {
                spin_unlock(&space_info->lock);
-               goto out;
+               return 0;
        }
 
        if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
                spin_unlock(&space_info->lock);
-               goto out;
+               return 0;
+       } else if (space_info->chunk_alloc) {
+               wait_for_alloc = 1;
+       } else {
+               space_info->chunk_alloc = 1;
        }
 
        spin_unlock(&space_info->lock);
 
+       mutex_lock(&fs_info->chunk_mutex);
+
+       /*
+        * The chunk_mutex is held throughout the entirety of a chunk
+        * allocation, so once we've acquired the chunk_mutex we know that the
+        * other guy is done and we need to recheck and see if we should
+        * allocate.
+        */
+       if (wait_for_alloc) {
+               mutex_unlock(&fs_info->chunk_mutex);
+               wait_for_alloc = 0;
+               goto again;
+       }
+
        /*
         * If we have mixed data/metadata chunks we want to make sure we keep
         * allocating mixed chunks instead of individual chunks.
@@ -3372,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                space_info->full = 1;
        else
                ret = 1;
+
        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
-out:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
        return ret;
 }