Signed-off-by: Chris Mason <chris.mason@oracle.com>
WARN_ON(p->nodes[0] != NULL);
WARN_ON(cow && root == root->fs_info->extent_root &&
!mutex_is_locked(&root->fs_info->alloc_mutex));
WARN_ON(p->nodes[0] != NULL);
WARN_ON(cow && root == root->fs_info->extent_root &&
!mutex_is_locked(&root->fs_info->alloc_mutex));
- WARN_ON(root == root->fs_info->chunk_root &&
- !mutex_is_locked(&root->fs_info->chunk_mutex));
- WARN_ON(root == root->fs_info->dev_root &&
- !mutex_is_locked(&root->fs_info->chunk_mutex));
if (ins_len < 0)
lowest_unlock = 2;
again:
if (ins_len < 0)
lowest_unlock = 2;
again:
struct mutex alloc_mutex;
struct mutex chunk_mutex;
struct mutex drop_mutex;
struct mutex alloc_mutex;
struct mutex chunk_mutex;
struct mutex drop_mutex;
+ struct mutex volume_mutex;
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
mutex_init(&fs_info->chunk_mutex);
mutex_init(&fs_info->transaction_kthread_mutex);
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->chunk_mutex);
mutex_init(&fs_info->transaction_kthread_mutex);
mutex_init(&fs_info->cleaner_mutex);
+ mutex_init(&fs_info->volume_mutex);
#if 0
ret = add_hasher(fs_info, "crc32c");
#if 0
ret = add_hasher(fs_info, "crc32c");
u64 search_start = *start_ret;
int wrapped = 0;
u64 search_start = *start_ret;
int wrapped = 0;
+ WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
free_space_cache = &root->fs_info->free_space_cache;
total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
free_space_cache = &root->fs_info->free_space_cache;
+ WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
while(total) {
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache) {
while(total) {
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache) {
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *fs_info = root->fs_info;
+ WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
if (pin) {
set_extent_dirty(&fs_info->pinned_extents,
bytenr, bytenr + num - 1, GFP_NOFS);
if (pin) {
set_extent_dirty(&fs_info->pinned_extents,
bytenr, bytenr + num - 1, GFP_NOFS);
+ WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
btrfs_set_stack_extent_refs(&extent_item, 1);
btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
path = btrfs_alloc_path();
btrfs_set_stack_extent_refs(&extent_item, 1);
btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
path = btrfs_alloc_path();
+ WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
if (!pending) {
struct extent_buffer *buf;
buf = btrfs_find_tree_block(root, bytenr, num_bytes);
if (!pending) {
struct extent_buffer *buf;
buf = btrfs_find_tree_block(root, bytenr, num_bytes);
struct btrfs_extent_item *ei;
u32 refs;
struct btrfs_extent_item *ei;
u32 refs;
+ WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
key.objectid = bytenr;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
key.offset = num_bytes;
key.objectid = bytenr;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
key.offset = num_bytes;
struct extent_io_tree *pending_del;
struct extent_io_tree *pinned_extents;
struct extent_io_tree *pending_del;
struct extent_io_tree *pinned_extents;
+ WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
pending_del = &extent_root->fs_info->pending_del;
pinned_extents = &extent_root->fs_info->pinned_extents;
pending_del = &extent_root->fs_info->pending_del;
pinned_extents = &extent_root->fs_info->pinned_extents;
btrfs_node_key(node, &found_key, path->slots[level]);
WARN_ON(memcmp(&found_key, &root_item->drop_progress,
sizeof(found_key)));
btrfs_node_key(node, &found_key, path->slots[level]);
WARN_ON(memcmp(&found_key, &root_item->drop_progress,
sizeof(found_key)));
+ /*
+ * unlock our path, this is safe because only this
+ * function is allowed to delete this snapshot
+ */
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
if (path->nodes[i] && path->locks[i]) {
path->locks[i] = 0;
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
if (path->nodes[i] && path->locks[i]) {
path->locks[i] = 0;
u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
u64 found_bytenr;
int ret;
u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
u64 found_bytenr;
int ret;
root_location.offset = (u64)-1;
root_location.type = BTRFS_ROOT_ITEM_KEY;
root_location.offset = (u64)-1;
root_location.type = BTRFS_ROOT_ITEM_KEY;
found_bytenr = path->nodes[level]->start;
}
found_bytenr = path->nodes[level]->start;
}
- for (i = level; i < BTRFS_MAX_LEVEL; i++) {
- if (!path->nodes[i])
- break;
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- }
btrfs_release_path(cur_root, path);
if (found_bytenr == bytenr) {
btrfs_release_path(cur_root, path);
if (found_bytenr == bytenr) {
+ WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
+
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_extent_ref);
ref_root = btrfs_ref_root(path->nodes[0], ref);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_extent_ref);
ref_root = btrfs_ref_root(path->nodes[0], ref);
found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
&root_location);
BUG_ON(!found_root);
found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
&root_location);
BUG_ON(!found_root);
+ mutex_unlock(&extent_root->fs_info->alloc_mutex);
if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
found_key.objectid = ref_objectid;
if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
found_key.objectid = ref_objectid;
/* this can happen if the reference is not against
* the latest version of the tree root
*/
/* this can happen if the reference is not against
* the latest version of the tree root
*/
- if (is_bad_inode(inode)) {
+ if (is_bad_inode(inode))
*last_file_objectid = inode->i_ino;
*last_file_root = found_root->root_key.objectid;
*last_file_offset = ref_offset;
*last_file_objectid = inode->i_ino;
*last_file_root = found_root->root_key.objectid;
*last_file_offset = ref_offset;
} else {
struct btrfs_trans_handle *trans;
struct extent_buffer *eb;
} else {
struct btrfs_trans_handle *trans;
struct extent_buffer *eb;
eb = read_tree_block(found_root, extent_key->objectid,
extent_key->offset, 0);
eb = read_tree_block(found_root, extent_key->objectid,
extent_key->offset, 0);
+ /*
+ * right here almost anything could happen to our key,
+ * but that's ok. The cow below will either relocate it
+ * or someone else will have relocated it. Either way,
+ * it is in a different spot than it was before and
+ * we're happy.
+ */
+
trans = btrfs_start_transaction(found_root, 1);
trans = btrfs_start_transaction(found_root, 1);
+ if (found_root == extent_root->fs_info->extent_root ||
+ found_root == extent_root->fs_info->chunk_root ||
+ found_root == extent_root->fs_info->dev_root) {
+ needs_lock = 1;
+ mutex_lock(&extent_root->fs_info->alloc_mutex);
+ }
+
path->lowest_level = level;
path->reada = 2;
ret = btrfs_search_slot(trans, found_root, &found_key, path,
0, 1);
path->lowest_level = 0;
path->lowest_level = level;
path->reada = 2;
ret = btrfs_search_slot(trans, found_root, &found_key, path,
0, 1);
path->lowest_level = 0;
- for (i = level; i < BTRFS_MAX_LEVEL; i++) {
- if (!path->nodes[i])
- break;
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- }
btrfs_release_path(found_root, path);
btrfs_release_path(found_root, path);
if (found_root == found_root->fs_info->extent_root)
btrfs_extent_post_op(trans, found_root);
if (found_root == found_root->fs_info->extent_root)
btrfs_extent_post_op(trans, found_root);
+ if (needs_lock)
+ mutex_unlock(&extent_root->fs_info->alloc_mutex);
+
btrfs_end_transaction(trans, found_root);
btrfs_end_transaction(trans, found_root);
+ mutex_lock(&extent_root->fs_info->alloc_mutex);
if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
+ mutex_unlock(&root->fs_info->alloc_mutex);
trans = btrfs_start_transaction(root, 1);
trans = btrfs_start_transaction(root, 1);
+ mutex_lock(&root->fs_info->alloc_mutex);
+
new_alloc_flags = update_block_group_flags(root,
shrink_block_group->flags);
if (new_alloc_flags != shrink_block_group->flags) {
new_alloc_flags = update_block_group_flags(root,
shrink_block_group->flags);
if (new_alloc_flags != shrink_block_group->flags) {
}
do_chunk_alloc(trans, root->fs_info->extent_root,
calc + 2 * 1024 * 1024, new_alloc_flags, force);
}
do_chunk_alloc(trans, root->fs_info->extent_root,
calc + 2 * 1024 * 1024, new_alloc_flags, force);
+
+ mutex_unlock(&root->fs_info->alloc_mutex);
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans, root);
+ mutex_lock(&root->fs_info->alloc_mutex);
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
printk("btrfs relocate found %llu last extent was %llu\n",
(unsigned long long)total_found,
(unsigned long long)found_key.objectid);
printk("btrfs relocate found %llu last extent was %llu\n",
(unsigned long long)total_found,
(unsigned long long)found_key.objectid);
+ mutex_unlock(&root->fs_info->alloc_mutex);
trans = btrfs_start_transaction(tree_root, 1);
btrfs_commit_transaction(trans, tree_root);
trans = btrfs_start_transaction(tree_root, 1);
btrfs_commit_transaction(trans, tree_root);
trans = btrfs_start_transaction(tree_root, 1);
btrfs_commit_transaction(trans, tree_root);
trans = btrfs_start_transaction(tree_root, 1);
btrfs_commit_transaction(trans, tree_root);
+ mutex_lock(&root->fs_info->alloc_mutex);
* we've freed all the extents, now remove the block
* group item from the tree
*/
* we've freed all the extents, now remove the block
* group item from the tree
*/
+ mutex_unlock(&root->fs_info->alloc_mutex);
+
trans = btrfs_start_transaction(root, 1);
trans = btrfs_start_transaction(root, 1);
+ mutex_lock(&root->fs_info->alloc_mutex);
memcpy(&key, &shrink_block_group->key, sizeof(key));
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
memcpy(&key, &shrink_block_group->key, sizeof(key));
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
kfree(shrink_block_group);
btrfs_del_item(trans, root, path);
kfree(shrink_block_group);
btrfs_del_item(trans, root, path);
+ btrfs_release_path(root, path);
+ mutex_unlock(&root->fs_info->alloc_mutex);
btrfs_commit_transaction(trans, root);
btrfs_commit_transaction(trans, root);
+ mutex_lock(&root->fs_info->alloc_mutex);
+
/* the code to unpin extents might set a few bits in the free
* space cache for this range again
*/
/* the code to unpin extents might set a few bits in the free
* space cache for this range again
*/
struct btrfs_block_group_cache *cache;
struct extent_io_tree *block_group_cache;
struct btrfs_block_group_cache *cache;
struct extent_io_tree *block_group_cache;
+ WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
extent_root = root->fs_info->extent_root;
block_group_cache = &root->fs_info->block_group_cache;
extent_root = root->fs_info->extent_root;
block_group_cache = &root->fs_info->block_group_cache;
- mutex_lock(&root->fs_info->alloc_mutex);
- mutex_lock(&root->fs_info->chunk_mutex);
+ mutex_lock(&root->fs_info->volume_mutex);
sizestr = vol_args->name;
devstr = strchr(sizestr, ':');
if (devstr) {
sizestr = vol_args->name;
devstr = strchr(sizestr, ':');
if (devstr) {
- mutex_lock(&root->fs_info->alloc_mutex);
- mutex_lock(&root->fs_info->chunk_mutex);
+ mutex_unlock(&root->fs_info->volume_mutex);
out:
kfree(vol_args);
return ret;
out:
kfree(vol_args);
return ret;
mutex_unlock(&uuid_mutex);
}
mutex_unlock(&uuid_mutex);
}
+/*
+ * Take both mutexes that protect chunk allocation state, always in
+ * alloc_mutex -> chunk_mutex order.  All callers must use this helper
+ * (and unlock_chunks below) so the ordering stays consistent and we
+ * never deadlock against another path taking the pair.
+ */
+static void lock_chunks(struct btrfs_root *root)
+{
+	mutex_lock(&root->fs_info->alloc_mutex);
+	mutex_lock(&root->fs_info->chunk_mutex);
+}
+
+/*
+ * Release the pair taken by lock_chunks().
+ * NOTE(review): unlock order here mirrors the lock order rather than
+ * reversing it; harmless for mutex_unlock, but worth confirming this
+ * matches the rest of the series' convention.
+ */
+static void unlock_chunks(struct btrfs_root *root)
+{
+	mutex_unlock(&root->fs_info->alloc_mutex);
+	mutex_unlock(&root->fs_info->chunk_mutex);
+}
+
+
int btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
int btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
total_bytes - 1);
out:
btrfs_free_path(path);
total_bytes - 1);
out:
btrfs_free_path(path);
btrfs_commit_transaction(trans, root);
return ret;
}
btrfs_commit_transaction(trans, root);
return ret;
}
- mutex_lock(&root->fs_info->alloc_mutex);
- mutex_lock(&root->fs_info->chunk_mutex);
+ mutex_lock(&root->fs_info->volume_mutex);
all_avail = root->fs_info->avail_data_alloc_bits |
root->fs_info->avail_system_alloc_bits |
all_avail = root->fs_info->avail_data_alloc_bits |
root->fs_info->avail_system_alloc_bits |
if (bdev)
close_bdev_excl(bdev);
out:
if (bdev)
close_bdev_excl(bdev);
out:
+ mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
mutex_unlock(&uuid_mutex);
- mutex_unlock(&root->fs_info->chunk_mutex);
- mutex_unlock(&root->fs_info->alloc_mutex);
- mutex_lock(&root->fs_info->alloc_mutex);
- mutex_lock(&root->fs_info->chunk_mutex);
+ mutex_lock(&root->fs_info->volume_mutex);
trans = btrfs_start_transaction(root, 1);
trans = btrfs_start_transaction(root, 1);
devices = &root->fs_info->fs_devices->devices;
list_for_each(cur, devices) {
device = list_entry(cur, struct btrfs_device, dev_list);
devices = &root->fs_info->fs_devices->devices;
list_for_each(cur, devices) {
device = list_entry(cur, struct btrfs_device, dev_list);
root->fs_info->fs_devices->num_devices++;
root->fs_info->fs_devices->open_devices++;
out:
root->fs_info->fs_devices->num_devices++;
root->fs_info->fs_devices->open_devices++;
out:
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans, root);
- mutex_unlock(&root->fs_info->chunk_mutex);
- mutex_unlock(&root->fs_info->alloc_mutex);
+ mutex_unlock(&root->fs_info->volume_mutex);
-int btrfs_grow_device(struct btrfs_trans_handle *trans,
+static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size)
{
struct btrfs_super_block *super_copy =
struct btrfs_device *device, u64 new_size)
{
struct btrfs_super_block *super_copy =
return btrfs_update_device(trans, device);
}
return btrfs_update_device(trans, device);
}
+/*
+ * Public wrapper around __btrfs_grow_device(): grows @device to
+ * @new_size while holding the chunk allocation locks.  The unlocked
+ * __ variant exists for callers that already hold the locks
+ * (presumably the chunk-allocation path itself — verify against the
+ * rest of the series).
+ *
+ * Returns the result of __btrfs_grow_device() (0 on success,
+ * negative errno otherwise, per the usual convention).
+ */
+int btrfs_grow_device(struct btrfs_trans_handle *trans,
+		      struct btrfs_device *device, u64 new_size)
+{
+	int ret;
+	lock_chunks(device->dev_root);
+	ret = __btrfs_grow_device(trans, device, new_size);
+	unlock_chunks(device->dev_root);
+	return ret;
+}
+
+
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
trans = btrfs_start_transaction(root, 1);
BUG_ON(!trans);
trans = btrfs_start_transaction(root, 1);
BUG_ON(!trans);
/*
* step two, delete the device extents and the
* chunk tree entries
/*
* step two, delete the device extents and the
* chunk tree entries
/* once for us */
free_extent_map(em);
/* once for us */
free_extent_map(em);
btrfs_end_transaction(trans, root);
return 0;
}
btrfs_end_transaction(trans, root);
return 0;
}
struct btrfs_key found_key;
struct btrfs_key found_key;
- BUG(); /* FIXME, needs locking */
-
+ mutex_lock(&dev_root->fs_info->volume_mutex);
dev_root = dev_root->fs_info->dev_root;
/* step one make some room on all the devices */
dev_root = dev_root->fs_info->dev_root;
/* step one make some room on all the devices */
ret = btrfs_previous_item(chunk_root, path, 0,
BTRFS_CHUNK_ITEM_KEY);
ret = btrfs_previous_item(chunk_root, path, 0,
BTRFS_CHUNK_ITEM_KEY);
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (found_key.objectid != key.objectid)
break;
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (found_key.objectid != key.objectid)
break;
chunk = btrfs_item_ptr(path->nodes[0],
path->slots[0],
struct btrfs_chunk);
chunk = btrfs_item_ptr(path->nodes[0],
path->slots[0],
struct btrfs_chunk);
if (key.offset == 0)
break;
if (key.offset == 0)
break;
+ btrfs_release_path(chunk_root, path);
ret = btrfs_relocate_chunk(chunk_root,
chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
BUG_ON(ret);
ret = btrfs_relocate_chunk(chunk_root,
chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
BUG_ON(ret);
- btrfs_release_path(chunk_root, path);
}
ret = 0;
error:
btrfs_free_path(path);
}
ret = 0;
error:
btrfs_free_path(path);
+ mutex_unlock(&dev_root->fs_info->volume_mutex);
device->total_bytes = new_size;
ret = btrfs_update_device(trans, device);
if (ret) {
device->total_bytes = new_size;
ret = btrfs_update_device(trans, device);
if (ret) {
btrfs_end_transaction(trans, root);
goto done;
}
WARN_ON(diff > old_total);
btrfs_set_super_total_bytes(super_copy, old_total - diff);
btrfs_end_transaction(trans, root);
goto done;
}
WARN_ON(diff > old_total);
btrfs_set_super_total_bytes(super_copy, old_total - diff);
btrfs_end_transaction(trans, root);
key.objectid = device->devid;
btrfs_end_transaction(trans, root);
key.objectid = device->devid;