/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

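/*
 * this returns non-zero if the block group has all of the requested
 * allocation bits (data/metadata/system plus raid profile) set
 */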
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

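/*
 * mark a range as unavailable for allocation in both freed_extents trees
 * while the block group's free space is being cached
 */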
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

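/*
 * clear the excluded ranges for this block group once caching is done
 * with them
 */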
static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

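/*
 * account for the superblock mirrors that live inside this block group
 * and exclude those ranges from free space caching
 */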
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

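/*
 * background kthread that walks the extent tree for a block group and
 * fills in the free space cache from the gaps between extent items
 */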
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_readonly += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}

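/*
 * start caching a block group's free space, first trying the on-disk
 * free space cache and falling back to the caching kthread
 */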
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.
	 */
	if (!trans->transaction->in_commit) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1)
			return 0;
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

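/* scale num by factor/10; a factor of 10 returns num unchanged */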
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

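/*
 * walk the block groups starting near the hint and return the start of
 * the first metadata block group that is less than factor/10 full
 */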
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

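/*
 * As an example of the implicit scheme for file data: an extent at disk
 * byte X referenced once from, say, inode 257 at file offset 0 in the
 * tree of subvolume 5 gets a back ref item keyed
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * whose btrfs_extent_data_ref fields are (5, 257, 0, 1).
 */
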
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

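/*
 * hash the (root objectid, inode objectid, offset) triple that identifies
 * an implicit data back ref into the 64 bit key offset used for the item
 */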
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

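/*
 * walk up the path until a node has a slot to the right of the current
 * one and return that key; returns 1 when the path is at the last key
 */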
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
			BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

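/*
 * add refs_to_add backref(s) for an existing extent: update or insert the
 * inline backref first, and fall back to a separate backref item when the
 * extent item has no room left (-EAGAIN)
 */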
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero when
	 * there still are pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}

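/*
 * run the delayed refs for all the heads in the cluster, returning the
 * number of refs that were processed
 */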
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}

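/*
 * look for a pending delayed ref on this data extent that would make it
 * shared; returns 0 when our root holds the only queued ref, 1 when some
 * other holder may exist, -ENOENT when there is no delayed ref head, and
 * -EAGAIN when the head was contended and the caller should retry
 */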
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

2445 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2446 struct btrfs_root *root,
2447 u64 objectid, u64 offset, u64 bytenr)
2449 struct btrfs_path *path;
2453 path = btrfs_alloc_path();
2458 ret = check_committed_ref(trans, root, path, objectid,
2460 if (ret && ret != -ENOENT)
2463 ret2 = check_delayed_ref(trans, root, path, objectid,
2465 } while (ret2 == -EAGAIN);
2467 if (ret2 && ret2 != -ENOENT) {
2472 if (ret != -ENOENT || ret2 != -ENOENT)
2475 btrfs_free_path(path);
2476 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2482 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2483 struct extent_buffer *buf, u32 nr_extents)
2485 struct btrfs_key key;
2486 struct btrfs_file_extent_item *fi;
2494 if (!root->ref_cows)
2497 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2499 root_gen = root->root_key.offset;
2502 root_gen = trans->transid - 1;
2505 level = btrfs_header_level(buf);
2506 nritems = btrfs_header_nritems(buf);
2509 struct btrfs_leaf_ref *ref;
2510 struct btrfs_extent_info *info;
2512 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2518 ref->root_gen = root_gen;
2519 ref->bytenr = buf->start;
2520 ref->owner = btrfs_header_owner(buf);
2521 ref->generation = btrfs_header_generation(buf);
2522 ref->nritems = nr_extents;
2523 info = ref->extents;
2525 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2527 btrfs_item_key_to_cpu(buf, &key, i);
2528 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2530 fi = btrfs_item_ptr(buf, i,
2531 struct btrfs_file_extent_item);
2532 if (btrfs_file_extent_type(buf, fi) ==
2533 BTRFS_FILE_EXTENT_INLINE)
2535 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2536 if (disk_bytenr == 0)
2539 info->bytenr = disk_bytenr;
2540 info->num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2542 info->objectid = key.objectid;
2543 info->offset = key.offset;
2547 ret = btrfs_add_leaf_ref(root, ref, shared);
2548 if (ret == -EEXIST && shared) {
2549 struct btrfs_leaf_ref *old;
2550 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2552 btrfs_remove_leaf_ref(root, old);
2553 btrfs_free_leaf_ref(root, old);
2554 ret = btrfs_add_leaf_ref(root, ref, shared);
2557 btrfs_free_leaf_ref(root, ref);
2563 /* when a block goes through cow, we update the reference counts of
2564 * everything that block points to. The internal pointers of the block
2565 * can be in just about any order, and it is likely to have clusters of
2566 * things that are close together and clusters of things that are not.
2568 * To help reduce the seeks that come with updating all of these reference
2569 * counts, sort them by byte number before actual updates are done.
2571 * struct refsort is used to match byte number to slot in the btree block.
2572 * we sort based on the byte number and then use the slot to actually
2573 * find the item.
2575 * struct refsort is smaller than struct btrfs_item and smaller than
2576 * struct btrfs_key_ptr. Since we're currently limited to the page size
2577 * for a btree block, there's no way for a kmalloc of refsorts for a
2578 * single node to be bigger than a page.
2586 * for passing into sort()
2588 static int refsort_cmp(const void *a_void, const void *b_void)
2590 const struct refsort *a = a_void;
2591 const struct refsort *b = b_void;
2593 if (a->bytenr < b->bytenr)
2595 if (a->bytenr > b->bytenr)
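/*
 * Sketch of the intended usage (assumed, reconstructed from the comment
 * above): fill one refsort entry per pointer, sort by bytenr, then walk
 * the sorted array and use ->slot to find the original item:
 *
 *	struct refsort *sorted;
 *	u32 refi = 0;
 *	int i;
 *
 *	sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
 *	for (i = 0; i < nritems; i++) {
 *		sorted[refi].bytenr = btrfs_node_blockptr(buf, i);
 *		sorted[refi].slot = i;
 *		refi++;
 *	}
 *	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * so the reference count updates hit the extent tree in ascending bytenr
 * order, reducing seeks.
 */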
2601 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2602 struct btrfs_root *root,
2603 struct extent_buffer *buf,
2604 int full_backref, int inc)
2611 struct btrfs_key key;
2612 struct btrfs_file_extent_item *fi;
2616 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2617 u64, u64, u64, u64, u64, u64);
2619 ref_root = btrfs_header_owner(buf);
2620 nritems = btrfs_header_nritems(buf);
2621 level = btrfs_header_level(buf);
2623 if (!root->ref_cows && level == 0)
2627 process_func = btrfs_inc_extent_ref;
2629 process_func = btrfs_free_extent;
2632 parent = buf->start;
2636 for (i = 0; i < nritems; i++) {
2638 btrfs_item_key_to_cpu(buf, &key, i);
2639 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2641 fi = btrfs_item_ptr(buf, i,
2642 struct btrfs_file_extent_item);
2643 if (btrfs_file_extent_type(buf, fi) ==
2644 BTRFS_FILE_EXTENT_INLINE)
2646 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2650 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2651 key.offset -= btrfs_file_extent_offset(buf, fi);
2652 ret = process_func(trans, root, bytenr, num_bytes,
2653 parent, ref_root, key.objectid,
2658 bytenr = btrfs_node_blockptr(buf, i);
2659 num_bytes = btrfs_level_size(root, level - 1);
2660 ret = process_func(trans, root, bytenr, num_bytes,
2661 parent, ref_root, level - 1, 0);
2672 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2673 struct extent_buffer *buf, int full_backref)
2675 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2678 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2679 struct extent_buffer *buf, int full_backref)
2681 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2684 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2685 struct btrfs_root *root,
2686 struct btrfs_path *path,
2687 struct btrfs_block_group_cache *cache)
2690 struct btrfs_root *extent_root = root->fs_info->extent_root;
2692 struct extent_buffer *leaf;
2694 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2699 leaf = path->nodes[0];
2700 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2701 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2702 btrfs_mark_buffer_dirty(leaf);
2703 btrfs_release_path(extent_root, path);
2711 static struct btrfs_block_group_cache *
2712 next_block_group(struct btrfs_root *root,
2713 struct btrfs_block_group_cache *cache)
2715 struct rb_node *node;
2716 spin_lock(&root->fs_info->block_group_cache_lock);
2717 node = rb_next(&cache->cache_node);
2718 btrfs_put_block_group(cache);
2720 cache = rb_entry(node, struct btrfs_block_group_cache,
2722 btrfs_get_block_group(cache);
2725 spin_unlock(&root->fs_info->block_group_cache_lock);
2729 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2730 struct btrfs_trans_handle *trans,
2731 struct btrfs_path *path)
2733 struct btrfs_root *root = block_group->fs_info->tree_root;
2734 struct inode *inode = NULL;
2741 * If this block group is smaller than 100 megs, don't bother caching the block group.
2744 if (block_group->key.offset < (100 * 1024 * 1024)) {
2745 spin_lock(&block_group->lock);
2746 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2747 spin_unlock(&block_group->lock);
2752 inode = lookup_free_space_inode(root, block_group, path);
2753 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2754 ret = PTR_ERR(inode);
2755 btrfs_release_path(root, path);
2759 if (IS_ERR(inode)) {
2763 if (block_group->ro)
2766 ret = create_free_space_inode(root, trans, block_group, path);
2773 * We want to set the generation to 0, that way if anything goes wrong
2774 * from here on out we know not to trust this cache when we load up next time.
2777 BTRFS_I(inode)->generation = 0;
2778 ret = btrfs_update_inode(trans, root, inode);
2781 if (i_size_read(inode) > 0) {
2782 ret = btrfs_truncate_free_space_cache(root, trans, path,
2788 spin_lock(&block_group->lock);
2789 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2790 spin_unlock(&block_group->lock);
2793 spin_unlock(&block_group->lock);
2795 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2800 * Just to make absolutely sure we have enough space, we're going to
2801 * preallocate 12 pages worth of space for each block group. In
2802 * practice we ought to use at most 8, but we need extra space so we can
2803 * add our header and have a terminator between the extents and the bitmaps.
2807 num_pages *= PAGE_CACHE_SIZE;
2809 ret = btrfs_check_data_free_space(inode, num_pages);
2813 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2814 num_pages, num_pages,
2816 btrfs_free_reserved_data_space(inode, num_pages);
2820 btrfs_release_path(root, path);
2822 spin_lock(&block_group->lock);
2824 block_group->disk_cache_state = BTRFS_DC_ERROR;
2826 block_group->disk_cache_state = BTRFS_DC_SETUP;
2827 spin_unlock(&block_group->lock);
2832 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2833 struct btrfs_root *root)
2835 struct btrfs_block_group_cache *cache;
2837 struct btrfs_path *path;
2840 path = btrfs_alloc_path();
2846 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2848 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2850 cache = next_block_group(root, cache);
2858 err = cache_save_setup(cache, trans, path);
2859 last = cache->key.objectid + cache->key.offset;
2860 btrfs_put_block_group(cache);
2865 err = btrfs_run_delayed_refs(trans, root,
2870 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2872 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2873 btrfs_put_block_group(cache);
2879 cache = next_block_group(root, cache);
2888 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2889 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2891 last = cache->key.objectid + cache->key.offset;
2893 err = write_one_cache_group(trans, root, path, cache);
2895 btrfs_put_block_group(cache);
2900 * I don't think this is needed since we're just marking our
2901 * preallocated extent as written, but just in case it can't hurt.
2905 err = btrfs_run_delayed_refs(trans, root,
2910 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2913 * Really this shouldn't happen, but it could if we
2914 * couldn't write the entire preallocated extent and
2915 * splitting the extent resulted in a new block.
2918 btrfs_put_block_group(cache);
2921 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2923 cache = next_block_group(root, cache);
2932 btrfs_write_out_cache(root, trans, cache, path);
2935 * If we didn't have an error then the cache state is still
2936 * NEED_WRITE, so we can set it to WRITTEN.
2938 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2939 cache->disk_cache_state = BTRFS_DC_WRITTEN;
2940 last = cache->key.objectid + cache->key.offset;
2941 btrfs_put_block_group(cache);
2944 btrfs_free_path(path);
2948 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2950 struct btrfs_block_group_cache *block_group;
2953 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2954 if (!block_group || block_group->ro)
2957 btrfs_put_block_group(block_group);
2961 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2962 u64 total_bytes, u64 bytes_used,
2963 struct btrfs_space_info **space_info)
2965 struct btrfs_space_info *found;
2969 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2970 BTRFS_BLOCK_GROUP_RAID10))
2975 found = __find_space_info(info, flags);
2977 spin_lock(&found->lock);
2978 found->total_bytes += total_bytes;
2979 found->bytes_used += bytes_used;
2980 found->disk_used += bytes_used * factor;
2982 spin_unlock(&found->lock);
2983 *space_info = found;
2986 found = kzalloc(sizeof(*found), GFP_NOFS);
2990 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2991 INIT_LIST_HEAD(&found->block_groups[i]);
2992 init_rwsem(&found->groups_sem);
2993 spin_lock_init(&found->lock);
2994 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2995 BTRFS_BLOCK_GROUP_SYSTEM |
2996 BTRFS_BLOCK_GROUP_METADATA);
2997 found->total_bytes = total_bytes;
2998 found->bytes_used = bytes_used;
2999 found->disk_used = bytes_used * factor;
3000 found->bytes_pinned = 0;
3001 found->bytes_reserved = 0;
3002 found->bytes_readonly = 0;
3003 found->bytes_may_use = 0;
3005 found->force_alloc = 0;
3006 *space_info = found;
3007 list_add_rcu(&found->list, &info->space_info);
3008 atomic_set(&found->caching_threads, 0);
3012 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3014 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
3015 BTRFS_BLOCK_GROUP_RAID1 |
3016 BTRFS_BLOCK_GROUP_RAID10 |
3017 BTRFS_BLOCK_GROUP_DUP);
3019 if (flags & BTRFS_BLOCK_GROUP_DATA)
3020 fs_info->avail_data_alloc_bits |= extra_flags;
3021 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3022 fs_info->avail_metadata_alloc_bits |= extra_flags;
3023 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3024 fs_info->avail_system_alloc_bits |= extra_flags;
3028 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3030 u64 num_devices = root->fs_info->fs_devices->rw_devices;
3032 if (num_devices == 1)
3033 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3034 if (num_devices < 4)
3035 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3037 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3038 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3039 BTRFS_BLOCK_GROUP_RAID10))) {
3040 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3043 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3044 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3045 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3048 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3049 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3050 (flags & BTRFS_BLOCK_GROUP_RAID10) |
3051 (flags & BTRFS_BLOCK_GROUP_DUP)))
3052 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
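/*
 * Worked example (illustrative): on a filesystem with two rw devices, a
 * request for DATA | RAID10 | RAID1 first loses RAID10 (fewer than four
 * devices) and then keeps RAID1 as the single surviving redundancy
 * scheme; the reductions above are arranged so that at most one profile
 * bit remains set.
 */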
3056 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3058 if (flags & BTRFS_BLOCK_GROUP_DATA)
3059 flags |= root->fs_info->avail_data_alloc_bits &
3060 root->fs_info->data_alloc_profile;
3061 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3062 flags |= root->fs_info->avail_system_alloc_bits &
3063 root->fs_info->system_alloc_profile;
3064 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3065 flags |= root->fs_info->avail_metadata_alloc_bits &
3066 root->fs_info->metadata_alloc_profile;
3067 return btrfs_reduce_alloc_profile(root, flags);
3070 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3075 flags = BTRFS_BLOCK_GROUP_DATA;
3076 else if (root == root->fs_info->chunk_root)
3077 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3079 flags = BTRFS_BLOCK_GROUP_METADATA;
3081 return get_alloc_profile(root, flags);
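/*
 * Illustrative callers (both appear later in this file): the data
 * reservation path asks for the data profile,
 *
 *	alloc_target = btrfs_get_alloc_profile(root, 1);
 *
 * while btrfs_alloc_block_rsv() passes 0 and gets the metadata profile.
 */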
3084 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3086 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3087 BTRFS_BLOCK_GROUP_DATA);
3091 * This will check the space info that the inode allocates from, to make sure
3092 * we have enough space for 'bytes' worth of new data.
3094 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3096 struct btrfs_space_info *data_sinfo;
3097 struct btrfs_root *root = BTRFS_I(inode)->root;
3099 int ret = 0, committed = 0, alloc_chunk = 1;
3101 /* make sure bytes are sectorsize aligned */
3102 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3104 if (root == root->fs_info->tree_root) {
3109 data_sinfo = BTRFS_I(inode)->space_info;
3114 /* make sure we have enough space to handle the data first */
3115 spin_lock(&data_sinfo->lock);
3116 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3117 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3118 data_sinfo->bytes_may_use;
3120 if (used + bytes > data_sinfo->total_bytes) {
3121 struct btrfs_trans_handle *trans;
3124 * if we don't have enough free bytes in this space then we need
3125 * to alloc a new chunk.
3127 if (!data_sinfo->full && alloc_chunk) {
3130 data_sinfo->force_alloc = 1;
3131 spin_unlock(&data_sinfo->lock);
3133 alloc_target = btrfs_get_alloc_profile(root, 1);
3134 trans = btrfs_join_transaction(root, 1);
3136 return PTR_ERR(trans);
3138 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3139 bytes + 2 * 1024 * 1024,
3141 btrfs_end_transaction(trans, root);
3146 btrfs_set_inode_space_info(root, inode);
3147 data_sinfo = BTRFS_I(inode)->space_info;
3151 spin_unlock(&data_sinfo->lock);
3153 /* commit the current transaction and try again */
3154 if (!committed && !root->fs_info->open_ioctl_trans) {
3156 trans = btrfs_join_transaction(root, 1);
3158 return PTR_ERR(trans);
3159 ret = btrfs_commit_transaction(trans, root);
3165 #if 0 /* I hope we never need this code again, just in case */
3166 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3167 "%llu bytes_reserved, " "%llu bytes_pinned, "
3168 "%llu bytes_readonly, %llu may use %llu total\n",
3169 (unsigned long long)bytes,
3170 (unsigned long long)data_sinfo->bytes_used,
3171 (unsigned long long)data_sinfo->bytes_reserved,
3172 (unsigned long long)data_sinfo->bytes_pinned,
3173 (unsigned long long)data_sinfo->bytes_readonly,
3174 (unsigned long long)data_sinfo->bytes_may_use,
3175 (unsigned long long)data_sinfo->total_bytes);
3179 data_sinfo->bytes_may_use += bytes;
3180 BTRFS_I(inode)->reserved_bytes += bytes;
3181 spin_unlock(&data_sinfo->lock);
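/*
 * Worked example of the sectorsize round-up above (illustrative, assuming
 * a 4K sectorsize): bytes = 5000 becomes
 *
 *	(5000 + 4095) & ~4095 = 8192
 *
 * so the reservation is always a whole number of sectors.
 */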
3187 * called when we are clearing a delalloc extent from the
3188 * inode's io_tree or there was an error for whatever reason
3189 * after calling btrfs_check_data_free_space
3191 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3193 struct btrfs_root *root = BTRFS_I(inode)->root;
3194 struct btrfs_space_info *data_sinfo;
3196 /* make sure bytes are sectorsize aligned */
3197 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3199 data_sinfo = BTRFS_I(inode)->space_info;
3200 spin_lock(&data_sinfo->lock);
3201 data_sinfo->bytes_may_use -= bytes;
3202 BTRFS_I(inode)->reserved_bytes -= bytes;
3203 spin_unlock(&data_sinfo->lock);
3206 static void force_metadata_allocation(struct btrfs_fs_info *info)
3208 struct list_head *head = &info->space_info;
3209 struct btrfs_space_info *found;
3212 list_for_each_entry_rcu(found, head, list) {
3213 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3214 found->force_alloc = 1;
3219 static int should_alloc_chunk(struct btrfs_space_info *sinfo,
3222 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3224 if (sinfo->bytes_used + sinfo->bytes_reserved +
3225 alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3228 if (sinfo->bytes_used + sinfo->bytes_reserved +
3229 alloc_bytes < div_factor(num_bytes, 8))
3235 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3236 struct btrfs_root *extent_root, u64 alloc_bytes,
3237 u64 flags, int force)
3239 struct btrfs_space_info *space_info;
3240 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3243 mutex_lock(&fs_info->chunk_mutex);
3245 flags = btrfs_reduce_alloc_profile(extent_root, flags);
3247 space_info = __find_space_info(extent_root->fs_info, flags);
3249 ret = update_space_info(extent_root->fs_info, flags,
3253 BUG_ON(!space_info);
3255 spin_lock(&space_info->lock);
3256 if (space_info->force_alloc)
3258 if (space_info->full) {
3259 spin_unlock(&space_info->lock);
3263 if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
3264 spin_unlock(&space_info->lock);
3267 spin_unlock(&space_info->lock);
3270 * if we're doing a data chunk, go ahead and make sure that
3271 * we keep a reasonable number of metadata chunks allocated in the
3274 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3275 fs_info->data_chunk_allocations++;
3276 if (!(fs_info->data_chunk_allocations %
3277 fs_info->metadata_ratio))
3278 force_metadata_allocation(fs_info);
3281 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3282 spin_lock(&space_info->lock);
3284 space_info->full = 1;
3287 space_info->force_alloc = 0;
3288 spin_unlock(&space_info->lock);
3290 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3294 static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
3295 struct btrfs_root *root,
3296 struct btrfs_space_info *sinfo, u64 num_bytes)
3304 spin_lock(&sinfo->lock);
3305 ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
3306 spin_unlock(&sinfo->lock);
3311 trans = btrfs_join_transaction(root, 1);
3312 BUG_ON(IS_ERR(trans));
3316 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3317 num_bytes + 2 * 1024 * 1024,
3318 get_alloc_profile(root, sinfo->flags), 0);
3321 btrfs_end_transaction(trans, root);
3323 return ret == 1 ? 1 : 0;
3327 * shrink metadata reservation for delalloc
3329 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3330 struct btrfs_root *root, u64 to_reclaim)
3332 struct btrfs_block_rsv *block_rsv;
3339 block_rsv = &root->fs_info->delalloc_block_rsv;
3340 spin_lock(&block_rsv->lock);
3341 reserved = block_rsv->reserved;
3342 spin_unlock(&block_rsv->lock);
3347 max_reclaim = min(reserved, to_reclaim);
3350 ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
3352 __set_current_state(TASK_INTERRUPTIBLE);
3353 schedule_timeout(pause);
3355 if (pause > HZ / 10)
3361 spin_lock(&block_rsv->lock);
3362 if (reserved > block_rsv->reserved)
3363 reclaimed = reserved - block_rsv->reserved;
3364 reserved = block_rsv->reserved;
3365 spin_unlock(&block_rsv->lock);
3367 if (reserved == 0 || reclaimed >= max_reclaim)
3370 if (trans && trans->transaction->blocked)
3373 return reclaimed >= to_reclaim;
3376 static int should_retry_reserve(struct btrfs_trans_handle *trans,
3377 struct btrfs_root *root,
3378 struct btrfs_block_rsv *block_rsv,
3379 u64 num_bytes, int *retries)
3381 struct btrfs_space_info *space_info = block_rsv->space_info;
3387 ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
3391 if (trans && trans->transaction->in_commit)
3394 ret = shrink_delalloc(trans, root, num_bytes);
3398 spin_lock(&space_info->lock);
3399 if (space_info->bytes_pinned < num_bytes)
3401 spin_unlock(&space_info->lock);
3410 trans = btrfs_join_transaction(root, 1);
3411 BUG_ON(IS_ERR(trans));
3412 ret = btrfs_commit_transaction(trans, root);
3418 static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
3421 struct btrfs_space_info *space_info = block_rsv->space_info;
3425 spin_lock(&space_info->lock);
3426 unused = space_info->bytes_used + space_info->bytes_reserved +
3427 space_info->bytes_pinned + space_info->bytes_readonly;
3429 if (unused < space_info->total_bytes)
3430 unused = space_info->total_bytes - unused;
3434 if (unused >= num_bytes) {
3435 if (block_rsv->priority >= 10) {
3436 space_info->bytes_reserved += num_bytes;
3439 if ((unused + block_rsv->reserved) *
3440 block_rsv->priority >=
3441 (num_bytes + block_rsv->reserved) * 10) {
3442 space_info->bytes_reserved += num_bytes;
3447 spin_unlock(&space_info->lock);
3452 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3453 struct btrfs_root *root)
3455 struct btrfs_block_rsv *block_rsv;
3457 block_rsv = trans->block_rsv;
3459 block_rsv = root->block_rsv;
3462 block_rsv = &root->fs_info->empty_block_rsv;
3467 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3471 spin_lock(&block_rsv->lock);
3472 if (block_rsv->reserved >= num_bytes) {
3473 block_rsv->reserved -= num_bytes;
3474 if (block_rsv->reserved < block_rsv->size)
3475 block_rsv->full = 0;
3478 spin_unlock(&block_rsv->lock);
3482 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3483 u64 num_bytes, int update_size)
3485 spin_lock(&block_rsv->lock);
3486 block_rsv->reserved += num_bytes;
3488 block_rsv->size += num_bytes;
3489 else if (block_rsv->reserved >= block_rsv->size)
3490 block_rsv->full = 1;
3491 spin_unlock(&block_rsv->lock);
3494 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3495 struct btrfs_block_rsv *dest, u64 num_bytes)
3497 struct btrfs_space_info *space_info = block_rsv->space_info;
3499 spin_lock(&block_rsv->lock);
3500 if (num_bytes == (u64)-1)
3501 num_bytes = block_rsv->size;
3502 block_rsv->size -= num_bytes;
3503 if (block_rsv->reserved >= block_rsv->size) {
3504 num_bytes = block_rsv->reserved - block_rsv->size;
3505 block_rsv->reserved = block_rsv->size;
3506 block_rsv->full = 1;
3510 spin_unlock(&block_rsv->lock);
3512 if (num_bytes > 0) {
3514 block_rsv_add_bytes(dest, num_bytes, 0);
3516 spin_lock(&space_info->lock);
3517 space_info->bytes_reserved -= num_bytes;
3518 spin_unlock(&space_info->lock);
3523 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3524 struct btrfs_block_rsv *dst, u64 num_bytes)
3528 ret = block_rsv_use_bytes(src, num_bytes);
3532 block_rsv_add_bytes(dst, num_bytes, 1);
3536 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3538 memset(rsv, 0, sizeof(*rsv));
3539 spin_lock_init(&rsv->lock);
3540 atomic_set(&rsv->usage, 1);
3542 INIT_LIST_HEAD(&rsv->list);
3545 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3547 struct btrfs_block_rsv *block_rsv;
3548 struct btrfs_fs_info *fs_info = root->fs_info;
3551 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3555 btrfs_init_block_rsv(block_rsv);
3557 alloc_target = btrfs_get_alloc_profile(root, 0);
3558 block_rsv->space_info = __find_space_info(fs_info,
3559 BTRFS_BLOCK_GROUP_METADATA);
3564 void btrfs_free_block_rsv(struct btrfs_root *root,
3565 struct btrfs_block_rsv *rsv)
3567 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3568 btrfs_block_rsv_release(root, rsv, (u64)-1);
3575 * make the block_rsv struct be able to capture freed space.
3576 * the captured space will be re-added to the block_rsv struct
3577 * after transaction commit
3579 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3580 struct btrfs_block_rsv *block_rsv)
3582 block_rsv->durable = 1;
3583 mutex_lock(&fs_info->durable_block_rsv_mutex);
3584 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3585 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3588 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3589 struct btrfs_root *root,
3590 struct btrfs_block_rsv *block_rsv,
3591 u64 num_bytes, int *retries)
3598 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3600 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3604 ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries);
3611 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3612 struct btrfs_root *root,
3613 struct btrfs_block_rsv *block_rsv,
3614 u64 min_reserved, int min_factor)
3617 int commit_trans = 0;
3623 spin_lock(&block_rsv->lock);
3625 num_bytes = div_factor(block_rsv->size, min_factor);
3626 if (min_reserved > num_bytes)
3627 num_bytes = min_reserved;
3629 if (block_rsv->reserved >= num_bytes) {
3632 num_bytes -= block_rsv->reserved;
3633 if (block_rsv->durable &&
3634 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3637 spin_unlock(&block_rsv->lock);
3641 if (block_rsv->refill_used) {
3642 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3644 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3653 trans = btrfs_join_transaction(root, 1);
3654 BUG_ON(IS_ERR(trans));
3655 ret = btrfs_commit_transaction(trans, root);
3660 printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
3661 block_rsv->size, block_rsv->reserved,
3662 block_rsv->freed[0], block_rsv->freed[1]);
3667 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3668 struct btrfs_block_rsv *dst_rsv,
3671 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3674 void btrfs_block_rsv_release(struct btrfs_root *root,
3675 struct btrfs_block_rsv *block_rsv,
3678 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3679 if (global_rsv->full || global_rsv == block_rsv ||
3680 block_rsv->space_info != global_rsv->space_info)
3682 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3686 * helper to calculate size of global block reservation.
3687 * the desired value is sum of space used by extent tree,
3688 * checksum tree and root tree
3690 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3692 struct btrfs_space_info *sinfo;
3696 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3699 * per-tree used space accounting can be inaccurate, so we can't rely on it.
3702 spin_lock(&fs_info->extent_root->accounting_lock);
3703 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3704 spin_unlock(&fs_info->extent_root->accounting_lock);
3706 spin_lock(&fs_info->csum_root->accounting_lock);
3707 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3708 spin_unlock(&fs_info->csum_root->accounting_lock);
3710 spin_lock(&fs_info->tree_root->accounting_lock);
3711 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3712 spin_unlock(&fs_info->tree_root->accounting_lock);
3714 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3715 spin_lock(&sinfo->lock);
3716 data_used = sinfo->bytes_used;
3717 spin_unlock(&sinfo->lock);
3719 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3720 spin_lock(&sinfo->lock);
3721 meta_used = sinfo->bytes_used;
3722 spin_unlock(&sinfo->lock);
3724 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * csum_size * 2;
3726 num_bytes += div64_u64(data_used + meta_used, 50);
3728 if (num_bytes * 3 > meta_used)
3729 num_bytes = div64_u64(meta_used, 3);
3731 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
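/*
 * Illustrative numbers (assumed, not from the original source): with 100GB
 * of data, 1GB of metadata in use, 4K blocks and 4-byte crc32c checksums,
 * the csum term is (100GB / 4K) * 4 * 2 = 200MB and the 2% term adds
 * (100GB + 1GB) / 50 ~= 2GB; the sum exceeds a third of meta_used, so the
 * result is clamped to 1GB / 3 and rounded up to the 4MB (leafsize << 10)
 * granularity, about 344MB.
 */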
3734 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3736 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3737 struct btrfs_space_info *sinfo = block_rsv->space_info;
3740 num_bytes = calc_global_metadata_size(fs_info);
3742 spin_lock(&block_rsv->lock);
3743 spin_lock(&sinfo->lock);
3745 block_rsv->size = num_bytes;
3747 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3748 sinfo->bytes_reserved + sinfo->bytes_readonly;
3750 if (sinfo->total_bytes > num_bytes) {
3751 num_bytes = sinfo->total_bytes - num_bytes;
3752 block_rsv->reserved += num_bytes;
3753 sinfo->bytes_reserved += num_bytes;
3756 if (block_rsv->reserved >= block_rsv->size) {
3757 num_bytes = block_rsv->reserved - block_rsv->size;
3758 sinfo->bytes_reserved -= num_bytes;
3759 block_rsv->reserved = block_rsv->size;
3760 block_rsv->full = 1;
3763 printk(KERN_INFO "global block rsv size %llu reserved %llu\n",
3764 block_rsv->size, block_rsv->reserved);
3766 spin_unlock(&sinfo->lock);
3767 spin_unlock(&block_rsv->lock);
3770 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3772 struct btrfs_space_info *space_info;
3774 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3775 fs_info->chunk_block_rsv.space_info = space_info;
3776 fs_info->chunk_block_rsv.priority = 10;
3778 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3779 fs_info->global_block_rsv.space_info = space_info;
3780 fs_info->global_block_rsv.priority = 10;
3781 fs_info->global_block_rsv.refill_used = 1;
3782 fs_info->delalloc_block_rsv.space_info = space_info;
3783 fs_info->trans_block_rsv.space_info = space_info;
3784 fs_info->empty_block_rsv.space_info = space_info;
3785 fs_info->empty_block_rsv.priority = 10;
3787 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3788 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3789 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3790 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3791 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3793 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3795 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3797 update_global_block_rsv(fs_info);
3800 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3802 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3803 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3804 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3805 WARN_ON(fs_info->trans_block_rsv.size > 0);
3806 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3807 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3808 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3811 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3813 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3814 3 * num_items;
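/*
 * Worked example (illustrative, assuming 4K leaf/node sizes and
 * BTRFS_MAX_LEVEL == 8): a single item may touch one leaf plus up to
 * seven internal levels, 4K + 7 * 4K = 32K, and the fixed factor of 3
 * above pads that for COW and splits, so each reserved item costs 96K.
 */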
3817 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3818 struct btrfs_root *root,
3819 int num_items, int *retries)
3824 if (num_items == 0 || root->fs_info->chunk_root == root)
3827 num_bytes = calc_trans_metadata_size(root, num_items);
3828 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3829 num_bytes, retries);
3831 trans->bytes_reserved += num_bytes;
3832 trans->block_rsv = &root->fs_info->trans_block_rsv;
3837 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3838 struct btrfs_root *root)
3840 if (!trans->bytes_reserved)
3843 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3844 btrfs_block_rsv_release(root, trans->block_rsv,
3845 trans->bytes_reserved);
3846 trans->bytes_reserved = 0;
3849 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3850 struct inode *inode)
3852 struct btrfs_root *root = BTRFS_I(inode)->root;
3853 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3854 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3857 * one for deleting orphan item, one for updating inode and
3858 * two for calling btrfs_truncate_inode_items.
3860 * btrfs_truncate_inode_items is a delete operation, it frees
3861 * more space than it uses in most cases. So two units of
3862 * metadata space should be enough for calling it many times.
3863 * If all of the metadata space is used, we can commit
3864 * transaction and use space it freed.
3866 u64 num_bytes = calc_trans_metadata_size(root, 4);
3867 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3870 void btrfs_orphan_release_metadata(struct inode *inode)
3872 struct btrfs_root *root = BTRFS_I(inode)->root;
3873 u64 num_bytes = calc_trans_metadata_size(root, 4);
3874 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3877 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3878 struct btrfs_pending_snapshot *pending)
3880 struct btrfs_root *root = pending->root;
3881 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3882 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3884 * two for root back/forward refs, two for directory entries
3885 * and one for root of the snapshot.
3887 u64 num_bytes = calc_trans_metadata_size(root, 5);
3888 dst_rsv->space_info = src_rsv->space_info;
3889 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3892 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3894 return num_bytes >> 3; /* a fixed 1/8th of the data size for checksums */
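/*
 * e.g. (illustrative): reserving metadata for 64K of dirty data sets
 * aside 64K >> 3 = 8K for checksum items.
 */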
3897 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3899 struct btrfs_root *root = BTRFS_I(inode)->root;
3900 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3906 if (btrfs_transaction_in_commit(root->fs_info))
3907 schedule_timeout(1);
3909 num_bytes = ALIGN(num_bytes, root->sectorsize);
3911 spin_lock(&BTRFS_I(inode)->accounting_lock);
3912 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3913 if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3914 nr_extents -= BTRFS_I(inode)->reserved_extents;
3915 to_reserve = calc_trans_metadata_size(root, nr_extents);
3921 to_reserve += calc_csum_metadata_size(inode, num_bytes);
3922 ret = reserve_metadata_bytes(block_rsv, to_reserve);
3924 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3925 ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
3932 BTRFS_I(inode)->reserved_extents += nr_extents;
3933 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3934 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3936 block_rsv_add_bytes(block_rsv, to_reserve, 1);
3938 if (block_rsv->size > 512 * 1024 * 1024)
3939 shrink_delalloc(NULL, root, to_reserve);
3944 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3946 struct btrfs_root *root = BTRFS_I(inode)->root;
3950 num_bytes = ALIGN(num_bytes, root->sectorsize);
3951 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
3953 spin_lock(&BTRFS_I(inode)->accounting_lock);
3954 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
3955 if (nr_extents < BTRFS_I(inode)->reserved_extents) {
3956 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
3957 BTRFS_I(inode)->reserved_extents -= nr_extents;
3961 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3963 to_free = calc_csum_metadata_size(inode, num_bytes);
3965 to_free += calc_trans_metadata_size(root, nr_extents);
3967 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
3971 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
3975 ret = btrfs_check_data_free_space(inode, num_bytes);
3979 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
3981 btrfs_free_reserved_data_space(inode, num_bytes);
3988 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
3990 btrfs_delalloc_release_metadata(inode, num_bytes);
3991 btrfs_free_reserved_data_space(inode, num_bytes);
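/*
 * Illustrative pairing (a sketch of an assumed caller, e.g. a buffered
 * write path): the two helpers above are used symmetrically,
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_delalloc_release_space(inode, num_bytes);   (on error paths)
 *
 * so data and metadata reservations are always taken and dropped together.
 */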
3994 static int update_block_group(struct btrfs_trans_handle *trans,
3995 struct btrfs_root *root,
3996 u64 bytenr, u64 num_bytes, int alloc)
3998 struct btrfs_block_group_cache *cache = NULL;
3999 struct btrfs_fs_info *info = root->fs_info;
4000 u64 total = num_bytes;
4005 /* block accounting for super block */
4006 spin_lock(&info->delalloc_lock);
4007 old_val = btrfs_super_bytes_used(&info->super_copy);
4009 old_val += num_bytes;
4011 old_val -= num_bytes;
4012 btrfs_set_super_bytes_used(&info->super_copy, old_val);
4013 spin_unlock(&info->delalloc_lock);
4016 cache = btrfs_lookup_block_group(info, bytenr);
4019 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4020 BTRFS_BLOCK_GROUP_RAID1 |
4021 BTRFS_BLOCK_GROUP_RAID10))
4026 * If this block group has free space cache written out, we
4027 * need to make sure to load it if we are removing space. This
4028 * is because we need the unpinning stage to actually add the
4029 * space back to the block group, otherwise we will leak space.
4031 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4032 cache_block_group(cache, trans, 1);
4034 byte_in_group = bytenr - cache->key.objectid;
4035 WARN_ON(byte_in_group > cache->key.offset);
4037 spin_lock(&cache->space_info->lock);
4038 spin_lock(&cache->lock);
4040 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4041 cache->disk_cache_state < BTRFS_DC_CLEAR)
4042 cache->disk_cache_state = BTRFS_DC_CLEAR;
4045 old_val = btrfs_block_group_used(&cache->item);
4046 num_bytes = min(total, cache->key.offset - byte_in_group);
4048 old_val += num_bytes;
4049 btrfs_set_block_group_used(&cache->item, old_val);
4050 cache->reserved -= num_bytes;
4051 cache->space_info->bytes_reserved -= num_bytes;
4052 cache->space_info->bytes_used += num_bytes;
4053 cache->space_info->disk_used += num_bytes * factor;
4054 spin_unlock(&cache->lock);
4055 spin_unlock(&cache->space_info->lock);
4057 old_val -= num_bytes;
4058 btrfs_set_block_group_used(&cache->item, old_val);
4059 cache->pinned += num_bytes;
4060 cache->space_info->bytes_pinned += num_bytes;
4061 cache->space_info->bytes_used -= num_bytes;
4062 cache->space_info->disk_used -= num_bytes * factor;
4063 spin_unlock(&cache->lock);
4064 spin_unlock(&cache->space_info->lock);
4066 set_extent_dirty(info->pinned_extents,
4067 bytenr, bytenr + num_bytes - 1,
4068 GFP_NOFS | __GFP_NOFAIL);
4070 btrfs_put_block_group(cache);
4072 bytenr += num_bytes;
4077 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4079 struct btrfs_block_group_cache *cache;
4082 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4086 bytenr = cache->key.objectid;
4087 btrfs_put_block_group(cache);
4092 static int pin_down_extent(struct btrfs_root *root,
4093 struct btrfs_block_group_cache *cache,
4094 u64 bytenr, u64 num_bytes, int reserved)
4096 spin_lock(&cache->space_info->lock);
4097 spin_lock(&cache->lock);
4098 cache->pinned += num_bytes;
4099 cache->space_info->bytes_pinned += num_bytes;
4101 cache->reserved -= num_bytes;
4102 cache->space_info->bytes_reserved -= num_bytes;
4104 spin_unlock(&cache->lock);
4105 spin_unlock(&cache->space_info->lock);
4107 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4108 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4113 * this function must be called within transaction
4115 int btrfs_pin_extent(struct btrfs_root *root,
4116 u64 bytenr, u64 num_bytes, int reserved)
4118 struct btrfs_block_group_cache *cache;
4120 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4123 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4125 btrfs_put_block_group(cache);
4130 * update size of reserved extents. this function may return -EAGAIN
4131 * if 'reserve' is true or 'sinfo' is false.
4133 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4134 u64 num_bytes, int reserve, int sinfo)
4138 struct btrfs_space_info *space_info = cache->space_info;
4139 spin_lock(&space_info->lock);
4140 spin_lock(&cache->lock);
4145 cache->reserved += num_bytes;
4146 space_info->bytes_reserved += num_bytes;
4150 space_info->bytes_readonly += num_bytes;
4151 cache->reserved -= num_bytes;
4152 space_info->bytes_reserved -= num_bytes;
4154 spin_unlock(&cache->lock);
4155 spin_unlock(&space_info->lock);
4157 spin_lock(&cache->lock);
4162 cache->reserved += num_bytes;
4164 cache->reserved -= num_bytes;
4166 spin_unlock(&cache->lock);
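/*
 * Illustrative caller pattern (taken from btrfs_free_tree_block() below):
 * a release that races with the block group going read-only sees -EAGAIN
 * and retries with sinfo set so the bytes land in bytes_readonly:
 *
 *	ret = update_reserved_bytes(cache, buf->len, 0, 0);
 *	if (ret == -EAGAIN)
 *		update_reserved_bytes(cache, buf->len, 0, 1);
 */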
4171 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4172 struct btrfs_root *root)
4174 struct btrfs_fs_info *fs_info = root->fs_info;
4175 struct btrfs_caching_control *next;
4176 struct btrfs_caching_control *caching_ctl;
4177 struct btrfs_block_group_cache *cache;
4179 down_write(&fs_info->extent_commit_sem);
4181 list_for_each_entry_safe(caching_ctl, next,
4182 &fs_info->caching_block_groups, list) {
4183 cache = caching_ctl->block_group;
4184 if (block_group_cache_done(cache)) {
4185 cache->last_byte_to_unpin = (u64)-1;
4186 list_del_init(&caching_ctl->list);
4187 put_caching_control(caching_ctl);
4189 cache->last_byte_to_unpin = caching_ctl->progress;
4193 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4194 fs_info->pinned_extents = &fs_info->freed_extents[1];
4196 fs_info->pinned_extents = &fs_info->freed_extents[0];
4198 up_write(&fs_info->extent_commit_sem);
4200 update_global_block_rsv(fs_info);
4204 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4206 struct btrfs_fs_info *fs_info = root->fs_info;
4207 struct btrfs_block_group_cache *cache = NULL;
4210 while (start <= end) {
4212 if (!cache || start >= cache->key.objectid + cache->key.offset) {
4214 btrfs_put_block_group(cache);
4215 cache = btrfs_lookup_block_group(fs_info, start);
4219 len = cache->key.objectid + cache->key.offset - start;
4220 len = min(len, end + 1 - start);
4222 if (start < cache->last_byte_to_unpin) {
4223 len = min(len, cache->last_byte_to_unpin - start);
4224 btrfs_add_free_space(cache, start, len);
4229 spin_lock(&cache->space_info->lock);
4230 spin_lock(&cache->lock);
4231 cache->pinned -= len;
4232 cache->space_info->bytes_pinned -= len;
4234 cache->space_info->bytes_readonly += len;
4235 } else if (cache->reserved_pinned > 0) {
4236 len = min(len, cache->reserved_pinned);
4237 cache->reserved_pinned -= len;
4238 cache->space_info->bytes_reserved += len;
4240 spin_unlock(&cache->lock);
4241 spin_unlock(&cache->space_info->lock);
4245 btrfs_put_block_group(cache);
4249 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4250 struct btrfs_root *root)
4252 struct btrfs_fs_info *fs_info = root->fs_info;
4253 struct extent_io_tree *unpin;
4254 struct btrfs_block_rsv *block_rsv;
4255 struct btrfs_block_rsv *next_rsv;
4261 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4262 unpin = &fs_info->freed_extents[1];
4264 unpin = &fs_info->freed_extents[0];
4267 ret = find_first_extent_bit(unpin, 0, &start, &end,
4272 ret = btrfs_discard_extent(root, start, end + 1 - start);
4274 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4275 unpin_extent_range(root, start, end);
4279 mutex_lock(&fs_info->durable_block_rsv_mutex);
4280 list_for_each_entry_safe(block_rsv, next_rsv,
4281 &fs_info->durable_block_rsv_list, list) {
4283 idx = trans->transid & 0x1;
4284 if (block_rsv->freed[idx] > 0) {
4285 block_rsv_add_bytes(block_rsv,
4286 block_rsv->freed[idx], 0);
4287 block_rsv->freed[idx] = 0;
4289 if (atomic_read(&block_rsv->usage) == 0) {
4290 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4292 if (block_rsv->freed[0] == 0 &&
4293 block_rsv->freed[1] == 0) {
4294 list_del_init(&block_rsv->list);
4298 btrfs_block_rsv_release(root, block_rsv, 0);
4301 mutex_unlock(&fs_info->durable_block_rsv_mutex);
4306 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4307 struct btrfs_root *root,
4308 u64 bytenr, u64 num_bytes, u64 parent,
4309 u64 root_objectid, u64 owner_objectid,
4310 u64 owner_offset, int refs_to_drop,
4311 struct btrfs_delayed_extent_op *extent_op)
4313 struct btrfs_key key;
4314 struct btrfs_path *path;
4315 struct btrfs_fs_info *info = root->fs_info;
4316 struct btrfs_root *extent_root = info->extent_root;
4317 struct extent_buffer *leaf;
4318 struct btrfs_extent_item *ei;
4319 struct btrfs_extent_inline_ref *iref;
4322 int extent_slot = 0;
4323 int found_extent = 0;
4328 path = btrfs_alloc_path();
4333 path->leave_spinning = 1;
4335 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4336 BUG_ON(!is_data && refs_to_drop != 1);
4338 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4339 bytenr, num_bytes, parent,
4340 root_objectid, owner_objectid,
4343 extent_slot = path->slots[0];
4344 while (extent_slot >= 0) {
4345 btrfs_item_key_to_cpu(path->nodes[0], &key,
4347 if (key.objectid != bytenr)
4349 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4350 key.offset == num_bytes) {
4354 if (path->slots[0] - extent_slot > 5)
4358 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4359 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4360 if (found_extent && item_size < sizeof(*ei))
4363 if (!found_extent) {
4365 ret = remove_extent_backref(trans, extent_root, path,
4369 btrfs_release_path(extent_root, path);
4370 path->leave_spinning = 1;
4372 key.objectid = bytenr;
4373 key.type = BTRFS_EXTENT_ITEM_KEY;
4374 key.offset = num_bytes;
4376 ret = btrfs_search_slot(trans, extent_root,
4379 printk(KERN_ERR "umm, got %d back from search"
4380 ", was looking for %llu\n", ret,
4381 (unsigned long long)bytenr);
4382 btrfs_print_leaf(extent_root, path->nodes[0]);
4385 extent_slot = path->slots[0];
4388 btrfs_print_leaf(extent_root, path->nodes[0]);
4390 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4391 "parent %llu root %llu owner %llu offset %llu\n",
4392 (unsigned long long)bytenr,
4393 (unsigned long long)parent,
4394 (unsigned long long)root_objectid,
4395 (unsigned long long)owner_objectid,
4396 (unsigned long long)owner_offset);
4399 leaf = path->nodes[0];
4400 item_size = btrfs_item_size_nr(leaf, extent_slot);
4401 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4402 if (item_size < sizeof(*ei)) {
4403 BUG_ON(found_extent || extent_slot != path->slots[0]);
4404 ret = convert_extent_item_v0(trans, extent_root, path,
4408 btrfs_release_path(extent_root, path);
4409 path->leave_spinning = 1;
4411 key.objectid = bytenr;
4412 key.type = BTRFS_EXTENT_ITEM_KEY;
4413 key.offset = num_bytes;
4415 ret = btrfs_search_slot(trans, extent_root, &key, path,
4418 printk(KERN_ERR "umm, got %d back from search"
4419 ", was looking for %llu\n", ret,
4420 (unsigned long long)bytenr);
4421 btrfs_print_leaf(extent_root, path->nodes[0]);
4424 extent_slot = path->slots[0];
4425 leaf = path->nodes[0];
4426 item_size = btrfs_item_size_nr(leaf, extent_slot);
4429 BUG_ON(item_size < sizeof(*ei));
4430 ei = btrfs_item_ptr(leaf, extent_slot,
4431 struct btrfs_extent_item);
4432 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4433 struct btrfs_tree_block_info *bi;
4434 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4435 bi = (struct btrfs_tree_block_info *)(ei + 1);
4436 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4439 refs = btrfs_extent_refs(leaf, ei);
4440 BUG_ON(refs < refs_to_drop);
4441 refs -= refs_to_drop;
4445 __run_delayed_extent_op(extent_op, leaf, ei);
4447 * In the case of inline back ref, reference count will
4448 * be updated by remove_extent_backref
4451 BUG_ON(!found_extent);
4453 btrfs_set_extent_refs(leaf, ei, refs);
4454 btrfs_mark_buffer_dirty(leaf);
4457 ret = remove_extent_backref(trans, extent_root, path,
4464 BUG_ON(is_data && refs_to_drop !=
4465 extent_data_ref_count(root, path, iref));
4467 BUG_ON(path->slots[0] != extent_slot);
4469 BUG_ON(path->slots[0] != extent_slot + 1);
4470 path->slots[0] = extent_slot;
4475 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4478 btrfs_release_path(extent_root, path);
4481 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4484 invalidate_mapping_pages(info->btree_inode->i_mapping,
4485 bytenr >> PAGE_CACHE_SHIFT,
4486 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4489 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4492 btrfs_free_path(path);
4497 * when we free a block, it is possible (and likely) that we free the last
4498 * delayed ref for that extent as well. This searches the delayed ref tree for
4499 * a given extent, and if there are no other delayed refs to be processed, it
4500 * removes it from the tree.
4502 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4503 struct btrfs_root *root, u64 bytenr)
4505 struct btrfs_delayed_ref_head *head;
4506 struct btrfs_delayed_ref_root *delayed_refs;
4507 struct btrfs_delayed_ref_node *ref;
4508 struct rb_node *node;
4511 delayed_refs = &trans->transaction->delayed_refs;
4512 spin_lock(&delayed_refs->lock);
4513 head = btrfs_find_delayed_ref_head(trans, bytenr);
4517 node = rb_prev(&head->node.rb_node);
4521 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4523 /* there are still entries for this ref, we can't drop it */
4524 if (ref->bytenr == bytenr)
4527 if (head->extent_op) {
4528 if (!head->must_insert_reserved)
4530 kfree(head->extent_op);
4531 head->extent_op = NULL;
4535 * waiting for the lock here would deadlock. If someone else has it
4536 * locked they are already in the process of dropping it anyway
4538 if (!mutex_trylock(&head->mutex))
4542 * at this point we have a head with no other entries. Go
4543 * ahead and process it.
4545 head->node.in_tree = 0;
4546 rb_erase(&head->node.rb_node, &delayed_refs->root);
4548 delayed_refs->num_entries--;
4551 * we don't take a ref on the node because we're removing it from the
4552 * tree, so we just steal the ref the tree was holding.
4554 delayed_refs->num_heads--;
4555 if (list_empty(&head->cluster))
4556 delayed_refs->num_heads_ready--;
4558 list_del_init(&head->cluster);
4559 spin_unlock(&delayed_refs->lock);
4561 BUG_ON(head->extent_op);
4562 if (head->must_insert_reserved)
4565 mutex_unlock(&head->mutex);
4566 btrfs_put_delayed_ref(&head->node);
4569 spin_unlock(&delayed_refs->lock);
4573 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4574 struct btrfs_root *root,
4575 struct extent_buffer *buf,
4576 u64 parent, int last_ref)
4578 struct btrfs_block_rsv *block_rsv;
4579 struct btrfs_block_group_cache *cache = NULL;
4582 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4583 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4584 parent, root->root_key.objectid,
4585 btrfs_header_level(buf),
4586 BTRFS_DROP_DELAYED_REF, NULL);
4593 block_rsv = get_block_rsv(trans, root);
4594 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4595 if (block_rsv->space_info != cache->space_info)
4598 if (btrfs_header_generation(buf) == trans->transid) {
4599 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4600 ret = check_ref_cleanup(trans, root, buf->start);
4605 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4606 pin_down_extent(root, cache, buf->start, buf->len, 1);
4610 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4612 btrfs_add_free_space(cache, buf->start, buf->len);
4613 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4614 if (ret == -EAGAIN) {
4615 /* block group became read-only */
4616 update_reserved_bytes(cache, buf->len, 0, 1);
4621 spin_lock(&block_rsv->lock);
4622 if (block_rsv->reserved < block_rsv->size) {
4623 block_rsv->reserved += buf->len;
4626 spin_unlock(&block_rsv->lock);
4629 spin_lock(&cache->space_info->lock);
4630 cache->space_info->bytes_reserved -= buf->len;
4631 spin_unlock(&cache->space_info->lock);
4636 if (block_rsv->durable && !cache->ro) {
4638 spin_lock(&cache->lock);
4640 cache->reserved_pinned += buf->len;
4643 spin_unlock(&cache->lock);
4646 spin_lock(&block_rsv->lock);
4647 block_rsv->freed[trans->transid & 0x1] += buf->len;
4648 spin_unlock(&block_rsv->lock);
4652 btrfs_put_block_group(cache);
4655 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4656 struct btrfs_root *root,
4657 u64 bytenr, u64 num_bytes, u64 parent,
4658 u64 root_objectid, u64 owner, u64 offset)
4663 * tree log blocks never actually go into the extent allocation
4664 * tree, just update pinning info and exit early.
4666 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4667 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4668 /* unlocks the pinned mutex */
4669 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4671 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4672 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4673 parent, root_objectid, (int)owner,
4674 BTRFS_DROP_DELAYED_REF, NULL);
4677 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4678 parent, root_objectid, owner,
4679 offset, BTRFS_DROP_DELAYED_REF, NULL);
4685 static u64 stripe_align(struct btrfs_root *root, u64 val)
4687 u64 mask = ((u64)root->stripesize - 1);
4688 u64 ret = (val + mask) & ~mask;
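/*
 * e.g. (illustrative, assuming a 64K stripesize): stripe_align(root, 100000)
 * returns 131072, the next 64K boundary at or above the input.
 */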
4693 * when we wait for progress in the block group caching, it's because
4694 * our allocation attempt failed at least once. So, we must sleep
4695 * and let some progress happen before we try again.
4697 * This function will sleep at least once waiting for new free space to
4698 * show up, and then it will check the block group free space numbers
4699 * for our min num_bytes. Another option is to have it go ahead
4700 * and look in the rbtree for a free extent of a given size, but this
4701 * is a good start.
4704 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4707 struct btrfs_caching_control *caching_ctl;
4710 caching_ctl = get_caching_control(cache);
4714 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4715 (cache->free_space >= num_bytes));
4717 put_caching_control(caching_ctl);
4722 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4724 struct btrfs_caching_control *caching_ctl;
4727 caching_ctl = get_caching_control(cache);
4731 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4733 put_caching_control(caching_ctl);
4737 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4740 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4742 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4744 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4746 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4753 enum btrfs_loop_type {
4754 LOOP_FIND_IDEAL = 0,
4755 LOOP_CACHING_NOWAIT = 1,
4756 LOOP_CACHING_WAIT = 2,
4757 LOOP_ALLOC_CHUNK = 3,
4758 LOOP_NO_EMPTY_SIZE = 4,
4762 * walks the btree of allocated extents and finds a hole of a given size.
4763 * The key ins is changed to record the hole:
4764 * ins->objectid == block start
4765 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4766 * ins->offset == number of bytes
4767 * Any available blocks before search_start are skipped.
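 *
 * Illustrative read-out (a sketch, not from the original source): a caller
 * seeing ret == 0 picks up the allocation as
 *
 *	start = ins->objectid;
 *	num_bytes = ins->offset;
 *
 * and records it with the alloc_reserved_* helpers declared at the top of
 * this file.
 */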
4769 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4770 struct btrfs_root *orig_root,
4771 u64 num_bytes, u64 empty_size,
4772 u64 search_start, u64 search_end,
4773 u64 hint_byte, struct btrfs_key *ins,
4777 struct btrfs_root *root = orig_root->fs_info->extent_root;
4778 struct btrfs_free_cluster *last_ptr = NULL;
4779 struct btrfs_block_group_cache *block_group = NULL;
4780 int empty_cluster = 2 * 1024 * 1024;
4781 int allowed_chunk_alloc = 0;
4782 int done_chunk_alloc = 0;
4783 struct btrfs_space_info *space_info;
4784 int last_ptr_loop = 0;
4787 bool found_uncached_bg = false;
4788 bool failed_cluster_refill = false;
4789 bool failed_alloc = false;
4790 u64 ideal_cache_percent = 0;
4791 u64 ideal_cache_offset = 0;
4793 WARN_ON(num_bytes < root->sectorsize);
4794 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4798 space_info = __find_space_info(root->fs_info, data);
4800 printk(KERN_ERR "No space info for %llu\n", (unsigned long long)data);
4804 if (orig_root->ref_cows || empty_size)
4805 allowed_chunk_alloc = 1;
4807 if (data & BTRFS_BLOCK_GROUP_METADATA) {
4808 last_ptr = &root->fs_info->meta_alloc_cluster;
4809 if (!btrfs_test_opt(root, SSD))
4810 empty_cluster = 64 * 1024;
4813 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4814 last_ptr = &root->fs_info->data_alloc_cluster;
4818 spin_lock(&last_ptr->lock);
4819 if (last_ptr->block_group)
4820 hint_byte = last_ptr->window_start;
4821 spin_unlock(&last_ptr->lock);
4824 search_start = max(search_start, first_logical_byte(root, 0));
4825 search_start = max(search_start, hint_byte);
4830 if (search_start == hint_byte) {
4832 block_group = btrfs_lookup_block_group(root->fs_info,
4835 * we don't want to use the block group if it doesn't match our
4836 * allocation bits, or if it's not cached.
4838 * However, if we are re-searching with an ideal block group
4839 * picked out, then we don't care that the block group is cached.
4841 if (block_group && block_group_bits(block_group, data) &&
4842 (block_group->cached != BTRFS_CACHE_NO ||
4843 search_start == ideal_cache_offset)) {
4844 down_read(&space_info->groups_sem);
4845 if (list_empty(&block_group->list) ||
4848 * someone is removing this block group,
4849 * we can't jump into the have_block_group
4850 * target because our list pointers are not
4853 btrfs_put_block_group(block_group);
4854 up_read(&space_info->groups_sem);
4856 index = get_block_group_index(block_group);
4857 goto have_block_group;
4859 } else if (block_group) {
4860 btrfs_put_block_group(block_group);
4864 down_read(&space_info->groups_sem);
4865 list_for_each_entry(block_group, &space_info->block_groups[index],
4870 btrfs_get_block_group(block_group);
4871 search_start = block_group->key.objectid;
4874 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4877 ret = cache_block_group(block_group, trans, 1);
4878 if (block_group->cached == BTRFS_CACHE_FINISHED)
4879 goto have_block_group;
4881 free_percent = btrfs_block_group_used(&block_group->item);
4882 free_percent *= 100;
4883 free_percent = div64_u64(free_percent,
4884 block_group->key.offset);
4885 free_percent = 100 - free_percent;
4886 if (free_percent > ideal_cache_percent &&
4887 likely(!block_group->ro)) {
4888 ideal_cache_offset = block_group->key.objectid;
4889 ideal_cache_percent = free_percent;
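/*
 * Worked example of the math above (editorial): a 1GiB block group
 * with 256MiB used gives
 *
 *	free_percent = 100 - (256MiB * 100) / 1GiB = 100 - 25 = 75
 *
 * so free_percent is the percentage of the group that is free, and
 * the best candidate seen so far is remembered in ideal_cache_offset
 * and ideal_cache_percent.
 */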
4893 * We only want to start kthread caching if we are at
4894 * the point where we will wait for caching to make
4895 * progress, or if our ideal search is over and we've
4896 * found somebody to start caching.
4898 if (loop > LOOP_CACHING_NOWAIT ||
4899 (loop > LOOP_FIND_IDEAL &&
4900 atomic_read(&space_info->caching_threads) < 2)) {
4901 ret = cache_block_group(block_group, trans, 0);
4904 found_uncached_bg = true;
4907 * If loop is set for cached only, try the next block
4910 if (loop == LOOP_FIND_IDEAL)
4914 cached = block_group_cache_done(block_group);
4915 if (unlikely(!cached))
4916 found_uncached_bg = true;
4918 if (unlikely(block_group->ro))
4922 * Ok we want to try and use the cluster allocator, so let's look
4923 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
4924 * have tried the cluster allocator plenty of times at this
4925 * point and not have found anything, so we are likely way too
4926 * fragmented for the clustering stuff to find anything, so let's
4927 * just skip it and let the allocator find whatever block it can
4930 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4932 * the refill lock keeps out other
4933 * people trying to start a new cluster
4935 spin_lock(&last_ptr->refill_lock);
4936 if (last_ptr->block_group &&
4937 (last_ptr->block_group->ro ||
4938 !block_group_bits(last_ptr->block_group, data))) {
4940 goto refill_cluster;
4943 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4944 num_bytes, search_start);
4946 /* we have a block, we're done */
4947 spin_unlock(&last_ptr->refill_lock);
4951 spin_lock(&last_ptr->lock);
4953 * whoops, this cluster doesn't actually point to
4954 * this block group. Get a ref on the block
4955 * group it does point to and try again
4957 if (!last_ptr_loop && last_ptr->block_group &&
4958 last_ptr->block_group != block_group) {
4960 btrfs_put_block_group(block_group);
4961 block_group = last_ptr->block_group;
4962 btrfs_get_block_group(block_group);
4963 spin_unlock(&last_ptr->lock);
4964 spin_unlock(&last_ptr->refill_lock);
4967 search_start = block_group->key.objectid;
4969 * we know this block group is properly
4970 * in the list because
4971 * btrfs_remove_block_group drops the
4972 * cluster before it removes the block
4973 * group from the list
4975 goto have_block_group;
4977 spin_unlock(&last_ptr->lock);
4980 * this cluster didn't work out, free it and
4983 btrfs_return_cluster_to_free_space(NULL, last_ptr);
4987 /* allocate a cluster in this block group */
4988 ret = btrfs_find_space_cluster(trans, root,
4989 block_group, last_ptr,
4991 empty_cluster + empty_size);
4994 * now pull our allocation out of this
4997 offset = btrfs_alloc_from_cluster(block_group,
4998 last_ptr, num_bytes,
5001 /* we found one, proceed */
5002 spin_unlock(&last_ptr->refill_lock);
5005 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5006 && !failed_cluster_refill) {
5007 spin_unlock(&last_ptr->refill_lock);
5009 failed_cluster_refill = true;
5010 wait_block_group_cache_progress(block_group,
5011 num_bytes + empty_cluster + empty_size);
5012 goto have_block_group;
5016 * at this point we either didn't find a cluster
5017 * or we weren't able to allocate a block from our
5018 * cluster. Free the cluster we've been trying
5019 * to use, and go to the next block group
5021 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5022 spin_unlock(&last_ptr->refill_lock);
5026 offset = btrfs_find_space_for_alloc(block_group, search_start,
5027 num_bytes, empty_size);
5029 * If we didn't find a chunk, and we haven't failed on this
5030 * block group before, and this block group is in the middle of
5031 * caching and we are ok with waiting, then go ahead and wait
5032 * for progress to be made, and set failed_alloc to true.
5034 * If failed_alloc is true then we've already waited on this
5035 * block group once and should move on to the next block group.
5037 if (!offset && !failed_alloc && !cached &&
5038 loop > LOOP_CACHING_NOWAIT) {
5039 wait_block_group_cache_progress(block_group,
5040 num_bytes + empty_size);
5041 failed_alloc = true;
5042 goto have_block_group;
5043 } else if (!offset) {
5047 search_start = stripe_align(root, offset);
5048 /* move on to the next group */
5049 if (search_start + num_bytes >= search_end) {
5050 btrfs_add_free_space(block_group, offset, num_bytes);
5054 /* move on to the next group */
5055 if (search_start + num_bytes >
5056 block_group->key.objectid + block_group->key.offset) {
5057 btrfs_add_free_space(block_group, offset, num_bytes);
5061 ins->objectid = search_start;
5062 ins->offset = num_bytes;
5064 if (offset < search_start)
5065 btrfs_add_free_space(block_group, offset,
5066 search_start - offset);
5067 BUG_ON(offset > search_start);
5069 ret = update_reserved_bytes(block_group, num_bytes, 1,
5070 (data & BTRFS_BLOCK_GROUP_DATA));
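/*
 * Editorial note, an assumption from reading update_reserved_bytes():
 * -EAGAIN is taken to mean the block group went read-only after we
 * picked it, so the bytes go back to the free space cache below and
 * the search moves on.
 */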
5071 if (ret == -EAGAIN) {
5072 btrfs_add_free_space(block_group, offset, num_bytes);
5076 /* we are all good, let's return */
5077 ins->objectid = search_start;
5078 ins->offset = num_bytes;
5080 if (offset < search_start)
5081 btrfs_add_free_space(block_group, offset,
5082 search_start - offset);
5083 BUG_ON(offset > search_start);
5086 failed_cluster_refill = false;
5087 failed_alloc = false;
5088 BUG_ON(index != get_block_group_index(block_group));
5089 btrfs_put_block_group(block_group);
5091 up_read(&space_info->groups_sem);
5093 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5096 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5097 * for them to make caching progress. Also
5098 * determine the best possible bg to cache
5099 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5100 * caching kthreads as we move along
5101 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5102 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5103 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5106 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5107 (found_uncached_bg || empty_size || empty_cluster ||
5108 allowed_chunk_alloc)) {
5110 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5111 found_uncached_bg = false;
5113 if (!ideal_cache_percent &&
5114 atomic_read(&space_info->caching_threads))
5118 * One of the following two things has happened so far
5120 * 1) We found an ideal block group for caching that
5121 * is mostly full and will cache quickly, so we might
5122 * as well wait for it.
5124 * 2) We searched for cached only and we didn't find
5125 * anything, and we didn't start any caching kthreads
5126 * either, so chances are we will loop through and
5127 * start a couple caching kthreads, and then come back
5128 * around and just wait for them. This will be slower
5129 * because we will have 2 caching kthreads reading at
5130 * the same time when we could have just started one
5131 * and waited for it to get far enough to give us an
5132 * allocation, so go ahead and go to the wait caching
5135 loop = LOOP_CACHING_WAIT;
5136 search_start = ideal_cache_offset;
5137 ideal_cache_percent = 0;
5139 } else if (loop == LOOP_FIND_IDEAL) {
5141 * Didn't find an uncached bg, wait on anything we find
5144 loop = LOOP_CACHING_WAIT;
5148 if (loop < LOOP_CACHING_WAIT) {
5153 if (loop == LOOP_ALLOC_CHUNK) {
5158 if (allowed_chunk_alloc) {
5159 ret = do_chunk_alloc(trans, root, num_bytes +
5160 2 * 1024 * 1024, data, 1);
5161 allowed_chunk_alloc = 0;
5162 done_chunk_alloc = 1;
5163 } else if (!done_chunk_alloc) {
5164 space_info->force_alloc = 1;
5167 if (loop < LOOP_NO_EMPTY_SIZE) {
5172 } else if (!ins->objectid) {
5176 /* we found what we needed */
5177 if (ins->objectid) {
5178 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5179 trans->block_group = block_group->key.objectid;
5181 btrfs_put_block_group(block_group);
5188 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5189 int dump_block_groups)
5191 struct btrfs_block_group_cache *cache;
5194 spin_lock(&info->lock);
5195 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5196 (unsigned long long)(info->total_bytes - info->bytes_used -
5197 info->bytes_pinned - info->bytes_reserved -
5198 info->bytes_readonly),
5199 (info->full) ? "" : "not ");
5200 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5201 "reserved=%llu, may_use=%llu, readonly=%llu\n",
5202 (unsigned long long)info->total_bytes,
5203 (unsigned long long)info->bytes_used,
5204 (unsigned long long)info->bytes_pinned,
5205 (unsigned long long)info->bytes_reserved,
5206 (unsigned long long)info->bytes_may_use,
5207 (unsigned long long)info->bytes_readonly);
5208 spin_unlock(&info->lock);
5210 if (!dump_block_groups)
5213 down_read(&info->groups_sem);
5215 list_for_each_entry(cache, &info->block_groups[index], list) {
5216 spin_lock(&cache->lock);
5217 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5218 "%llu pinned %llu reserved\n",
5219 (unsigned long long)cache->key.objectid,
5220 (unsigned long long)cache->key.offset,
5221 (unsigned long long)btrfs_block_group_used(&cache->item),
5222 (unsigned long long)cache->pinned,
5223 (unsigned long long)cache->reserved);
5224 btrfs_dump_free_space(cache, bytes);
5225 spin_unlock(&cache->lock);
5227 if (++index < BTRFS_NR_RAID_TYPES)
5229 up_read(&info->groups_sem);
5232 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5233 struct btrfs_root *root,
5234 u64 num_bytes, u64 min_alloc_size,
5235 u64 empty_size, u64 hint_byte,
5236 u64 search_end, struct btrfs_key *ins,
5240 u64 search_start = 0;
5242 data = btrfs_get_alloc_profile(root, data);
5245 * the only place that sets empty_size is btrfs_realloc_node, which
5246 * is not called recursively on allocations
5248 if (empty_size || root->ref_cows)
5249 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5250 num_bytes + 2 * 1024 * 1024, data, 0);
5252 WARN_ON(num_bytes < root->sectorsize);
5253 ret = find_free_extent(trans, root, num_bytes, empty_size,
5254 search_start, search_end, hint_byte,
5257 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5258 num_bytes = num_bytes >> 1;
5259 num_bytes = num_bytes & ~(root->sectorsize - 1);
5260 num_bytes = max(num_bytes, min_alloc_size);
5261 do_chunk_alloc(trans, root->fs_info->extent_root,
5262 num_bytes, data, 1);
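/*
 * Illustrative retry arithmetic (editorial): with a 4K sectorsize and
 * a 64K min_alloc_size, a failed 1MiB request shrinks as
 *
 *	1MiB -> 512K -> 256K -> 128K -> 64K
 *
 * each retry halving the request, rounding down to a sectorsize
 * multiple and clamping at min_alloc_size, then forcing a chunk
 * allocation before searching again.
 */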
5265 if (ret == -ENOSPC) {
5266 struct btrfs_space_info *sinfo;
5268 sinfo = __find_space_info(root->fs_info, data);
5269 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5270 "wanted %llu\n", (unsigned long long)data,
5271 (unsigned long long)num_bytes);
5272 dump_space_info(sinfo, num_bytes, 1);
5278 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5280 struct btrfs_block_group_cache *cache;
5283 cache = btrfs_lookup_block_group(root->fs_info, start);
5285 printk(KERN_ERR "Unable to find block group for %llu\n",
5286 (unsigned long long)start);
5290 ret = btrfs_discard_extent(root, start, len);
5292 btrfs_add_free_space(cache, start, len);
5293 update_reserved_bytes(cache, len, 0, 1);
5294 btrfs_put_block_group(cache);
5299 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5300 struct btrfs_root *root,
5301 u64 parent, u64 root_objectid,
5302 u64 flags, u64 owner, u64 offset,
5303 struct btrfs_key *ins, int ref_mod)
5306 struct btrfs_fs_info *fs_info = root->fs_info;
5307 struct btrfs_extent_item *extent_item;
5308 struct btrfs_extent_inline_ref *iref;
5309 struct btrfs_path *path;
5310 struct extent_buffer *leaf;
5315 type = BTRFS_SHARED_DATA_REF_KEY;
5317 type = BTRFS_EXTENT_DATA_REF_KEY;
5319 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
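/*
 * Editorial sketch of the item built below: refs, generation and
 * flags live in the btrfs_extent_item, immediately followed by one
 * inline back reference:
 *
 *	[ btrfs_extent_item ][ inline ref type + ref data ]
 *
 * for SHARED_DATA_REF a btrfs_shared_data_ref follows the inline ref;
 * for EXTENT_DATA_REF the btrfs_extent_data_ref overlays the inline
 * ref's offset field.
 */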
5321 path = btrfs_alloc_path();
5324 path->leave_spinning = 1;
5325 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5329 leaf = path->nodes[0];
5330 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5331 struct btrfs_extent_item);
5332 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5333 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5334 btrfs_set_extent_flags(leaf, extent_item,
5335 flags | BTRFS_EXTENT_FLAG_DATA);
5337 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5338 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5340 struct btrfs_shared_data_ref *ref;
5341 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5342 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5343 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5345 struct btrfs_extent_data_ref *ref;
5346 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5347 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5348 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5349 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5350 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5353 btrfs_mark_buffer_dirty(path->nodes[0]);
5354 btrfs_free_path(path);
5356 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5358 printk(KERN_ERR "btrfs update block group failed for %llu "
5359 "%llu\n", (unsigned long long)ins->objectid,
5360 (unsigned long long)ins->offset);
5366 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5367 struct btrfs_root *root,
5368 u64 parent, u64 root_objectid,
5369 u64 flags, struct btrfs_disk_key *key,
5370 int level, struct btrfs_key *ins)
5373 struct btrfs_fs_info *fs_info = root->fs_info;
5374 struct btrfs_extent_item *extent_item;
5375 struct btrfs_tree_block_info *block_info;
5376 struct btrfs_extent_inline_ref *iref;
5377 struct btrfs_path *path;
5378 struct extent_buffer *leaf;
5379 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
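/*
 * Editorial sketch of the tree block item built below:
 *
 *	[ btrfs_extent_item ][ btrfs_tree_block_info ][ inline ref ]
 *
 * the inline ref is SHARED_BLOCK_REF_KEY carrying the parent bytenr
 * for full-backref extents, and TREE_BLOCK_REF_KEY carrying the root
 * objectid otherwise.
 */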
5381 path = btrfs_alloc_path();
5384 path->leave_spinning = 1;
5385 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5389 leaf = path->nodes[0];
5390 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5391 struct btrfs_extent_item);
5392 btrfs_set_extent_refs(leaf, extent_item, 1);
5393 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5394 btrfs_set_extent_flags(leaf, extent_item,
5395 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5396 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5398 btrfs_set_tree_block_key(leaf, block_info, key);
5399 btrfs_set_tree_block_level(leaf, block_info, level);
5401 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5403 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5404 btrfs_set_extent_inline_ref_type(leaf, iref,
5405 BTRFS_SHARED_BLOCK_REF_KEY);
5406 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5408 btrfs_set_extent_inline_ref_type(leaf, iref,
5409 BTRFS_TREE_BLOCK_REF_KEY);
5410 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5413 btrfs_mark_buffer_dirty(leaf);
5414 btrfs_free_path(path);
5416 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5418 printk(KERN_ERR "btrfs update block group failed for %llu "
5419 "%llu\n", (unsigned long long)ins->objectid,
5420 (unsigned long long)ins->offset);
5426 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5427 struct btrfs_root *root,
5428 u64 root_objectid, u64 owner,
5429 u64 offset, struct btrfs_key *ins)
5433 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5435 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5436 0, root_objectid, owner, offset,
5437 BTRFS_ADD_DELAYED_EXTENT, NULL);
5442 * this is used by the tree logging recovery code. It records that
5443 * an extent has been allocated and makes sure to clear the free
5444 * space cache bits as well
5446 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5447 struct btrfs_root *root,
5448 u64 root_objectid, u64 owner, u64 offset,
5449 struct btrfs_key *ins)
5452 struct btrfs_block_group_cache *block_group;
5453 struct btrfs_caching_control *caching_ctl;
5454 u64 start = ins->objectid;
5455 u64 num_bytes = ins->offset;
5457 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5458 cache_block_group(block_group, trans, 0);
5459 caching_ctl = get_caching_control(block_group);
5462 BUG_ON(!block_group_cache_done(block_group));
5463 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5466 mutex_lock(&caching_ctl->mutex);
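/*
 * Editorial summary of the cases below, relative to how far the
 * caching thread has scanned (caching_ctl->progress):
 *
 * 1) extent entirely above progress: exclude it, the cacher will
 *    skip it when it gets there;
 * 2) extent entirely below progress: already in the free space
 *    cache, remove it;
 * 3) extent straddles progress: remove the scanned front part and
 *    exclude the unscanned tail.
 */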
5468 if (start >= caching_ctl->progress) {
5469 ret = add_excluded_extent(root, start, num_bytes);
5471 } else if (start + num_bytes <= caching_ctl->progress) {
5472 ret = btrfs_remove_free_space(block_group,
5476 num_bytes = caching_ctl->progress - start;
5477 ret = btrfs_remove_free_space(block_group,
5481 start = caching_ctl->progress;
5482 num_bytes = ins->objectid + ins->offset -
5483 caching_ctl->progress;
5484 ret = add_excluded_extent(root, start, num_bytes);
5488 mutex_unlock(&caching_ctl->mutex);
5489 put_caching_control(caching_ctl);
5492 ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5494 btrfs_put_block_group(block_group);
5495 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5496 0, owner, offset, ins, 1);
5500 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5501 struct btrfs_root *root,
5502 u64 bytenr, u32 blocksize,
5505 struct extent_buffer *buf;
5507 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5509 return ERR_PTR(-ENOMEM);
5510 btrfs_set_header_generation(buf, trans->transid);
5511 btrfs_set_buffer_lockdep_class(buf, level);
5512 btrfs_tree_lock(buf);
5513 clean_tree_block(trans, root, buf);
5515 btrfs_set_lock_blocking(buf);
5516 btrfs_set_buffer_uptodate(buf);
5518 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5520 * we allow two log transactions at a time, use different
5521 * EXTENT bits to differentiate dirty pages.
5523 if (root->log_transid % 2 == 0)
5524 set_extent_dirty(&root->dirty_log_pages, buf->start,
5525 buf->start + buf->len - 1, GFP_NOFS);
5527 set_extent_new(&root->dirty_log_pages, buf->start,
5528 buf->start + buf->len - 1, GFP_NOFS);
5530 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5531 buf->start + buf->len - 1, GFP_NOFS);
5533 trans->blocks_used++;
5534 /* this returns a buffer locked for blocking */
5538 static struct btrfs_block_rsv *
5539 use_block_rsv(struct btrfs_trans_handle *trans,
5540 struct btrfs_root *root, u32 blocksize)
5542 struct btrfs_block_rsv *block_rsv;
5545 block_rsv = get_block_rsv(trans, root);
5547 if (block_rsv->size == 0) {
5548 ret = reserve_metadata_bytes(block_rsv, blocksize);
5550 return ERR_PTR(ret);
5554 ret = block_rsv_use_bytes(block_rsv, blocksize);
5559 printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
5560 (unsigned long long)block_rsv->size, (unsigned long long)block_rsv->reserved,
5561 (unsigned long long)block_rsv->freed[0], (unsigned long long)block_rsv->freed[1]);
5563 return ERR_PTR(-ENOSPC);
5566 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5568 block_rsv_add_bytes(block_rsv, blocksize, 0);
5569 block_rsv_release_bytes(block_rsv, NULL, 0);
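/*
 * Minimal usage sketch (editorial): a successful use_block_rsv() is
 * paired with either a consumed allocation or an unuse_block_rsv() on
 * the error path, as btrfs_alloc_free_block() below does:
 *
 *	block_rsv = use_block_rsv(trans, root, blocksize);
 *	if (IS_ERR(block_rsv))
 *		return ERR_CAST(block_rsv);
 *	ret = btrfs_reserve_extent(...);
 *	if (ret) {
 *		unuse_block_rsv(block_rsv, blocksize);
 *		return ERR_PTR(ret);
 *	}
 */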
5573 * finds a free extent and does all the dirty work required for allocation.
5574 * returns the key for the extent through ins, and a tree buffer for
5575 * the first block of the extent through buf.
5577 * returns the tree buffer or an ERR_PTR on failure.
5579 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5580 struct btrfs_root *root, u32 blocksize,
5581 u64 parent, u64 root_objectid,
5582 struct btrfs_disk_key *key, int level,
5583 u64 hint, u64 empty_size)
5585 struct btrfs_key ins;
5586 struct btrfs_block_rsv *block_rsv;
5587 struct extent_buffer *buf;
5592 block_rsv = use_block_rsv(trans, root, blocksize);
5593 if (IS_ERR(block_rsv))
5594 return ERR_CAST(block_rsv);
5596 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5597 empty_size, hint, (u64)-1, &ins, 0);
5599 unuse_block_rsv(block_rsv, blocksize);
5600 return ERR_PTR(ret);
5603 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5605 BUG_ON(IS_ERR(buf));
5607 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5609 parent = ins.objectid;
5610 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5614 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5615 struct btrfs_delayed_extent_op *extent_op;
5616 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5619 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5621 memset(&extent_op->key, 0, sizeof(extent_op->key));
5622 extent_op->flags_to_set = flags;
5623 extent_op->update_key = 1;
5624 extent_op->update_flags = 1;
5625 extent_op->is_data = 0;
5627 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5628 ins.offset, parent, root_objectid,
5629 level, BTRFS_ADD_DELAYED_EXTENT,
5636 struct walk_control {
5637 u64 refs[BTRFS_MAX_LEVEL];
5638 u64 flags[BTRFS_MAX_LEVEL];
5639 struct btrfs_key update_progress;
5649 #define DROP_REFERENCE 1
5650 #define UPDATE_BACKREF 2
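/*
 * Editorial note on the two stages: a walk starts in DROP_REFERENCE,
 * dropping one reference per block. When do_walk_down() meets a
 * shared block whose subtree still needs its backrefs converted, it
 * switches wc->stage to UPDATE_BACKREF for that subtree, and
 * walk_up_proc() flips it back to DROP_REFERENCE once the shared
 * level has been processed.
 */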
5652 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5653 struct btrfs_root *root,
5654 struct walk_control *wc,
5655 struct btrfs_path *path)
5664 struct btrfs_key key;
5665 struct extent_buffer *eb;
5670 if (path->slots[wc->level] < wc->reada_slot) {
5671 wc->reada_count = wc->reada_count * 2 / 3;
5672 wc->reada_count = max(wc->reada_count, 2);
5674 wc->reada_count = wc->reada_count * 3 / 2;
5675 wc->reada_count = min_t(int, wc->reada_count,
5676 BTRFS_NODEPTRS_PER_BLOCK(root));
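/*
 * Editorial note on the adaptive window above: restarting behind the
 * last readahead slot shrinks reada_count by a third (never below 2),
 * forward progress grows it by half, capped at a full node of
 * pointers. E.g. from 32: 32 * 2 / 3 = 21 backward, 32 * 3 / 2 = 48
 * forward.
 */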
5679 eb = path->nodes[wc->level];
5680 nritems = btrfs_header_nritems(eb);
5681 blocksize = btrfs_level_size(root, wc->level - 1);
5683 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5684 if (nread >= wc->reada_count)
5688 bytenr = btrfs_node_blockptr(eb, slot);
5689 generation = btrfs_node_ptr_generation(eb, slot);
5691 if (slot == path->slots[wc->level])
5694 if (wc->stage == UPDATE_BACKREF &&
5695 generation <= root->root_key.offset)
5698 /* We don't lock the tree block, it's OK to be racy here */
5699 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5704 if (wc->stage == DROP_REFERENCE) {
5708 if (wc->level == 1 &&
5709 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5711 if (!wc->update_ref ||
5712 generation <= root->root_key.offset)
5714 btrfs_node_key_to_cpu(eb, &key, slot);
5715 ret = btrfs_comp_cpu_keys(&key,
5716 &wc->update_progress);
5720 if (wc->level == 1 &&
5721 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5725 ret = readahead_tree_block(root, bytenr, blocksize,
5729 last = bytenr + blocksize;
5732 wc->reada_slot = slot;
5736 * helper to process a tree block while walking down the tree.
5738 * when wc->stage == UPDATE_BACKREF, this function updates
5739 * back refs for pointers in the block.
5741 * NOTE: return value 1 means we should stop walking down.
5743 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5744 struct btrfs_root *root,
5745 struct btrfs_path *path,
5746 struct walk_control *wc, int lookup_info)
5748 int level = wc->level;
5749 struct extent_buffer *eb = path->nodes[level];
5750 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5753 if (wc->stage == UPDATE_BACKREF &&
5754 btrfs_header_owner(eb) != root->root_key.objectid)
5758 * when the reference count of a tree block is 1, it won't increase
5759 * again. once the full backref flag is set, we never clear it.
5762 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5763 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5764 BUG_ON(!path->locks[level]);
5765 ret = btrfs_lookup_extent_info(trans, root,
5770 BUG_ON(wc->refs[level] == 0);
5773 if (wc->stage == DROP_REFERENCE) {
5774 if (wc->refs[level] > 1)
5777 if (path->locks[level] && !wc->keep_locks) {
5778 btrfs_tree_unlock(eb);
5779 path->locks[level] = 0;
5784 /* wc->stage == UPDATE_BACKREF */
5785 if (!(wc->flags[level] & flag)) {
5786 BUG_ON(!path->locks[level]);
5787 ret = btrfs_inc_ref(trans, root, eb, 1);
5789 ret = btrfs_dec_ref(trans, root, eb, 0);
5791 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5794 wc->flags[level] |= flag;
5798 * the block is shared by multiple trees, so it's not good to
5799 * keep the tree lock
5801 if (path->locks[level] && level > 0) {
5802 btrfs_tree_unlock(eb);
5803 path->locks[level] = 0;
5809 * helper to process a tree block pointer.
5811 * when wc->stage == DROP_REFERENCE, this function checks the
5812 * reference count of the block pointed to. if the block
5813 * is shared and we need to update back refs for the subtree
5814 * rooted at the block, this function changes wc->stage to
5815 * UPDATE_BACKREF. if the block is shared and there is no
5816 * need to update back refs, this function drops the reference
5819 * NOTE: return value 1 means we should stop walking down.
5821 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5822 struct btrfs_root *root,
5823 struct btrfs_path *path,
5824 struct walk_control *wc, int *lookup_info)
5830 struct btrfs_key key;
5831 struct extent_buffer *next;
5832 int level = wc->level;
5836 generation = btrfs_node_ptr_generation(path->nodes[level],
5837 path->slots[level]);
5839 * if the lower level block was created before the snapshot
5840 * was created, we know there is no need to update back refs
5843 if (wc->stage == UPDATE_BACKREF &&
5844 generation <= root->root_key.offset) {
5849 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5850 blocksize = btrfs_level_size(root, level - 1);
5852 next = btrfs_find_tree_block(root, bytenr, blocksize);
5854 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5859 btrfs_tree_lock(next);
5860 btrfs_set_lock_blocking(next);
5862 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5863 &wc->refs[level - 1],
5864 &wc->flags[level - 1]);
5866 BUG_ON(wc->refs[level - 1] == 0);
5869 if (wc->stage == DROP_REFERENCE) {
5870 if (wc->refs[level - 1] > 1) {
5872 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5875 if (!wc->update_ref ||
5876 generation <= root->root_key.offset)
5879 btrfs_node_key_to_cpu(path->nodes[level], &key,
5880 path->slots[level]);
5881 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5885 wc->stage = UPDATE_BACKREF;
5886 wc->shared_level = level - 1;
5890 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5894 if (!btrfs_buffer_uptodate(next, generation)) {
5895 btrfs_tree_unlock(next);
5896 free_extent_buffer(next);
5902 if (reada && level == 1)
5903 reada_walk_down(trans, root, wc, path);
5904 next = read_tree_block(root, bytenr, blocksize, generation);
5905 btrfs_tree_lock(next);
5906 btrfs_set_lock_blocking(next);
5910 BUG_ON(level != btrfs_header_level(next));
5911 path->nodes[level] = next;
5912 path->slots[level] = 0;
5913 path->locks[level] = 1;
5919 wc->refs[level - 1] = 0;
5920 wc->flags[level - 1] = 0;
5921 if (wc->stage == DROP_REFERENCE) {
5922 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5923 parent = path->nodes[level]->start;
5925 BUG_ON(root->root_key.objectid !=
5926 btrfs_header_owner(path->nodes[level]));
5930 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5931 root->root_key.objectid, level - 1, 0);
5934 btrfs_tree_unlock(next);
5935 free_extent_buffer(next);
5941 * helper to process a tree block while walking up the tree.
5943 * when wc->stage == DROP_REFERENCE, this function drops
5944 * reference count on the block.
5946 * when wc->stage == UPDATE_BACKREF, this function changes
5947 * wc->stage back to DROP_REFERENCE if we changed wc->stage
5948 * to UPDATE_BACKREF previously while processing the block.
5950 * NOTE: return value 1 means we should stop walking up.
5952 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5953 struct btrfs_root *root,
5954 struct btrfs_path *path,
5955 struct walk_control *wc)
5958 int level = wc->level;
5959 struct extent_buffer *eb = path->nodes[level];
5962 if (wc->stage == UPDATE_BACKREF) {
5963 BUG_ON(wc->shared_level < level);
5964 if (level < wc->shared_level)
5967 ret = find_next_key(path, level + 1, &wc->update_progress);
5971 wc->stage = DROP_REFERENCE;
5972 wc->shared_level = -1;
5973 path->slots[level] = 0;
5976 * check reference count again if the block isn't locked.
5977 * we should start walking down the tree again if reference
5980 if (!path->locks[level]) {
5982 btrfs_tree_lock(eb);
5983 btrfs_set_lock_blocking(eb);
5984 path->locks[level] = 1;
5986 ret = btrfs_lookup_extent_info(trans, root,
5991 BUG_ON(wc->refs[level] == 0);
5992 if (wc->refs[level] == 1) {
5993 btrfs_tree_unlock(eb);
5994 path->locks[level] = 0;
6000 /* wc->stage == DROP_REFERENCE */
6001 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6003 if (wc->refs[level] == 1) {
6005 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6006 ret = btrfs_dec_ref(trans, root, eb, 1);
6008 ret = btrfs_dec_ref(trans, root, eb, 0);
6011 /* make the block-locked assertion in clean_tree_block happy */
6012 if (!path->locks[level] &&
6013 btrfs_header_generation(eb) == trans->transid) {
6014 btrfs_tree_lock(eb);
6015 btrfs_set_lock_blocking(eb);
6016 path->locks[level] = 1;
6018 clean_tree_block(trans, root, eb);
6021 if (eb == root->node) {
6022 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6025 BUG_ON(root->root_key.objectid !=
6026 btrfs_header_owner(eb));
6028 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6029 parent = path->nodes[level + 1]->start;
6031 BUG_ON(root->root_key.objectid !=
6032 btrfs_header_owner(path->nodes[level + 1]));
6035 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6037 wc->refs[level] = 0;
6038 wc->flags[level] = 0;
6042 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6043 struct btrfs_root *root,
6044 struct btrfs_path *path,
6045 struct walk_control *wc)
6047 int level = wc->level;
6048 int lookup_info = 1;
6051 while (level >= 0) {
6052 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6059 if (path->slots[level] >=
6060 btrfs_header_nritems(path->nodes[level]))
6063 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6065 path->slots[level]++;
6074 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6075 struct btrfs_root *root,
6076 struct btrfs_path *path,
6077 struct walk_control *wc, int max_level)
6079 int level = wc->level;
6082 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6083 while (level < max_level && path->nodes[level]) {
6085 if (path->slots[level] + 1 <
6086 btrfs_header_nritems(path->nodes[level])) {
6087 path->slots[level]++;
6090 ret = walk_up_proc(trans, root, path, wc);
6094 if (path->locks[level]) {
6095 btrfs_tree_unlock(path->nodes[level]);
6096 path->locks[level] = 0;
6098 free_extent_buffer(path->nodes[level]);
6099 path->nodes[level] = NULL;
6107 * drop a subvolume tree.
6109 * this function traverses the tree freeing any blocks that are only
6110 * referenced by the tree.
6112 * when a shared tree block is found, this function decreases its
6113 * reference count by one. if update_ref is true, this function
6114 * also makes sure backrefs for the shared block and all lower level
6115 * blocks are properly updated.
6117 int btrfs_drop_snapshot(struct btrfs_root *root,
6118 struct btrfs_block_rsv *block_rsv, int update_ref)
6120 struct btrfs_path *path;
6121 struct btrfs_trans_handle *trans;
6122 struct btrfs_root *tree_root = root->fs_info->tree_root;
6123 struct btrfs_root_item *root_item = &root->root_item;
6124 struct walk_control *wc;
6125 struct btrfs_key key;
6130 path = btrfs_alloc_path();
6133 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6136 trans = btrfs_start_transaction(tree_root, 0);
6138 trans->block_rsv = block_rsv;
6140 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6141 level = btrfs_header_level(root->node);
6142 path->nodes[level] = btrfs_lock_root_node(root);
6143 btrfs_set_lock_blocking(path->nodes[level]);
6144 path->slots[level] = 0;
6145 path->locks[level] = 1;
6146 memset(&wc->update_progress, 0,
6147 sizeof(wc->update_progress));
6149 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6150 memcpy(&wc->update_progress, &key,
6151 sizeof(wc->update_progress));
6153 level = root_item->drop_level;
6155 path->lowest_level = level;
6156 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6157 path->lowest_level = 0;
6165 * unlock our path, this is safe because only this
6166 * function is allowed to delete this snapshot
6168 btrfs_unlock_up_safe(path, 0);
6170 level = btrfs_header_level(root->node);
6172 btrfs_tree_lock(path->nodes[level]);
6173 btrfs_set_lock_blocking(path->nodes[level]);
6175 ret = btrfs_lookup_extent_info(trans, root,
6176 path->nodes[level]->start,
6177 path->nodes[level]->len,
6181 BUG_ON(wc->refs[level] == 0);
6183 if (level == root_item->drop_level)
6186 btrfs_tree_unlock(path->nodes[level]);
6187 WARN_ON(wc->refs[level] != 1);
6193 wc->shared_level = -1;
6194 wc->stage = DROP_REFERENCE;
6195 wc->update_ref = update_ref;
6197 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6200 ret = walk_down_tree(trans, root, path, wc);
6206 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6213 BUG_ON(wc->stage != DROP_REFERENCE);
6217 if (wc->stage == DROP_REFERENCE) {
6219 btrfs_node_key(path->nodes[level],
6220 &root_item->drop_progress,
6221 path->slots[level]);
6222 root_item->drop_level = level;
6225 BUG_ON(wc->level == 0);
6226 if (btrfs_should_end_transaction(trans, tree_root)) {
6227 ret = btrfs_update_root(trans, tree_root,
6232 btrfs_end_transaction_throttle(trans, tree_root);
6233 trans = btrfs_start_transaction(tree_root, 0);
6235 trans->block_rsv = block_rsv;
6238 btrfs_release_path(root, path);
6241 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6244 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6245 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6249 ret = btrfs_del_orphan_item(trans, tree_root,
6250 root->root_key.objectid);
6255 if (root->in_radix) {
6256 btrfs_free_fs_root(tree_root->fs_info, root);
6258 free_extent_buffer(root->node);
6259 free_extent_buffer(root->commit_root);
6263 btrfs_end_transaction_throttle(trans, tree_root);
6265 btrfs_free_path(path);
6270 * drop subtree rooted at tree block 'node'.
6272 * NOTE: this function will unlock and release tree block 'node'
6274 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6275 struct btrfs_root *root,
6276 struct extent_buffer *node,
6277 struct extent_buffer *parent)
6279 struct btrfs_path *path;
6280 struct walk_control *wc;
6286 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6288 path = btrfs_alloc_path();
6291 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6294 btrfs_assert_tree_locked(parent);
6295 parent_level = btrfs_header_level(parent);
6296 extent_buffer_get(parent);
6297 path->nodes[parent_level] = parent;
6298 path->slots[parent_level] = btrfs_header_nritems(parent);
6300 btrfs_assert_tree_locked(node);
6301 level = btrfs_header_level(node);
6302 path->nodes[level] = node;
6303 path->slots[level] = 0;
6304 path->locks[level] = 1;
6306 wc->refs[parent_level] = 1;
6307 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6309 wc->shared_level = -1;
6310 wc->stage = DROP_REFERENCE;
6313 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6316 wret = walk_down_tree(trans, root, path, wc);
6322 wret = walk_up_tree(trans, root, path, wc, parent_level);
6330 btrfs_free_path(path);
6335 static unsigned long calc_ra(unsigned long start, unsigned long last,
6338 return min(last, start + nr - 1);
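/*
 * Illustrative example (editorial): calc_ra() clamps the readahead
 * window end to the last page of interest, e.g. start = 10, nr = 32,
 * last = 25 gives min(25, 10 + 32 - 1) = 25.
 */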
6341 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6346 unsigned long first_index;
6347 unsigned long last_index;
6350 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6351 struct file_ra_state *ra;
6352 struct btrfs_ordered_extent *ordered;
6353 unsigned int total_read = 0;
6354 unsigned int total_dirty = 0;
6357 ra = kzalloc(sizeof(*ra), GFP_NOFS);
6359 mutex_lock(&inode->i_mutex);
6360 first_index = start >> PAGE_CACHE_SHIFT;
6361 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6363 /* make sure the dirty trick played by the caller works */
6364 ret = invalidate_inode_pages2_range(inode->i_mapping,
6365 first_index, last_index);
6369 file_ra_state_init(ra, inode->i_mapping);
6371 for (i = first_index ; i <= last_index; i++) {
6372 if (total_read % ra->ra_pages == 0) {
6373 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6374 calc_ra(i, last_index, ra->ra_pages));
6378 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
6380 page = grab_cache_page(inode->i_mapping, i);
6385 if (!PageUptodate(page)) {
6386 btrfs_readpage(NULL, page);
6388 if (!PageUptodate(page)) {
6390 page_cache_release(page);
6395 wait_on_page_writeback(page);
6397 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6398 page_end = page_start + PAGE_CACHE_SIZE - 1;
6399 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6401 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6403 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6405 page_cache_release(page);
6406 btrfs_start_ordered_extent(inode, ordered, 1);
6407 btrfs_put_ordered_extent(ordered);
6410 set_page_extent_mapped(page);
6412 if (i == first_index)
6413 set_extent_bits(io_tree, page_start, page_end,
6414 EXTENT_BOUNDARY, GFP_NOFS);
6415 btrfs_set_extent_delalloc(inode, page_start, page_end);
6417 set_page_dirty(page);
6420 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6422 page_cache_release(page);
6427 mutex_unlock(&inode->i_mutex);
6428 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6432 static noinline int relocate_data_extent(struct inode *reloc_inode,
6433 struct btrfs_key *extent_key,
6436 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6437 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6438 struct extent_map *em;
6439 u64 start = extent_key->objectid - offset;
6440 u64 end = start + extent_key->offset - 1;
6442 em = alloc_extent_map(GFP_NOFS);
6443 BUG_ON(!em || IS_ERR(em));
6446 em->len = extent_key->offset;
6447 em->block_len = extent_key->offset;
6448 em->block_start = extent_key->objectid;
6449 em->bdev = root->fs_info->fs_devices->latest_bdev;
6450 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6452 /* set up the extent map to cheat btrfs_readpage */
6453 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6456 write_lock(&em_tree->lock);
6457 ret = add_extent_mapping(em_tree, em);
6458 write_unlock(&em_tree->lock);
6459 if (ret != -EEXIST) {
6460 free_extent_map(em);
6463 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6465 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6467 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6470 struct btrfs_ref_path {
6472 u64 nodes[BTRFS_MAX_LEVEL];
6474 u64 root_generation;
6481 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6482 u64 new_nodes[BTRFS_MAX_LEVEL];
6485 struct disk_extent {
6496 static int is_cowonly_root(u64 root_objectid)
6498 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6499 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6500 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6501 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6502 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6503 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6508 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6509 struct btrfs_root *extent_root,
6510 struct btrfs_ref_path *ref_path,
6513 struct extent_buffer *leaf;
6514 struct btrfs_path *path;
6515 struct btrfs_extent_ref *ref;
6516 struct btrfs_key key;
6517 struct btrfs_key found_key;
6523 path = btrfs_alloc_path();
6528 ref_path->lowest_level = -1;
6529 ref_path->current_level = -1;
6530 ref_path->shared_level = -1;
6534 level = ref_path->current_level - 1;
6535 while (level >= -1) {
6537 if (level < ref_path->lowest_level)
6541 bytenr = ref_path->nodes[level];
6543 bytenr = ref_path->extent_start;
6544 BUG_ON(bytenr == 0);
6546 parent = ref_path->nodes[level + 1];
6547 ref_path->nodes[level + 1] = 0;
6548 ref_path->current_level = level;
6549 BUG_ON(parent == 0);
6551 key.objectid = bytenr;
6552 key.offset = parent + 1;
6553 key.type = BTRFS_EXTENT_REF_KEY;
6555 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6560 leaf = path->nodes[0];
6561 nritems = btrfs_header_nritems(leaf);
6562 if (path->slots[0] >= nritems) {
6563 ret = btrfs_next_leaf(extent_root, path);
6568 leaf = path->nodes[0];
6571 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6572 if (found_key.objectid == bytenr &&
6573 found_key.type == BTRFS_EXTENT_REF_KEY) {
6574 if (level < ref_path->shared_level)
6575 ref_path->shared_level = level;
6580 btrfs_release_path(extent_root, path);
6583 /* reached lowest level */
6587 level = ref_path->current_level;
6588 while (level < BTRFS_MAX_LEVEL - 1) {
6592 bytenr = ref_path->nodes[level];
6594 bytenr = ref_path->extent_start;
6596 BUG_ON(bytenr == 0);
6598 key.objectid = bytenr;
6600 key.type = BTRFS_EXTENT_REF_KEY;
6602 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6606 leaf = path->nodes[0];
6607 nritems = btrfs_header_nritems(leaf);
6608 if (path->slots[0] >= nritems) {
6609 ret = btrfs_next_leaf(extent_root, path);
6613 /* the extent was freed by someone */
6614 if (ref_path->lowest_level == level)
6616 btrfs_release_path(extent_root, path);
6619 leaf = path->nodes[0];
6622 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6623 if (found_key.objectid != bytenr ||
6624 found_key.type != BTRFS_EXTENT_REF_KEY) {
6625 /* the extent was freed by someone */
6626 if (ref_path->lowest_level == level) {
6630 btrfs_release_path(extent_root, path);
6634 ref = btrfs_item_ptr(leaf, path->slots[0],
6635 struct btrfs_extent_ref);
6636 ref_objectid = btrfs_ref_objectid(leaf, ref);
6637 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6639 level = (int)ref_objectid;
6640 BUG_ON(level >= BTRFS_MAX_LEVEL);
6641 ref_path->lowest_level = level;
6642 ref_path->current_level = level;
6643 ref_path->nodes[level] = bytenr;
6645 WARN_ON(ref_objectid != level);
6648 WARN_ON(level != -1);
6652 if (ref_path->lowest_level == level) {
6653 ref_path->owner_objectid = ref_objectid;
6654 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6658 * the block is a tree root or the block isn't in reference
6661 if (found_key.objectid == found_key.offset ||
6662 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6663 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6664 ref_path->root_generation =
6665 btrfs_ref_generation(leaf, ref);
6667 /* special reference from the tree log */
6668 ref_path->nodes[0] = found_key.offset;
6669 ref_path->current_level = 0;
6676 BUG_ON(ref_path->nodes[level] != 0);
6677 ref_path->nodes[level] = found_key.offset;
6678 ref_path->current_level = level;
6681 * the reference was created in the running transaction,
6682 * no need to continue walking up.
6684 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6685 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6686 ref_path->root_generation =
6687 btrfs_ref_generation(leaf, ref);
6692 btrfs_release_path(extent_root, path);
6695 /* reached max tree level, but no tree root found. */
6698 btrfs_free_path(path);
6702 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6703 struct btrfs_root *extent_root,
6704 struct btrfs_ref_path *ref_path,
6707 memset(ref_path, 0, sizeof(*ref_path));
6708 ref_path->extent_start = extent_start;
6710 return __next_ref_path(trans, extent_root, ref_path, 1);
6713 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6714 struct btrfs_root *extent_root,
6715 struct btrfs_ref_path *ref_path)
6717 return __next_ref_path(trans, extent_root, ref_path, 0);
6720 static noinline int get_new_locations(struct inode *reloc_inode,
6721 struct btrfs_key *extent_key,
6722 u64 offset, int no_fragment,
6723 struct disk_extent **extents,
6726 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6727 struct btrfs_path *path;
6728 struct btrfs_file_extent_item *fi;
6729 struct extent_buffer *leaf;
6730 struct disk_extent *exts = *extents;
6731 struct btrfs_key found_key;
6736 int max = *nr_extents;
6739 WARN_ON(!no_fragment && *extents);
6742 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6747 path = btrfs_alloc_path();
6750 cur_pos = extent_key->objectid - offset;
6751 last_byte = extent_key->objectid + extent_key->offset;
6752 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6762 leaf = path->nodes[0];
6763 nritems = btrfs_header_nritems(leaf);
6764 if (path->slots[0] >= nritems) {
6765 ret = btrfs_next_leaf(root, path);
6770 leaf = path->nodes[0];
6773 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6774 if (found_key.offset != cur_pos ||
6775 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6776 found_key.objectid != reloc_inode->i_ino)
6779 fi = btrfs_item_ptr(leaf, path->slots[0],
6780 struct btrfs_file_extent_item);
6781 if (btrfs_file_extent_type(leaf, fi) !=
6782 BTRFS_FILE_EXTENT_REG ||
6783 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6787 struct disk_extent *old = exts;
6789 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6790 memcpy(exts, old, sizeof(*exts) * nr);
6791 if (old != *extents)
6795 exts[nr].disk_bytenr =
6796 btrfs_file_extent_disk_bytenr(leaf, fi);
6797 exts[nr].disk_num_bytes =
6798 btrfs_file_extent_disk_num_bytes(leaf, fi);
6799 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6800 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6801 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6802 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6803 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6804 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6806 BUG_ON(exts[nr].offset > 0);
6807 BUG_ON(exts[nr].compression || exts[nr].encryption);
6808 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6810 cur_pos += exts[nr].num_bytes;
6813 if (cur_pos + offset >= last_byte)
6823 BUG_ON(cur_pos + offset > last_byte);
6824 if (cur_pos + offset < last_byte) {
6830 btrfs_free_path(path);
6832 if (exts != *extents)
6841 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6842 struct btrfs_root *root,
6843 struct btrfs_path *path,
6844 struct btrfs_key *extent_key,
6845 struct btrfs_key *leaf_key,
6846 struct btrfs_ref_path *ref_path,
6847 struct disk_extent *new_extents,
6850 struct extent_buffer *leaf;
6851 struct btrfs_file_extent_item *fi;
6852 struct inode *inode = NULL;
6853 struct btrfs_key key;
6858 u64 search_end = (u64)-1;
6861 int extent_locked = 0;
6865 memcpy(&key, leaf_key, sizeof(key));
6866 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6867 if (key.objectid < ref_path->owner_objectid ||
6868 (key.objectid == ref_path->owner_objectid &&
6869 key.type < BTRFS_EXTENT_DATA_KEY)) {
6870 key.objectid = ref_path->owner_objectid;
6871 key.type = BTRFS_EXTENT_DATA_KEY;
6877 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6881 leaf = path->nodes[0];
6882 nritems = btrfs_header_nritems(leaf);
6884 if (extent_locked && ret > 0) {
6886 * the file extent item was modified by someone
6887 * before the extent got locked.
6889 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6890 lock_end, GFP_NOFS);
6894 if (path->slots[0] >= nritems) {
6895 if (++nr_scaned > 2)
6898 BUG_ON(extent_locked);
6899 ret = btrfs_next_leaf(root, path);
6904 leaf = path->nodes[0];
6905 nritems = btrfs_header_nritems(leaf);
6908 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6910 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6911 if ((key.objectid > ref_path->owner_objectid) ||
6912 (key.objectid == ref_path->owner_objectid &&
6913 key.type > BTRFS_EXTENT_DATA_KEY) ||
6914 key.offset >= search_end)
6918 if (inode && key.objectid != inode->i_ino) {
6919 BUG_ON(extent_locked);
6920 btrfs_release_path(root, path);
6921 mutex_unlock(&inode->i_mutex);
6927 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6932 fi = btrfs_item_ptr(leaf, path->slots[0],
6933 struct btrfs_file_extent_item);
6934 extent_type = btrfs_file_extent_type(leaf, fi);
6935 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6936 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6937 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6938 extent_key->objectid)) {
6944 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6945 ext_offset = btrfs_file_extent_offset(leaf, fi);
6947 if (search_end == (u64)-1) {
6948 search_end = key.offset - ext_offset +
6949 btrfs_file_extent_ram_bytes(leaf, fi);
6952 if (!extent_locked) {
6953 lock_start = key.offset;
6954 lock_end = lock_start + num_bytes - 1;
6956 if (lock_start > key.offset ||
6957 lock_end + 1 < key.offset + num_bytes) {
6958 unlock_extent(&BTRFS_I(inode)->io_tree,
6959 lock_start, lock_end, GFP_NOFS);
6965 btrfs_release_path(root, path);
6967 inode = btrfs_iget_locked(root->fs_info->sb,
6968 key.objectid, root);
6969 if (inode->i_state & I_NEW) {
6970 BTRFS_I(inode)->root = root;
6971 BTRFS_I(inode)->location.objectid =
6973 BTRFS_I(inode)->location.type =
6974 BTRFS_INODE_ITEM_KEY;
6975 BTRFS_I(inode)->location.offset = 0;
6976 btrfs_read_locked_inode(inode);
6977 unlock_new_inode(inode);
6980 * some code calls btrfs_commit_transaction while
6981 * holding the i_mutex, so we can't use mutex_lock
6984 if (is_bad_inode(inode) ||
6985 !mutex_trylock(&inode->i_mutex)) {
6988 key.offset = (u64)-1;
6993 if (!extent_locked) {
6994 struct btrfs_ordered_extent *ordered;
6996 btrfs_release_path(root, path);
6998 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6999 lock_end, GFP_NOFS);
7000 ordered = btrfs_lookup_first_ordered_extent(inode,
7003 ordered->file_offset <= lock_end &&
7004 ordered->file_offset + ordered->len > lock_start) {
7005 unlock_extent(&BTRFS_I(inode)->io_tree,
7006 lock_start, lock_end, GFP_NOFS);
7007 btrfs_start_ordered_extent(inode, ordered, 1);
7008 btrfs_put_ordered_extent(ordered);
7009 key.offset += num_bytes;
7013 btrfs_put_ordered_extent(ordered);
7019 if (nr_extents == 1) {
7020 /* update extent pointer in place */
7021 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7022 new_extents[0].disk_bytenr);
7023 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7024 new_extents[0].disk_num_bytes);
7025 btrfs_mark_buffer_dirty(leaf);
7027 btrfs_drop_extent_cache(inode, key.offset,
7028 key.offset + num_bytes - 1, 0);
7030 ret = btrfs_inc_extent_ref(trans, root,
7031 new_extents[0].disk_bytenr,
7032 new_extents[0].disk_num_bytes,
7034 root->root_key.objectid,
7039 ret = btrfs_free_extent(trans, root,
7040 extent_key->objectid,
7043 btrfs_header_owner(leaf),
7044 btrfs_header_generation(leaf),
7048 btrfs_release_path(root, path);
7049 key.offset += num_bytes;
7057 * drop the old extent pointer first, then insert the
7058 * new pointers one by one
7060 btrfs_release_path(root, path);
7061 ret = btrfs_drop_extents(trans, root, inode, key.offset,
7062 key.offset + num_bytes,
7063 key.offset, &alloc_hint);
7066 for (i = 0; i < nr_extents; i++) {
7067 if (ext_offset >= new_extents[i].num_bytes) {
7068 ext_offset -= new_extents[i].num_bytes;
7071 extent_len = min(new_extents[i].num_bytes -
7072 ext_offset, num_bytes);
7074 ret = btrfs_insert_empty_item(trans, root,
7079 leaf = path->nodes[0];
7080 fi = btrfs_item_ptr(leaf, path->slots[0],
7081 struct btrfs_file_extent_item);
7082 btrfs_set_file_extent_generation(leaf, fi,
7084 btrfs_set_file_extent_type(leaf, fi,
7085 BTRFS_FILE_EXTENT_REG);
7086 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7087 new_extents[i].disk_bytenr);
7088 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7089 new_extents[i].disk_num_bytes);
7090 btrfs_set_file_extent_ram_bytes(leaf, fi,
7091 new_extents[i].ram_bytes);
7093 btrfs_set_file_extent_compression(leaf, fi,
7094 new_extents[i].compression);
7095 btrfs_set_file_extent_encryption(leaf, fi,
7096 new_extents[i].encryption);
7097 btrfs_set_file_extent_other_encoding(leaf, fi,
7098 new_extents[i].other_encoding);
7100 btrfs_set_file_extent_num_bytes(leaf, fi,
7102 ext_offset += new_extents[i].offset;
7103 btrfs_set_file_extent_offset(leaf, fi,
7105 btrfs_mark_buffer_dirty(leaf);
7107 btrfs_drop_extent_cache(inode, key.offset,
7108 key.offset + extent_len - 1, 0);
7110 ret = btrfs_inc_extent_ref(trans, root,
7111 new_extents[i].disk_bytenr,
7112 new_extents[i].disk_num_bytes,
7114 root->root_key.objectid,
7115 trans->transid, key.objectid);
7117 btrfs_release_path(root, path);
7119 inode_add_bytes(inode, extent_len);
7122 num_bytes -= extent_len;
7123 key.offset += extent_len;
7128 BUG_ON(i >= nr_extents);
7132 if (extent_locked) {
7133 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7134 lock_end, GFP_NOFS);
7138 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
7139 key.offset >= search_end)
7146 btrfs_release_path(root, path);
7148 mutex_unlock(&inode->i_mutex);
7149 if (extent_locked) {
7150 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7151 lock_end, GFP_NOFS);
7158 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7159 struct btrfs_root *root,
7160 struct extent_buffer *buf, u64 orig_start)
7165 BUG_ON(btrfs_header_generation(buf) != trans->transid);
7166 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7168 level = btrfs_header_level(buf);
7170 struct btrfs_leaf_ref *ref;
7171 struct btrfs_leaf_ref *orig_ref;
7173 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7177 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7179 btrfs_free_leaf_ref(root, orig_ref);
7183 ref->nritems = orig_ref->nritems;
7184 memcpy(ref->extents, orig_ref->extents,
7185 sizeof(ref->extents[0]) * ref->nritems);
7187 btrfs_free_leaf_ref(root, orig_ref);
7189 ref->root_gen = trans->transid;
7190 ref->bytenr = buf->start;
7191 ref->owner = btrfs_header_owner(buf);
7192 ref->generation = btrfs_header_generation(buf);
7194 ret = btrfs_add_leaf_ref(root, ref, 0);
7196 btrfs_free_leaf_ref(root, ref);
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct btrfs_root *target_root)
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;
	struct extent_state *cached_state = NULL;
	u64 skip_objectid = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
		if (!inode || inode->i_ino != key.objectid) {
			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);
			skip_objectid = key.objectid;
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
				 key.offset + num_bytes - 1, 0, &cached_state,
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
				     key.offset + num_bytes - 1, &cached_state,
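
/*
 * rewrite every file extent item in @leaf that points into @group so it
 * refers to the extent's new location: the cached leaf ref is updated,
 * a reference is taken on the new extent and the reference on the old
 * extent is dropped.
 */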
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_block_group_cache *group,
				struct inode *reloc_inode)
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;

		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						  new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					   new_extent->disk_bytenr,
					   new_extent->disk_num_bytes,
					   root->root_key.objectid,
					   trans->transid, key.objectid);
		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
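
/*
 * detach the reloc tree from a subvol root and queue it on
 * fs_info->dead_reloc_roots, recording its final node position in the
 * root item so btrfs_drop_dead_reloc_roots() can drop it later.
 */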
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
	struct btrfs_root *reloc_root;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&reloc_root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&reloc_root->root_key,
				&reloc_root->root_item);
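
/*
 * drop every reloc tree queued on the dead_reloc_roots list: each
 * snapshot is dropped in its own transaction, its root item deleted
 * from the tree root, and the leaf refs cached for the last processed
 * root are removed at the end.
 */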
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);

		trans = btrfs_join_transaction(root, 1);

		mutex_lock(&root->fs_info->drop_mutex);
		ret = btrfs_drop_snapshot(trans, reloc_root);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);

		prev_root = reloc_root;
	btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);

int btrfs_add_dead_reloc_root(struct btrfs_root *root)
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
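
/*
 * pick up reloc roots left on disk by an interrupted balance: if any
 * are found they are re-queued as dead roots and a transaction commit
 * drops them, after which orphan cleanup runs on the data reloc tree.
 */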
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
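
/*
 * create the reloc tree for a subvol if it doesn't already exist: the
 * subvol's commit root is copied under BTRFS_TREE_RELOC_OBJECTID, a
 * root item for the copy is inserted into the tree root, and the
 * resulting root is hung off root->reloc_root.
 */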
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
/*
 * Core function of space balance.
 *
 * The idea is to use reloc trees to relocate tree blocks in reference
 * counted roots.  There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid.  Reloc trees are
 * snapshots of the latest committed roots of subvols
 * (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps:
 * COW the block through the subvol's reloc tree, then update the block
 * pointer in the subvol to point to the new block.  Since all reloc
 * trees share the same root key objectid, special handling for tree
 * blocks owned by them is easy.  Once a tree block has been COWed in
 * one reloc tree, we can use the resulting new block directly when the
 * same block is required to COW again through other reloc trees.  In
 * this way, relocated tree blocks are shared between reloc trees, so
 * they are also shared between subvols.
 */
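
/*
 * A purely illustrative walk-through of the two-step scheme above:
 * suppose tree block B is referenced by subvols A and C and has to be
 * moved.  Relocation first COWs B through A's reloc tree, producing a
 * copy B' at the new location.  When the same block must be COWed
 * through C's reloc tree, B' is found and reused because all reloc
 * trees share the reloc objectid.  Finally each subvol's block pointer
 * is switched from B to B' (see the btrfs_merge_path() calls below),
 * so B' ends up shared by A and C exactly as B was.
 */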
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	int lowest_level = 0;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		path->lowest_level = 0;
		btrfs_release_path(root, path);

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
		btrfs_release_path(reloc_root, path);

		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,

	/*
	 * replace tree blocks in the fs tree with tree blocks in the
	 * reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		free_extent_buffer(eb);

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);

static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	ret = btrfs_del_item(trans, extent_root, path);
	btrfs_release_path(extent_root, path);

static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
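
/*
 * relocate one extent referenced from the block group being emptied:
 * walk every reference path leading to the extent, and for each path
 * either move the data to its new location or relocate the referencing
 * tree blocks, depending on whether the owner is a file or a tree
 * level.
 */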
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	struct btrfs_key first_key;

	trans = btrfs_start_transaction(extent_root, 1);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);

	for (loops = 0; ; loops++) {
		if (loops == 0)
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		else
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted trees, only process reference
		 * paths rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * copy data extents to new locations
			 */
			u64 group_start = group->key.objectid;
			ret = relocate_data_extent(reloc_inode,

		level = ref_path->owner_objectid;

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			u64 group_start = group->key.objectid;
			new_extents = kmalloc(sizeof(*new_extents),
					      GFP_NOFS);
			ret = get_new_locations(reloc_inode,
			ret = replace_one_extent(trans, found_root,
						 &first_key, ref_path,
						 new_extents, nr_extents);
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);

	btrfs_end_transaction(trans, extent_root);
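
/*
 * figure out which allocation profile a chunk should be converted to,
 * based on the number of writable devices: on a single device, raid0
 * degrades to single chunks and raid1/raid10 degrade to dup; with
 * multiple devices, dup is promoted to raid1 and single chunks to
 * raid0.
 */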
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
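
/*
 * try to mark a block group read-only: its unused bytes are added to
 * the space info's bytes_readonly, and any reserved_pinned bytes move
 * to bytes_reserved, but only if the space info can still cover all of
 * its other commitments.
 */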
static int set_block_group_ro(struct btrfs_block_group_cache *cache)
	struct btrfs_space_info *sinfo = cache->space_info;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly +
	    cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		sinfo->bytes_reserved += cache->reserved_pinned;
		cache->reserved_pinned = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);

int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);

	ret = set_block_group_ro(cache);
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
	ret = set_block_group_ro(cache);
	btrfs_end_transaction(trans, root);

int btrfs_set_block_group_rw(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
	struct btrfs_space_info *sinfo = cache->space_info;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0
 * if it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */

	/* no bytes used, we're good */
	if (!btrfs_block_group_used(&block_group->item))

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to
	 * handle all of the extents from this block group.  If we can,
	 * we're good.
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     btrfs_block_group_used(&block_group->item) <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on
	 * our devices to allocate new chunks for relocation, so loop
	 * through our alloc devices and guess if we have enough space.
	 * However, if we were marked as full, then we know there aren't
	 * enough chunks, and we can just return.
	 */

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 min_free = btrfs_block_group_used(&block_group->item);
		u64 dev_offset, max_avail;

		/*
		 * check to make sure we can actually find a chunk with
		 * enough space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, &max_avail);
	mutex_unlock(&root->fs_info->chunk_mutex);

	btrfs_put_block_group(block_group);
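
/*
 * scan the extent tree starting at *key and position the path at the
 * first block group item whose objectid is at least key->objectid.
 */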
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path, struct btrfs_key *key)
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
	struct btrfs_block_group_cache *block_group;
	struct inode *inode;

	block_group = btrfs_lookup_first_block_group(info, last);
	while (block_group) {
		spin_lock(&block_group->lock);
		if (block_group->iref)
		spin_unlock(&block_group->lock);
		block_group = next_block_group(info->tree_root,

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);

		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
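
/*
 * final stage of unmount: drop any remaining caching controls, tear
 * down every cached block group, and then free the space_info structs
 * once nothing can be allocating from them any more.
 */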
int btrfs_free_block_groups(struct btrfs_fs_info *info)
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0) {
			dump_space_info(space_info, 0, 0);
		list_del(&space_info->list);

static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
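
/*
 * read all block group items from the extent tree at mount time and
 * set up the in-memory block group cache for each of them.  Completely
 * full and completely empty groups are marked cached right away so the
 * slow caching path can be skipped for them.
 */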
int btrfs_read_block_groups(struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();

	cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
	if (cache_gen != 0 &&
	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)

		ret = find_first_block_group(root, path, &key);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

			cache->disk_cache_state = BTRFS_DC_CLEAR;

		/*
		 * we only want to have 32k of ram per block group for
		 * keeping track of free space, and if we pass 1/2 of that
		 * we want to start converting things over to using bitmaps
		 */
		cache->extents_thresh = ((1024 * 32) / 2) /
			sizeof(struct btrfs_free_space);

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid +
			free_excluded_extents(root, cache);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache);

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
		/*
		 * avoid allocating from un-mirrored block groups if there
		 * are mirrored block groups.
		 */
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache);

	init_global_block_rsv(info);

	btrfs_free_path(path);
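
/*
 * create the block group for a newly allocated chunk: set up its
 * in-memory cache and free space tracking, account the space to the
 * matching space_info and insert the block group item into the extent
 * tree.
 */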
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
		sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));

	set_avail_alloc_bits(extent_root->fs_info, type);
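
/*
 * remove a (read-only, fully relocated) block group: pull it out of
 * any allocation clusters, drop its free space cache inode and free
 * space item, unhook it from the block group cache and its space_info,
 * and finally delete the block group item itself.
 */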
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();

	inode = lookup_free_space_inode(root, block_group, path);
	if (!IS_ERR(inode)) {
		btrfs_orphan_add(trans, inode);
		/* One for the block group's ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
		spin_unlock(&block_group->lock);
		/* One for our lookup ref */

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
		btrfs_release_path(tree_root, path);
		ret = btrfs_del_item(trans, tree_root, path);
		btrfs_release_path(tree_root, path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);
	btrfs_free_path(path);