/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * We always set EXTENT_LOCKED for the super mirror extents so we don't
 * overwrite them, so those bits need to be unset.  Also, if we are unmounting
 * with pinned extents still sitting there because a block group was still
 * caching, we need to clear those now, since we are done.
 */
void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
{
	u64 start, end, last = 0;
	int ret;

	while (1) {
		ret = find_first_extent_bit(&info->pinned_extents, last,
					    &start, &end,
					    EXTENT_LOCKED|EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_bits(&info->pinned_extents, start, end,
				  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
		last = end + 1;
	}
}
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			try_lock_extent(&fs_info->pinned_extents,
					logical[nr],
					logical[nr] + stripe_len - 1, GFP_NOFS);
		}
		kfree(logical);
	}
	return 0;
}
/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents in the current transaction, we check pinned_extents for any
 * extents that can't be used yet; their free space will be released as
 * soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY|EXTENT_LOCKED);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}
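/*
 * Illustrative worked example (not part of the original file): suppose the
 * caller passes [start, end) = [0, 100) and the pinned tree holds one
 * range [30, 39].  The first loop iteration sees extent_start 30 > start,
 * adds [0, 30) (30 bytes) to the free space cache and advances start to
 * 40.  The next find_first_extent_bit() finds nothing, the loop breaks,
 * and the tail adds the remaining [40, 100) (60 bytes), so total_added
 * comes back as 90.
 */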
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 last = 0;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 total_found = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	atomic_inc(&block_group->space_info->caching_threads);
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = last;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		smp_mb();
		if (block_group->fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret < 0)
				goto err;
			else if (ret)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				btrfs_release_path(fs_info->extent_root, path);
				up_read(&fs_info->extent_commit_sem);
				schedule_timeout(1);
				goto again;
			}

			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;
		}

		if (total_found > (1024 * 1024 * 2)) {
			total_found = 0;
			wake_up(&block_group->caching_q);
		}
next:
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);

	spin_lock(&block_group->lock);
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);
	atomic_dec(&block_group->space_info->caching_threads);
	wake_up(&block_group->caching_q);

	return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
	struct task_struct *tsk;
	int ret = 0;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		return ret;
	}
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}
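/*
 * Illustrative only (hypothetical caller, not from the original file):
 * cache_block_group() merely kicks off the kthread; a caller that needs
 * the free space index would then wait on caching_q, roughly:
 *
 *	cache_block_group(block_group);
 *	wait_event(block_group->caching_q,
 *		   block_group_cache_done(block_group));
 *
 * The allocator paths in this file wait on caching_q in a similar way.
 */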
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
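/*
 * Descriptive note (added for clarity): div_factor() scales by tenths,
 * so div_factor(1000, 9) == 900.  Callers below use it to ask "is this
 * block group less than factor/10 full?".
 */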
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic, and can
 * be used in all the cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs.
 * The key offset has a different meaning for each type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
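/*
 * Illustrative example (added for clarity, not from the original file;
 * the numbers are made up and only the key layout matters): for a data
 * extent at bytenr 136314880 referenced once by inode 257 at file offset
 * 0 in subvolume 5, the implicit back ref would be keyed as
 *
 *	(136314880, BTRFS_EXTENT_DATA_REF_KEY,
 *	 hash_extent_data_ref(5, 257, 0))
 *
 * while a full back ref for the same extent shared via a tree block at
 * bytenr 30408704 would be keyed as
 *
 *	(136314880, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */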
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
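/*
 * Descriptive note (added for clarity): this hash is what goes into the
 * key offset of an implicit data back ref, i.e.
 *
 *	key.objectid = extent bytenr;
 *	key.type = BTRFS_EXTENT_DATA_REF_KEY;
 *	key.offset = hash_extent_data_ref(root_objectid, owner, offset);
 *
 * lookup_extent_data_ref() and insert_extent_data_ref() below build
 * their keys exactly this way.
 */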
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
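/*
 * Summary of extent_ref_type() (descriptive only, derived from the code
 * above):
 *
 *	tree block (owner < FIRST_FREE), parent != 0 -> SHARED_BLOCK_REF
 *	tree block (owner < FIRST_FREE), parent == 0 -> TREE_BLOCK_REF
 *	data extent (owner >= FIRST_FREE), parent != 0 -> SHARED_DATA_REF
 *	data extent (owner >= FIRST_FREE), parent == 0 -> EXTENT_DATA_REF
 */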
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
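/*
 * Illustrative usage sketch (hypothetical caller, added for clarity):
 * callers dispatch on the three return codes described above, roughly
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
 *					   num_bytes, parent, root_objectid,
 *					   owner, offset, 1);
 *	if (ret == 0)
 *		update_inline_extent_backref(...);  found, bump the count
 *	else if (ret == -ENOENT)
 *		setup_inline_extent_backref(...);   insert new ref at *iref
 *	else if (ret == -EAGAIN)
 *		...;                     no room inline, use a keyed ref item
 *
 * insert_inline_extent_backref() below follows exactly this pattern.
 */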
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	u64 size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
#else
	return 0;
#endif
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
			btrfs_update_pinned_extents(root, node->bytenr,
						    node->num_bytes, 1);
			update_reserved_extents(root, node->bytenr,
						node->num_bytes, 0);
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
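/*
 * Illustrative only (added for clarity): if a head's pending refs are,
 * in tree order, DROP(a), ADD(b), DROP(c), then select_delayed_ref()
 * hands back ADD(b) first; only once no ADD entries remain does it
 * return the DROP entries.  That ordering keeps the on-disk ref count
 * from touching zero while additions are still queued.
 */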
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
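/*
 * Illustrative usage (hypothetical call sites, matching the count
 * semantics described in the comment above):
 *
 *	btrfs_run_delayed_refs(trans, root, 0);
 *		process everything queued at the start of the run
 *
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *		run absolutely everything, e.g. at commit time
 */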
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
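/*
 * Illustrative only (added for clarity): a caller fills an array of
 * struct refsort, one entry per pointer in the block, then sorts it with
 * the comparator above via the kernel's sort() from <linux/sort.h>:
 *
 *	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * after which the refs can be updated in ascending bytenr order to cut
 * down on seeks.  "sorted" and "refi" are hypothetical names for the
 * array and its fill count.
 */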
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr, num_bytes, parent, ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i, level, ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;
			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
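
/*
 * Usage sketch (hypothetical call, the real call sites are in the cow
 * paths of ctree.c): after copying a block, btrfs_inc_ref(trans, root,
 * new_buf, 0) walks every pointer in new_buf and adds one reference to
 * each extent or child block it points to, and btrfs_dec_ref() drops
 * them again when a copy is released.  full_backref selects whether the
 * references are keyed to this block (1) or to the owning root (0).
 */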
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	if (ret)
		return ret;
	return 0;
}
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;
	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		atomic_inc(&cache->count);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}
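
/*
 * Typical iteration pattern (sketch): next_block_group() drops the
 * reference on the group passed in and returns the next one with a
 * reference held, so a scan over all block groups looks like:
 *
 *	cache = btrfs_lookup_first_block_group(fs_info, 0);
 *	while (cache) {
 *		... use cache ...
 *		cache = next_block_group(root, cache);
 *	}
 */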
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache && !cache->dirty)
			cache = next_block_group(root, cache);
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;
		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}
	btrfs_free_path(path);
	return 0;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	INIT_LIST_HEAD(&found->block_groups);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_delalloc = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	atomic_set(&found->caching_threads, 0);
	return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
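
/*
 * Example: a new block group with flags BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1 records RAID1 in avail_data_alloc_bits, so
 * later data allocations know a RAID1 profile is available on disk.
 */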
static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	if (!cache->ro) {
		cache->space_info->bytes_readonly += cache->key.offset -
					btrfs_block_group_used(&cache->item);
		cache->ro = 1;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
}
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
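
/*
 * Worked example: on a 2-device filesystem, flags asking for
 * (RAID10 | RAID1) first lose RAID10 (num_devices < 4), leaving RAID1;
 * on a single device RAID1 and RAID0 are stripped as well, so any
 * request degrades to a profile the device count can actually satisfy.
 */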
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
{
	struct btrfs_fs_info *info = root->fs_info;
	u64 alloc_profile;

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
			info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
			info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
			info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}

	return btrfs_reduce_alloc_profile(root, data);
}
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	u64 alloc_target;

	alloc_target = btrfs_get_alloc_profile(root, 1);
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       alloc_target);
}
/*
 * for now this just makes sure we have at least 5% of our metadata space free
 * for use.
 */
int btrfs_check_metadata_free_space(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *meta_sinfo;
	u64 alloc_target, thresh;
	int committed = 0, ret;

	/* get the space info for where the metadata will live */
	alloc_target = btrfs_get_alloc_profile(root, 0);
	meta_sinfo = __find_space_info(info, alloc_target);

again:
	spin_lock(&meta_sinfo->lock);
	if (!meta_sinfo->full)
		thresh = meta_sinfo->total_bytes * 80;
	else
		thresh = meta_sinfo->total_bytes * 95;

	do_div(thresh, 100);

	if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
	    meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
		struct btrfs_trans_handle *trans;
		if (!meta_sinfo->full) {
			meta_sinfo->force_alloc = 1;
			spin_unlock(&meta_sinfo->lock);

			trans = btrfs_start_transaction(root, 1);
			if (!trans)
				return -ENOMEM;

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     2 * 1024 * 1024, alloc_target, 0);
			btrfs_end_transaction(trans, root);
			goto again;
		}
		spin_unlock(&meta_sinfo->lock);

		if (!committed) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (!trans)
				return -ENOMEM;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}
		return -ENOSPC;
	}
	spin_unlock(&meta_sinfo->lock);

	return 0;
}
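
/*
 * Worked example of the threshold above (hypothetical numbers): with
 * 10GB of metadata space and the space info not yet full, thresh is
 * 10GB * 80 / 100 = 8GB.  Once used + reserved + pinned + readonly
 * crosses 8GB we first try to force a new metadata chunk (do_chunk_alloc
 * with a 2MB allocation hint), and only when the space info is full do
 * we fall back to committing the transaction to release pinned bytes.
 */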
/*
 * This will check the space that the inode allocates from to make sure we
 * have enough space for bytes.
 */
int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
				u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	int ret = 0, committed = 0;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
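
	/*
	 * Example: with a 4096 byte sectorsize, a request for 1000 bytes
	 * rounds up to 4096 and one for 4097 rounds up to 8192; the mask
	 * arithmetic works because sectorsize is a power of two.
	 */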
	data_sinfo = BTRFS_I(inode)->space_info;
again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	if (data_sinfo->total_bytes - data_sinfo->bytes_used -
	    data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
	    data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
	    data_sinfo->bytes_may_use < bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we
		 * need to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = 1;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_start_transaction(root, 1);
			if (!trans)
				return -ENOMEM;

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target, 0);
			btrfs_end_transaction(trans, root);
			goto again;
		}
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
		if (!committed) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (!trans)
				return -ENOMEM;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
		       ", %llu bytes_used, %llu bytes_reserved, "
		       "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
		       "%llu total\n", (unsigned long long)bytes,
		       (unsigned long long)data_sinfo->bytes_delalloc,
		       (unsigned long long)data_sinfo->bytes_used,
		       (unsigned long long)data_sinfo->bytes_reserved,
		       (unsigned long long)data_sinfo->bytes_pinned,
		       (unsigned long long)data_sinfo->bytes_readonly,
		       (unsigned long long)data_sinfo->bytes_may_use,
		       (unsigned long long)data_sinfo->total_bytes);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return btrfs_check_metadata_free_space(root);
}
/*
 * if there was an error for whatever reason after calling
 * btrfs_check_data_free_space, call this so we can cleanup the counters.
 */
void btrfs_free_reserved_data_space(struct btrfs_root *root,
				    struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}
/* called when we are adding a delalloc extent to the inode's io_tree */
void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
				  u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* get the space info for where this inode will be storing its data */
	data_sinfo = BTRFS_I(inode)->space_info;

	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_delalloc += bytes;

	/*
	 * we are adding a delalloc extent without calling
	 * btrfs_check_data_free_space first.  This happens on a weird
	 * writepage condition, but shouldn't hurt our accounting
	 */
	if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
		data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
		BTRFS_I(inode)->reserved_bytes = 0;
	} else {
		data_sinfo->bytes_may_use -= bytes;
		BTRFS_I(inode)->reserved_bytes -= bytes;
	}

	spin_unlock(&data_sinfo->lock);
}
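
/*
 * Example of the clamp in btrfs_delalloc_reserve_space (hypothetical
 * numbers): if writepage adds a 64K delalloc extent but only 16K was
 * reserved through btrfs_check_data_free_space, we give back the 16K of
 * bytes_may_use we actually hold rather than underflowing the counter
 * by the full 64K.
 */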
/* called when we are clearing a delalloc extent from the inode's io_tree */
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
			       u64 bytes)
{
	struct btrfs_space_info *info;

	info = BTRFS_I(inode)->space_info;

	spin_lock(&info->lock);
	info->bytes_delalloc -= bytes;
	spin_unlock(&info->lock);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
	}
	rcu_read_unlock();
}
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	u64 thresh;
	int ret = 0;

	mutex_lock(&fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	thresh = space_info->total_bytes - space_info->bytes_readonly;
	thresh = div_factor(thresh, 6);
	if (!force &&
	    (space_info->bytes_used + space_info->bytes_pinned +
	     space_info->bytes_reserved + alloc_bytes) < thresh) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	if (ret)
		space_info->full = 1;
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}
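
/*
 * Worked example of the threshold in do_chunk_alloc (hypothetical
 * numbers): with 100GB of writable space, div_factor(100GB, 6) yields
 * 60GB, so unless the caller forces it, a new chunk is only allocated
 * once used + pinned + reserved plus the pending allocation would cross
 * 60GB of the space already provisioned.
 */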
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 byte_in_group;
	u64 total = num_bytes;
	u64 old_val;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(&info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(&info->super_copy, old_val);

	/* block accounting for root item */
	old_val = btrfs_root_used(&root->root_item);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_root_used(&root->root_item, old_val);
	spin_unlock(&info->delalloc_lock);
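
	/*
	 * A single extent can span block groups (hypothetical example: a
	 * 12MB free starting 8MB before a group boundary updates 8MB in
	 * the first group and 4MB in the next); num_bytes is clamped with
	 * min() and bytenr advances until the whole range is accounted.
	 */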
	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			if (cache->ro)
				cache->space_info->bytes_readonly -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			if (cache->ro)
				cache->space_info->bytes_readonly += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			if (mark_free) {
				int ret;

				ret = btrfs_discard_extent(root, bytenr,
							   num_bytes);
				WARN_ON(ret);

				ret = btrfs_add_free_space(cache, bytenr,
							   num_bytes);
				WARN_ON(ret);
			}
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;