/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

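/*
 * drop one reference on a transaction.  The struct is freed once the
 * last reference goes away, at which point it must no longer be on
 * fs_info->trans_list.
 */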
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

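/*
 * point root->commit_root at the current root node.  btrfs_root_node()
 * takes a reference on the new node for us, and the old commit root's
 * reference is dropped.
 */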
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		/* someone else beat us to the allocation, use theirs */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!cur_trans->blocked)
				break;
			schedule();
		}
		finish_wait(&root->fs_info->transaction_wait, &wait);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

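/*
 * decide whether starting a transaction needs to wait for a commit in
 * progress: never during log replay, always for userspace (ioctl)
 * transactions, and for TRANS_START only when no ioctl-opened
 * transaction is in flight.
 */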
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

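/*
 * common helper behind all the transaction start variants.  num_items
 * is the worst-case number of metadata items the caller may touch;
 * space for them is reserved from trans_block_rsv before joining so
 * that any flushing happens outside the running transaction.
 */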
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);
	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}
	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(NULL, root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);
	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);
	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;
	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}
	if (num_bytes) {
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}
got_it:
	btrfs_record_root_in_trans(h, root);
	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

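/*
 * Typical caller pattern (illustrative only, not from this file):
 * reserve room for one item's worth of metadata, modify the tree,
 * then drop the handle.  Error handling kept minimal.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans, root);
 */
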
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		schedule();
	}
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

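/*
 * wait for the commit of a specific transid or, when transid == 0, of
 * the newest transaction that is already committing.  Returns -EINVAL
 * if the requested transid is not found.
 */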
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

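/*
 * returns nonzero when the global block reserve is running low, i.e.
 * the transaction should be wrapped up and committed to free space.
 */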
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(trans, root,
				    &root->fs_info->global_block_rsv, 0, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	return should_end_transaction(trans, root);
}

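/*
 * common helper behind the end_transaction variants.  throttle lets
 * this call commit the transaction or run delayed iputs when the
 * transaction is under pressure; lock == 0 is for callers that must
 * not block on a commit (the _nolock variant).
 */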
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

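/*
 * flush the transaction's dirty btree pages and wait for them.  With
 * no open transaction (e.g. when committing the super outside of a
 * running transaction) there is no per-transaction dirty_pages tree,
 * so fall back to flushing the whole btree inode mapping.
 */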
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * write out all the dirty fs-tree (subvolume) roots and update their
 * root items in the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
	btrfs_orphan_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

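/*
 * copy the freshly committed tree root and chunk root pointers into
 * the in-memory copy of the super block so the next super write picks
 * them up.
 */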
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

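/*
 * the two helpers below let callers sample the running transaction's
 * commit state under trans_lock without holding a transaction handle.
 */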
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->in_commit)
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->in_commit) {
			finish_wait(&root->fs_info->transaction_blocked_wait,
				    &wait);
			break;
		}
		schedule();
		finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
	}
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	DEFINE_WAIT(wait);

	if (trans->commit_done || (trans->in_commit && !trans->blocked))
		return;

	while (1) {
		prepare_to_wait(&root->fs_info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->commit_done ||
		    (trans->in_commit && !trans->blocked)) {
			finish_wait(&root->fs_info->transaction_wait,
				    &wait);
			break;
		}
		schedule();
		finish_wait(&root->fs_info->transaction_wait,
			    &wait);
	}
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

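/*
 * Illustrative use (not from this file): kick off a commit and return
 * once it has started and passed the blocking stage, without waiting
 * for the super block to hit disk:
 *
 *	ret = btrfs_commit_transaction_async(trans, root, 1);
 */
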
/*
 * btrfs_transaction state sequence:
 *
 *	in_commit = 0, blocked = 0  (initial)
 *	in_commit = 1, blocked = 1
 *	blocked = 0
 *	commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}