/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "ref-cache.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

#define BTRFS_ROOT_TRANS_TAG 0
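
/*
 * Drop one reference on a transaction.  When the count hits zero the
 * transaction is unlinked from fs_info->trans_list and freed back to
 * the btrfs_transaction slab cache.
 */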
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		WARN_ON(total_trans == 0);
		total_trans--;
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
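
/*
 * Either join the currently running transaction or, if there is none,
 * allocate and initialize a new one under trans_mutex and make it the
 * running transaction for this fs_info.
 */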
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		total_trans++;
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		root->fs_info->last_alloc = 0;
		root->fs_info->last_data_alloc = 0;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();
		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}
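
/*
 * Record a copy-on-write root in the current transaction: tag it in the
 * fs_roots radix tree, snapshot its current root node into a dirty-root
 * copy, and queue that copy on the root's dead_list so the old blocks
 * can be dropped after commit.
 */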
static noinline int record_root_in_trans(struct btrfs_root *root)
{
	struct btrfs_dirty_root *dirty;
	u64 running_trans_id = root->fs_info->running_transaction->transid;
	if (root->ref_cows && root->last_trans < running_trans_id) {
		WARN_ON(root == root->fs_info->extent_root);
		if (root->root_item.refs != 0) {
			radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);

			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
			BUG_ON(!dirty);
			dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
			BUG_ON(!dirty->root);
			dirty->latest_root = root;
			INIT_LIST_HEAD(&dirty->list);

			root->commit_root = btrfs_root_node(root);

			memcpy(dirty->root, root, sizeof(*root));
			spin_lock_init(&dirty->root->node_lock);
			spin_lock_init(&dirty->root->list_lock);
			mutex_init(&dirty->root->objectid_mutex);
			INIT_LIST_HEAD(&dirty->root->dead_list);
			dirty->root->node = root->commit_root;
			dirty->root->commit_root = NULL;

			spin_lock(&root->list_lock);
			list_add(&dirty->root->dead_list, &root->dead_list);
			spin_unlock(&root->list_lock);

			root->dirty_root = dirty;
		} else {
			WARN_ON(1);
		}
		root->last_trans = running_trans_id;
	}
	return 0;
}
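
/*
 * If the running transaction is blocked (a commit is in its critical
 * section), sleep until it unblocks.  Called with trans_mutex held;
 * the mutex is dropped across the schedule().
 */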
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while(1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (cur_trans->blocked) {
				mutex_unlock(&root->fs_info->trans_mutex);
				schedule();
				mutex_lock(&root->fs_info->trans_mutex);
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
			} else {
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
				break;
			}
		}
		put_transaction(cur_trans);
	}
}
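
/*
 * Common helper for starting a transaction handle.  Non-joining callers
 * first wait for any blocked (committing) transaction to finish.
 */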
struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
					     int num_blocks, int join)
{
	struct btrfs_trans_handle *h =
		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);
	if (!join)
		wait_current_trans(root);
	ret = join_transaction(root);
	BUG_ON(ret);

	record_root_in_trans(root);
	h->transid = root->fs_info->running_transaction->transid;
	h->transaction = root->fs_info->running_transaction;
	h->blocks_reserved = num_blocks;
	h->blocks_used = 0;
	h->block_group = NULL;
	h->alloc_exclude_nr = 0;
	h->alloc_exclude_start = 0;
	root->fs_info->running_transaction->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);
	return h;
}
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, 1);
}
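
/* sleep until the given transaction has fully committed */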
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while(!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}
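
/*
 * Throttle new writers while snapshot deletion is making progress.
 * Each pass waits for the throttle generation to advance, and the
 * function retries harder as the ref cache grows.
 */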
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;
		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;
		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}
void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);

	throttle_on_drops(root);
}
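
/*
 * Common helper for ending a transaction handle: drop this writer's
 * counts, wake anyone waiting for writers to drain, and free the handle.
 */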
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		throttle_on_drops(root);

	return 0;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}
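
/*
 * Write the dirty btree pages recorded in the transaction's dirty_pages
 * extent tree and wait for the IO to complete.  With no transaction,
 * fall back to flushing the whole btree inode mapping.
 */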
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct extent_io_tree *dirty_pages;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	if (!trans || !trans->transaction) {
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	dirty_pages = &trans->transaction->dirty_pages;
	while(1) {
		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
		while(start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_lock_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	err = filemap_fdatawait(btree_inode->i_mapping);
	if (err)
		werr = err;
	return werr;
}
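
/*
 * Write out a root whose blocks only change by cow and is never
 * snapshotted (extent root, chunk root).  Updating the root item may
 * itself dirty more block groups, so loop until the root item matches
 * the current root node.
 */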
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	btrfs_write_dirty_block_groups(trans, root);
	while(1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start)
			break;
		btrfs_set_root_bytenr(&root->root_item,
				      root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(root->node));
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);
		btrfs_write_dirty_block_groups(trans, root);
	}
	return 0;
}
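
/* update the root items of all dirty copy-on-write-only roots */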
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;

	while(!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		update_cowonly_root(trans, root);
	}
	return 0;
}
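
/* queue a root for deferred deletion on the given dead list */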
int btrfs_add_dead_root(struct btrfs_root *root,
			struct btrfs_root *latest,
			struct list_head *dead_list)
{
	struct btrfs_dirty_root *dirty;

	dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
	if (!dirty)
		return -ENOMEM;
	dirty->root = root;
	dirty->latest_root = latest;
	list_add(&dirty->list, dead_list);
	return 0;
}
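
/*
 * At commit time, walk every root tagged in this transaction.  Roots
 * whose commit_root still matches root->node only need their root item
 * rewritten; the rest get a new root key, and the old copy has its
 * refcount dropped, queueing it on @list for later deletion.
 */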
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
				    struct radix_tree_root *radix,
				    struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_root *gang[8];
	struct btrfs_root *root;
	int i;
	int ret;
	int err = 0;
	u32 refs;

	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);

			BUG_ON(!root->ref_tree);
			dirty = root->dirty_root;

			if (root->commit_root == root->node) {
				WARN_ON(root->node->start !=
					btrfs_root_bytenr(&root->root_item));

				free_extent_buffer(root->commit_root);
				root->commit_root = NULL;

				spin_lock(&root->list_lock);
				list_del_init(&dirty->root->dead_list);
				spin_unlock(&root->list_lock);

				kfree(dirty->root);
				kfree(dirty);

				/* make sure to update the root on disk
				 * so we get any updates to the block used
				 * counts
				 */
				err = btrfs_update_root(trans,
						root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
				continue;
			}

			memset(&root->root_item.drop_progress, 0,
			       sizeof(struct btrfs_disk_key));
			root->root_item.drop_level = 0;
			root->commit_root = NULL;
			root->root_key.offset = root->fs_info->generation;
			btrfs_set_root_bytenr(&root->root_item,
					      root->node->start);
			btrfs_set_root_level(&root->root_item,
					     btrfs_header_level(root->node));
			err = btrfs_insert_root(trans, root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;

			refs = btrfs_root_refs(&dirty->root->root_item);
			btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
			err = btrfs_update_root(trans, root->fs_info->tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);

			BUG_ON(err);
			if (refs == 1) {
				list_add(&dirty->list, list);
			} else {
				WARN_ON(1);
				free_extent_buffer(dirty->root->node);
				kfree(dirty->root);
				kfree(dirty);
			}
		}
	}
	return err;
}
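
/*
 * Defrag the leaves of a root, restarting the transaction periodically
 * so a long defrag does not pin one transaction open.
 */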
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}
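
/*
 * Actually delete the roots queued by add_dirty_roots.  Snapshot
 * deletion is restartable (-EAGAIN), so each pass runs in its own
 * transaction and the btree is rebalanced between passes.
 */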
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
				     struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 num_bytes;
	u64 bytes_used;
	u64 max_useless;
	int ret = 0;
	int err;

	while(!list_empty(list)) {
		struct btrfs_root *root;

		dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
		list_del_init(&dirty->list);

		num_bytes = btrfs_root_used(&dirty->root->root_item);
		root = dirty->latest_root;
		atomic_inc(&root->fs_info->throttles);

		mutex_lock(&root->fs_info->drop_mutex);
		while(1) {
			trans = btrfs_start_transaction(tree_root, 1);
			ret = btrfs_drop_snapshot(trans, dirty->root);
			if (ret != -EAGAIN) {
				break;
			}

			err = btrfs_update_root(trans,
					tree_root,
					&dirty->root->root_key,
					&dirty->root->root_item);
			if (err)
				ret = err;
			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, tree_root);
			BUG_ON(ret);

			mutex_unlock(&root->fs_info->drop_mutex);
			btrfs_btree_balance_dirty(tree_root, nr);
			cond_resched();
			mutex_lock(&root->fs_info->drop_mutex);
		}
		BUG_ON(ret);
		atomic_dec(&root->fs_info->throttles);
		wake_up(&root->fs_info->transaction_throttle);

		mutex_lock(&root->fs_info->alloc_mutex);
		num_bytes -= btrfs_root_used(&dirty->root->root_item);
		bytes_used = btrfs_root_used(&root->root_item);
		if (num_bytes) {
			record_root_in_trans(root);
			btrfs_set_root_used(&root->root_item,
					    bytes_used - num_bytes);
		}
		mutex_unlock(&root->fs_info->alloc_mutex);

		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
		if (ret) {
			BUG();
			break;
		}
		mutex_unlock(&root->fs_info->drop_mutex);

		spin_lock(&root->list_lock);
		list_del_init(&dirty->root->dead_list);
		if (!list_empty(&root->dead_list)) {
			struct btrfs_root *oldest;
			oldest = list_entry(root->dead_list.prev,
					    struct btrfs_root, dead_list);
			max_useless = oldest->root_key.offset - 1;
		} else {
			max_useless = root->root_key.offset - 1;
		}
		spin_unlock(&root->list_lock);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		ret = btrfs_remove_leaf_refs(root, max_useless);
		BUG_ON(ret);

		free_extent_buffer(dirty->root->node);
		kfree(dirty->root);
		kfree(dirty);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	return ret;
}
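
/*
 * Create one pending snapshot: copy the source root's top block, insert
 * a root item for it, and link the new root into the tree of tree roots
 * with a directory entry and inode ref.
 */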
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	int namelen;
	u64 objectid;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	key.objectid = objectid;
	key.offset = 1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_bytenr(new_root_item, tmp->start);
	btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto fail;

	/*
	 * insert the directory item
	 */
	key.offset = (u64)-1;
	namelen = strlen(pending->name);
	ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
				    pending->name, namelen,
				    root->fs_info->sb->s_root->d_inode->i_ino,
				    &key, BTRFS_FT_DIR, 0);
	if (ret)
		goto fail;

	ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
			     pending->name, strlen(pending->name), objectid,
			     root->fs_info->sb->s_root->d_inode->i_ino, 0);

	/* Invalidate existing dcache entry for new snapshot. */
	btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
	kfree(new_root_item);
	return ret;
}
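
/* create all of the snapshots queued on the current transaction */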
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	while(!list_empty(head)) {
		pending = list_entry(head->next,
				     struct btrfs_pending_snapshot, list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
		list_del(&pending->list);
		kfree(pending->name);
		kfree(pending);
	}
	return 0;
}
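
/*
 * Commit the running transaction: wait for all other writers to detach,
 * create pending snapshots, write out the dirty tree roots, flush the
 * btree pages and finally update the super block.  If another commit is
 * already in progress this just waits for it instead.
 */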
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct list_head dirty_fs_roots;
	struct extent_io_tree *pinned_copy;
	DEFINE_WAIT(wait);
	int ret;

	INIT_LIST_HEAD(&dirty_fs_roots);

	mutex_lock(&root->fs_info->trans_mutex);
	if (trans->transaction->in_commit) {
		cur_trans = trans->transaction;
		trans->transaction->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
	if (!pinned_copy)
		return -ENOMEM;

	extent_io_tree_init(pinned_copy,
			    root->fs_info->btree_inode->i_mapping, GFP_NOFS);

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	cur_trans = trans->transaction;
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	do {
		joined = cur_trans->num_joined;
		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = 1;

		mutex_unlock(&root->fs_info->trans_mutex);

		schedule_timeout(timeout);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
			      &dirty_fs_roots);
	BUG_ON(ret);

	ret = btrfs_commit_tree_roots(trans, root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);
	btrfs_set_super_generation(&root->fs_info->super_copy,
				   cur_trans->transid);
	btrfs_set_super_root(&root->fs_info->super_copy,
			     root->fs_info->tree_root->node->start);
	btrfs_set_super_root_level(&root->fs_info->super_copy,
			   btrfs_header_level(root->fs_info->tree_root->node));

	btrfs_set_super_chunk_root(&root->fs_info->super_copy,
				   chunk_root->node->start);
	btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
					 btrfs_header_level(chunk_root->node));
	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	btrfs_copy_pinned(root, pinned_copy);

	trans->transaction->blocked = 0;
	wake_up(&root->fs_info->transaction_throttle);
	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root);

	btrfs_finish_extent_commit(trans, root, pinned_copy);
	mutex_lock(&root->fs_info->trans_mutex);

	kfree(pinned_copy);

	cur_trans->commit_done = 1;
	root->fs_info->last_trans_committed = cur_trans->transid;
	wake_up(&cur_trans->commit_wait);

	/* one put for this handle, one for the initial ref from creation */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
	if (root->fs_info->closing)
		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);

	mutex_unlock(&root->fs_info->trans_mutex);
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (root->fs_info->closing) {
		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
	}
	return ret;
}
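
/*
 * Grab everything on fs_info->dead_roots and drop it.  Looping handles
 * roots that become dead while the current batch is being dropped.
 */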
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	struct list_head dirty_roots;
	INIT_LIST_HEAD(&dirty_roots);

again:
	mutex_lock(&root->fs_info->trans_mutex);
	list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
	mutex_unlock(&root->fs_info->trans_mutex);

	if (!list_empty(&dirty_roots)) {
		drop_dirty_roots(root, &dirty_roots);
		goto again;
	}
	return 0;
}