fs/btrfs/transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include "ctree.h"
26 #include "disk-io.h"
27 #include "transaction.h"
28 #include "locking.h"
29 #include "tree-log.h"
30
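/* radix tree tag for fs roots that are part of the running transaction */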
31 #define BTRFS_ROOT_TRANS_TAG 0
32
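/*
 * drop a reference on a transaction.  When the last reference goes away
 * the transaction is removed from the fs-wide transaction list and freed.
 */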
33 static noinline void put_transaction(struct btrfs_transaction *transaction)
34 {
35         WARN_ON(transaction->use_count == 0);
36         transaction->use_count--;
37         if (transaction->use_count == 0) {
38                 list_del_init(&transaction->list);
39                 memset(transaction, 0, sizeof(*transaction));
40                 kmem_cache_free(btrfs_transaction_cachep, transaction);
41         }
42 }
43
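/*
 * drop the old commit root and make the current root node the new
 * commit root for this tree.
 */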
44 static noinline void switch_commit_root(struct btrfs_root *root)
45 {
46         free_extent_buffer(root->commit_root);
47         root->commit_root = btrfs_root_node(root);
48 }
49
50 /*
51  * either allocate a new transaction or hop into the existing one
52  */
53 static noinline int join_transaction(struct btrfs_root *root)
54 {
55         struct btrfs_transaction *cur_trans;
56         cur_trans = root->fs_info->running_transaction;
57         if (!cur_trans) {
58                 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
59                                              GFP_NOFS);
60                 if (!cur_trans)
61                         return -ENOMEM;
62                 root->fs_info->generation++;
63                 cur_trans->num_writers = 1;
64                 cur_trans->num_joined = 0;
65                 cur_trans->transid = root->fs_info->generation;
66                 init_waitqueue_head(&cur_trans->writer_wait);
67                 init_waitqueue_head(&cur_trans->commit_wait);
68                 cur_trans->in_commit = 0;
69                 cur_trans->blocked = 0;
70                 cur_trans->use_count = 1;
71                 cur_trans->commit_done = 0;
72                 cur_trans->start_time = get_seconds();
73
74                 cur_trans->delayed_refs.root = RB_ROOT;
75                 cur_trans->delayed_refs.num_entries = 0;
76                 cur_trans->delayed_refs.num_heads_ready = 0;
77                 cur_trans->delayed_refs.num_heads = 0;
78                 cur_trans->delayed_refs.flushing = 0;
79                 cur_trans->delayed_refs.run_delayed_start = 0;
80                 spin_lock_init(&cur_trans->delayed_refs.lock);
81
82                 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
83                 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
84                 extent_io_tree_init(&cur_trans->dirty_pages,
85                                      root->fs_info->btree_inode->i_mapping,
86                                      GFP_NOFS);
87                 spin_lock(&root->fs_info->new_trans_lock);
88                 root->fs_info->running_transaction = cur_trans;
89                 spin_unlock(&root->fs_info->new_trans_lock);
90         } else {
91                 cur_trans->num_writers++;
92                 cur_trans->num_joined++;
93         }
94
95         return 0;
96 }
97
98 /*
99  * this does all the record keeping required to make sure that a reference
100  * counted root is properly recorded in a given transaction.  This is required
101  * to make sure the old root from before we joined the transaction is deleted
102  * when the transaction commits
103  */
104 static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
105                                          struct btrfs_root *root)
106 {
107         if (root->ref_cows && root->last_trans < trans->transid) {
108                 WARN_ON(root == root->fs_info->extent_root);
109                 WARN_ON(root->commit_root != root->node);
110
111                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
112                            (unsigned long)root->root_key.objectid,
113                            BTRFS_ROOT_TRANS_TAG);
114                 root->last_trans = trans->transid;
115                 btrfs_init_reloc_root(trans, root);
116         }
117         return 0;
118 }
119
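/*
 * same as record_root_in_trans, but takes trans_mutex and skips roots
 * that are not reference counted or are already part of this transaction.
 */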
120 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
121                                struct btrfs_root *root)
122 {
123         if (!root->ref_cows)
124                 return 0;
125
126         mutex_lock(&root->fs_info->trans_mutex);
127         if (root->last_trans == trans->transid) {
128                 mutex_unlock(&root->fs_info->trans_mutex);
129                 return 0;
130         }
131
132         record_root_in_trans(trans, root);
133         mutex_unlock(&root->fs_info->trans_mutex);
134         return 0;
135 }
136
137 /* wait for commit against the current transaction to become unblocked
138  * when this is done, it is safe to start a new transaction, but the current
139  * transaction might not be fully on disk.
140  */
141 static void wait_current_trans(struct btrfs_root *root)
142 {
143         struct btrfs_transaction *cur_trans;
144
145         cur_trans = root->fs_info->running_transaction;
146         if (cur_trans && cur_trans->blocked) {
147                 DEFINE_WAIT(wait);
148                 cur_trans->use_count++;
149                 while (1) {
150                         prepare_to_wait(&root->fs_info->transaction_wait, &wait,
151                                         TASK_UNINTERRUPTIBLE);
152                         if (!cur_trans->blocked)
153                                 break;
154                         mutex_unlock(&root->fs_info->trans_mutex);
155                         schedule();
156                         mutex_lock(&root->fs_info->trans_mutex);
157                 }
158                 finish_wait(&root->fs_info->transaction_wait, &wait);
159                 put_transaction(cur_trans);
160         }
161 }
162
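/* the different ways a transaction handle can be started or joined */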
163 enum btrfs_trans_type {
164         TRANS_START,
165         TRANS_JOIN,
166         TRANS_USERSPACE,
167         TRANS_JOIN_NOLOCK,
168 };
169
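/*
 * decide if a new transaction start should wait for the running commit
 * to unblock first.  Joins and log replay never wait.
 */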
170 static int may_wait_transaction(struct btrfs_root *root, int type)
171 {
172         if (!root->fs_info->log_root_recovering &&
173             ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
174              type == TRANS_USERSPACE))
175                 return 1;
176         return 0;
177 }
178
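/*
 * common helper to start or join a transaction.  num_items is the number
 * of tree items the caller expects to modify and is used to reserve
 * metadata space; if the current transaction is blocked or the
 * reservation fails with -EAGAIN, the transaction is committed and we
 * try again.
 */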
179 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
180                                                     u64 num_items, int type)
181 {
182         struct btrfs_trans_handle *h;
183         struct btrfs_transaction *cur_trans;
184         int ret;
185
186         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
187                 return ERR_PTR(-EROFS);
188 again:
189         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
190         if (!h)
191                 return ERR_PTR(-ENOMEM);
192
193         if (type != TRANS_JOIN_NOLOCK)
194                 mutex_lock(&root->fs_info->trans_mutex);
195         if (may_wait_transaction(root, type))
196                 wait_current_trans(root);
197
198         ret = join_transaction(root);
199         if (ret < 0) {
200                 if (type != TRANS_JOIN_NOLOCK)
201                         mutex_unlock(&root->fs_info->trans_mutex);
202                 return ERR_PTR(ret);
203         }
204
205         cur_trans = root->fs_info->running_transaction;
206         cur_trans->use_count++;
207         if (type != TRANS_JOIN_NOLOCK)
208                 mutex_unlock(&root->fs_info->trans_mutex);
209
210         h->transid = cur_trans->transid;
211         h->transaction = cur_trans;
212         h->blocks_used = 0;
213         h->block_group = 0;
214         h->bytes_reserved = 0;
215         h->delayed_ref_updates = 0;
216         h->block_rsv = NULL;
217
218         smp_mb();
219         if (cur_trans->blocked && may_wait_transaction(root, type)) {
220                 btrfs_commit_transaction(h, root);
221                 goto again;
222         }
223
224         if (num_items > 0) {
225                 ret = btrfs_trans_reserve_metadata(h, root, num_items);
226                 if (ret == -EAGAIN) {
227                         btrfs_commit_transaction(h, root);
228                         goto again;
229                 }
230                 if (ret < 0) {
231                         btrfs_end_transaction(h, root);
232                         return ERR_PTR(ret);
233                 }
234         }
235
236         if (type != TRANS_JOIN_NOLOCK)
237                 mutex_lock(&root->fs_info->trans_mutex);
238         record_root_in_trans(h, root);
239         if (type != TRANS_JOIN_NOLOCK)
240                 mutex_unlock(&root->fs_info->trans_mutex);
241
242         if (!current->journal_info && type != TRANS_USERSPACE)
243                 current->journal_info = h;
244         return h;
245 }
246
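/*
 * wrappers around start_transaction for the common cases.  A typical
 * caller looks something like:
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...modify the trees...
 *	btrfs_end_transaction(trans, root);
 */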
247 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
248                                                    int num_items)
249 {
250         return start_transaction(root, num_items, TRANS_START);
251 }
252 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
253                                                    int num_blocks)
254 {
255         return start_transaction(root, 0, TRANS_JOIN);
256 }
257
258 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
259                                                           int num_blocks)
260 {
261         return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
262 }
263
264 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
265                                                          int num_blocks)
266 {
267         return start_transaction(r, 0, TRANS_USERSPACE);
268 }
269
270 /* wait for a transaction commit to be fully complete */
271 static noinline int wait_for_commit(struct btrfs_root *root,
272                                     struct btrfs_transaction *commit)
273 {
274         DEFINE_WAIT(wait);
275         mutex_lock(&root->fs_info->trans_mutex);
276         while (!commit->commit_done) {
277                 prepare_to_wait(&commit->commit_wait, &wait,
278                                 TASK_UNINTERRUPTIBLE);
279                 if (commit->commit_done)
280                         break;
281                 mutex_unlock(&root->fs_info->trans_mutex);
282                 schedule();
283                 mutex_lock(&root->fs_info->trans_mutex);
284         }
285         mutex_unlock(&root->fs_info->trans_mutex);
286         finish_wait(&commit->commit_wait, &wait);
287         return 0;
288 }
289
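/*
 * wait for the commit of a given transid to finish.  A transid of zero
 * means wait on the newest transaction that is currently committing, if
 * any.  Returns -EINVAL if a non-zero transid cannot be found.
 */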
290 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
291 {
292         struct btrfs_transaction *cur_trans = NULL, *t;
293         int ret;
294
295         mutex_lock(&root->fs_info->trans_mutex);
296
297         ret = 0;
298         if (transid) {
299                 if (transid <= root->fs_info->last_trans_committed)
300                         goto out_unlock;
301
302                 /* find specified transaction */
303                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
304                         if (t->transid == transid) {
305                                 cur_trans = t;
306                                 break;
307                         }
308                         if (t->transid > transid)
309                                 break;
310                 }
311                 ret = -EINVAL;
312                 if (!cur_trans)
313                         goto out_unlock;  /* bad transid */
314         } else {
315                 /* find newest transaction that is committing | committed */
316                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
317                                             list) {
318                         if (t->in_commit) {
319                                 if (t->commit_done)
320                                         goto out_unlock;
321                                 cur_trans = t;
322                                 break;
323                         }
324                 }
325                 if (!cur_trans)
326                         goto out_unlock;  /* nothing committing|committed */
327         }
328
329         cur_trans->use_count++;
330         mutex_unlock(&root->fs_info->trans_mutex);
331
332         wait_for_commit(root, cur_trans);
333
334         mutex_lock(&root->fs_info->trans_mutex);
335         put_transaction(cur_trans);
336         ret = 0;
337 out_unlock:
338         mutex_unlock(&root->fs_info->trans_mutex);
339         return ret;
340 }
341
342 #if 0
343 /*
344  * rate limit against the drop_snapshot code.  This helps to slow down new
345  * operations if the drop_snapshot code isn't able to keep up.
346  */
347 static void throttle_on_drops(struct btrfs_root *root)
348 {
349         struct btrfs_fs_info *info = root->fs_info;
350         int harder_count = 0;
351
352 harder:
353         if (atomic_read(&info->throttles)) {
354                 DEFINE_WAIT(wait);
355                 int thr;
356                 thr = atomic_read(&info->throttle_gen);
357
358                 do {
359                         prepare_to_wait(&info->transaction_throttle,
360                                         &wait, TASK_UNINTERRUPTIBLE);
361                         if (!atomic_read(&info->throttles)) {
362                                 finish_wait(&info->transaction_throttle, &wait);
363                                 break;
364                         }
365                         schedule();
366                         finish_wait(&info->transaction_throttle, &wait);
367                 } while (thr == atomic_read(&info->throttle_gen));
368                 harder_count++;
369
370                 if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
371                     harder_count < 2)
372                         goto harder;
373
374                 if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
375                     harder_count < 10)
376                         goto harder;
377
378                 if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
379                     harder_count < 20)
380                         goto harder;
381         }
382 }
383 #endif
384
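/*
 * slow new callers down by waiting for the running transaction to
 * unblock, unless a userland ioctl is holding a transaction open.
 */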
385 void btrfs_throttle(struct btrfs_root *root)
386 {
387         mutex_lock(&root->fs_info->trans_mutex);
388         if (!root->fs_info->open_ioctl_trans)
389                 wait_current_trans(root);
390         mutex_unlock(&root->fs_info->trans_mutex);
391 }
392
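/*
 * ask the block reserve code whether the global reserve is running low,
 * in which case this transaction should be wrapped up soon.
 */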
393 static int should_end_transaction(struct btrfs_trans_handle *trans,
394                                   struct btrfs_root *root)
395 {
396         int ret;
397         ret = btrfs_block_rsv_check(trans, root,
398                                     &root->fs_info->global_block_rsv, 0, 5);
399         return ret ? 1 : 0;
400 }
401
402 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
403                                  struct btrfs_root *root)
404 {
405         struct btrfs_transaction *cur_trans = trans->transaction;
406         int updates;
407
408         if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
409                 return 1;
410
411         updates = trans->delayed_ref_updates;
412         trans->delayed_ref_updates = 0;
413         if (updates)
414                 btrfs_run_delayed_refs(trans, root, updates);
415
416         return should_end_transaction(trans, root);
417 }
418
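/*
 * common end-of-transaction work: run a batch of delayed refs, release
 * reserved metadata, drop our writer count and free the handle.  With
 * throttle set this may turn into a full commit.
 */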
419 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
420                           struct btrfs_root *root, int throttle, int lock)
421 {
422         struct btrfs_transaction *cur_trans = trans->transaction;
423         struct btrfs_fs_info *info = root->fs_info;
424         int count = 0;
425
426         while (count < 4) {
427                 unsigned long cur = trans->delayed_ref_updates;
428                 trans->delayed_ref_updates = 0;
429                 if (cur &&
430                     trans->transaction->delayed_refs.num_heads_ready > 64) {
431                         trans->delayed_ref_updates = 0;
432
433                         /*
434                          * do a full flush if the transaction is trying
435                          * to close
436                          */
437                         if (trans->transaction->delayed_refs.flushing)
438                                 cur = 0;
439                         btrfs_run_delayed_refs(trans, root, cur);
440                 } else {
441                         break;
442                 }
443                 count++;
444         }
445
446         btrfs_trans_release_metadata(trans, root);
447
448         if (lock && !root->fs_info->open_ioctl_trans &&
449             should_end_transaction(trans, root))
450                 trans->transaction->blocked = 1;
451
452         if (lock && cur_trans->blocked && !cur_trans->in_commit) {
453                 if (throttle)
454                         return btrfs_commit_transaction(trans, root);
455                 else
456                         wake_up_process(info->transaction_kthread);
457         }
458
459         if (lock)
460                 mutex_lock(&info->trans_mutex);
461         WARN_ON(cur_trans != info->running_transaction);
462         WARN_ON(cur_trans->num_writers < 1);
463         cur_trans->num_writers--;
464
465         smp_mb();
466         if (waitqueue_active(&cur_trans->writer_wait))
467                 wake_up(&cur_trans->writer_wait);
468         put_transaction(cur_trans);
469         if (lock)
470                 mutex_unlock(&info->trans_mutex);
471
472         if (current->journal_info == trans)
473                 current->journal_info = NULL;
474         memset(trans, 0, sizeof(*trans));
475         kmem_cache_free(btrfs_trans_handle_cachep, trans);
476
477         if (throttle)
478                 btrfs_run_delayed_iputs(root);
479
480         return 0;
481 }
482
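/*
 * the flavours below only differ in whether they throttle (and possibly
 * commit) and whether they take trans_mutex.
 */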
483 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
484                           struct btrfs_root *root)
485 {
486         return __btrfs_end_transaction(trans, root, 0, 1);
487 }
488
489 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
490                                    struct btrfs_root *root)
491 {
492         return __btrfs_end_transaction(trans, root, 1, 1);
493 }
494
495 int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
496                                  struct btrfs_root *root)
497 {
498         return __btrfs_end_transaction(trans, root, 0, 0);
499 }
500
501 /*
502  * when btree blocks are allocated, they have some corresponding bits set for
503  * them in one of two extent_io trees.  This is used to make sure all of
504  * those extents are sent to disk but does not wait on them
505  */
506 int btrfs_write_marked_extents(struct btrfs_root *root,
507                                struct extent_io_tree *dirty_pages, int mark)
508 {
509         int ret;
510         int err = 0;
511         int werr = 0;
512         struct page *page;
513         struct inode *btree_inode = root->fs_info->btree_inode;
514         u64 start = 0;
515         u64 end;
516         unsigned long index;
517
518         while (1) {
519                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
520                                             mark);
521                 if (ret)
522                         break;
523                 while (start <= end) {
524                         cond_resched();
525
526                         index = start >> PAGE_CACHE_SHIFT;
527                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
528                         page = find_get_page(btree_inode->i_mapping, index);
529                         if (!page)
530                                 continue;
531
532                         btree_lock_page_hook(page);
533                         if (!page->mapping) {
534                                 unlock_page(page);
535                                 page_cache_release(page);
536                                 continue;
537                         }
538
539                         if (PageWriteback(page)) {
540                                 if (PageDirty(page))
541                                         wait_on_page_writeback(page);
542                                 else {
543                                         unlock_page(page);
544                                         page_cache_release(page);
545                                         continue;
546                                 }
547                         }
548                         err = write_one_page(page, 0);
549                         if (err)
550                                 werr = err;
551                         page_cache_release(page);
552                 }
553         }
554         if (err)
555                 werr = err;
556         return werr;
557 }
558
559 /*
560  * when btree blocks are allocated, they have some corresponding bits set for
561  * them in one of two extent_io trees.  This is used to make sure all of
562  * those extents are on disk for transaction or log commit.  We wait
563  * on all the pages and clear them from the dirty pages state tree
564  */
565 int btrfs_wait_marked_extents(struct btrfs_root *root,
566                               struct extent_io_tree *dirty_pages, int mark)
567 {
568         int ret;
569         int err = 0;
570         int werr = 0;
571         struct page *page;
572         struct inode *btree_inode = root->fs_info->btree_inode;
573         u64 start = 0;
574         u64 end;
575         unsigned long index;
576
577         while (1) {
578                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
579                                             mark);
580                 if (ret)
581                         break;
582
583                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
584                 while (start <= end) {
585                         index = start >> PAGE_CACHE_SHIFT;
586                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
587                         page = find_get_page(btree_inode->i_mapping, index);
588                         if (!page)
589                                 continue;
590                         if (PageDirty(page)) {
591                                 btree_lock_page_hook(page);
592                                 wait_on_page_writeback(page);
593                                 err = write_one_page(page, 0);
594                                 if (err)
595                                         werr = err;
596                         }
597                         wait_on_page_writeback(page);
598                         page_cache_release(page);
599                         cond_resched();
600                 }
601         }
602         if (err)
603                 werr = err;
604         return werr;
605 }
606
607 /*
608  * when btree blocks are allocated, they have some corresponding bits set for
609  * them in one of two extent_io trees.  This is used to make sure all of
610  * those extents are on disk for transaction or log commit
611  */
612 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
613                                 struct extent_io_tree *dirty_pages, int mark)
614 {
615         int ret;
616         int ret2;
617
618         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
619         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
620         return ret || ret2;
621 }
622
623 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
624                                      struct btrfs_root *root)
625 {
626         if (!trans || !trans->transaction) {
627                 struct inode *btree_inode;
628                 btree_inode = root->fs_info->btree_inode;
629                 return filemap_write_and_wait(btree_inode->i_mapping);
630         }
631         return btrfs_write_and_wait_marked_extents(root,
632                                            &trans->transaction->dirty_pages,
633                                            EXTENT_DIRTY);
634 }
635
636 /*
637  * this is used to update the root pointer in the tree of tree roots.
638  *
639  * But, in the case of the extent allocation tree, updating the root
640  * pointer may allocate blocks which may change the root of the extent
641  * allocation tree.
642  *
643  * So, this loops and repeats and makes sure the cowonly root didn't
644  * change while the root pointer was being updated in the metadata.
645  */
646 static int update_cowonly_root(struct btrfs_trans_handle *trans,
647                                struct btrfs_root *root)
648 {
649         int ret;
650         u64 old_root_bytenr;
651         u64 old_root_used;
652         struct btrfs_root *tree_root = root->fs_info->tree_root;
653
654         old_root_used = btrfs_root_used(&root->root_item);
655         btrfs_write_dirty_block_groups(trans, root);
656
657         while (1) {
658                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
659                 if (old_root_bytenr == root->node->start &&
660                     old_root_used == btrfs_root_used(&root->root_item))
661                         break;
662
663                 btrfs_set_root_node(&root->root_item, root->node);
664                 ret = btrfs_update_root(trans, tree_root,
665                                         &root->root_key,
666                                         &root->root_item);
667                 BUG_ON(ret);
668
669                 old_root_used = btrfs_root_used(&root->root_item);
670                 ret = btrfs_write_dirty_block_groups(trans, root);
671                 BUG_ON(ret);
672         }
673
674         if (root != root->fs_info->extent_root)
675                 switch_commit_root(root);
676
677         return 0;
678 }
679
680 /*
681  * update all the cowonly tree roots on disk
682  */
683 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
684                                          struct btrfs_root *root)
685 {
686         struct btrfs_fs_info *fs_info = root->fs_info;
687         struct list_head *next;
688         struct extent_buffer *eb;
689         int ret;
690
691         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
692         BUG_ON(ret);
693
694         eb = btrfs_lock_root_node(fs_info->tree_root);
695         btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
696         btrfs_tree_unlock(eb);
697         free_extent_buffer(eb);
698
699         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
700         BUG_ON(ret);
701
702         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
703                 next = fs_info->dirty_cowonly_roots.next;
704                 list_del_init(next);
705                 root = list_entry(next, struct btrfs_root, dirty_list);
706
707                 update_cowonly_root(trans, root);
708         }
709
710         down_write(&fs_info->extent_commit_sem);
711         switch_commit_root(fs_info->extent_root);
712         up_write(&fs_info->extent_commit_sem);
713
714         return 0;
715 }
716
717 /*
718  * dead roots are old snapshots that need to be deleted.  This adds the
719  * given root to the list of dead roots so it can be dropped later by
720  * btrfs_clean_old_snapshots()
721  */
722 int btrfs_add_dead_root(struct btrfs_root *root)
723 {
724         mutex_lock(&root->fs_info->trans_mutex);
725         list_add(&root->root_list, &root->fs_info->dead_roots);
726         mutex_unlock(&root->fs_info->trans_mutex);
727         return 0;
728 }
729
730 /*
731  * update all the fs-tree (subvolume) roots on disk
732  */
733 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
734                                     struct btrfs_root *root)
735 {
736         struct btrfs_root *gang[8];
737         struct btrfs_fs_info *fs_info = root->fs_info;
738         int i;
739         int ret;
740         int err = 0;
741
742         while (1) {
743                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
744                                                  (void **)gang, 0,
745                                                  ARRAY_SIZE(gang),
746                                                  BTRFS_ROOT_TRANS_TAG);
747                 if (ret == 0)
748                         break;
749                 for (i = 0; i < ret; i++) {
750                         root = gang[i];
751                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
752                                         (unsigned long)root->root_key.objectid,
753                                         BTRFS_ROOT_TRANS_TAG);
754
755                         btrfs_free_log(trans, root);
756                         btrfs_update_reloc_root(trans, root);
757                         btrfs_orphan_commit_root(trans, root);
758
759                         if (root->commit_root != root->node) {
760                                 switch_commit_root(root);
761                                 btrfs_set_root_node(&root->root_item,
762                                                     root->node);
763                         }
764
765                         err = btrfs_update_root(trans, fs_info->tree_root,
766                                                 &root->root_key,
767                                                 &root->root_item);
768                         if (err)
769                                 break;
770                 }
771         }
772         return err;
773 }
774
775 /*
776  * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
777  * otherwise every leaf in the btree is read and defragged.
778  */
779 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
780 {
781         struct btrfs_fs_info *info = root->fs_info;
782         struct btrfs_trans_handle *trans;
783         int ret;
784         unsigned long nr;
785
786         if (xchg(&root->defrag_running, 1))
787                 return 0;
788
789         while (1) {
790                 trans = btrfs_start_transaction(root, 0);
791                 if (IS_ERR(trans))
792                         return PTR_ERR(trans);
793
794                 ret = btrfs_defrag_leaves(trans, root, cacheonly);
795
796                 nr = trans->blocks_used;
797                 btrfs_end_transaction(trans, root);
798                 btrfs_btree_balance_dirty(info->tree_root, nr);
799                 cond_resched();
800
801                 if (root->fs_info->closing || ret != -EAGAIN)
802                         break;
803         }
804         root->defrag_running = 0;
805         return ret;
806 }
807
808 #if 0
809 /*
810  * when dropping snapshots, we generate a ton of delayed refs, and it makes
811  * sense not to join the transaction while it is trying to flush the current
812  * queue of delayed refs out.
813  *
814  * This is used by the drop snapshot code only
815  */
816 static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
817 {
818         DEFINE_WAIT(wait);
819
820         mutex_lock(&info->trans_mutex);
821         while (info->running_transaction &&
822                info->running_transaction->delayed_refs.flushing) {
823                 prepare_to_wait(&info->transaction_wait, &wait,
824                                 TASK_UNINTERRUPTIBLE);
825                 mutex_unlock(&info->trans_mutex);
826
827                 schedule();
828
829                 mutex_lock(&info->trans_mutex);
830                 finish_wait(&info->transaction_wait, &wait);
831         }
832         mutex_unlock(&info->trans_mutex);
833         return 0;
834 }
835
836 /*
837  * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
838  * all of them
839  */
840 int btrfs_drop_dead_root(struct btrfs_root *root)
841 {
842         struct btrfs_trans_handle *trans;
843         struct btrfs_root *tree_root = root->fs_info->tree_root;
844         unsigned long nr;
845         int ret;
846
847         while (1) {
848                 /*
849                  * we don't want to jump in and create a bunch of
850                  * delayed refs if the transaction is starting to close
851                  */
852                 wait_transaction_pre_flush(tree_root->fs_info);
853                 trans = btrfs_start_transaction(tree_root, 1);
854
855                 /*
856                  * we've joined a transaction, make sure it isn't
857                  * closing right now
858                  */
859                 if (trans->transaction->delayed_refs.flushing) {
860                         btrfs_end_transaction(trans, tree_root);
861                         continue;
862                 }
863
864                 ret = btrfs_drop_snapshot(trans, root);
865                 if (ret != -EAGAIN)
866                         break;
867
868                 ret = btrfs_update_root(trans, tree_root,
869                                         &root->root_key,
870                                         &root->root_item);
871                 if (ret)
872                         break;
873
874                 nr = trans->blocks_used;
875                 ret = btrfs_end_transaction(trans, tree_root);
876                 BUG_ON(ret);
877
878                 btrfs_btree_balance_dirty(tree_root, nr);
879                 cond_resched();
880         }
881         BUG_ON(ret);
882
883         ret = btrfs_del_root(trans, tree_root, &root->root_key);
884         BUG_ON(ret);
885
886         nr = trans->blocks_used;
887         ret = btrfs_end_transaction(trans, tree_root);
888         BUG_ON(ret);
889
890         free_extent_buffer(root->node);
891         free_extent_buffer(root->commit_root);
892         kfree(root);
893
894         btrfs_btree_balance_dirty(tree_root, nr);
895         return ret;
896 }
897 #endif
898
899 /*
900  * new snapshots need to be created at a very specific time in the
901  * transaction commit.  This does the actual creation
902  */
903 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
904                                    struct btrfs_fs_info *fs_info,
905                                    struct btrfs_pending_snapshot *pending)
906 {
907         struct btrfs_key key;
908         struct btrfs_root_item *new_root_item;
909         struct btrfs_root *tree_root = fs_info->tree_root;
910         struct btrfs_root *root = pending->root;
911         struct btrfs_root *parent_root;
912         struct inode *parent_inode;
913         struct dentry *parent;
914         struct dentry *dentry;
915         struct extent_buffer *tmp;
916         struct extent_buffer *old;
917         int ret;
918         u64 to_reserve = 0;
919         u64 index = 0;
920         u64 objectid;
921         u64 root_flags;
922
923         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
924         if (!new_root_item) {
925                 pending->error = -ENOMEM;
926                 goto fail;
927         }
928
929         ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
930         if (ret) {
931                 pending->error = ret;
932                 goto fail;
933         }
934
935         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
936         btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);
937
938         if (to_reserve > 0) {
939                 ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
940                                           to_reserve);
941                 if (ret) {
942                         pending->error = ret;
943                         goto fail;
944                 }
945         }
946
947         key.objectid = objectid;
948         key.offset = (u64)-1;
949         key.type = BTRFS_ROOT_ITEM_KEY;
950
951         trans->block_rsv = &pending->block_rsv;
952
953         dentry = pending->dentry;
954         parent = dget_parent(dentry);
955         parent_inode = parent->d_inode;
956         parent_root = BTRFS_I(parent_inode)->root;
957         record_root_in_trans(trans, parent_root);
958
959         /*
960          * insert the directory item
961          */
962         ret = btrfs_set_inode_index(parent_inode, &index);
963         BUG_ON(ret);
964         ret = btrfs_insert_dir_item(trans, parent_root,
965                                 dentry->d_name.name, dentry->d_name.len,
966                                 parent_inode->i_ino, &key,
967                                 BTRFS_FT_DIR, index);
968         BUG_ON(ret);
969
970         btrfs_i_size_write(parent_inode, parent_inode->i_size +
971                                          dentry->d_name.len * 2);
972         ret = btrfs_update_inode(trans, parent_root, parent_inode);
973         BUG_ON(ret);
974
975         record_root_in_trans(trans, root);
976         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
977         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
978
979         root_flags = btrfs_root_flags(new_root_item);
980         if (pending->readonly)
981                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
982         else
983                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
984         btrfs_set_root_flags(new_root_item, root_flags);
985
986         old = btrfs_lock_root_node(root);
987         btrfs_cow_block(trans, root, old, NULL, 0, &old);
988         btrfs_set_lock_blocking(old);
989
990         btrfs_copy_root(trans, root, old, &tmp, objectid);
991         btrfs_tree_unlock(old);
992         free_extent_buffer(old);
993
994         btrfs_set_root_node(new_root_item, tmp);
995         /* record when the snapshot was created in key.offset */
996         key.offset = trans->transid;
997         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
998         btrfs_tree_unlock(tmp);
999         free_extent_buffer(tmp);
1000         BUG_ON(ret);
1001
1002         /*
1003          * insert root back/forward references
1004          */
1005         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1006                                  parent_root->root_key.objectid,
1007                                  parent_inode->i_ino, index,
1008                                  dentry->d_name.name, dentry->d_name.len);
1009         BUG_ON(ret);
1010         dput(parent);
1011
1012         key.offset = (u64)-1;
1013         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1014         BUG_ON(IS_ERR(pending->snap));
1015
1016         btrfs_reloc_post_snapshot(trans, pending);
1017         btrfs_orphan_post_snapshot(trans, pending);
1018 fail:
1019         kfree(new_root_item);
1020         btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
1021         return 0;
1022 }
1023
1024 /*
1025  * create all the snapshots we've scheduled for creation
1026  */
1027 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1028                                              struct btrfs_fs_info *fs_info)
1029 {
1030         struct btrfs_pending_snapshot *pending;
1031         struct list_head *head = &trans->transaction->pending_snapshots;
1032         int ret;
1033
1034         list_for_each_entry(pending, head, list) {
1035                 ret = create_pending_snapshot(trans, fs_info, pending);
1036                 BUG_ON(ret);
1037         }
1038         return 0;
1039 }
1040
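/*
 * copy the root pointers for the chunk tree and the tree of tree roots
 * into the in-memory copy of the super block used for this commit.
 */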
1041 static void update_super_roots(struct btrfs_root *root)
1042 {
1043         struct btrfs_root_item *root_item;
1044         struct btrfs_super_block *super;
1045
1046         super = &root->fs_info->super_copy;
1047
1048         root_item = &root->fs_info->chunk_root->root_item;
1049         super->chunk_root = root_item->bytenr;
1050         super->chunk_root_generation = root_item->generation;
1051         super->chunk_root_level = root_item->level;
1052
1053         root_item = &root->fs_info->tree_root->root_item;
1054         super->root = root_item->bytenr;
1055         super->generation = root_item->generation;
1056         super->root_level = root_item->level;
1057         if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
1058                 super->cache_generation = root_item->generation;
1059 }
1060
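/*
 * the two helpers below report, under new_trans_lock, whether the
 * running transaction is in commit or blocked.
 */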
1061 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1062 {
1063         int ret = 0;
1064         spin_lock(&info->new_trans_lock);
1065         if (info->running_transaction)
1066                 ret = info->running_transaction->in_commit;
1067         spin_unlock(&info->new_trans_lock);
1068         return ret;
1069 }
1070
1071 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1072 {
1073         int ret = 0;
1074         spin_lock(&info->new_trans_lock);
1075         if (info->running_transaction)
1076                 ret = info->running_transaction->blocked;
1077         spin_unlock(&info->new_trans_lock);
1078         return ret;
1079 }
1080
1081 /*
1082  * wait for the current transaction commit to start and block subsequent
1083  * transaction joins
1084  */
1085 static void wait_current_trans_commit_start(struct btrfs_root *root,
1086                                             struct btrfs_transaction *trans)
1087 {
1088         DEFINE_WAIT(wait);
1089
1090         if (trans->in_commit)
1091                 return;
1092
1093         while (1) {
1094                 prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
1095                                 TASK_UNINTERRUPTIBLE);
1096                 if (trans->in_commit) {
1097                         finish_wait(&root->fs_info->transaction_blocked_wait,
1098                                     &wait);
1099                         break;
1100                 }
1101                 mutex_unlock(&root->fs_info->trans_mutex);
1102                 schedule();
1103                 mutex_lock(&root->fs_info->trans_mutex);
1104                 finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
1105         }
1106 }
1107
1108 /*
1109  * wait for the current transaction to start and then become unblocked.
1110  * caller holds ref.
1111  */
1112 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1113                                          struct btrfs_transaction *trans)
1114 {
1115         DEFINE_WAIT(wait);
1116
1117         if (trans->commit_done || (trans->in_commit && !trans->blocked))
1118                 return;
1119
1120         while (1) {
1121                 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
1122                                 TASK_UNINTERRUPTIBLE);
1123                 if (trans->commit_done ||
1124                     (trans->in_commit && !trans->blocked)) {
1125                         finish_wait(&root->fs_info->transaction_wait,
1126                                     &wait);
1127                         break;
1128                 }
1129                 mutex_unlock(&root->fs_info->trans_mutex);
1130                 schedule();
1131                 mutex_lock(&root->fs_info->trans_mutex);
1132                 finish_wait(&root->fs_info->transaction_wait,
1133                             &wait);
1134         }
1135 }
1136
1137 /*
1138  * commit transactions asynchronously. once btrfs_commit_transaction_async
1139  * returns, any subsequent transaction will not be allowed to join.
1140  */
1141 struct btrfs_async_commit {
1142         struct btrfs_trans_handle *newtrans;
1143         struct btrfs_root *root;
1144         struct delayed_work work;
1145 };
1146
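/* worker that performs the commit scheduled by btrfs_commit_transaction_async */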
1147 static void do_async_commit(struct work_struct *work)
1148 {
1149         struct btrfs_async_commit *ac =
1150                 container_of(work, struct btrfs_async_commit, work.work);
1151
1152         btrfs_commit_transaction(ac->newtrans, ac->root);
1153         kfree(ac);
1154 }
1155
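/*
 * hand the commit off to a worker thread, then wait just long enough for
 * the commit to start blocking new joiners (or, with wait_for_unblock,
 * until the new transaction is unblocked again).
 */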
1156 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1157                                    struct btrfs_root *root,
1158                                    int wait_for_unblock)
1159 {
1160         struct btrfs_async_commit *ac;
1161         struct btrfs_transaction *cur_trans;
1162
1163         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1164         if (!ac)
1165                 return -ENOMEM;
1166
1167         INIT_DELAYED_WORK(&ac->work, do_async_commit);
1168         ac->root = root;
1169         ac->newtrans = btrfs_join_transaction(root, 0);
1170         if (IS_ERR(ac->newtrans)) {
1171                 int err = PTR_ERR(ac->newtrans);
1172                 kfree(ac);
1173                 return err;
1174         }
1175
1176         /* take transaction reference */
1177         mutex_lock(&root->fs_info->trans_mutex);
1178         cur_trans = trans->transaction;
1179         cur_trans->use_count++;
1180         mutex_unlock(&root->fs_info->trans_mutex);
1181
1182         btrfs_end_transaction(trans, root);
1183         schedule_delayed_work(&ac->work, 0);
1184
1185         /* wait for transaction to start and unblock */
1186         mutex_lock(&root->fs_info->trans_mutex);
1187         if (wait_for_unblock)
1188                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1189         else
1190                 wait_current_trans_commit_start(root, cur_trans);
1191         put_transaction(cur_trans);
1192         mutex_unlock(&root->fs_info->trans_mutex);
1193
1194         return 0;
1195 }
1196
1197 /*
1198  * btrfs_transaction state sequence:
1199  *    in_commit = 0, blocked = 0  (initial)
1200  *    in_commit = 1, blocked = 1
1201  *    blocked = 0
1202  *    commit_done = 1
1203  */
1204 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1205                              struct btrfs_root *root)
1206 {
1207         unsigned long joined = 0;
1208         struct btrfs_transaction *cur_trans;
1209         struct btrfs_transaction *prev_trans = NULL;
1210         DEFINE_WAIT(wait);
1211         int ret;
1212         int should_grow = 0;
1213         unsigned long now = get_seconds();
1214         int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
1215
1216         btrfs_run_ordered_operations(root, 0);
1217
1218         /* make a pass through all the delayed refs we have so far
1219          * any running procs may add more while we are here
1220          */
1221         ret = btrfs_run_delayed_refs(trans, root, 0);
1222         BUG_ON(ret);
1223
1224         btrfs_trans_release_metadata(trans, root);
1225
1226         cur_trans = trans->transaction;
1227         /*
1228          * set the flushing flag so procs in this transaction have to
1229          * start sending their work down.
1230          */
1231         cur_trans->delayed_refs.flushing = 1;
1232
1233         ret = btrfs_run_delayed_refs(trans, root, 0);
1234         BUG_ON(ret);
1235
1236         mutex_lock(&root->fs_info->trans_mutex);
1237         if (cur_trans->in_commit) {
1238                 cur_trans->use_count++;
1239                 mutex_unlock(&root->fs_info->trans_mutex);
1240                 btrfs_end_transaction(trans, root);
1241
1242                 ret = wait_for_commit(root, cur_trans);
1243                 BUG_ON(ret);
1244
1245                 mutex_lock(&root->fs_info->trans_mutex);
1246                 put_transaction(cur_trans);
1247                 mutex_unlock(&root->fs_info->trans_mutex);
1248
1249                 return 0;
1250         }
1251
1252         trans->transaction->in_commit = 1;
1253         trans->transaction->blocked = 1;
1254         wake_up(&root->fs_info->transaction_blocked_wait);
1255
1256         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1257                 prev_trans = list_entry(cur_trans->list.prev,
1258                                         struct btrfs_transaction, list);
1259                 if (!prev_trans->commit_done) {
1260                         prev_trans->use_count++;
1261                         mutex_unlock(&root->fs_info->trans_mutex);
1262
1263                         wait_for_commit(root, prev_trans);
1264
1265                         mutex_lock(&root->fs_info->trans_mutex);
1266                         put_transaction(prev_trans);
1267                 }
1268         }
1269
1270         if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
1271                 should_grow = 1;
1272
1273         do {
1274                 int snap_pending = 0;
1275                 joined = cur_trans->num_joined;
1276                 if (!list_empty(&trans->transaction->pending_snapshots))
1277                         snap_pending = 1;
1278
1279                 WARN_ON(cur_trans != trans->transaction);
1280                 mutex_unlock(&root->fs_info->trans_mutex);
1281
1282                 if (flush_on_commit || snap_pending) {
1283                         btrfs_start_delalloc_inodes(root, 1);
1284                         ret = btrfs_wait_ordered_extents(root, 0, 1);
1285                         BUG_ON(ret);
1286                 }
1287
1288                 /*
1289                  * rename doesn't use btrfs_join_transaction, so once we
1290                  * set the transaction to blocked above, we aren't going
1291                  * to get any new ordered operations.  We can safely run
1292                  * it here and know for sure that nothing new will be added
1293                  * to the list
1294                  */
1295                 btrfs_run_ordered_operations(root, 1);
1296
1297                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1298                                 TASK_UNINTERRUPTIBLE);
1299
1300                 smp_mb();
1301                 if (cur_trans->num_writers > 1)
1302                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1303                 else if (should_grow)
1304                         schedule_timeout(1);
1305
1306                 mutex_lock(&root->fs_info->trans_mutex);
1307                 finish_wait(&cur_trans->writer_wait, &wait);
1308         } while (cur_trans->num_writers > 1 ||
1309                  (should_grow && cur_trans->num_joined != joined));
1310
1311         ret = create_pending_snapshots(trans, root->fs_info);
1312         BUG_ON(ret);
1313
1314         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1315         BUG_ON(ret);
1316
1317         WARN_ON(cur_trans != trans->transaction);
1318
1319         /* commit_fs_roots and commit_cowonly_roots below are responsible for
1320          * getting the various roots consistent with each other.  Every pointer
1321          * in the tree of tree roots has to point to the most up to date
1322          * root for every subvolume and other tree.  So, we have to keep
1323          * the tree logging code from jumping in and changing any
1324          * of the trees.
1325          *
1326          * At this point in the commit, there can't be any tree-log
1327          * writers, but a little lower down we drop the trans mutex
1328          * and let new people in.  By holding the tree_log_mutex
1329          * from now until after the super is written, we avoid races
1330          * with the tree-log code.
1331          */
1332         mutex_lock(&root->fs_info->tree_log_mutex);
1333
1334         ret = commit_fs_roots(trans, root);
1335         BUG_ON(ret);
1336
1337         /* commit_fs_roots gets rid of all the tree log roots, it is now
1338          * safe to free the root of tree log roots
1339          */
1340         btrfs_free_log_root_tree(trans, root->fs_info);
1341
1342         ret = commit_cowonly_roots(trans, root);
1343         BUG_ON(ret);
1344
1345         btrfs_prepare_extent_commit(trans, root);
1346
1347         cur_trans = root->fs_info->running_transaction;
1348         spin_lock(&root->fs_info->new_trans_lock);
1349         root->fs_info->running_transaction = NULL;
1350         spin_unlock(&root->fs_info->new_trans_lock);
1351
1352         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1353                             root->fs_info->tree_root->node);
1354         switch_commit_root(root->fs_info->tree_root);
1355
1356         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1357                             root->fs_info->chunk_root->node);
1358         switch_commit_root(root->fs_info->chunk_root);
1359
1360         update_super_roots(root);
1361
1362         if (!root->fs_info->log_root_recovering) {
1363                 btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
1364                 btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
1365         }
1366
1367         memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
1368                sizeof(root->fs_info->super_copy));
1369
1370         trans->transaction->blocked = 0;
1371
1372         wake_up(&root->fs_info->transaction_wait);
1373
1374         mutex_unlock(&root->fs_info->trans_mutex);
1375         ret = btrfs_write_and_wait_transaction(trans, root);
1376         BUG_ON(ret);
1377         write_ctree_super(trans, root, 0);
1378
1379         /*
1380          * the super is written, we can safely allow the tree-loggers
1381          * to go about their business
1382          */
1383         mutex_unlock(&root->fs_info->tree_log_mutex);
1384
1385         btrfs_finish_extent_commit(trans, root);
1386
1387         mutex_lock(&root->fs_info->trans_mutex);
1388
1389         cur_trans->commit_done = 1;
1390
1391         root->fs_info->last_trans_committed = cur_trans->transid;
1392
1393         wake_up(&cur_trans->commit_wait);
1394
1395         put_transaction(cur_trans);
1396         put_transaction(cur_trans);
1397
1398         trace_btrfs_transaction_commit(root);
1399
1400         mutex_unlock(&root->fs_info->trans_mutex);
1401
1402         if (current->journal_info == trans)
1403                 current->journal_info = NULL;
1404
1405         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1406
1407         if (current != root->fs_info->transaction_kthread)
1408                 btrfs_run_delayed_iputs(root);
1409
1410         return ret;
1411 }
1412
1413 /*
1414  * interface function to delete all the snapshots we have scheduled for deletion
1415  */
1416 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1417 {
1418         LIST_HEAD(list);
1419         struct btrfs_fs_info *fs_info = root->fs_info;
1420
1421         mutex_lock(&fs_info->trans_mutex);
1422         list_splice_init(&fs_info->dead_roots, &list);
1423         mutex_unlock(&fs_info->trans_mutex);
1424
1425         while (!list_empty(&list)) {
1426                 root = list_entry(list.next, struct btrfs_root, root_list);
1427                 list_del(&root->root_list);
1428
1429                 if (btrfs_header_backref_rev(root->node) <
1430                     BTRFS_MIXED_BACKREF_REV)
1431                         btrfs_drop_snapshot(root, NULL, 0);
1432                 else
1433                         btrfs_drop_snapshot(root, NULL, 1);
1434         }
1435         return 0;
1436 }