Btrfs: fix race in sync and freeze again
fs/btrfs/transaction.c (pandora-kernel.git)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33
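/*
 * radix tree tag used in fs_info->fs_roots_radix to mark fs roots that have
 * been recorded in the running transaction (see record_root_in_trans() and
 * commit_fs_roots() below).
 */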
34 #define BTRFS_ROOT_TRANS_TAG 0
35
36 void put_transaction(struct btrfs_transaction *transaction)
37 {
38         WARN_ON(atomic_read(&transaction->use_count) == 0);
39         if (atomic_dec_and_test(&transaction->use_count)) {
40                 BUG_ON(!list_empty(&transaction->list));
41                 WARN_ON(transaction->delayed_refs.root.rb_node);
42                 memset(transaction, 0, sizeof(*transaction));
43                 kmem_cache_free(btrfs_transaction_cachep, transaction);
44         }
45 }
46
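/*
 * drop our reference on the old commit root and make the current root node
 * the commit root for the next transaction.
 */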
47 static noinline void switch_commit_root(struct btrfs_root *root)
48 {
49         free_extent_buffer(root->commit_root);
50         root->commit_root = btrfs_root_node(root);
51 }
52
53 /*
54  * either allocate a new transaction or hop into the existing one
55  */
56 static noinline int join_transaction(struct btrfs_root *root, int nofail)
57 {
58         struct btrfs_transaction *cur_trans;
59         struct btrfs_fs_info *fs_info = root->fs_info;
60
61         spin_lock(&fs_info->trans_lock);
62 loop:
63         /* The file system has been taken offline. No new transactions. */
64         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
65                 spin_unlock(&fs_info->trans_lock);
66                 return -EROFS;
67         }
68
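        /*
         * trans_no_join is set while the running transaction is finishing its
         * commit; only nofail callers (TRANS_JOIN_NOLOCK) may still join,
         * everybody else gets -EBUSY and retries in start_transaction().
         */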
69         if (fs_info->trans_no_join) {
70                 if (!nofail) {
71                         spin_unlock(&fs_info->trans_lock);
72                         return -EBUSY;
73                 }
74         }
75
76         cur_trans = fs_info->running_transaction;
77         if (cur_trans) {
78                 if (cur_trans->aborted) {
79                         spin_unlock(&fs_info->trans_lock);
80                         return cur_trans->aborted;
81                 }
82                 atomic_inc(&cur_trans->use_count);
83                 atomic_inc(&cur_trans->num_writers);
84                 cur_trans->num_joined++;
85                 spin_unlock(&fs_info->trans_lock);
86                 return 0;
87         }
88         spin_unlock(&fs_info->trans_lock);
89
90         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
91         if (!cur_trans)
92                 return -ENOMEM;
93
94         spin_lock(&fs_info->trans_lock);
95         if (fs_info->running_transaction) {
96                 /*
97                  * someone started a transaction after we unlocked.  Make sure
98                  * to redo the trans_no_join checks above
99                  */
100                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
101                 cur_trans = fs_info->running_transaction;
102                 goto loop;
103         } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
104                 spin_unlock(&fs_info->trans_lock);
105                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
106                 return -EROFS;
107         }
108
109         atomic_set(&cur_trans->num_writers, 1);
110         cur_trans->num_joined = 0;
111         init_waitqueue_head(&cur_trans->writer_wait);
112         init_waitqueue_head(&cur_trans->commit_wait);
113         cur_trans->in_commit = 0;
114         cur_trans->blocked = 0;
115         /*
116          * One for this trans handle, one so it will live on until we
117          * commit the transaction.
118          */
119         atomic_set(&cur_trans->use_count, 2);
120         cur_trans->commit_done = 0;
121         cur_trans->start_time = get_seconds();
122
123         cur_trans->delayed_refs.root = RB_ROOT;
124         cur_trans->delayed_refs.num_entries = 0;
125         cur_trans->delayed_refs.num_heads_ready = 0;
126         cur_trans->delayed_refs.num_heads = 0;
127         cur_trans->delayed_refs.flushing = 0;
128         cur_trans->delayed_refs.run_delayed_start = 0;
129
130         /*
131          * although the tree mod log is per file system and not per transaction,
132          * the log must never go across transaction boundaries.
133          */
134         smp_mb();
135         if (!list_empty(&fs_info->tree_mod_seq_list)) {
136                 printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
137                         "creating a fresh transaction\n");
138                 WARN_ON(1);
139         }
140         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
141                 printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
142                         "creating a fresh transaction\n");
143                 WARN_ON(1);
144         }
145         atomic_set(&fs_info->tree_mod_seq, 0);
146
147         spin_lock_init(&cur_trans->commit_lock);
148         spin_lock_init(&cur_trans->delayed_refs.lock);
149
150         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
151         list_add_tail(&cur_trans->list, &fs_info->trans_list);
152         extent_io_tree_init(&cur_trans->dirty_pages,
153                              fs_info->btree_inode->i_mapping);
154         fs_info->generation++;
155         cur_trans->transid = fs_info->generation;
156         fs_info->running_transaction = cur_trans;
157         cur_trans->aborted = 0;
158         spin_unlock(&fs_info->trans_lock);
159
160         return 0;
161 }
162
163 /*
164  * this does all the record keeping required to make sure that a reference
165  * counted root is properly recorded in a given transaction.  This is required
166  * to make sure the old root from before we joined the transaction is deleted
167  * when the transaction commits
168  */
169 static int record_root_in_trans(struct btrfs_trans_handle *trans,
170                                struct btrfs_root *root)
171 {
172         if (root->ref_cows && root->last_trans < trans->transid) {
173                 WARN_ON(root == root->fs_info->extent_root);
174                 WARN_ON(root->commit_root != root->node);
175
176                 /*
177                  * see below for in_trans_setup usage rules
178                  * we have the reloc mutex held now, so there
179                  * is only one writer in this function
180                  */
181                 root->in_trans_setup = 1;
182
183                 /* make sure readers find in_trans_setup before
184                  * they find our root->last_trans update
185                  */
186                 smp_wmb();
187
188                 spin_lock(&root->fs_info->fs_roots_radix_lock);
189                 if (root->last_trans == trans->transid) {
190                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
191                         return 0;
192                 }
193                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
194                            (unsigned long)root->root_key.objectid,
195                            BTRFS_ROOT_TRANS_TAG);
196                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
197                 root->last_trans = trans->transid;
198
199                 /* this is pretty tricky.  We don't want to
200                  * take the relocation lock in btrfs_record_root_in_trans
201                  * unless we're really doing the first setup for this root in
202                  * this transaction.
203                  *
204                  * Normally we'd use root->last_trans as a flag to decide
205                  * if we want to take the expensive mutex.
206                  *
207                  * But, we have to set root->last_trans before we
208                  * init the relocation root, otherwise, we trip over warnings
209                  * in ctree.c.  The solution used here is to flag ourselves
210                  * with root->in_trans_setup.  When this is 1, we're still
211                  * fixing up the reloc trees and everyone must wait.
212                  *
213                  * When this is zero, they can trust root->last_trans and fly
214                  * through btrfs_record_root_in_trans without having to take the
215                  * lock.  smp_wmb() makes sure that all the writes above are
216                  * done before we pop in the zero below
217                  */
218                 btrfs_init_reloc_root(trans, root);
219                 smp_wmb();
220                 root->in_trans_setup = 0;
221         }
222         return 0;
223 }
224
225
226 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
227                                struct btrfs_root *root)
228 {
229         if (!root->ref_cows)
230                 return 0;
231
232         /*
233          * see record_root_in_trans for comments about in_trans_setup usage
234          * and barriers
235          */
236         smp_rmb();
237         if (root->last_trans == trans->transid &&
238             !root->in_trans_setup)
239                 return 0;
240
241         mutex_lock(&root->fs_info->reloc_mutex);
242         record_root_in_trans(trans, root);
243         mutex_unlock(&root->fs_info->reloc_mutex);
244
245         return 0;
246 }
247
248 /* wait for commit against the current transaction to become unblocked
249  * when this is done, it is safe to start a new transaction, but the current
250  * transaction might not be fully on disk.
251  */
252 static void wait_current_trans(struct btrfs_root *root)
253 {
254         struct btrfs_transaction *cur_trans;
255
256         spin_lock(&root->fs_info->trans_lock);
257         cur_trans = root->fs_info->running_transaction;
258         if (cur_trans && cur_trans->blocked) {
259                 atomic_inc(&cur_trans->use_count);
260                 spin_unlock(&root->fs_info->trans_lock);
261
262                 wait_event(root->fs_info->transaction_wait,
263                            !cur_trans->blocked);
264                 put_transaction(cur_trans);
265         } else {
266                 spin_unlock(&root->fs_info->trans_lock);
267         }
268 }
269
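/*
 * the different ways a transaction handle can be started.  The type decides
 * whether start_transaction() waits for a blocked commit (see
 * may_wait_transaction()), joins even while a commit is blocking new writers
 * (TRANS_JOIN_NOLOCK), or refuses to join while the filesystem is being
 * frozen (TRANS_JOIN_FREEZE).
 */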
270 enum btrfs_trans_type {
271         TRANS_START,
272         TRANS_JOIN,
273         TRANS_USERSPACE,
274         TRANS_JOIN_NOLOCK,
275         TRANS_JOIN_FREEZE,
276 };
277
278 static int may_wait_transaction(struct btrfs_root *root, int type)
279 {
280         if (root->fs_info->log_root_recovering)
281                 return 0;
282
283         if (type == TRANS_USERSPACE)
284                 return 1;
285
286         if (type == TRANS_START &&
287             !atomic_read(&root->fs_info->open_ioctl_trans))
288                 return 1;
289
290         return 0;
291 }
292
293 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
294                                                     u64 num_items, int type,
295                                                     int noflush)
296 {
297         struct btrfs_trans_handle *h;
298         struct btrfs_transaction *cur_trans;
299         u64 num_bytes = 0;
300         int ret;
301         u64 qgroup_reserved = 0;
302
303         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
304                 return ERR_PTR(-EROFS);
305
306         if (current->journal_info) {
307                 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
308                 h = current->journal_info;
309                 h->use_count++;
310                 h->orig_rsv = h->block_rsv;
311                 h->block_rsv = NULL;
312                 goto got_it;
313         }
314
315         /*
316          * Do the reservation before we join the transaction so we can do all
317          * the appropriate flushing if need be.
318          */
319         if (num_items > 0 && root != root->fs_info->chunk_root) {
320                 if (root->fs_info->quota_enabled &&
321                     is_fstree(root->root_key.objectid)) {
322                         qgroup_reserved = num_items * root->leafsize;
323                         ret = btrfs_qgroup_reserve(root, qgroup_reserved);
324                         if (ret)
325                                 return ERR_PTR(ret);
326                 }
327
328                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
329                 if (noflush)
330                         ret = btrfs_block_rsv_add_noflush(root,
331                                                 &root->fs_info->trans_block_rsv,
332                                                 num_bytes);
333                 else
334                         ret = btrfs_block_rsv_add(root,
335                                                 &root->fs_info->trans_block_rsv,
336                                                 num_bytes);
337                 if (ret)
338                         return ERR_PTR(ret);
339         }
340 again:
341         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
342         if (!h)
343                 return ERR_PTR(-ENOMEM);
344
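        /*
         * try to get freeze protection without blocking.  If the fs is in the
         * middle of being frozen, TRANS_JOIN_FREEZE callers back off with
         * -EPERM instead of deadlocking against the freezer; everyone else
         * waits in sb_start_intwrite() until writes are allowed again.
         */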
345         if (!__sb_start_write(root->fs_info->sb, SB_FREEZE_FS, false)) {
346                 if (type == TRANS_JOIN_FREEZE) {
                            /* don't leak the handle we just allocated */
                            kmem_cache_free(btrfs_trans_handle_cachep, h);
347                         return ERR_PTR(-EPERM);
                    }
348                 sb_start_intwrite(root->fs_info->sb);
349         }
350
351         if (may_wait_transaction(root, type))
352                 wait_current_trans(root);
353
354         do {
355                 ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
356                 if (ret == -EBUSY)
357                         wait_current_trans(root);
358         } while (ret == -EBUSY);
359
360         if (ret < 0) {
361                 sb_end_intwrite(root->fs_info->sb);
362                 kmem_cache_free(btrfs_trans_handle_cachep, h);
363                 return ERR_PTR(ret);
364         }
365
366         cur_trans = root->fs_info->running_transaction;
367
368         h->transid = cur_trans->transid;
369         h->transaction = cur_trans;
370         h->blocks_used = 0;
371         h->bytes_reserved = 0;
372         h->root = root;
373         h->delayed_ref_updates = 0;
374         h->use_count = 1;
375         h->adding_csums = 0;
376         h->block_rsv = NULL;
377         h->orig_rsv = NULL;
378         h->aborted = 0;
379         h->qgroup_reserved = qgroup_reserved;
380         h->delayed_ref_elem.seq = 0;
381         INIT_LIST_HEAD(&h->qgroup_ref_list);
382         INIT_LIST_HEAD(&h->new_bgs);
383
384         smp_mb();
385         if (cur_trans->blocked && may_wait_transaction(root, type)) {
386                 btrfs_commit_transaction(h, root);
387                 goto again;
388         }
389
390         if (num_bytes) {
391                 trace_btrfs_space_reservation(root->fs_info, "transaction",
392                                               h->transid, num_bytes, 1);
393                 h->block_rsv = &root->fs_info->trans_block_rsv;
394                 h->bytes_reserved = num_bytes;
395         }
396
397 got_it:
398         btrfs_record_root_in_trans(h, root);
399
400         if (!current->journal_info && type != TRANS_USERSPACE)
401                 current->journal_info = h;
402         return h;
403 }
404
405 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
406                                                    int num_items)
407 {
408         return start_transaction(root, num_items, TRANS_START, 0);
409 }
410
411 struct btrfs_trans_handle *btrfs_start_transaction_noflush(
412                                         struct btrfs_root *root, int num_items)
413 {
414         return start_transaction(root, num_items, TRANS_START, 1);
415 }
416
417 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
418 {
419         return start_transaction(root, 0, TRANS_JOIN, 0);
420 }
421
422 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
423 {
424         return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
425 }
426
427 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
428 {
429         return start_transaction(root, 0, TRANS_USERSPACE, 0);
430 }
431
432 struct btrfs_trans_handle *btrfs_join_transaction_freeze(struct btrfs_root *root)
433 {
434         return start_transaction(root, 0, TRANS_JOIN_FREEZE, 0);
435 }
436
437 /* wait for a transaction commit to be fully complete */
438 static noinline void wait_for_commit(struct btrfs_root *root,
439                                     struct btrfs_transaction *commit)
440 {
441         wait_event(commit->commit_wait, commit->commit_done);
442 }
443
444 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
445 {
446         struct btrfs_transaction *cur_trans = NULL, *t;
447         int ret;
448
449         ret = 0;
450         if (transid) {
451                 if (transid <= root->fs_info->last_trans_committed)
452                         goto out;
453
454                 /* find specified transaction */
455                 spin_lock(&root->fs_info->trans_lock);
456                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
457                         if (t->transid == transid) {
458                                 cur_trans = t;
459                                 atomic_inc(&cur_trans->use_count);
460                                 break;
461                         }
462                         if (t->transid > transid)
463                                 break;
464                 }
465                 spin_unlock(&root->fs_info->trans_lock);
466                 ret = -EINVAL;
467                 if (!cur_trans)
468                         goto out;  /* bad transid */
469         } else {
470                 /* find newest transaction that is committing | committed */
471                 spin_lock(&root->fs_info->trans_lock);
472                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
473                                             list) {
474                         if (t->in_commit) {
475                                 if (t->commit_done)
476                                         break;
477                                 cur_trans = t;
478                                 atomic_inc(&cur_trans->use_count);
479                                 break;
480                         }
481                 }
482                 spin_unlock(&root->fs_info->trans_lock);
483                 if (!cur_trans)
484                         goto out;  /* nothing committing|committed */
485         }
486
487         wait_for_commit(root, cur_trans);
488
489         put_transaction(cur_trans);
490         ret = 0;
491 out:
492         return ret;
493 }
494
495 void btrfs_throttle(struct btrfs_root *root)
496 {
497         if (!atomic_read(&root->fs_info->open_ioctl_trans))
498                 wait_current_trans(root);
499 }
500
501 static int should_end_transaction(struct btrfs_trans_handle *trans,
502                                   struct btrfs_root *root)
503 {
504         int ret;
505
506         ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
507         return ret ? 1 : 0;
508 }
509
510 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
511                                  struct btrfs_root *root)
512 {
513         struct btrfs_transaction *cur_trans = trans->transaction;
514         int updates;
515         int err;
516
517         smp_mb();
518         if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
519                 return 1;
520
521         updates = trans->delayed_ref_updates;
522         trans->delayed_ref_updates = 0;
523         if (updates) {
524                 err = btrfs_run_delayed_refs(trans, root, updates);
525                 if (err) /* an error code also evaluates as true */
526                         return err;
527         }
528
529         return should_end_transaction(trans, root);
530 }
531
532 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
533                           struct btrfs_root *root, int throttle, int lock)
534 {
535         struct btrfs_transaction *cur_trans = trans->transaction;
536         struct btrfs_fs_info *info = root->fs_info;
537         int count = 0;
538         int err = 0;
539
540         if (--trans->use_count) {
541                 trans->block_rsv = trans->orig_rsv;
542                 return 0;
543         }
544
545         /*
546          * do the qgroup accounting as early as possible
547          */
548         err = btrfs_delayed_refs_qgroup_accounting(trans, info);
549
550         btrfs_trans_release_metadata(trans, root);
551         trans->block_rsv = NULL;
552         /*
553          * the same root has to be passed to start_transaction and
554          * end_transaction. Subvolume quota depends on this.
555          */
556         WARN_ON(trans->root != root);
557
558         if (trans->qgroup_reserved) {
559                 btrfs_qgroup_free(root, trans->qgroup_reserved);
560                 trans->qgroup_reserved = 0;
561         }
562
563         if (!list_empty(&trans->new_bgs))
564                 btrfs_create_pending_block_groups(trans, root);
565
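        /*
         * push some of the delayed refs this handle generated down now (at
         * most two passes) if a lot of ref heads are ready, so the eventual
         * commit doesn't inherit the whole backlog.
         */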
566         while (count < 2) {
567                 unsigned long cur = trans->delayed_ref_updates;
568                 trans->delayed_ref_updates = 0;
569                 if (cur &&
570                     trans->transaction->delayed_refs.num_heads_ready > 64) {
571                         trans->delayed_ref_updates = 0;
572                         btrfs_run_delayed_refs(trans, root, cur);
573                 } else {
574                         break;
575                 }
576                 count++;
577         }
578         btrfs_trans_release_metadata(trans, root);
579         trans->block_rsv = NULL;
580
581         if (!list_empty(&trans->new_bgs))
582                 btrfs_create_pending_block_groups(trans, root);
583
584         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
585             should_end_transaction(trans, root)) {
586                 trans->transaction->blocked = 1;
587                 smp_wmb();
588         }
589
590         if (lock && cur_trans->blocked && !cur_trans->in_commit) {
591                 if (throttle) {
592                         /*
593                          * We may race with somebody else here and end up having
594                          * to call end_transaction on ourselves again, so inc
595                          * our use_count.
596                          */
597                         trans->use_count++;
598                         return btrfs_commit_transaction(trans, root);
599                 } else {
600                         wake_up_process(info->transaction_kthread);
601                 }
602         }
603
604         sb_end_intwrite(root->fs_info->sb);
605
606         WARN_ON(cur_trans != info->running_transaction);
607         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
608         atomic_dec(&cur_trans->num_writers);
609
610         smp_mb();
611         if (waitqueue_active(&cur_trans->writer_wait))
612                 wake_up(&cur_trans->writer_wait);
613         put_transaction(cur_trans);
614
615         if (current->journal_info == trans)
616                 current->journal_info = NULL;
617
618         if (throttle)
619                 btrfs_run_delayed_iputs(root);
620
621         if (trans->aborted ||
622             root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
623                 err = -EIO;
624         }
625         assert_qgroups_uptodate(trans);
626
627         memset(trans, 0, sizeof(*trans));
628         kmem_cache_free(btrfs_trans_handle_cachep, trans);
629         return err;
630 }
631
632 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
633                           struct btrfs_root *root)
634 {
635         int ret;
636
637         ret = __btrfs_end_transaction(trans, root, 0, 1);
638         if (ret)
639                 return ret;
640         return 0;
641 }
642
643 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
644                                    struct btrfs_root *root)
645 {
646         int ret;
647
648         ret = __btrfs_end_transaction(trans, root, 1, 1);
649         if (ret)
650                 return ret;
651         return 0;
652 }
653
654 int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
655                                  struct btrfs_root *root)
656 {
657         int ret;
658
659         ret = __btrfs_end_transaction(trans, root, 0, 0);
660         if (ret)
661                 return ret;
662         return 0;
663 }
664
665 int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
666                                 struct btrfs_root *root)
667 {
668         return __btrfs_end_transaction(trans, root, 1, 1);
669 }
670
671 /*
672  * when btree blocks are allocated, they have some corresponding bits set for
673  * them in one of two extent_io trees.  This is used to make sure all of
674  * those extents are sent to disk but does not wait on them
675  */
676 int btrfs_write_marked_extents(struct btrfs_root *root,
677                                struct extent_io_tree *dirty_pages, int mark)
678 {
679         int err = 0;
680         int werr = 0;
681         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
682         u64 start = 0;
683         u64 end;
684
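        /*
         * convert each dirty range to EXTENT_NEED_WAIT as it is submitted so
         * that btrfs_wait_marked_extents() can find the same ranges and wait
         * for their writeback to finish.
         */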
685         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
686                                       mark)) {
687                 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
688                                    GFP_NOFS);
689                 err = filemap_fdatawrite_range(mapping, start, end);
690                 if (err)
691                         werr = err;
692                 cond_resched();
693                 start = end + 1;
694         }
695         if (err)
696                 werr = err;
697         return werr;
698 }
699
700 /*
701  * when btree blocks are allocated, they have some corresponding bits set for
702  * them in one of two extent_io trees.  This is used to make sure all of
703  * those extents are on disk for transaction or log commit.  We wait
704  * on all the pages and clear them from the dirty pages state tree
705  */
706 int btrfs_wait_marked_extents(struct btrfs_root *root,
707                               struct extent_io_tree *dirty_pages, int mark)
708 {
709         int err = 0;
710         int werr = 0;
711         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
712         u64 start = 0;
713         u64 end;
714
715         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
716                                       EXTENT_NEED_WAIT)) {
717                 clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
718                 err = filemap_fdatawait_range(mapping, start, end);
719                 if (err)
720                         werr = err;
721                 cond_resched();
722                 start = end + 1;
723         }
724         if (err)
725                 werr = err;
726         return werr;
727 }
728
729 /*
730  * when btree blocks are allocated, they have some corresponding bits set for
731  * them in one of two extent_io trees.  This is used to make sure all of
732  * those extents are on disk for transaction or log commit
733  */
734 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
735                                 struct extent_io_tree *dirty_pages, int mark)
736 {
737         int ret;
738         int ret2;
739
740         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
741         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
742
743         if (ret)
744                 return ret;
745         if (ret2)
746                 return ret2;
747         return 0;
748 }
749
750 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
751                                      struct btrfs_root *root)
752 {
753         if (!trans || !trans->transaction) {
754                 struct inode *btree_inode;
755                 btree_inode = root->fs_info->btree_inode;
756                 return filemap_write_and_wait(btree_inode->i_mapping);
757         }
758         return btrfs_write_and_wait_marked_extents(root,
759                                            &trans->transaction->dirty_pages,
760                                            EXTENT_DIRTY);
761 }
762
763 /*
764  * this is used to update the root pointer in the tree of tree roots.
765  *
766  * But, in the case of the extent allocation tree, updating the root
767  * pointer may allocate blocks which may change the root of the extent
768  * allocation tree.
769  *
770  * So, this loops and repeats and makes sure the cowonly root didn't
771  * change while the root pointer was being updated in the metadata.
772  */
773 static int update_cowonly_root(struct btrfs_trans_handle *trans,
774                                struct btrfs_root *root)
775 {
776         int ret;
777         u64 old_root_bytenr;
778         u64 old_root_used;
779         struct btrfs_root *tree_root = root->fs_info->tree_root;
780
781         old_root_used = btrfs_root_used(&root->root_item);
782         btrfs_write_dirty_block_groups(trans, root);
783
784         while (1) {
785                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
786                 if (old_root_bytenr == root->node->start &&
787                     old_root_used == btrfs_root_used(&root->root_item))
788                         break;
789
790                 btrfs_set_root_node(&root->root_item, root->node);
791                 ret = btrfs_update_root(trans, tree_root,
792                                         &root->root_key,
793                                         &root->root_item);
794                 if (ret)
795                         return ret;
796
797                 old_root_used = btrfs_root_used(&root->root_item);
798                 ret = btrfs_write_dirty_block_groups(trans, root);
799                 if (ret)
800                         return ret;
801         }
802
803         if (root != root->fs_info->extent_root)
804                 switch_commit_root(root);
805
806         return 0;
807 }
808
809 /*
810  * update all the cowonly tree roots on disk
811  *
812  * The error handling in this function may not be obvious. Any of the
813  * failures will cause the file system to go offline. We still need
814  * to clean up the delayed refs.
815  */
816 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
817                                          struct btrfs_root *root)
818 {
819         struct btrfs_fs_info *fs_info = root->fs_info;
820         struct list_head *next;
821         struct extent_buffer *eb;
822         int ret;
823
824         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
825         if (ret)
826                 return ret;
827
828         eb = btrfs_lock_root_node(fs_info->tree_root);
829         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
830                               0, &eb);
831         btrfs_tree_unlock(eb);
832         free_extent_buffer(eb);
833
834         if (ret)
835                 return ret;
836
837         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
838         if (ret)
839                 return ret;
840
841         ret = btrfs_run_dev_stats(trans, root->fs_info);
842         BUG_ON(ret);
843
844         ret = btrfs_run_qgroups(trans, root->fs_info);
845         BUG_ON(ret);
846
847         /* run_qgroups might have added some more refs */
848         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
849         BUG_ON(ret);
850
851         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
852                 next = fs_info->dirty_cowonly_roots.next;
853                 list_del_init(next);
854                 root = list_entry(next, struct btrfs_root, dirty_list);
855
856                 ret = update_cowonly_root(trans, root);
857                 if (ret)
858                         return ret;
859         }
860
861         down_write(&fs_info->extent_commit_sem);
862         switch_commit_root(fs_info->extent_root);
863         up_write(&fs_info->extent_commit_sem);
864
865         return 0;
866 }
867
868 /*
869  * dead roots are old snapshots that need to be deleted.  This adds the
870  * given root into the list of dead roots that need to
871  * be deleted
872  */
873 int btrfs_add_dead_root(struct btrfs_root *root)
874 {
875         spin_lock(&root->fs_info->trans_lock);
876         list_add(&root->root_list, &root->fs_info->dead_roots);
877         spin_unlock(&root->fs_info->trans_lock);
878         return 0;
879 }
880
881 /*
882  * write out the root items of all the fs tree (subvolume) roots that were changed in this transaction
883  */
884 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
885                                     struct btrfs_root *root)
886 {
887         struct btrfs_root *gang[8];
888         struct btrfs_fs_info *fs_info = root->fs_info;
889         int i;
890         int ret;
891         int err = 0;
892
893         spin_lock(&fs_info->fs_roots_radix_lock);
894         while (1) {
895                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
896                                                  (void **)gang, 0,
897                                                  ARRAY_SIZE(gang),
898                                                  BTRFS_ROOT_TRANS_TAG);
899                 if (ret == 0)
900                         break;
901                 for (i = 0; i < ret; i++) {
902                         root = gang[i];
903                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
904                                         (unsigned long)root->root_key.objectid,
905                                         BTRFS_ROOT_TRANS_TAG);
906                         spin_unlock(&fs_info->fs_roots_radix_lock);
907
908                         btrfs_free_log(trans, root);
909                         btrfs_update_reloc_root(trans, root);
910                         btrfs_orphan_commit_root(trans, root);
911
912                         btrfs_save_ino_cache(root, trans);
913
914                         /* see comments in should_cow_block() */
915                         root->force_cow = 0;
916                         smp_wmb();
917
918                         if (root->commit_root != root->node) {
919                                 mutex_lock(&root->fs_commit_mutex);
920                                 switch_commit_root(root);
921                                 btrfs_unpin_free_ino(root);
922                                 mutex_unlock(&root->fs_commit_mutex);
923
924                                 btrfs_set_root_node(&root->root_item,
925                                                     root->node);
926                         }
927
928                         err = btrfs_update_root(trans, fs_info->tree_root,
929                                                 &root->root_key,
930                                                 &root->root_item);
931                         spin_lock(&fs_info->fs_roots_radix_lock);
932                         if (err)
933                                 break;
934                 }
935         }
936         spin_unlock(&fs_info->fs_roots_radix_lock);
937         return err;
938 }
939
940 /*
941  * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
942  * otherwise every leaf in the btree is read and defragged.
943  */
944 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
945 {
946         struct btrfs_fs_info *info = root->fs_info;
947         struct btrfs_trans_handle *trans;
948         int ret;
949         unsigned long nr;
950
951         if (xchg(&root->defrag_running, 1))
952                 return 0;
953
954         while (1) {
955                 trans = btrfs_start_transaction(root, 0);
956                 if (IS_ERR(trans))
957                         return PTR_ERR(trans);
958
959                 ret = btrfs_defrag_leaves(trans, root, cacheonly);
960
961                 nr = trans->blocks_used;
962                 btrfs_end_transaction(trans, root);
963                 btrfs_btree_balance_dirty(info->tree_root, nr);
964                 cond_resched();
965
966                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
967                         break;
968         }
969         root->defrag_running = 0;
970         return ret;
971 }
972
973 /*
974  * new snapshots need to be created at a very specific time in the
975  * transaction commit.  This does the actual creation
976  */
977 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
978                                    struct btrfs_fs_info *fs_info,
979                                    struct btrfs_pending_snapshot *pending)
980 {
981         struct btrfs_key key;
982         struct btrfs_root_item *new_root_item;
983         struct btrfs_root *tree_root = fs_info->tree_root;
984         struct btrfs_root *root = pending->root;
985         struct btrfs_root *parent_root;
986         struct btrfs_block_rsv *rsv;
987         struct inode *parent_inode;
988         struct btrfs_path *path;
989         struct btrfs_dir_item *dir_item;
990         struct dentry *parent;
991         struct dentry *dentry;
992         struct extent_buffer *tmp;
993         struct extent_buffer *old;
994         struct timespec cur_time = CURRENT_TIME;
995         int ret;
996         u64 to_reserve = 0;
997         u64 index = 0;
998         u64 objectid;
999         u64 root_flags;
1000         uuid_le new_uuid;
1001
1002         path = btrfs_alloc_path();
1003         if (!path) {
1004                 ret = pending->error = -ENOMEM;
1005                 goto path_alloc_fail;
1006         }
1007
1008         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
1009         if (!new_root_item) {
1010                 ret = pending->error = -ENOMEM;
1011                 goto root_item_alloc_fail;
1012         }
1013
1014         ret = btrfs_find_free_objectid(tree_root, &objectid);
1015         if (ret) {
1016                 pending->error = ret;
1017                 goto no_free_objectid;
1018         }
1019
1020         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
1021
1022         if (to_reserve > 0) {
1023                 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
1024                                                   to_reserve);
1025                 if (ret) {
1026                         pending->error = ret;
1027                         goto no_free_objectid;
1028                 }
1029         }
1030
1031         ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
1032                                    objectid, pending->inherit);
1033         if (ret) {
1034                 pending->error = ret;
1035                 goto no_free_objectid;
1036         }
1037
1038         key.objectid = objectid;
1039         key.offset = (u64)-1;
1040         key.type = BTRFS_ROOT_ITEM_KEY;
1041
1042         rsv = trans->block_rsv;
1043         trans->block_rsv = &pending->block_rsv;
1044
1045         dentry = pending->dentry;
1046         parent = dget_parent(dentry);
1047         parent_inode = parent->d_inode;
1048         parent_root = BTRFS_I(parent_inode)->root;
1049         record_root_in_trans(trans, parent_root);
1050
1051         /*
1052          * insert the directory item
1053          */
1054         ret = btrfs_set_inode_index(parent_inode, &index);
1055         BUG_ON(ret); /* -ENOMEM */
1056
1057         /* check if there is a file/dir which has the same name. */
1058         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1059                                          btrfs_ino(parent_inode),
1060                                          dentry->d_name.name,
1061                                          dentry->d_name.len, 0);
1062         if (dir_item != NULL && !IS_ERR(dir_item)) {
1063                 pending->error = -EEXIST;
1064                 goto fail;
1065         } else if (IS_ERR(dir_item)) {
1066                 ret = PTR_ERR(dir_item);
1067                 goto abort_trans;
1068         }
1069         btrfs_release_path(path);
1070
1071         /*
1072          * pull in the delayed directory update
1073          * and the delayed inode item
1074          * otherwise we corrupt the FS during
1075          * snapshot
1076          */
1077         ret = btrfs_run_delayed_items(trans, root);
1078         if (ret)        /* Transaction aborted */
1079                 goto abort_trans;
1080
1081         record_root_in_trans(trans, root);
1082         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1083         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1084         btrfs_check_and_init_root_item(new_root_item);
1085
1086         root_flags = btrfs_root_flags(new_root_item);
1087         if (pending->readonly)
1088                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1089         else
1090                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1091         btrfs_set_root_flags(new_root_item, root_flags);
1092
1093         btrfs_set_root_generation_v2(new_root_item,
1094                         trans->transid);
1095         uuid_le_gen(&new_uuid);
1096         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1097         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1098                         BTRFS_UUID_SIZE);
1099         new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
1100         new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
1101         btrfs_set_root_otransid(new_root_item, trans->transid);
1102         memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1103         memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1104         btrfs_set_root_stransid(new_root_item, 0);
1105         btrfs_set_root_rtransid(new_root_item, 0);
1106
1107         old = btrfs_lock_root_node(root);
1108         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1109         if (ret) {
1110                 btrfs_tree_unlock(old);
1111                 free_extent_buffer(old);
1112                 goto abort_trans;
1113         }
1114
1115         btrfs_set_lock_blocking(old);
1116
1117         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1118         /* clean up in any case */
1119         btrfs_tree_unlock(old);
1120         free_extent_buffer(old);
1121         if (ret)
1122                 goto abort_trans;
1123
1124         /* see comments in should_cow_block() */
1125         root->force_cow = 1;
1126         smp_wmb();
1127
1128         btrfs_set_root_node(new_root_item, tmp);
1129         /* record when the snapshot was created in key.offset */
1130         key.offset = trans->transid;
1131         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1132         btrfs_tree_unlock(tmp);
1133         free_extent_buffer(tmp);
1134         if (ret)
1135                 goto abort_trans;
1136
1137         /*
1138          * insert root back/forward references
1139          */
1140         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1141                                  parent_root->root_key.objectid,
1142                                  btrfs_ino(parent_inode), index,
1143                                  dentry->d_name.name, dentry->d_name.len);
1144         if (ret)
1145                 goto abort_trans;
1146
1147         key.offset = (u64)-1;
1148         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1149         if (IS_ERR(pending->snap)) {
1150                 ret = PTR_ERR(pending->snap);
1151                 goto abort_trans;
1152         }
1153
1154         ret = btrfs_reloc_post_snapshot(trans, pending);
1155         if (ret)
1156                 goto abort_trans;
1157
1158         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1159         if (ret)
1160                 goto abort_trans;
1161
1162         ret = btrfs_insert_dir_item(trans, parent_root,
1163                                     dentry->d_name.name, dentry->d_name.len,
1164                                     parent_inode, &key,
1165                                     BTRFS_FT_DIR, index);
1166         /* We have checked the name at the beginning, so it is impossible. */
1167         BUG_ON(ret == -EEXIST);
1168         if (ret)
1169                 goto abort_trans;
1170
1171         btrfs_i_size_write(parent_inode, parent_inode->i_size +
1172                                          dentry->d_name.len * 2);
1173         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1174         ret = btrfs_update_inode(trans, parent_root, parent_inode);
1175         if (ret)
1176                 goto abort_trans;
1177 fail:
1178         dput(parent);
1179         trans->block_rsv = rsv;
1180 no_free_objectid:
1181         kfree(new_root_item);
1182 root_item_alloc_fail:
1183         btrfs_free_path(path);
1184 path_alloc_fail:
1185         btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
1186         return ret;
1187
1188 abort_trans:
1189         btrfs_abort_transaction(trans, root, ret);
1190         goto fail;
1191 }
1192
1193 /*
1194  * create all the snapshots we've scheduled for creation
1195  */
1196 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1197                                              struct btrfs_fs_info *fs_info)
1198 {
1199         struct btrfs_pending_snapshot *pending;
1200         struct list_head *head = &trans->transaction->pending_snapshots;
1201
1202         list_for_each_entry(pending, head, list)
1203                 create_pending_snapshot(trans, fs_info, pending);
1204         return 0;
1205 }
1206
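/*
 * copy the bytenr, generation and level of the freshly committed tree root
 * and chunk root into the in-memory super block copy that will be written
 * out.
 */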
1207 static void update_super_roots(struct btrfs_root *root)
1208 {
1209         struct btrfs_root_item *root_item;
1210         struct btrfs_super_block *super;
1211
1212         super = root->fs_info->super_copy;
1213
1214         root_item = &root->fs_info->chunk_root->root_item;
1215         super->chunk_root = root_item->bytenr;
1216         super->chunk_root_generation = root_item->generation;
1217         super->chunk_root_level = root_item->level;
1218
1219         root_item = &root->fs_info->tree_root->root_item;
1220         super->root = root_item->bytenr;
1221         super->generation = root_item->generation;
1222         super->root_level = root_item->level;
1223         if (btrfs_test_opt(root, SPACE_CACHE))
1224                 super->cache_generation = root_item->generation;
1225 }
1226
1227 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1228 {
1229         int ret = 0;
1230         spin_lock(&info->trans_lock);
1231         if (info->running_transaction)
1232                 ret = info->running_transaction->in_commit;
1233         spin_unlock(&info->trans_lock);
1234         return ret;
1235 }
1236
1237 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1238 {
1239         int ret = 0;
1240         spin_lock(&info->trans_lock);
1241         if (info->running_transaction)
1242                 ret = info->running_transaction->blocked;
1243         spin_unlock(&info->trans_lock);
1244         return ret;
1245 }
1246
1247 /*
1248  * wait for the current transaction commit to start and block subsequent
1249  * transaction joins
1250  */
1251 static void wait_current_trans_commit_start(struct btrfs_root *root,
1252                                             struct btrfs_transaction *trans)
1253 {
1254         wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
1255 }
1256
1257 /*
1258  * wait for the current transaction to start and then become unblocked.
1259  * caller holds ref.
1260  */
1261 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1262                                          struct btrfs_transaction *trans)
1263 {
1264         wait_event(root->fs_info->transaction_wait,
1265                    trans->commit_done || (trans->in_commit && !trans->blocked));
1266 }
1267
1268 /*
1269  * commit transactions asynchronously. once btrfs_commit_transaction_async
1270  * returns, any subsequent transaction will not be allowed to join.
1271  */
1272 struct btrfs_async_commit {
1273         struct btrfs_trans_handle *newtrans;
1274         struct btrfs_root *root;
1275         struct delayed_work work;
1276 };
1277
1278 static void do_async_commit(struct work_struct *work)
1279 {
1280         struct btrfs_async_commit *ac =
1281                 container_of(work, struct btrfs_async_commit, work.work);
1282
1283         /*
1284          * We've got freeze protection passed with the transaction.
1285          * Tell lockdep about it.
1286          */
1287         rwsem_acquire_read(
1288                 &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1289                 0, 1, _THIS_IP_);
1290
1291         current->journal_info = ac->newtrans;
1292
1293         btrfs_commit_transaction(ac->newtrans, ac->root);
1294         kfree(ac);
1295 }
1296
1297 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1298                                    struct btrfs_root *root,
1299                                    int wait_for_unblock)
1300 {
1301         struct btrfs_async_commit *ac;
1302         struct btrfs_transaction *cur_trans;
1303
1304         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1305         if (!ac)
1306                 return -ENOMEM;
1307
1308         INIT_DELAYED_WORK(&ac->work, do_async_commit);
1309         ac->root = root;
1310         ac->newtrans = btrfs_join_transaction(root);
1311         if (IS_ERR(ac->newtrans)) {
1312                 int err = PTR_ERR(ac->newtrans);
1313                 kfree(ac);
1314                 return err;
1315         }
1316
1317         /* take transaction reference */
1318         cur_trans = trans->transaction;
1319         atomic_inc(&cur_trans->use_count);
1320
1321         btrfs_end_transaction(trans, root);
1322
1323         /*
1324          * Tell lockdep we've released the freeze rwsem, since the
1325          * async commit thread will be the one to unlock it.
1326          */
1327         rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1328                       1, _THIS_IP_);
1329
1330         schedule_delayed_work(&ac->work, 0);
1331
1332         /* wait for transaction to start and unblock */
1333         if (wait_for_unblock)
1334                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1335         else
1336                 wait_current_trans_commit_start(root, cur_trans);
1337
1338         if (current->journal_info == trans)
1339                 current->journal_info = NULL;
1340
1341         put_transaction(cur_trans);
1342         return 0;
1343 }
1344
1345
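/*
 * called when a commit fails part way through: abort the handle, unhook the
 * transaction from fs_info and drop the references the normal commit path
 * would have dropped.
 */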
1346 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1347                                 struct btrfs_root *root, int err)
1348 {
1349         struct btrfs_transaction *cur_trans = trans->transaction;
1350
1351         WARN_ON(trans->use_count > 1);
1352
1353         btrfs_abort_transaction(trans, root, err);
1354
1355         spin_lock(&root->fs_info->trans_lock);
1356         list_del_init(&cur_trans->list);
1357         if (cur_trans == root->fs_info->running_transaction) {
1358                 root->fs_info->running_transaction = NULL;
1359                 root->fs_info->trans_no_join = 0;
1360         }
1361         spin_unlock(&root->fs_info->trans_lock);
1362
1363         btrfs_cleanup_one_transaction(trans->transaction, root);
1364
1365         put_transaction(cur_trans);
1366         put_transaction(cur_trans);
1367
1368         trace_btrfs_transaction_commit(root);
1369
1370         btrfs_scrub_continue(root);
1371
1372         if (current->journal_info == trans)
1373                 current->journal_info = NULL;
1374
1375         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1376 }
1377
1378 /*
1379  * btrfs_transaction state sequence:
1380  *    in_commit = 0, blocked = 0  (initial)
1381  *    in_commit = 1, blocked = 1
1382  *    blocked = 0
1383  *    commit_done = 1
1384  */
1385 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1386                              struct btrfs_root *root)
1387 {
1388         unsigned long joined = 0;
1389         struct btrfs_transaction *cur_trans = trans->transaction;
1390         struct btrfs_transaction *prev_trans = NULL;
1391         DEFINE_WAIT(wait);
1392         int ret = -EIO;
1393         int should_grow = 0;
1394         unsigned long now = get_seconds();
1395         int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
1396
1397         btrfs_run_ordered_operations(root, 0);
1398
1399         if (cur_trans->aborted)
1400                 goto cleanup_transaction;
1401
1402         /* make a pass through all the delayed refs we have so far;
1403          * any running procs may add more while we are here
1404          */
1405         ret = btrfs_run_delayed_refs(trans, root, 0);
1406         if (ret)
1407                 goto cleanup_transaction;
1408
1409         btrfs_trans_release_metadata(trans, root);
1410         trans->block_rsv = NULL;
1411
1412         cur_trans = trans->transaction;
1413
1414         /*
1415          * set the flushing flag so procs in this transaction have to
1416          * start sending their work down.
1417          */
1418         cur_trans->delayed_refs.flushing = 1;
1419
1420         if (!list_empty(&trans->new_bgs))
1421                 btrfs_create_pending_block_groups(trans, root);
1422
1423         ret = btrfs_run_delayed_refs(trans, root, 0);
1424         if (ret)
1425                 goto cleanup_transaction;
1426
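        /*
         * if somebody else is already committing this transaction, just end
         * our handle and wait for their commit to finish.
         */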
1427         spin_lock(&cur_trans->commit_lock);
1428         if (cur_trans->in_commit) {
1429                 spin_unlock(&cur_trans->commit_lock);
1430                 atomic_inc(&cur_trans->use_count);
1431                 ret = btrfs_end_transaction(trans, root);
1432
1433                 wait_for_commit(root, cur_trans);
1434
1435                 put_transaction(cur_trans);
1436
1437                 return ret;
1438         }
1439
1440         trans->transaction->in_commit = 1;
1441         trans->transaction->blocked = 1;
1442         spin_unlock(&cur_trans->commit_lock);
1443         wake_up(&root->fs_info->transaction_blocked_wait);
1444
1445         spin_lock(&root->fs_info->trans_lock);
1446         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1447                 prev_trans = list_entry(cur_trans->list.prev,
1448                                         struct btrfs_transaction, list);
1449                 if (!prev_trans->commit_done) {
1450                         atomic_inc(&prev_trans->use_count);
1451                         spin_unlock(&root->fs_info->trans_lock);
1452
1453                         wait_for_commit(root, prev_trans);
1454
1455                         put_transaction(prev_trans);
1456                 } else {
1457                         spin_unlock(&root->fs_info->trans_lock);
1458                 }
1459         } else {
1460                 spin_unlock(&root->fs_info->trans_lock);
1461         }
1462
1463         if (!btrfs_test_opt(root, SSD) &&
1464             (now < cur_trans->start_time || now - cur_trans->start_time < 1))
1465                 should_grow = 1;
1466
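        /*
         * flush data and delayed items and give late joiners a chance to
         * finish: keep looping until we are the only writer left (and, if
         * should_grow, until no new writers joined during the last pass).
         */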
1467         do {
1468                 int snap_pending = 0;
1469
1470                 joined = cur_trans->num_joined;
1471                 if (!list_empty(&trans->transaction->pending_snapshots))
1472                         snap_pending = 1;
1473
1474                 WARN_ON(cur_trans != trans->transaction);
1475
1476                 if (flush_on_commit || snap_pending) {
1477                         btrfs_start_delalloc_inodes(root, 1);
1478                         btrfs_wait_ordered_extents(root, 0, 1);
1479                 }
1480
1481                 ret = btrfs_run_delayed_items(trans, root);
1482                 if (ret)
1483                         goto cleanup_transaction;
1484
1485                 /*
1486                  * running the delayed items may have added new refs. account
1487                  * for them now so that they interfere with the processing of
1488                  * later delayed refs as little as possible.
1489                  */
1490                 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1491
1492                 /*
1493                  * rename doesn't use btrfs_join_transaction, so once we
1494                  * set the transaction to blocked above, we aren't going
1495                  * to get any new ordered operations.  We can safely run
1496                  * them here and know for sure that nothing new will be
1497                  * added to the list
1498                  */
1499                 btrfs_run_ordered_operations(root, 1);
1500
1501                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1502                                 TASK_UNINTERRUPTIBLE);
1503
1504                 if (atomic_read(&cur_trans->num_writers) > 1)
1505                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1506                 else if (should_grow)
1507                         schedule_timeout(1);
1508
1509                 finish_wait(&cur_trans->writer_wait, &wait);
1510         } while (atomic_read(&cur_trans->num_writers) > 1 ||
1511                  (should_grow && cur_trans->num_joined != joined));
1512
1513         /*
1514          * Now we need to block out any other joins while we commit the
1515          * transaction.  A join could have started before we set no_join,
1516          * so make sure to wait for num_writers to drop back to 1 again.
1517          */
1518         spin_lock(&root->fs_info->trans_lock);
1519         root->fs_info->trans_no_join = 1;
1520         spin_unlock(&root->fs_info->trans_lock);
1521         wait_event(cur_trans->writer_wait,
1522                    atomic_read(&cur_trans->num_writers) == 1);
1523
1524         /*
1525          * the reloc mutex makes sure that we stop
1526          * the balancing code from coming in and moving
1527          * extents around in the middle of the commit
1528          */
1529         mutex_lock(&root->fs_info->reloc_mutex);
1530
1531         /*
1532          * We needn't worry about the delayed items because we will
1533          * deal with them in create_pending_snapshot(), which is the
1534          * core function of snapshot creation.
1535          */
1536         ret = create_pending_snapshots(trans, root->fs_info);
1537         if (ret) {
1538                 mutex_unlock(&root->fs_info->reloc_mutex);
1539                 goto cleanup_transaction;
1540         }
1541
1542         /*
1543          * We insert the dir indexes of the snapshots and update the inodes
1544          * of the snapshots' parents after the snapshot creation, so there
1545          * are some delayed items which have not been dealt with yet. Deal
1546          * with them now.
1547          *
1548          * We needn't worry that this operation will corrupt the snapshots,
1549          * because all the trees which are snapshotted will be forced to COW
1550          * the nodes and leaves.
1551          */
1552         ret = btrfs_run_delayed_items(trans, root);
1553         if (ret) {
1554                 mutex_unlock(&root->fs_info->reloc_mutex);
1555                 goto cleanup_transaction;
1556         }
1557
1558         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1559         if (ret) {
1560                 mutex_unlock(&root->fs_info->reloc_mutex);
1561                 goto cleanup_transaction;
1562         }
1563
1564         /*
1565          * make sure none of the code above managed to slip in a
1566          * delayed item
1567          */
1568         btrfs_assert_delayed_root_empty(root);
1569
1570         WARN_ON(cur_trans != trans->transaction);
1571
1572         btrfs_scrub_pause(root);
1573         /* btrfs_commit_tree_roots is responsible for getting the
1574          * various roots consistent with each other.  Every pointer
1575          * in the tree of tree roots has to point to the most up to date
1576          * root for every subvolume and other tree.  So, we have to keep
1577          * the tree logging code from jumping in and changing any
1578          * of the trees.
1579          *
1580          * At this point in the commit, there can't be any tree-log
1581          * writers, but a little lower down we drop the trans mutex
1582          * and let new people in.  By holding the tree_log_mutex
1583          * from now until after the super is written, we avoid races
1584          * with the tree-log code.
1585          */
1586         mutex_lock(&root->fs_info->tree_log_mutex);
1587
1588         ret = commit_fs_roots(trans, root);
1589         if (ret) {
1590                 mutex_unlock(&root->fs_info->tree_log_mutex);
1591                 mutex_unlock(&root->fs_info->reloc_mutex);
1592                 goto cleanup_transaction;
1593         }
1594
1595         /* commit_fs_roots gets rid of all the tree log roots; it is now
1596          * safe to free the root of the tree log roots
1597          */
1598         btrfs_free_log_root_tree(trans, root->fs_info);
1599
1600         ret = commit_cowonly_roots(trans, root);
1601         if (ret) {
1602                 mutex_unlock(&root->fs_info->tree_log_mutex);
1603                 mutex_unlock(&root->fs_info->reloc_mutex);
1604                 goto cleanup_transaction;
1605         }
1606
1607         btrfs_prepare_extent_commit(trans, root);
1608
1609         cur_trans = root->fs_info->running_transaction;
1610
1611         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1612                             root->fs_info->tree_root->node);
1613         switch_commit_root(root->fs_info->tree_root);
1614
1615         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1616                             root->fs_info->chunk_root->node);
1617         switch_commit_root(root->fs_info->chunk_root);
1618
1619         assert_qgroups_uptodate(trans);
1620         update_super_roots(root);
1621
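        /*
         * the tree log roots were freed above, so clear the log root
         * pointers in the super copy to keep a crash after this commit
         * from replaying a stale log.
         */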
1622         if (!root->fs_info->log_root_recovering) {
1623                 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1624                 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1625         }
1626
1627         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1628                sizeof(*root->fs_info->super_copy));
1629
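        /*
         * the critical section is done: unblock waiters and let new
         * transactions start while we write this one out to disk.
         */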
1630         trans->transaction->blocked = 0;
1631         spin_lock(&root->fs_info->trans_lock);
1632         root->fs_info->running_transaction = NULL;
1633         root->fs_info->trans_no_join = 0;
1634         spin_unlock(&root->fs_info->trans_lock);
1635         mutex_unlock(&root->fs_info->reloc_mutex);
1636
1637         wake_up(&root->fs_info->transaction_wait);
1638
1639         ret = btrfs_write_and_wait_transaction(trans, root);
1640         if (ret) {
1641                 btrfs_error(root->fs_info, ret,
1642                             "Error while writing out transaction.");
1643                 mutex_unlock(&root->fs_info->tree_log_mutex);
1644                 goto cleanup_transaction;
1645         }
1646
1647         ret = write_ctree_super(trans, root, 0);
1648         if (ret) {
1649                 mutex_unlock(&root->fs_info->tree_log_mutex);
1650                 goto cleanup_transaction;
1651         }
1652
1653         /*
1654          * the super is written; we can safely allow the tree-loggers
1655          * to go about their business
1656          */
1657         mutex_unlock(&root->fs_info->tree_log_mutex);
1658
1659         btrfs_finish_extent_commit(trans, root);
1660
1661         cur_trans->commit_done = 1;
1662
1663         root->fs_info->last_trans_committed = cur_trans->transid;
1664
1665         wake_up(&cur_trans->commit_wait);
1666
1667         spin_lock(&root->fs_info->trans_lock);
1668         list_del_init(&cur_trans->list);
1669         spin_unlock(&root->fs_info->trans_lock);
1670
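        /*
         * drop the reference this handle holds on the transaction and the
         * reference that was taken for the fs_info transaction list entry
         * removed just above.
         */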
1671         put_transaction(cur_trans);
1672         put_transaction(cur_trans);
1673
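        /*
         * pairs with the sb_start_intwrite() taken when this transaction
         * handle was started, so a pending filesystem freeze can make
         * progress once the commit is finished.
         */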
1674         sb_end_intwrite(root->fs_info->sb);
1675
1676         trace_btrfs_transaction_commit(root);
1677
1678         btrfs_scrub_continue(root);
1679
1680         if (current->journal_info == trans)
1681                 current->journal_info = NULL;
1682
1683         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1684
1685         if (current != root->fs_info->transaction_kthread)
1686                 btrfs_run_delayed_iputs(root);
1687
1688         return ret;
1689
1690 cleanup_transaction:
1691         btrfs_trans_release_metadata(trans, root);
1692         trans->block_rsv = NULL;
1693         btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1694 //      WARN_ON(1);
1695         if (current->journal_info == trans)
1696                 current->journal_info = NULL;
1697         cleanup_transaction(trans, root, ret);
1698
1699         return ret;
1700 }
1701
1702 /*
1703  * interface function to delete all the snapshots we have scheduled for deletion
1704  */
1705 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1706 {
1707         LIST_HEAD(list);
1708         struct btrfs_fs_info *fs_info = root->fs_info;
1709
1710         spin_lock(&fs_info->trans_lock);
1711         list_splice_init(&fs_info->dead_roots, &list);
1712         spin_unlock(&fs_info->trans_lock);
1713
1714         while (!list_empty(&list)) {
1715                 int ret;
1716
1717                 root = list_entry(list.next, struct btrfs_root, root_list);
1718                 list_del(&root->root_list);
1719
1720                 btrfs_kill_all_delayed_nodes(root);
1721
1722                 if (btrfs_header_backref_rev(root->node) <
1723                     BTRFS_MIXED_BACKREF_REV)
1724                         ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1725                 else
1726                         ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1727                 BUG_ON(ret < 0);
1728         }
1729         return 0;
1730 }