fs/btrfs/transaction.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

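
/*
 * drop a reference on @transaction.  Callers are expected to hold
 * trans_mutex; when the last reference goes away the transaction is
 * removed from the global list and freed.
 */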
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

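
/*
 * make root->commit_root point at the current root node, dropping our
 * reference on the old commit root
 */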
static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();

                cur_trans->delayed_refs.root = RB_ROOT;
                cur_trans->delayed_refs.num_entries = 0;
                cur_trans->delayed_refs.num_heads_ready = 0;
                cur_trans->delayed_refs.num_heads = 0;
                cur_trans->delayed_refs.flushing = 0;
                cur_trans->delayed_refs.run_delayed_start = 0;
                spin_lock_init(&cur_trans->delayed_refs.lock);

                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                root->last_trans = trans->transid;
                btrfs_init_reloc_root(trans, root);
        }
        return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        mutex_lock(&root->fs_info->trans_mutex);
        if (root->last_trans == trans->transid) {
                mutex_unlock(&root->fs_info->trans_mutex);
                return 0;
        }

        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
                while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!cur_trans->blocked)
                                break;
                        mutex_unlock(&root->fs_info->trans_mutex);
                        schedule();
                        mutex_lock(&root->fs_info->trans_mutex);
                }
                finish_wait(&root->fs_info->transaction_wait, &wait);
                put_transaction(cur_trans);
        }
}

enum btrfs_trans_type {
        TRANS_START,
        TRANS_JOIN,
        TRANS_USERSPACE,
};

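
/*
 * decide whether to wait for the running transaction to unblock before
 * joining: TRANS_USERSPACE always waits, TRANS_START waits unless a
 * userspace ioctl transaction is open, and nobody waits while log
 * replay is in progress
 */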
static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (!root->fs_info->log_root_recovering &&
            ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
             type == TRANS_USERSPACE))
                return 1;
        return 0;
}

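
/*
 * common helper behind the transaction start variants: allocate a
 * handle, join (or create) the running transaction, and when
 * @num_items > 0 reserve metadata space for that many items, committing
 * and retrying if the reservation needs an earlier commit
 */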
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                                    u64 num_items, int type)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        int retries = 0;
        int ret;
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&root->fs_info->trans_mutex);
        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        ret = join_transaction(root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        cur_trans->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->block_group = 0;
        h->bytes_reserved = 0;
        h->delayed_ref_updates = 0;
        h->block_rsv = NULL;

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_items > 0) {
                ret = btrfs_trans_reserve_metadata(h, root, num_items,
                                                   &retries);
                if (ret == -EAGAIN) {
                        btrfs_commit_transaction(h, root);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_end_transaction(h, root);
                        return ERR_PTR(ret);
                }
        }

        mutex_lock(&root->fs_info->trans_mutex);
        record_root_in_trans(h, root);
        mutex_unlock(&root->fs_info->trans_mutex);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
                                                         int num_blocks)
{
        return start_transaction(r, 0, TRANS_USERSPACE);
}

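
/*
 * a minimal caller sketch (illustrative only, not code from this file):
 * reserve room for one item's worth of tree modifications, make the
 * change, then end the handle.  inode and ret belong to the caller.
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ret = btrfs_update_inode(trans, root, inode);
 *      btrfs_end_transaction(trans, root);
 */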

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while (!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

#if 0
/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        int harder_count = 0;

harder:
        if (atomic_read(&info->throttles)) {
                DEFINE_WAIT(wait);
                int thr;
                thr = atomic_read(&info->throttle_gen);

                do {
                        prepare_to_wait(&info->transaction_throttle,
                                        &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&info->throttles)) {
                                finish_wait(&info->transaction_throttle, &wait);
                                break;
                        }
                        schedule();
                        finish_wait(&info->transaction_throttle, &wait);
                } while (thr == atomic_read(&info->throttle_gen));
                harder_count++;

                if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
                    harder_count < 2)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
                    harder_count < 10)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
                    harder_count < 20)
                        goto harder;
        }
}
#endif

void btrfs_throttle(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->open_ioctl_trans)
                wait_current_trans(root);
        mutex_unlock(&root->fs_info->trans_mutex);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;
        ret = btrfs_block_rsv_check(trans, root,
                                    &root->fs_info->global_block_rsv, 0, 5);
        return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;

        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates)
                btrfs_run_delayed_refs(trans, root, updates);

        return should_end_transaction(trans, root);
}

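
/*
 * illustrative pattern for long-running work (a sketch; do_one_unit()
 * and more_work are hypothetical placeholders): check
 * btrfs_should_end_transaction() periodically and cycle the handle so a
 * pending commit can make progress.
 *
 *      while (more_work) {
 *              do_one_unit(trans, root);
 *              if (btrfs_should_end_transaction(trans, root)) {
 *                      btrfs_end_transaction(trans, root);
 *                      trans = btrfs_start_transaction(root, 1);
 *              }
 *      }
 */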

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;

        while (count < 4) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;

                        /*
                         * do a full flush if the transaction is trying
                         * to close
                         */
                        if (trans->transaction->delayed_refs.flushing)
                                cur = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        btrfs_trans_release_metadata(trans, root);

        if (!root->fs_info->open_ioctl_trans &&
            should_end_transaction(trans, root))
                trans->transaction->blocked = 1;

        if (cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle)
                        return btrfs_commit_transaction(trans, root);
                else
                        wake_up_process(info->transaction_kthread);
        }

        mutex_lock(&info->trans_mutex);
        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
        mutex_unlock(&info->trans_mutex);

        if (current->journal_info == trans)
                current->journal_info = NULL;
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (throttle)
                btrfs_run_delayed_iputs(root);

        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int err = 0;
        int werr = 0;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start = 0;
        u64 end;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark);
                if (ret)
                        break;
                while (start <= end) {
                        cond_resched();

                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;

                        btree_lock_page_hook(page);
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                continue;
                        }

                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int err = 0;
        int werr = 0;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start = 0;
        u64 end;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark);
                if (ret)
                        break;

                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageDirty(page)) {
                                btree_lock_page_hook(page);
                                wait_on_page_writeback(page);
                                err = write_one_page(page, 0);
                                if (err)
                                        werr = err;
                        }
                        wait_on_page_writeback(page);
                        page_cache_release(page);
                        cond_resched();
                }
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
        return ret || ret2;
}

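
/*
 * write and wait on all the dirty btree blocks for this transaction;
 * with no transaction handle we fall back to flushing the whole btree
 * inode
 */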
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                BUG_ON(ret);
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        eb = btrfs_lock_root_node(fs_info->tree_root);
        btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                update_cowonly_root(trans, root);
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/*
 * update all the fs tree roots on disk: write out the dirty subvolume
 * roots and free their log trees
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        if (root->commit_root != root->node) {
                                switch_commit_root(root);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;
                }
        }
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;
        unsigned long nr;

        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

#if 0
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
        DEFINE_WAIT(wait);

        mutex_lock(&info->trans_mutex);
        while (info->running_transaction &&
               info->running_transaction->delayed_refs.flushing) {
                prepare_to_wait(&info->transaction_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                mutex_unlock(&info->trans_mutex);

                schedule();

                mutex_lock(&info->trans_mutex);
                finish_wait(&info->transaction_wait, &wait);
        }
        mutex_unlock(&info->trans_mutex);
        return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        unsigned long nr;
        int ret;

        while (1) {
                /*
                 * we don't want to jump in and create a bunch of
                 * delayed refs if the transaction is starting to close
                 */
                wait_transaction_pre_flush(tree_root->fs_info);
                trans = btrfs_start_transaction(tree_root, 1);

                /*
                 * we've joined a transaction, make sure it isn't
                 * closing right now
                 */
                if (trans->transaction->delayed_refs.flushing) {
                        btrfs_end_transaction(trans, tree_root);
                        continue;
                }

                ret = btrfs_drop_snapshot(trans, root);
                if (ret != -EAGAIN)
                        break;

                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        break;

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        BUG_ON(ret);

        ret = btrfs_del_root(trans, tree_root, &root->root_key);
        BUG_ON(ret);

        nr = trans->blocks_used;
        ret = btrfs_end_transaction(trans, tree_root);
        BUG_ON(ret);

        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root);

        btrfs_btree_balance_dirty(tree_root, nr);
        return ret;
}
#endif

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct inode *parent_inode;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        int retries = 0;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                pending->error = -ENOMEM;
                goto fail;
        }

        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret) {
                pending->error = ret;
                goto fail;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
        btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
                                          to_reserve, &retries);
                if (ret) {
                        pending->error = ret;
                        goto fail;
                }
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent_inode = dentry->d_parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret);
        ret = btrfs_insert_dir_item(trans, parent_root,
                                dentry->d_name.name, dentry->d_name.len,
                                parent_inode->i_ino, &key,
                                BTRFS_FT_DIR, index);
        BUG_ON(ret);

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        BUG_ON(ret);

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);
        btrfs_set_lock_blocking(old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        BUG_ON(ret);

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 parent_inode->i_ino, index,
                                 dentry->d_name.name, dentry->d_name.len);
        BUG_ON(ret);

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        BUG_ON(IS_ERR(pending->snap));

        btrfs_reloc_post_snapshot(trans, pending);
        btrfs_orphan_post_snapshot(trans, pending);
fail:
        kfree(new_root_item);
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        list_for_each_entry(pending, head, list) {
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
        return 0;
}

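
/*
 * copy the current tree root and chunk root pointers into the in-memory
 * super block copy so the next super block write points at the new
 * roots
 */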
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = &root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
}

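
/*
 * status queries for other parts of the filesystem: these take only
 * new_trans_lock, so callers do not need trans_mutex
 */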
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->new_trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->new_trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->new_trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->new_trans_lock);
        return ret;
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
        int ret;
        int should_grow = 0;
        unsigned long now = get_seconds();
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

        btrfs_run_ordered_operations(root, 0);

        /* make a pass through all the delayed refs we have so far;
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        btrfs_trans_release_metadata(trans, root);

        cur_trans = trans->transaction;
        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        mutex_lock(&root->fs_info->trans_mutex);
        if (cur_trans->in_commit) {
                cur_trans->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

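        /*
         * if the transaction was just started, hold the commit open for
         * about a second so more writers can join and get batched into
         * this commit
         */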
        if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
                should_grow = 1;

        do {
                int snap_pending = 0;
                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);
                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else if (should_grow)
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                if (flush_on_commit || snap_pending) {
                        btrfs_start_delalloc_inodes(root, 1);
                        ret = btrfs_wait_ordered_extents(root, 0, 1);
                        BUG_ON(ret);
                }

                /*
                 * rename doesn't use btrfs_join_transaction, so once we
                 * set the transaction to blocked above, we aren't going
                 * to get any new ordered operations.  We can safely run
                 * it here and know for sure that nothing new will be
                 * added to the list
                 */
                btrfs_run_ordered_operations(root, 1);

                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                smp_mb();
                if (cur_trans->num_writers > 1 || should_grow)
                        schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        /* commit_fs_roots() and commit_cowonly_roots() below are
         * responsible for getting the various roots consistent with
         * each other.  Every pointer in the tree of tree roots has to
         * point to the most up to date root for every subvolume and
         * other tree.  So, we have to keep the tree logging code from
         * jumping in and changing any of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        BUG_ON(ret);

        /* commit_fs_roots gets rid of all the tree log roots; it is now
         * safe to free the tree of log tree roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        BUG_ON(ret);

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        switch_commit_root(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);

        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
        }

        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        trans->transaction->blocked = 0;

        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root, 0);

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        mutex_lock(&root->fs_info->trans_mutex);

        cur_trans->commit_done = 1;

        root->fs_info->last_trans_committed = cur_trans->transid;

        wake_up(&cur_trans->commit_wait);

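        /*
         * two references are dropped here: the one this handle took in
         * start_transaction() and the one fs_info->running_transaction
         * held until we cleared it above
         */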
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        mutex_unlock(&root->fs_info->trans_mutex);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->trans_mutex);
        list_splice_init(&fs_info->dead_roots, &list);
        mutex_unlock(&fs_info->trans_mutex);

        while (!list_empty(&list)) {
                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);

                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        btrfs_drop_snapshot(root, NULL, 0);
                else
                        btrfs_drop_snapshot(root, NULL, 1);
        }
        return 0;
}