fs/btrfs/disk-io.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/slab.h>
30 #include <linux/migrate.h>
31 #include <linux/ratelimit.h>
32 #include <linux/uuid.h>
33 #include <linux/semaphore.h>
34 #include <asm/unaligned.h>
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "hash.h"
38 #include "transaction.h"
39 #include "btrfs_inode.h"
40 #include "volumes.h"
41 #include "print-tree.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47 #include "rcu-string.h"
48 #include "dev-replace.h"
49 #include "raid56.h"
50 #include "sysfs.h"
51 #include "qgroup.h"
52
53 #ifdef CONFIG_X86
54 #include <asm/cpufeature.h>
55 #endif
56
57 static struct extent_io_ops btree_extent_io_ops;
58 static void end_workqueue_fn(struct btrfs_work *work);
59 static void free_fs_root(struct btrfs_root *root);
60 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
61                                     int read_only);
62 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
63 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
64                                       struct btrfs_root *root);
65 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
66 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
67                                         struct extent_io_tree *dirty_pages,
68                                         int mark);
69 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
70                                        struct extent_io_tree *pinned_extents);
71 static int btrfs_cleanup_transaction(struct btrfs_root *root);
72 static void btrfs_error_commit_super(struct btrfs_root *root);
73
74 /*
75  * end_io_wq structs are used to do processing in task context when an IO is
76  * complete.  This is used during reads to verify checksums, and it is used
77  * by writes to insert metadata for new file extents after IO is complete.
78  */
79 struct end_io_wq {
80         struct bio *bio;
81         bio_end_io_t *end_io;
82         void *private;
83         struct btrfs_fs_info *info;
84         int error;
85         int metadata;
86         struct list_head list;
87         struct btrfs_work work;
88 };
89
90 /*
91  * async submit bios are used to offload expensive checksumming
92  * onto the worker threads.  They checksum file and metadata bios
93  * just before they are sent down the IO stack.
94  */
95 struct async_submit_bio {
96         struct inode *inode;
97         struct bio *bio;
98         struct list_head list;
99         extent_submit_bio_hook_t *submit_bio_start;
100         extent_submit_bio_hook_t *submit_bio_done;
101         int rw;
102         int mirror_num;
103         unsigned long bio_flags;
104         /*
105          * bio_offset is optional, can be used if the pages in the bio
106          * can't tell us where in the file the bio should go
107          */
108         u64 bio_offset;
109         struct btrfs_work work;
110         int error;
111 };
112
113 /*
114  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
115  * eb, the lockdep key is determined by the btrfs_root it belongs to and
116  * the level the eb occupies in the tree.
117  *
118  * Different roots are used for different purposes and may nest inside each
119  * other and they require separate keysets.  As lockdep keys should be
120  * static, assign keysets according to the purpose of the root as indicated
121  * by btrfs_root->objectid.  This ensures that all special purpose roots
122  * have separate keysets.
123  *
124  * Lock-nesting across peer nodes is always done with the immediate parent
125  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
126  * subclass to avoid triggering lockdep warning in such cases.
127  *
128  * The key is set by the readpage_end_io_hook after the buffer has passed
129  * csum validation but before the pages are unlocked.  It is also set by
130  * btrfs_init_new_buffer on freshly allocated blocks.
131  *
132  * We also add a check to make sure the highest level of the tree is the
133  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
134  * needs update as well.
135  */
136 #ifdef CONFIG_DEBUG_LOCK_ALLOC
137 # if BTRFS_MAX_LEVEL != 8
138 #  error "BTRFS_MAX_LEVEL changed: update the lockdep keyset code in this file"
139 # endif
140
141 static struct btrfs_lockdep_keyset {
142         u64                     id;             /* root objectid */
143         const char              *name_stem;     /* lock name stem */
144         char                    names[BTRFS_MAX_LEVEL + 1][20];
145         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
146 } btrfs_lockdep_keysets[] = {
147         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
148         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
149         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
150         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
151         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
152         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
153         { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
154         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
155         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
156         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
157         { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
158         { .id = 0,                              .name_stem = "tree"     },
159 };
160
161 void __init btrfs_init_lockdep(void)
162 {
163         int i, j;
164
165         /* initialize lockdep class names */
166         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
167                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
168
169                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
170                         snprintf(ks->names[j], sizeof(ks->names[j]),
171                                  "btrfs-%s-%02d", ks->name_stem, j);
172         }
173 }
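/*
 * After btrfs_init_lockdep() has run, every keyset entry carries per-level
 * lock names of the form "btrfs-<stem>-<level>", e.g. "btrfs-root-00"
 * through "btrfs-root-08" for the root tree; these are the names that
 * appear in lockdep reports.
 */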
174
175 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
176                                     int level)
177 {
178         struct btrfs_lockdep_keyset *ks;
179
180         BUG_ON(level >= ARRAY_SIZE(ks->keys));
181
182         /* find the matching keyset, id 0 is the default entry */
183         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
184                 if (ks->id == objectid)
185                         break;
186
187         lockdep_set_class_and_name(&eb->lock,
188                                    &ks->keys[level], ks->names[level]);
189 }
190
191 #endif
192
193 /*
194  * extents on the btree inode are pretty simple, there's one extent
195  * that covers the entire device
196  */
197 static struct extent_map *btree_get_extent(struct inode *inode,
198                 struct page *page, size_t pg_offset, u64 start, u64 len,
199                 int create)
200 {
201         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
202         struct extent_map *em;
203         int ret;
204
205         read_lock(&em_tree->lock);
206         em = lookup_extent_mapping(em_tree, start, len);
207         if (em) {
208                 em->bdev =
209                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
210                 read_unlock(&em_tree->lock);
211                 goto out;
212         }
213         read_unlock(&em_tree->lock);
214
215         em = alloc_extent_map();
216         if (!em) {
217                 em = ERR_PTR(-ENOMEM);
218                 goto out;
219         }
220         em->start = 0;
221         em->len = (u64)-1;
222         em->block_len = (u64)-1;
223         em->block_start = 0;
224         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
225
226         write_lock(&em_tree->lock);
227         ret = add_extent_mapping(em_tree, em, 0);
228         if (ret == -EEXIST) {
229                 free_extent_map(em);
230                 em = lookup_extent_mapping(em_tree, start, len);
231                 if (!em)
232                         em = ERR_PTR(-EIO);
233         } else if (ret) {
234                 free_extent_map(em);
235                 em = ERR_PTR(ret);
236         }
237         write_unlock(&em_tree->lock);
238
239 out:
240         return em;
241 }
242
243 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
244 {
245         return btrfs_crc32c(seed, data, len);
246 }
247
248 void btrfs_csum_final(u32 crc, char *result)
249 {
250         put_unaligned_le32(~crc, result);
251 }
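/*
 * A minimal sketch of how the two helpers above pair up (csum_tree_block()
 * below does exactly this across an extent buffer):
 *
 *	u32 crc = ~(u32)0;
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, result);		(stores ~crc as little-endian)
 */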
252
253 /*
254  * compute the csum for a btree block, and either verify it or write it
255  * into the csum field of the block.
256  */
257 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
258                            int verify)
259 {
260         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
261         char *result = NULL;
262         unsigned long len;
263         unsigned long cur_len;
264         unsigned long offset = BTRFS_CSUM_SIZE;
265         char *kaddr;
266         unsigned long map_start;
267         unsigned long map_len;
268         int err;
269         u32 crc = ~(u32)0;
270         unsigned long inline_result;
271
272         len = buf->len - offset;
273         while (len > 0) {
274                 err = map_private_extent_buffer(buf, offset, 32,
275                                         &kaddr, &map_start, &map_len);
276                 if (err)
277                         return 1;
278                 cur_len = min(len, map_len - (offset - map_start));
279                 crc = btrfs_csum_data(kaddr + offset - map_start,
280                                       crc, cur_len);
281                 len -= cur_len;
282                 offset += cur_len;
283         }
284         if (csum_size > sizeof(inline_result)) {
285                 result = kzalloc(csum_size, GFP_NOFS);
286                 if (!result)
287                         return 1;
288         } else {
289                 result = (char *)&inline_result;
290         }
291
292         btrfs_csum_final(crc, result);
293
294         if (verify) {
295                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
296                         u32 val;
297                         u32 found = 0;
298                         memcpy(&found, result, csum_size);
299
300                         read_extent_buffer(buf, &val, 0, csum_size);
301                         printk_ratelimited(KERN_INFO
302                                 "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
303                                 "level %d\n",
304                                 root->fs_info->sb->s_id, buf->start,
305                                 val, found, btrfs_header_level(buf));
306                         if (result != (char *)&inline_result)
307                                 kfree(result);
308                         return 1;
309                 }
310         } else {
311                 write_extent_buffer(buf, result, 0, csum_size);
312         }
313         if (result != (char *)&inline_result)
314                 kfree(result);
315         return 0;
316 }
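/*
 * Note the return convention: csum_tree_block() returns 0 on success and 1
 * on any failure (mapping error, allocation failure or checksum mismatch);
 * callers such as btree_readpage_end_io_hook() turn a non-zero return into
 * -EIO.
 */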
317
318 /*
319  * we can't consider a given block up to date unless the transid of the
320  * block matches the transid in the parent node's pointer.  This is how we
321  * detect blocks that either didn't get written at all or got written
322  * in the wrong place.
323  */
324 static int verify_parent_transid(struct extent_io_tree *io_tree,
325                                  struct extent_buffer *eb, u64 parent_transid,
326                                  int atomic)
327 {
328         struct extent_state *cached_state = NULL;
329         int ret;
330         bool need_lock = (current->journal_info ==
331                           (void *)BTRFS_SEND_TRANS_STUB);
332
333         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
334                 return 0;
335
336         if (atomic)
337                 return -EAGAIN;
338
339         if (need_lock) {
340                 btrfs_tree_read_lock(eb);
341                 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
342         }
343
344         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
345                          0, &cached_state);
346         if (extent_buffer_uptodate(eb) &&
347             btrfs_header_generation(eb) == parent_transid) {
348                 ret = 0;
349                 goto out;
350         }
351         printk_ratelimited(KERN_INFO "parent transid verify failed on %llu wanted %llu "
352                        "found %llu\n",
353                        eb->start, parent_transid, btrfs_header_generation(eb));
354         ret = 1;
355
356         /*
357          * Things reading via commit roots that don't have normal protection,
358          * like send, can have a really old block in cache that may point at a
359          * block that has been free'd and re-allocated.  So don't clear uptodate
360          * if we find an eb that is under IO (dirty/writeback) because we could
361          * end up reading in the stale data and then writing it back out and
362          * making everybody very sad.
363          */
364         if (!extent_buffer_under_io(eb))
365                 clear_extent_buffer_uptodate(eb);
366 out:
367         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
368                              &cached_state, GFP_NOFS);
369         if (need_lock)
370                 btrfs_tree_read_unlock_blocking(eb);
371         return ret;
372 }
373
374 /*
375  * Return 0 if the superblock checksum type matches the checksum value of that
376  * algorithm. Pass the raw disk superblock data.
377  */
378 static int btrfs_check_super_csum(char *raw_disk_sb)
379 {
380         struct btrfs_super_block *disk_sb =
381                 (struct btrfs_super_block *)raw_disk_sb;
382         u16 csum_type = btrfs_super_csum_type(disk_sb);
383         int ret = 0;
384
385         if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
386                 u32 crc = ~(u32)0;
387                 const int csum_size = sizeof(crc);
388                 char result[csum_size];
389
390                 /*
391                  * The super_block structure does not span the whole
392                  * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
393  * is filled with zeros and is included in the checksum.
394                  */
395                 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
396                                 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
397                 btrfs_csum_final(crc, result);
398
399                 if (memcmp(raw_disk_sb, result, csum_size))
400                         ret = 1;
401
402                 if (ret && btrfs_super_generation(disk_sb) < 10) {
403                         printk(KERN_WARNING
404                                 "BTRFS: super block crcs don't match, older mkfs detected\n");
405                         ret = 0;
406                 }
407         }
408
409         if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
410                 printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
411                                 csum_type);
412                 ret = 1;
413         }
414
415         return ret;
416 }
417
418 /*
419  * helper to read a given tree block, doing retries as required when
420  * the checksums don't match and we have alternate mirrors to try.
421  */
422 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
423                                           struct extent_buffer *eb,
424                                           u64 start, u64 parent_transid)
425 {
426         struct extent_io_tree *io_tree;
427         int failed = 0;
428         int ret;
429         int num_copies = 0;
430         int mirror_num = 0;
431         int failed_mirror = 0;
432
433         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
434         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
435         while (1) {
436                 ret = read_extent_buffer_pages(io_tree, eb, start,
437                                                WAIT_COMPLETE,
438                                                btree_get_extent, mirror_num);
439                 if (!ret) {
440                         if (!verify_parent_transid(io_tree, eb,
441                                                    parent_transid, 0))
442                                 break;
443                         else
444                                 ret = -EIO;
445                 }
446
447                 /*
448                  * This buffer's crc is fine, but its contents are corrupted, so
449                  * there is no reason to read the other copies, they won't be
450                  * any less wrong.
451                  */
452                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
453                         break;
454
455                 num_copies = btrfs_num_copies(root->fs_info,
456                                               eb->start, eb->len);
457                 if (num_copies == 1)
458                         break;
459
460                 if (!failed_mirror) {
461                         failed = 1;
462                         failed_mirror = eb->read_mirror;
463                 }
464
465                 mirror_num++;
466                 if (mirror_num == failed_mirror)
467                         mirror_num++;
468
469                 if (mirror_num > num_copies)
470                         break;
471         }
472
473         if (failed && !ret && failed_mirror)
474                 repair_eb_io_failure(root, eb, failed_mirror);
475
476         return ret;
477 }
478
479 /*
480  * checksum a dirty tree block before IO.  This has extra checks to make sure
481  * we only fill in the checksum field in the first page of a multi-page block
482  */
483
484 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
485 {
486         u64 start = page_offset(page);
487         u64 found_start;
488         struct extent_buffer *eb;
489
490         eb = (struct extent_buffer *)page->private;
491         if (page != eb->pages[0])
492                 return 0;
493         found_start = btrfs_header_bytenr(eb);
494         if (WARN_ON(found_start != start || !PageUptodate(page)))
495                 return 0;
496         csum_tree_block(root, eb, 0);
497         return 0;
498 }
499
500 static int check_tree_block_fsid(struct btrfs_root *root,
501                                  struct extent_buffer *eb)
502 {
503         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
504         u8 fsid[BTRFS_UUID_SIZE];
505         int ret = 1;
506
507         read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
508         while (fs_devices) {
509                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
510                         ret = 0;
511                         break;
512                 }
513                 fs_devices = fs_devices->seed;
514         }
515         return ret;
516 }
517
518 #define CORRUPT(reason, eb, root, slot)                         \
519         btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu, "      \
520                    "root=%llu, slot=%d", reason,                        \
521                btrfs_header_bytenr(eb), root->objectid, slot)
522
523 static noinline int check_leaf(struct btrfs_root *root,
524                                struct extent_buffer *leaf)
525 {
526         struct btrfs_key key;
527         struct btrfs_key leaf_key;
528         u32 nritems = btrfs_header_nritems(leaf);
529         int slot;
530
531         if (nritems == 0)
532                 return 0;
533
534         /* Check the 0th item */
535         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
536             BTRFS_LEAF_DATA_SIZE(root)) {
537                 CORRUPT("invalid item offset size pair", leaf, root, 0);
538                 return -EIO;
539         }
540
541         /*
542          * Check to make sure each item's keys are in the correct order and their
543          * offsets make sense.  We only have to loop through nritems-1 because
544          * we check the current slot against the next slot, which verifies that the
545          * next slot's offset+size makes sense and that the current slot's
546          * offset is correct.
547          */
548         for (slot = 0; slot < nritems - 1; slot++) {
549                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
550                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
551
552                 /* Make sure the keys are in the right order */
553                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
554                         CORRUPT("bad key order", leaf, root, slot);
555                         return -EIO;
556                 }
557
558                 /*
559                  * Make sure the offset and ends are right, remember that the
560                  * item data starts at the end of the leaf and grows towards the
561                  * front.
562                  */
563                 if (btrfs_item_offset_nr(leaf, slot) !=
564                         btrfs_item_end_nr(leaf, slot + 1)) {
565                         CORRUPT("slot offset bad", leaf, root, slot);
566                         return -EIO;
567                 }
568
569                 /*
570                  * just in case all the items are consistent with each other, but
571                  * just incase all the items are consistent to eachother, but
572                  * all point outside of the leaf.
573                  */
574                 if (btrfs_item_end_nr(leaf, slot) >
575                     BTRFS_LEAF_DATA_SIZE(root)) {
576                         CORRUPT("slot end outside of leaf", leaf, root, slot);
577                         return -EIO;
578                 }
579         }
580
581         return 0;
582 }
583
584 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
585                                       u64 phy_offset, struct page *page,
586                                       u64 start, u64 end, int mirror)
587 {
588         u64 found_start;
589         int found_level;
590         struct extent_buffer *eb;
591         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
592         int ret = 0;
593         int reads_done;
594
595         if (!page->private)
596                 goto out;
597
598         eb = (struct extent_buffer *)page->private;
599
600         /* the pending IO might have been the only thing that kept this buffer
601          * in memory.  Make sure we have a ref for all these other checks
602          */
603         extent_buffer_get(eb);
604
605         reads_done = atomic_dec_and_test(&eb->io_pages);
606         if (!reads_done)
607                 goto err;
608
609         eb->read_mirror = mirror;
610         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
611                 ret = -EIO;
612                 goto err;
613         }
614
615         found_start = btrfs_header_bytenr(eb);
616         if (found_start != eb->start) {
617                 printk_ratelimited(KERN_INFO "BTRFS: bad tree block start "
618                                "%llu %llu\n",
619                                found_start, eb->start);
620                 ret = -EIO;
621                 goto err;
622         }
623         if (check_tree_block_fsid(root, eb)) {
624                 printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n",
625                                eb->start);
626                 ret = -EIO;
627                 goto err;
628         }
629         found_level = btrfs_header_level(eb);
630         if (found_level >= BTRFS_MAX_LEVEL) {
631                 btrfs_info(root->fs_info, "bad tree block level %d",
632                            (int)btrfs_header_level(eb));
633                 ret = -EIO;
634                 goto err;
635         }
636
637         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
638                                        eb, found_level);
639
640         ret = csum_tree_block(root, eb, 1);
641         if (ret) {
642                 ret = -EIO;
643                 goto err;
644         }
645
646         /*
647          * If this is a leaf block and it is corrupt, set the corrupt bit so
648          * that we don't try and read the other copies of this block, just
649          * return -EIO.
650          */
651         if (found_level == 0 && check_leaf(root, eb)) {
652                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
653                 ret = -EIO;
654         }
655
656         if (!ret)
657                 set_extent_buffer_uptodate(eb);
658 err:
659         if (reads_done &&
660             test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
661                 btree_readahead_hook(root, eb, eb->start, ret);
662
663         if (ret) {
664                 /*
665                  * our io error hook is going to dec the io pages
666                  * again, we have to make sure it has something
667                  * to decrement
668                  */
669                 atomic_inc(&eb->io_pages);
670                 clear_extent_buffer_uptodate(eb);
671         }
672         free_extent_buffer(eb);
673 out:
674         return ret;
675 }
676
677 static int btree_io_failed_hook(struct page *page, int failed_mirror)
678 {
679         struct extent_buffer *eb;
680         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
681
682         eb = (struct extent_buffer *)page->private;
683         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
684         eb->read_mirror = failed_mirror;
685         atomic_dec(&eb->io_pages);
686         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
687                 btree_readahead_hook(root, eb, eb->start, -EIO);
688         return -EIO;    /* we fixed nothing */
689 }
690
691 static void end_workqueue_bio(struct bio *bio, int err)
692 {
693         struct end_io_wq *end_io_wq = bio->bi_private;
694         struct btrfs_fs_info *fs_info;
695         struct btrfs_workqueue *wq;
696         btrfs_work_func_t func;
697
698         fs_info = end_io_wq->info;
699         end_io_wq->error = err;
700
701         if (bio->bi_rw & REQ_WRITE) {
702                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
703                         wq = fs_info->endio_meta_write_workers;
704                         func = btrfs_endio_meta_write_helper;
705                 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
706                         wq = fs_info->endio_freespace_worker;
707                         func = btrfs_freespace_write_helper;
708                 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
709                         wq = fs_info->endio_raid56_workers;
710                         func = btrfs_endio_raid56_helper;
711                 } else {
712                         wq = fs_info->endio_write_workers;
713                         func = btrfs_endio_write_helper;
714                 }
715         } else {
716                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
717                         wq = fs_info->endio_raid56_workers;
718                         func = btrfs_endio_raid56_helper;
719                 } else if (end_io_wq->metadata) {
720                         wq = fs_info->endio_meta_workers;
721                         func = btrfs_endio_meta_helper;
722                 } else {
723                         wq = fs_info->endio_workers;
724                         func = btrfs_endio_helper;
725                 }
726         }
727
728         btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
729         btrfs_queue_work(wq, &end_io_wq->work);
730 }
731
732 /*
733  * For the metadata arg you want
734  *
735  * 0 - if data
736  * 1 - if normal metadata
737  * 2 - if writing to the free space cache area
738  * 3 - raid parity work
739  */
740 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
741                         int metadata)
742 {
743         struct end_io_wq *end_io_wq;
744         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
745         if (!end_io_wq)
746                 return -ENOMEM;
747
748         end_io_wq->private = bio->bi_private;
749         end_io_wq->end_io = bio->bi_end_io;
750         end_io_wq->info = info;
751         end_io_wq->error = 0;
752         end_io_wq->bio = bio;
753         end_io_wq->metadata = metadata;
754
755         bio->bi_private = end_io_wq;
756         bio->bi_end_io = end_workqueue_bio;
757         return 0;
758 }
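/*
 * A sketch of the expected calling pattern (the read branch of
 * btree_submit_bio_hook() below does this):
 *
 *	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
 *	if (!ret)
 *		ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
 *
 * The original bi_end_io then runs from end_workqueue_fn() in task context
 * rather than directly from bio completion.
 */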
759
760 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
761 {
762         unsigned long limit = min_t(unsigned long,
763                                     info->thread_pool_size,
764                                     info->fs_devices->open_devices);
765         return 256 * limit;
766 }
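/*
 * This limit is what the async submit path throttles against:
 * btrfs_wq_submit_bio() increments fs_info->nr_async_submits and
 * run_one_async_done() wakes async_submit_wait once the count drops below
 * 2/3 of the value returned here.
 */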
767
768 static void run_one_async_start(struct btrfs_work *work)
769 {
770         struct async_submit_bio *async;
771         int ret;
772
773         async = container_of(work, struct  async_submit_bio, work);
774         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
775                                       async->mirror_num, async->bio_flags,
776                                       async->bio_offset);
777         if (ret)
778                 async->error = ret;
779 }
780
781 static void run_one_async_done(struct btrfs_work *work)
782 {
783         struct btrfs_fs_info *fs_info;
784         struct async_submit_bio *async;
785         int limit;
786
787         async = container_of(work, struct  async_submit_bio, work);
788         fs_info = BTRFS_I(async->inode)->root->fs_info;
789
790         limit = btrfs_async_submit_limit(fs_info);
791         limit = limit * 2 / 3;
792
793         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
794             waitqueue_active(&fs_info->async_submit_wait))
795                 wake_up(&fs_info->async_submit_wait);
796
797         /* If an error occurred we just want to clean up the bio and move on */
798         if (async->error) {
799                 bio_endio(async->bio, async->error);
800                 return;
801         }
802
803         async->submit_bio_done(async->inode, async->rw, async->bio,
804                                async->mirror_num, async->bio_flags,
805                                async->bio_offset);
806 }
807
808 static void run_one_async_free(struct btrfs_work *work)
809 {
810         struct async_submit_bio *async;
811
812         async = container_of(work, struct  async_submit_bio, work);
813         kfree(async);
814 }
815
816 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
817                         int rw, struct bio *bio, int mirror_num,
818                         unsigned long bio_flags,
819                         u64 bio_offset,
820                         extent_submit_bio_hook_t *submit_bio_start,
821                         extent_submit_bio_hook_t *submit_bio_done)
822 {
823         struct async_submit_bio *async;
824
825         async = kmalloc(sizeof(*async), GFP_NOFS);
826         if (!async)
827                 return -ENOMEM;
828
829         async->inode = inode;
830         async->rw = rw;
831         async->bio = bio;
832         async->mirror_num = mirror_num;
833         async->submit_bio_start = submit_bio_start;
834         async->submit_bio_done = submit_bio_done;
835
836         btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
837                         run_one_async_done, run_one_async_free);
838
839         async->bio_flags = bio_flags;
840         async->bio_offset = bio_offset;
841
842         async->error = 0;
843
844         atomic_inc(&fs_info->nr_async_submits);
845
846         if (rw & REQ_SYNC)
847                 btrfs_set_work_high_priority(&async->work);
848
849         btrfs_queue_work(fs_info->workers, &async->work);
850
851         while (atomic_read(&fs_info->async_submit_draining) &&
852               atomic_read(&fs_info->nr_async_submits)) {
853                 wait_event(fs_info->async_submit_wait,
854                            (atomic_read(&fs_info->nr_async_submits) == 0));
855         }
856
857         return 0;
858 }
859
860 static int btree_csum_one_bio(struct bio *bio)
861 {
862         struct bio_vec *bvec;
863         struct btrfs_root *root;
864         int i, ret = 0;
865
866         bio_for_each_segment_all(bvec, bio, i) {
867                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
868                 ret = csum_dirty_buffer(root, bvec->bv_page);
869                 if (ret)
870                         break;
871         }
872
873         return ret;
874 }
875
876 static int __btree_submit_bio_start(struct inode *inode, int rw,
877                                     struct bio *bio, int mirror_num,
878                                     unsigned long bio_flags,
879                                     u64 bio_offset)
880 {
881         /*
882          * when we're called for a write, we're already in the async
883          * submission context.  Just checksum the bio here; btrfs_map_bio runs later
884          */
885         return btree_csum_one_bio(bio);
886 }
887
888 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
889                                  int mirror_num, unsigned long bio_flags,
890                                  u64 bio_offset)
891 {
892         int ret;
893
894         /*
895          * when we're called for a write, we're already in the async
896          * submission context.  Just jump into btrfs_map_bio
897          */
898         ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
899         if (ret)
900                 bio_endio(bio, ret);
901         return ret;
902 }
903
904 static int check_async_write(struct inode *inode, unsigned long bio_flags)
905 {
906         if (bio_flags & EXTENT_BIO_TREE_LOG)
907                 return 0;
908 #ifdef CONFIG_X86
909         if (cpu_has_xmm4_2)
910                 return 0;
911 #endif
912         return 1;
913 }
914
915 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
916                                  int mirror_num, unsigned long bio_flags,
917                                  u64 bio_offset)
918 {
919         int async = check_async_write(inode, bio_flags);
920         int ret;
921
922         if (!(rw & REQ_WRITE)) {
923                 /*
924                  * called for a read, do the setup so that checksum validation
925                  * can happen in the async kernel threads
926                  */
927                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
928                                           bio, 1);
929                 if (ret)
930                         goto out_w_error;
931                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
932                                     mirror_num, 0);
933         } else if (!async) {
934                 ret = btree_csum_one_bio(bio);
935                 if (ret)
936                         goto out_w_error;
937                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
938                                     mirror_num, 0);
939         } else {
940                 /*
941                  * kthread helpers are used to submit writes so that
942                  * checksumming can happen in parallel across all CPUs
943                  */
944                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
945                                           inode, rw, bio, mirror_num, 0,
946                                           bio_offset,
947                                           __btree_submit_bio_start,
948                                           __btree_submit_bio_done);
949         }
950
951         if (ret) {
952 out_w_error:
953                 bio_endio(bio, ret);
954         }
955         return ret;
956 }
957
958 #ifdef CONFIG_MIGRATION
959 static int btree_migratepage(struct address_space *mapping,
960                         struct page *newpage, struct page *page,
961                         enum migrate_mode mode)
962 {
963         /*
964          * we can't safely write a btree page from here,
965          * we haven't done the locking hook
966          */
967         if (PageDirty(page))
968                 return -EAGAIN;
969         /*
970          * Buffers may be managed in a filesystem specific way.
971          * We must have no buffers or drop them.
972          */
973         if (page_has_private(page) &&
974             !try_to_release_page(page, GFP_KERNEL))
975                 return -EAGAIN;
976         return migrate_page(mapping, newpage, page, mode);
977 }
978 #endif
979
980
981 static int btree_writepages(struct address_space *mapping,
982                             struct writeback_control *wbc)
983 {
984         struct btrfs_fs_info *fs_info;
985         int ret;
986
987         if (wbc->sync_mode == WB_SYNC_NONE) {
988
989                 if (wbc->for_kupdate)
990                         return 0;
991
992                 fs_info = BTRFS_I(mapping->host)->root->fs_info;
993                 /* this is a bit racy, but that's ok */
994                 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
995                                              BTRFS_DIRTY_METADATA_THRESH);
996                 if (ret < 0)
997                         return 0;
998         }
999         return btree_write_cache_pages(mapping, wbc);
1000 }
1001
1002 static int btree_readpage(struct file *file, struct page *page)
1003 {
1004         struct extent_io_tree *tree;
1005         tree = &BTRFS_I(page->mapping->host)->io_tree;
1006         return extent_read_full_page(tree, page, btree_get_extent, 0);
1007 }
1008
1009 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1010 {
1011         if (PageWriteback(page) || PageDirty(page))
1012                 return 0;
1013
1014         return try_release_extent_buffer(page);
1015 }
1016
1017 static void btree_invalidatepage(struct page *page, unsigned int offset,
1018                                  unsigned int length)
1019 {
1020         struct extent_io_tree *tree;
1021         tree = &BTRFS_I(page->mapping->host)->io_tree;
1022         extent_invalidatepage(tree, page, offset);
1023         btree_releasepage(page, GFP_NOFS);
1024         if (PagePrivate(page)) {
1025                 btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
1026                            "page private not zero on page %llu",
1027                            (unsigned long long)page_offset(page));
1028                 ClearPagePrivate(page);
1029                 set_page_private(page, 0);
1030                 page_cache_release(page);
1031         }
1032 }
1033
1034 static int btree_set_page_dirty(struct page *page)
1035 {
1036 #ifdef DEBUG
1037         struct extent_buffer *eb;
1038
1039         BUG_ON(!PagePrivate(page));
1040         eb = (struct extent_buffer *)page->private;
1041         BUG_ON(!eb);
1042         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1043         BUG_ON(!atomic_read(&eb->refs));
1044         btrfs_assert_tree_locked(eb);
1045 #endif
1046         return __set_page_dirty_nobuffers(page);
1047 }
1048
1049 static const struct address_space_operations btree_aops = {
1050         .readpage       = btree_readpage,
1051         .writepages     = btree_writepages,
1052         .releasepage    = btree_releasepage,
1053         .invalidatepage = btree_invalidatepage,
1054 #ifdef CONFIG_MIGRATION
1055         .migratepage    = btree_migratepage,
1056 #endif
1057         .set_page_dirty = btree_set_page_dirty,
1058 };
1059
1060 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1061                          u64 parent_transid)
1062 {
1063         struct extent_buffer *buf = NULL;
1064         struct inode *btree_inode = root->fs_info->btree_inode;
1065         int ret = 0;
1066
1067         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1068         if (!buf)
1069                 return 0;
1070         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1071                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1072         free_extent_buffer(buf);
1073         return ret;
1074 }
1075
1076 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1077                          int mirror_num, struct extent_buffer **eb)
1078 {
1079         struct extent_buffer *buf = NULL;
1080         struct inode *btree_inode = root->fs_info->btree_inode;
1081         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1082         int ret;
1083
1084         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1085         if (!buf)
1086                 return 0;
1087
1088         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1089
1090         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1091                                        btree_get_extent, mirror_num);
1092         if (ret) {
1093                 free_extent_buffer(buf);
1094                 return ret;
1095         }
1096
1097         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1098                 free_extent_buffer(buf);
1099                 return -EIO;
1100         } else if (extent_buffer_uptodate(buf)) {
1101                 *eb = buf;
1102         } else {
1103                 free_extent_buffer(buf);
1104         }
1105         return 0;
1106 }
1107
1108 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1109                                             u64 bytenr, u32 blocksize)
1110 {
1111         return find_extent_buffer(root->fs_info, bytenr);
1112 }
1113
1114 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1115                                                  u64 bytenr, u32 blocksize)
1116 {
1117 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1118         if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
1119                 return alloc_test_extent_buffer(root->fs_info, bytenr,
1120                                                 blocksize);
1121 #endif
1122         return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
1123 }
1124
1125
1126 int btrfs_write_tree_block(struct extent_buffer *buf)
1127 {
1128         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1129                                         buf->start + buf->len - 1);
1130 }
1131
1132 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1133 {
1134         return filemap_fdatawait_range(buf->pages[0]->mapping,
1135                                        buf->start, buf->start + buf->len - 1);
1136 }
1137
1138 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1139                                       u32 blocksize, u64 parent_transid)
1140 {
1141         struct extent_buffer *buf = NULL;
1142         int ret;
1143
1144         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1145         if (!buf)
1146                 return NULL;
1147
1148         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1149         if (ret) {
1150                 free_extent_buffer(buf);
1151                 return NULL;
1152         }
1153         return buf;
1154
1155 }
1156
1157 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1158                       struct extent_buffer *buf)
1159 {
1160         struct btrfs_fs_info *fs_info = root->fs_info;
1161
1162         if (btrfs_header_generation(buf) ==
1163             fs_info->running_transaction->transid) {
1164                 btrfs_assert_tree_locked(buf);
1165
1166                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1167                         __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1168                                              -buf->len,
1169                                              fs_info->dirty_metadata_batch);
1170                         /* ugh, clear_extent_buffer_dirty needs to lock the page */
1171                         btrfs_set_lock_blocking(buf);
1172                         clear_extent_buffer_dirty(buf);
1173                 }
1174         }
1175 }
1176
1177 static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1178 {
1179         struct btrfs_subvolume_writers *writers;
1180         int ret;
1181
1182         writers = kmalloc(sizeof(*writers), GFP_NOFS);
1183         if (!writers)
1184                 return ERR_PTR(-ENOMEM);
1185
1186         ret = percpu_counter_init(&writers->counter, 0);
1187         if (ret < 0) {
1188                 kfree(writers);
1189                 return ERR_PTR(ret);
1190         }
1191
1192         init_waitqueue_head(&writers->wait);
1193         return writers;
1194 }
1195
1196 static void
1197 btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
1198 {
1199         percpu_counter_destroy(&writers->counter);
1200         kfree(writers);
1201 }
1202
1203 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1204                          u32 stripesize, struct btrfs_root *root,
1205                          struct btrfs_fs_info *fs_info,
1206                          u64 objectid)
1207 {
1208         root->node = NULL;
1209         root->commit_root = NULL;
1210         root->sectorsize = sectorsize;
1211         root->nodesize = nodesize;
1212         root->leafsize = leafsize;
1213         root->stripesize = stripesize;
1214         root->state = 0;
1215         root->orphan_cleanup_state = 0;
1216
1217         root->objectid = objectid;
1218         root->last_trans = 0;
1219         root->highest_objectid = 0;
1220         root->nr_delalloc_inodes = 0;
1221         root->nr_ordered_extents = 0;
1222         root->name = NULL;
1223         root->inode_tree = RB_ROOT;
1224         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1225         root->block_rsv = NULL;
1226         root->orphan_block_rsv = NULL;
1227
1228         INIT_LIST_HEAD(&root->dirty_list);
1229         INIT_LIST_HEAD(&root->root_list);
1230         INIT_LIST_HEAD(&root->delalloc_inodes);
1231         INIT_LIST_HEAD(&root->delalloc_root);
1232         INIT_LIST_HEAD(&root->ordered_extents);
1233         INIT_LIST_HEAD(&root->ordered_root);
1234         INIT_LIST_HEAD(&root->logged_list[0]);
1235         INIT_LIST_HEAD(&root->logged_list[1]);
1236         spin_lock_init(&root->orphan_lock);
1237         spin_lock_init(&root->inode_lock);
1238         spin_lock_init(&root->delalloc_lock);
1239         spin_lock_init(&root->ordered_extent_lock);
1240         spin_lock_init(&root->accounting_lock);
1241         spin_lock_init(&root->log_extents_lock[0]);
1242         spin_lock_init(&root->log_extents_lock[1]);
1243         mutex_init(&root->objectid_mutex);
1244         mutex_init(&root->log_mutex);
1245         mutex_init(&root->ordered_extent_mutex);
1246         mutex_init(&root->delalloc_mutex);
1247         init_waitqueue_head(&root->log_writer_wait);
1248         init_waitqueue_head(&root->log_commit_wait[0]);
1249         init_waitqueue_head(&root->log_commit_wait[1]);
1250         INIT_LIST_HEAD(&root->log_ctxs[0]);
1251         INIT_LIST_HEAD(&root->log_ctxs[1]);
1252         atomic_set(&root->log_commit[0], 0);
1253         atomic_set(&root->log_commit[1], 0);
1254         atomic_set(&root->log_writers, 0);
1255         atomic_set(&root->log_batch, 0);
1256         atomic_set(&root->orphan_inodes, 0);
1257         atomic_set(&root->refs, 1);
1258         atomic_set(&root->will_be_snapshoted, 0);
1259         root->log_transid = 0;
1260         root->log_transid_committed = -1;
1261         root->last_log_commit = 0;
1262         if (fs_info)
1263                 extent_io_tree_init(&root->dirty_log_pages,
1264                                      fs_info->btree_inode->i_mapping);
1265
1266         memset(&root->root_key, 0, sizeof(root->root_key));
1267         memset(&root->root_item, 0, sizeof(root->root_item));
1268         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1269         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1270         if (fs_info)
1271                 root->defrag_trans_start = fs_info->generation;
1272         else
1273                 root->defrag_trans_start = 0;
1274         init_completion(&root->kobj_unregister);
1275         root->root_key.objectid = objectid;
1276         root->anon_dev = 0;
1277
1278         spin_lock_init(&root->root_item_lock);
1279 }
1280
1281 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1282 {
1283         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1284         if (root)
1285                 root->fs_info = fs_info;
1286         return root;
1287 }
1288
1289 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1290 /* Should only be used by the testing infrastructure */
1291 struct btrfs_root *btrfs_alloc_dummy_root(void)
1292 {
1293         struct btrfs_root *root;
1294
1295         root = btrfs_alloc_root(NULL);
1296         if (!root)
1297                 return ERR_PTR(-ENOMEM);
1298         __setup_root(4096, 4096, 4096, 4096, root, NULL, 1);
1299         set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
1300         root->alloc_bytenr = 0;
1301
1302         return root;
1303 }
1304 #endif
1305
1306 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1307                                      struct btrfs_fs_info *fs_info,
1308                                      u64 objectid)
1309 {
1310         struct extent_buffer *leaf;
1311         struct btrfs_root *tree_root = fs_info->tree_root;
1312         struct btrfs_root *root;
1313         struct btrfs_key key;
1314         int ret = 0;
1315         uuid_le uuid;
1316
1317         root = btrfs_alloc_root(fs_info);
1318         if (!root)
1319                 return ERR_PTR(-ENOMEM);
1320
1321         __setup_root(tree_root->nodesize, tree_root->leafsize,
1322                      tree_root->sectorsize, tree_root->stripesize,
1323                      root, fs_info, objectid);
1324         root->root_key.objectid = objectid;
1325         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1326         root->root_key.offset = 0;
1327
1328         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1329                                       0, objectid, NULL, 0, 0, 0);
1330         if (IS_ERR(leaf)) {
1331                 ret = PTR_ERR(leaf);
1332                 leaf = NULL;
1333                 goto fail;
1334         }
1335
1336         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1337         btrfs_set_header_bytenr(leaf, leaf->start);
1338         btrfs_set_header_generation(leaf, trans->transid);
1339         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1340         btrfs_set_header_owner(leaf, objectid);
1341         root->node = leaf;
1342
1343         write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
1344                             BTRFS_FSID_SIZE);
1345         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1346                             btrfs_header_chunk_tree_uuid(leaf),
1347                             BTRFS_UUID_SIZE);
1348         btrfs_mark_buffer_dirty(leaf);
1349
1350         root->commit_root = btrfs_root_node(root);
1351         set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1352
1353         root->root_item.flags = 0;
1354         root->root_item.byte_limit = 0;
1355         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1356         btrfs_set_root_generation(&root->root_item, trans->transid);
1357         btrfs_set_root_level(&root->root_item, 0);
1358         btrfs_set_root_refs(&root->root_item, 1);
1359         btrfs_set_root_used(&root->root_item, leaf->len);
1360         btrfs_set_root_last_snapshot(&root->root_item, 0);
1361         btrfs_set_root_dirid(&root->root_item, 0);
1362         uuid_le_gen(&uuid);
1363         memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1364         root->root_item.drop_level = 0;
1365
1366         key.objectid = objectid;
1367         key.type = BTRFS_ROOT_ITEM_KEY;
1368         key.offset = 0;
1369         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1370         if (ret)
1371                 goto fail;
1372
1373         btrfs_tree_unlock(leaf);
1374
1375         return root;
1376
1377 fail:
1378         if (leaf) {
1379                 btrfs_tree_unlock(leaf);
1380                 free_extent_buffer(root->commit_root);
1381                 free_extent_buffer(leaf);
1382         }
1383         kfree(root);
1384
1385         return ERR_PTR(ret);
1386 }
1387
1388 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1389                                          struct btrfs_fs_info *fs_info)
1390 {
1391         struct btrfs_root *root;
1392         struct btrfs_root *tree_root = fs_info->tree_root;
1393         struct extent_buffer *leaf;
1394
1395         root = btrfs_alloc_root(fs_info);
1396         if (!root)
1397                 return ERR_PTR(-ENOMEM);
1398
1399         __setup_root(tree_root->nodesize, tree_root->leafsize,
1400                      tree_root->sectorsize, tree_root->stripesize,
1401                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1402
1403         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1404         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1405         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1406
1407         /*
1408          * DON'T set REF_COWS for log trees
1409          *
1410          * log trees do not get reference counted because they go away
1411          * before a real commit is actually done.  They do store pointers
1412          * to file data extents, and those reference counts still get
1413          * updated (along with back refs to the log tree).
1414          */
1415
1416         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1417                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1418                                       0, 0, 0);
1419         if (IS_ERR(leaf)) {
1420                 kfree(root);
1421                 return ERR_CAST(leaf);
1422         }
1423
1424         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1425         btrfs_set_header_bytenr(leaf, leaf->start);
1426         btrfs_set_header_generation(leaf, trans->transid);
1427         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1428         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1429         root->node = leaf;
1430
1431         write_extent_buffer(root->node, root->fs_info->fsid,
1432                             btrfs_header_fsid(), BTRFS_FSID_SIZE);
1433         btrfs_mark_buffer_dirty(root->node);
1434         btrfs_tree_unlock(root->node);
1435         return root;
1436 }
1437
1438 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1439                              struct btrfs_fs_info *fs_info)
1440 {
1441         struct btrfs_root *log_root;
1442
1443         log_root = alloc_log_tree(trans, fs_info);
1444         if (IS_ERR(log_root))
1445                 return PTR_ERR(log_root);
1446         WARN_ON(fs_info->log_root_tree);
1447         fs_info->log_root_tree = log_root;
1448         return 0;
1449 }
1450
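/*
 * create a log tree for the given subvolume root, initialize its root item
 * and hook it up as root->log_root.
 */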
1451 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1452                        struct btrfs_root *root)
1453 {
1454         struct btrfs_root *log_root;
1455         struct btrfs_inode_item *inode_item;
1456
1457         log_root = alloc_log_tree(trans, root->fs_info);
1458         if (IS_ERR(log_root))
1459                 return PTR_ERR(log_root);
1460
1461         log_root->last_trans = trans->transid;
1462         log_root->root_key.offset = root->root_key.objectid;
1463
1464         inode_item = &log_root->root_item.inode;
1465         btrfs_set_stack_inode_generation(inode_item, 1);
1466         btrfs_set_stack_inode_size(inode_item, 3);
1467         btrfs_set_stack_inode_nlink(inode_item, 1);
1468         btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
1469         btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1470
1471         btrfs_set_root_node(&log_root->root_item, log_root->node);
1472
1473         WARN_ON(root->log_root);
1474         root->log_root = log_root;
1475         root->log_transid = 0;
1476         root->log_transid_committed = -1;
1477         root->last_log_commit = 0;
1478         return 0;
1479 }
1480
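/*
 * read a root from the root tree: look up its root item by @key and read
 * the tree block it points to.  Returns the new in-memory root or an
 * ERR_PTR on failure.
 */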
1481 static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1482                                                struct btrfs_key *key)
1483 {
1484         struct btrfs_root *root;
1485         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1486         struct btrfs_path *path;
1487         u64 generation;
1488         u32 blocksize;
1489         int ret;
1490
1491         path = btrfs_alloc_path();
1492         if (!path)
1493                 return ERR_PTR(-ENOMEM);
1494
1495         root = btrfs_alloc_root(fs_info);
1496         if (!root) {
1497                 ret = -ENOMEM;
1498                 goto alloc_fail;
1499         }
1500
1501         __setup_root(tree_root->nodesize, tree_root->leafsize,
1502                      tree_root->sectorsize, tree_root->stripesize,
1503                      root, fs_info, key->objectid);
1504
1505         ret = btrfs_find_root(tree_root, key, path,
1506                               &root->root_item, &root->root_key);
1507         if (ret) {
1508                 if (ret > 0)
1509                         ret = -ENOENT;
1510                 goto find_fail;
1511         }
1512
1513         generation = btrfs_root_generation(&root->root_item);
1514         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1515         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1516                                      blocksize, generation);
1517         if (!root->node) {
1518                 ret = -ENOMEM;
1519                 goto find_fail;
1520         } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1521                 ret = -EIO;
1522                 goto read_fail;
1523         }
1524         root->commit_root = btrfs_root_node(root);
1525 out:
1526         btrfs_free_path(path);
1527         return root;
1528
1529 read_fail:
1530         free_extent_buffer(root->node);
1531 find_fail:
1532         kfree(root);
1533 alloc_fail:
1534         root = ERR_PTR(ret);
1535         goto out;
1536 }
1537
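/*
 * wrapper around btrfs_read_tree_root() that also sets BTRFS_ROOT_REF_COWS
 * and sanity checks the root item for everything except log trees.
 */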
1538 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1539                                       struct btrfs_key *location)
1540 {
1541         struct btrfs_root *root;
1542
1543         root = btrfs_read_tree_root(tree_root, location);
1544         if (IS_ERR(root))
1545                 return root;
1546
1547         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1548                 set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1549                 btrfs_check_and_init_root_item(&root->root_item);
1550         }
1551
1552         return root;
1553 }
1554
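/*
 * set up the runtime state of a subvolume root: the free inode caches,
 * the subvolume writer tracking and an anonymous block device for the
 * subvolume.  Undoes its allocations on failure.
 */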
1555 int btrfs_init_fs_root(struct btrfs_root *root)
1556 {
1557         int ret;
1558         struct btrfs_subvolume_writers *writers;
1559
1560         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1561         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1562                                         GFP_NOFS);
1563         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1564                 ret = -ENOMEM;
1565                 goto fail;
1566         }
1567
1568         writers = btrfs_alloc_subvolume_writers();
1569         if (IS_ERR(writers)) {
1570                 ret = PTR_ERR(writers);
1571                 goto fail;
1572         }
1573         root->subv_writers = writers;
1574
1575         btrfs_init_free_ino_ctl(root);
1576         spin_lock_init(&root->cache_lock);
1577         init_waitqueue_head(&root->cache_wait);
1578
1579         ret = get_anon_bdev(&root->anon_dev);
1580         if (ret)
1581                 goto free_writers;
1582         return 0;
1583
1584 free_writers:
1585         btrfs_free_subvolume_writers(root->subv_writers);
1586 fail:
1587         kfree(root->free_ino_ctl);
1588         kfree(root->free_ino_pinned);
1589         return ret;
1590 }
1591
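/*
 * look up a cached subvolume root in the fs_roots radix tree by objectid.
 * Returns NULL if the root has not been loaded yet.
 */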
1592 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1593                                                u64 root_id)
1594 {
1595         struct btrfs_root *root;
1596
1597         spin_lock(&fs_info->fs_roots_radix_lock);
1598         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1599                                  (unsigned long)root_id);
1600         spin_unlock(&fs_info->fs_roots_radix_lock);
1601         return root;
1602 }
1603
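/*
 * insert a subvolume root into the fs_roots radix tree so later lookups
 * can find it.  Returns -EEXIST if the root is already cached.
 */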
1604 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1605                          struct btrfs_root *root)
1606 {
1607         int ret;
1608
1609         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1610         if (ret)
1611                 return ret;
1612
1613         spin_lock(&fs_info->fs_roots_radix_lock);
1614         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1615                                 (unsigned long)root->root_key.objectid,
1616                                 root);
1617         if (ret == 0)
1618                 set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1619         spin_unlock(&fs_info->fs_roots_radix_lock);
1620         radix_tree_preload_end();
1621
1622         return ret;
1623 }
1624
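/*
 * find a root by its key: the well known roots are returned straight from
 * fs_info, everything else is looked up in the radix tree and, if it is
 * not cached yet, read from disk, initialized and inserted into the radix
 * tree.  When check_ref is set, roots with zero refs are treated as
 * missing.
 */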
1625 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1626                                      struct btrfs_key *location,
1627                                      bool check_ref)
1628 {
1629         struct btrfs_root *root;
1630         int ret;
1631
1632         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1633                 return fs_info->tree_root;
1634         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1635                 return fs_info->extent_root;
1636         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1637                 return fs_info->chunk_root;
1638         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1639                 return fs_info->dev_root;
1640         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1641                 return fs_info->csum_root;
1642         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1643                 return fs_info->quota_root ? fs_info->quota_root :
1644                                              ERR_PTR(-ENOENT);
1645         if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1646                 return fs_info->uuid_root ? fs_info->uuid_root :
1647                                             ERR_PTR(-ENOENT);
1648 again:
1649         root = btrfs_lookup_fs_root(fs_info, location->objectid);
1650         if (root) {
1651                 if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1652                         return ERR_PTR(-ENOENT);
1653                 return root;
1654         }
1655
1656         root = btrfs_read_fs_root(fs_info->tree_root, location);
1657         if (IS_ERR(root))
1658                 return root;
1659
1660         if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1661                 ret = -ENOENT;
1662                 goto fail;
1663         }
1664
1665         ret = btrfs_init_fs_root(root);
1666         if (ret)
1667                 goto fail;
1668
1669         ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
1670                         location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
1671         if (ret < 0)
1672                 goto fail;
1673         if (ret == 0)
1674                 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1675
1676         ret = btrfs_insert_fs_root(fs_info, root);
1677         if (ret) {
1678                 if (ret == -EEXIST) {
1679                         free_fs_root(root);
1680                         goto again;
1681                 }
1682                 goto fail;
1683         }
1684         return root;
1685 fail:
1686         free_fs_root(root);
1687         return ERR_PTR(ret);
1688 }
1689
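/*
 * congested callback for our backing_dev_info: report congestion if any of
 * the underlying devices is congested.
 */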
1690 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1691 {
1692         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1693         int ret = 0;
1694         struct btrfs_device *device;
1695         struct backing_dev_info *bdi;
1696
1697         rcu_read_lock();
1698         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1699                 if (!device->bdev)
1700                         continue;
1701                 bdi = blk_get_backing_dev_info(device->bdev);
1702                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1703                         ret = 1;
1704                         break;
1705                 }
1706         }
1707         rcu_read_unlock();
1708         return ret;
1709 }
1710
1711 /*
1712  * If this fails, caller must call bdi_destroy() to get rid of the
1713  * bdi again.
1714  */
1715 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1716 {
1717         int err;
1718
1719         bdi->capabilities = BDI_CAP_MAP_COPY;
1720         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1721         if (err)
1722                 return err;
1723
1724         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1725         bdi->congested_fn       = btrfs_congested_fn;
1726         bdi->congested_data     = info;
1727         return 0;
1728 }
1729
1730 /*
1731  * called by the kthread helper functions to finally call the bio end_io
1732  * functions.  This is where read checksum verification actually happens
1733  */
1734 static void end_workqueue_fn(struct btrfs_work *work)
1735 {
1736         struct bio *bio;
1737         struct end_io_wq *end_io_wq;
1738         int error;
1739
1740         end_io_wq = container_of(work, struct end_io_wq, work);
1741         bio = end_io_wq->bio;
1742
1743         error = end_io_wq->error;
1744         bio->bi_private = end_io_wq->private;
1745         bio->bi_end_io = end_io_wq->end_io;
1746         kfree(end_io_wq);
1747         bio_endio_nodec(bio, error);
1748 }
1749
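/*
 * background thread that runs delayed iputs, cleans up deleted snapshots
 * and drives inode defrag whenever the fs doesn't need it to sleep.
 */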
1750 static int cleaner_kthread(void *arg)
1751 {
1752         struct btrfs_root *root = arg;
1753         int again;
1754
1755         do {
1756                 again = 0;
1757
1758                 /* Make the cleaner go to sleep early. */
1759                 if (btrfs_need_cleaner_sleep(root))
1760                         goto sleep;
1761
1762                 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1763                         goto sleep;
1764
1765                 /*
1766                  * Recheck after taking the mutex: the status of the fs may
1767                  * have changed between the check above and the trylock.
1768                  */
1769                 if (btrfs_need_cleaner_sleep(root)) {
1770                         mutex_unlock(&root->fs_info->cleaner_mutex);
1771                         goto sleep;
1772                 }
1773
1774                 btrfs_run_delayed_iputs(root);
1775                 again = btrfs_clean_one_deleted_snapshot(root);
1776                 mutex_unlock(&root->fs_info->cleaner_mutex);
1777
1778                 /*
1779                  * The defragger has dealt with the R/O remount and umount,
1780                  * so we needn't do anything special here.
1781                  */
1782                 btrfs_run_defrag_inodes(root->fs_info);
1783 sleep:
1784                 if (!try_to_freeze() && !again) {
1785                         set_current_state(TASK_INTERRUPTIBLE);
1786                         if (!kthread_should_stop())
1787                                 schedule();
1788                         __set_current_state(TASK_RUNNING);
1789                 }
1790         } while (!kthread_should_stop());
1791         return 0;
1792 }
1793
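/*
 * background thread that periodically commits the running transaction once
 * it is old enough or blocked, based on the commit_interval mount option.
 */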
1794 static int transaction_kthread(void *arg)
1795 {
1796         struct btrfs_root *root = arg;
1797         struct btrfs_trans_handle *trans;
1798         struct btrfs_transaction *cur;
1799         u64 transid;
1800         unsigned long now;
1801         unsigned long delay;
1802         bool cannot_commit;
1803
1804         do {
1805                 cannot_commit = false;
1806                 delay = HZ * root->fs_info->commit_interval;
1807                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1808
1809                 spin_lock(&root->fs_info->trans_lock);
1810                 cur = root->fs_info->running_transaction;
1811                 if (!cur) {
1812                         spin_unlock(&root->fs_info->trans_lock);
1813                         goto sleep;
1814                 }
1815
1816                 now = get_seconds();
1817                 if (cur->state < TRANS_STATE_BLOCKED &&
1818                     (now < cur->start_time ||
1819                      now - cur->start_time < root->fs_info->commit_interval)) {
1820                         spin_unlock(&root->fs_info->trans_lock);
1821                         delay = HZ * 5;
1822                         goto sleep;
1823                 }
1824                 transid = cur->transid;
1825                 spin_unlock(&root->fs_info->trans_lock);
1826
1827                 /* If the file system is aborted, this will always fail. */
1828                 trans = btrfs_attach_transaction(root);
1829                 if (IS_ERR(trans)) {
1830                         if (PTR_ERR(trans) != -ENOENT)
1831                                 cannot_commit = true;
1832                         goto sleep;
1833                 }
1834                 if (transid == trans->transid) {
1835                         btrfs_commit_transaction(trans, root);
1836                 } else {
1837                         btrfs_end_transaction(trans, root);
1838                 }
1839 sleep:
1840                 wake_up_process(root->fs_info->cleaner_kthread);
1841                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1842
1843                 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1844                                       &root->fs_info->fs_state)))
1845                         btrfs_cleanup_transaction(root);
1846                 if (!try_to_freeze()) {
1847                         set_current_state(TASK_INTERRUPTIBLE);
1848                         if (!kthread_should_stop() &&
1849                             (!btrfs_transaction_blocked(root->fs_info) ||
1850                              cannot_commit))
1851                                 schedule_timeout(delay);
1852                         __set_current_state(TASK_RUNNING);
1853                 }
1854         } while (!kthread_should_stop());
1855         return 0;
1856 }
1857
1858 /*
1859  * this will find the highest generation in the array of
1860  * root backups.  The index of the newest backup is returned,
1861  * or -1 if we can't find anything.
1862  *
1863  * We check to make sure the array is valid by comparing the
1864  * generation of the latest root in the array with the generation
1865  * in the super block.  If they don't match we pitch it.
1866  */
1867 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1868 {
1869         u64 cur;
1870         int newest_index = -1;
1871         struct btrfs_root_backup *root_backup;
1872         int i;
1873
1874         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1875                 root_backup = info->super_copy->super_roots + i;
1876                 cur = btrfs_backup_tree_root_gen(root_backup);
1877                 if (cur == newest_gen)
1878                         newest_index = i;
1879         }
1880
1881         /* check to see if we actually wrapped around */
1882         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1883                 root_backup = info->super_copy->super_roots;
1884                 cur = btrfs_backup_tree_root_gen(root_backup);
1885                 if (cur == newest_gen)
1886                         newest_index = 0;
1887         }
1888         return newest_index;
1889 }
1890
1891
1892 /*
1893  * find the oldest backup so we know where to store new entries
1894  * in the backup array.  This will set the backup_root_index
1895  * field in the fs_info struct
1896  */
1897 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1898                                      u64 newest_gen)
1899 {
1900         int newest_index = -1;
1901
1902         newest_index = find_newest_super_backup(info, newest_gen);
1903         /* if there was garbage in there, just move along */
1904         if (newest_index == -1) {
1905                 info->backup_root_index = 0;
1906         } else {
1907                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1908         }
1909 }
1910
1911 /*
1912  * copy all the root pointers into the super backup array.
1913  * this will bump the backup pointer by one when it is
1914  * done
1915  */
1916 static void backup_super_roots(struct btrfs_fs_info *info)
1917 {
1918         int next_backup;
1919         struct btrfs_root_backup *root_backup;
1920         int last_backup;
1921
1922         next_backup = info->backup_root_index;
1923         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1924                 BTRFS_NUM_BACKUP_ROOTS;
1925
1926         /*
1927          * just overwrite the last backup if we're at the same generation;
1928          * this happens only at umount
1929          */
1930         root_backup = info->super_for_commit->super_roots + last_backup;
1931         if (btrfs_backup_tree_root_gen(root_backup) ==
1932             btrfs_header_generation(info->tree_root->node))
1933                 next_backup = last_backup;
1934
1935         root_backup = info->super_for_commit->super_roots + next_backup;
1936
1937         /*
1938          * make sure all of our padding and empty slots get zero filled
1939          * regardless of which ones we use today
1940          */
1941         memset(root_backup, 0, sizeof(*root_backup));
1942
1943         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1944
1945         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1946         btrfs_set_backup_tree_root_gen(root_backup,
1947                                btrfs_header_generation(info->tree_root->node));
1948
1949         btrfs_set_backup_tree_root_level(root_backup,
1950                                btrfs_header_level(info->tree_root->node));
1951
1952         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1953         btrfs_set_backup_chunk_root_gen(root_backup,
1954                                btrfs_header_generation(info->chunk_root->node));
1955         btrfs_set_backup_chunk_root_level(root_backup,
1956                                btrfs_header_level(info->chunk_root->node));
1957
1958         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1959         btrfs_set_backup_extent_root_gen(root_backup,
1960                                btrfs_header_generation(info->extent_root->node));
1961         btrfs_set_backup_extent_root_level(root_backup,
1962                                btrfs_header_level(info->extent_root->node));
1963
1964         /*
1965          * we might commit during log recovery, which happens before we set
1966          * the fs_root.  Make sure it is valid before we fill it in.
1967          */
1968         if (info->fs_root && info->fs_root->node) {
1969                 btrfs_set_backup_fs_root(root_backup,
1970                                          info->fs_root->node->start);
1971                 btrfs_set_backup_fs_root_gen(root_backup,
1972                                btrfs_header_generation(info->fs_root->node));
1973                 btrfs_set_backup_fs_root_level(root_backup,
1974                                btrfs_header_level(info->fs_root->node));
1975         }
1976
1977         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1978         btrfs_set_backup_dev_root_gen(root_backup,
1979                                btrfs_header_generation(info->dev_root->node));
1980         btrfs_set_backup_dev_root_level(root_backup,
1981                                        btrfs_header_level(info->dev_root->node));
1982
1983         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1984         btrfs_set_backup_csum_root_gen(root_backup,
1985                                btrfs_header_generation(info->csum_root->node));
1986         btrfs_set_backup_csum_root_level(root_backup,
1987                                btrfs_header_level(info->csum_root->node));
1988
1989         btrfs_set_backup_total_bytes(root_backup,
1990                              btrfs_super_total_bytes(info->super_copy));
1991         btrfs_set_backup_bytes_used(root_backup,
1992                              btrfs_super_bytes_used(info->super_copy));
1993         btrfs_set_backup_num_devices(root_backup,
1994                              btrfs_super_num_devices(info->super_copy));
1995
1996         /*
1997          * if we don't copy this out to the super_copy, it won't get remembered
1998          * for the next commit
1999          */
2000         memcpy(&info->super_copy->super_roots,
2001                &info->super_for_commit->super_roots,
2002                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2003 }
2004
2005 /*
2006  * this copies info out of the root backup array and back into
2007  * the in-memory super block.  It is meant to help iterate through
2008  * the array, so you send it the number of backups you've already
2009  * tried and the last backup index you used.
2010  *
2011  * this returns -1 when it has tried all the backups
2012  */
2013 static noinline int next_root_backup(struct btrfs_fs_info *info,
2014                                      struct btrfs_super_block *super,
2015                                      int *num_backups_tried, int *backup_index)
2016 {
2017         struct btrfs_root_backup *root_backup;
2018         int newest = *backup_index;
2019
2020         if (*num_backups_tried == 0) {
2021                 u64 gen = btrfs_super_generation(super);
2022
2023                 newest = find_newest_super_backup(info, gen);
2024                 if (newest == -1)
2025                         return -1;
2026
2027                 *backup_index = newest;
2028                 *num_backups_tried = 1;
2029         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2030                 /* we've tried all the backups, all done */
2031                 return -1;
2032         } else {
2033                 /* jump to the next oldest backup */
2034                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2035                         BTRFS_NUM_BACKUP_ROOTS;
2036                 *backup_index = newest;
2037                 *num_backups_tried += 1;
2038         }
2039         root_backup = super->super_roots + newest;
2040
2041         btrfs_set_super_generation(super,
2042                                    btrfs_backup_tree_root_gen(root_backup));
2043         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2044         btrfs_set_super_root_level(super,
2045                                    btrfs_backup_tree_root_level(root_backup));
2046         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2047
2048         /*
2049          * fixme: the total bytes and num_devices need to match or we should
2050          * require an fsck
2051          */
2052         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2053         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2054         return 0;
2055 }
2056
2057 /* helper to cleanup workers */
2058 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2059 {
2060         btrfs_destroy_workqueue(fs_info->fixup_workers);
2061         btrfs_destroy_workqueue(fs_info->delalloc_workers);
2062         btrfs_destroy_workqueue(fs_info->workers);
2063         btrfs_destroy_workqueue(fs_info->endio_workers);
2064         btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2065         btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2066         btrfs_destroy_workqueue(fs_info->rmw_workers);
2067         btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2068         btrfs_destroy_workqueue(fs_info->endio_write_workers);
2069         btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2070         btrfs_destroy_workqueue(fs_info->submit_workers);
2071         btrfs_destroy_workqueue(fs_info->delayed_workers);
2072         btrfs_destroy_workqueue(fs_info->caching_workers);
2073         btrfs_destroy_workqueue(fs_info->readahead_workers);
2074         btrfs_destroy_workqueue(fs_info->flush_workers);
2075         btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2076         btrfs_destroy_workqueue(fs_info->extent_workers);
2077 }
2078
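/* helper to drop a root's node and commit_root extent buffers */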
2079 static void free_root_extent_buffers(struct btrfs_root *root)
2080 {
2081         if (root) {
2082                 free_extent_buffer(root->node);
2083                 free_extent_buffer(root->commit_root);
2084                 root->node = NULL;
2085                 root->commit_root = NULL;
2086         }
2087 }
2088
2089 /* helper to cleanup tree roots */
2090 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2091 {
2092         free_root_extent_buffers(info->tree_root);
2093
2094         free_root_extent_buffers(info->dev_root);
2095         free_root_extent_buffers(info->extent_root);
2096         free_root_extent_buffers(info->csum_root);
2097         free_root_extent_buffers(info->quota_root);
2098         free_root_extent_buffers(info->uuid_root);
2099         if (chunk_root)
2100                 free_root_extent_buffers(info->chunk_root);
2101 }
2102
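/*
 * free all the roots left on the dead_roots list and in the fs_roots radix
 * tree; on an aborted filesystem also drop the log root tree and any
 * pinned extents.
 */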
2103 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2104 {
2105         int ret;
2106         struct btrfs_root *gang[8];
2107         int i;
2108
2109         while (!list_empty(&fs_info->dead_roots)) {
2110                 gang[0] = list_entry(fs_info->dead_roots.next,
2111                                      struct btrfs_root, root_list);
2112                 list_del(&gang[0]->root_list);
2113
2114                 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2115                         btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2116                 } else {
2117                         free_extent_buffer(gang[0]->node);
2118                         free_extent_buffer(gang[0]->commit_root);
2119                         btrfs_put_fs_root(gang[0]);
2120                 }
2121         }
2122
2123         while (1) {
2124                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2125                                              (void **)gang, 0,
2126                                              ARRAY_SIZE(gang));
2127                 if (!ret)
2128                         break;
2129                 for (i = 0; i < ret; i++)
2130                         btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2131         }
2132
2133         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2134                 btrfs_free_log_root_tree(NULL, fs_info);
2135                 btrfs_destroy_pinned_extent(fs_info->tree_root,
2136                                             fs_info->pinned_extents);
2137         }
2138 }
2139
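/*
 * main entry point for mounting: read and validate the super block, set up
 * the in-memory fs_info state and worker threads, read the chunk and root
 * trees (falling back to the backup roots if needed) and start the cleaner
 * and transaction kthreads.
 */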
2140 int open_ctree(struct super_block *sb,
2141                struct btrfs_fs_devices *fs_devices,
2142                char *options)
2143 {
2144         u32 sectorsize;
2145         u32 nodesize;
2146         u32 leafsize;
2147         u32 blocksize;
2148         u32 stripesize;
2149         u64 generation;
2150         u64 features;
2151         struct btrfs_key location;
2152         struct buffer_head *bh;
2153         struct btrfs_super_block *disk_super;
2154         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2155         struct btrfs_root *tree_root;
2156         struct btrfs_root *extent_root;
2157         struct btrfs_root *csum_root;
2158         struct btrfs_root *chunk_root;
2159         struct btrfs_root *dev_root;
2160         struct btrfs_root *quota_root;
2161         struct btrfs_root *uuid_root;
2162         struct btrfs_root *log_tree_root;
2163         int ret;
2164         int err = -EINVAL;
2165         int num_backups_tried = 0;
2166         int backup_index = 0;
2167         int max_active;
2168         int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2169         bool create_uuid_tree;
2170         bool check_uuid_tree;
2171
2172         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2173         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2174         if (!tree_root || !chunk_root) {
2175                 err = -ENOMEM;
2176                 goto fail;
2177         }
2178
2179         ret = init_srcu_struct(&fs_info->subvol_srcu);
2180         if (ret) {
2181                 err = ret;
2182                 goto fail;
2183         }
2184
2185         ret = setup_bdi(fs_info, &fs_info->bdi);
2186         if (ret) {
2187                 err = ret;
2188                 goto fail_srcu;
2189         }
2190
2191         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2192         if (ret) {
2193                 err = ret;
2194                 goto fail_bdi;
2195         }
2196         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2197                                         (1 + ilog2(nr_cpu_ids));
2198
2199         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2200         if (ret) {
2201                 err = ret;
2202                 goto fail_dirty_metadata_bytes;
2203         }
2204
2205         ret = percpu_counter_init(&fs_info->bio_counter, 0);
2206         if (ret) {
2207                 err = ret;
2208                 goto fail_delalloc_bytes;
2209         }
2210
2211         fs_info->btree_inode = new_inode(sb);
2212         if (!fs_info->btree_inode) {
2213                 err = -ENOMEM;
2214                 goto fail_bio_counter;
2215         }
2216
2217         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2218
2219         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2220         INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2221         INIT_LIST_HEAD(&fs_info->trans_list);
2222         INIT_LIST_HEAD(&fs_info->dead_roots);
2223         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2224         INIT_LIST_HEAD(&fs_info->delalloc_roots);
2225         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2226         spin_lock_init(&fs_info->delalloc_root_lock);
2227         spin_lock_init(&fs_info->trans_lock);
2228         spin_lock_init(&fs_info->fs_roots_radix_lock);
2229         spin_lock_init(&fs_info->delayed_iput_lock);
2230         spin_lock_init(&fs_info->defrag_inodes_lock);
2231         spin_lock_init(&fs_info->free_chunk_lock);
2232         spin_lock_init(&fs_info->tree_mod_seq_lock);
2233         spin_lock_init(&fs_info->super_lock);
2234         spin_lock_init(&fs_info->qgroup_op_lock);
2235         spin_lock_init(&fs_info->buffer_lock);
2236         rwlock_init(&fs_info->tree_mod_log_lock);
2237         mutex_init(&fs_info->reloc_mutex);
2238         mutex_init(&fs_info->delalloc_root_mutex);
2239         seqlock_init(&fs_info->profiles_lock);
2240
2241         init_completion(&fs_info->kobj_unregister);
2242         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2243         INIT_LIST_HEAD(&fs_info->space_info);
2244         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2245         btrfs_mapping_init(&fs_info->mapping_tree);
2246         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2247                              BTRFS_BLOCK_RSV_GLOBAL);
2248         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2249                              BTRFS_BLOCK_RSV_DELALLOC);
2250         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2251         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2252         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2253         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2254                              BTRFS_BLOCK_RSV_DELOPS);
2255         atomic_set(&fs_info->nr_async_submits, 0);
2256         atomic_set(&fs_info->async_delalloc_pages, 0);
2257         atomic_set(&fs_info->async_submit_draining, 0);
2258         atomic_set(&fs_info->nr_async_bios, 0);
2259         atomic_set(&fs_info->defrag_running, 0);
2260         atomic_set(&fs_info->qgroup_op_seq, 0);
2261         atomic64_set(&fs_info->tree_mod_seq, 0);
2262         fs_info->sb = sb;
2263         fs_info->max_inline = 8192 * 1024;
2264         fs_info->metadata_ratio = 0;
2265         fs_info->defrag_inodes = RB_ROOT;
2266         fs_info->free_chunk_space = 0;
2267         fs_info->tree_mod_log = RB_ROOT;
2268         fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2269         fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
2270         /* readahead state */
2271         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2272         spin_lock_init(&fs_info->reada_lock);
2273
2274         fs_info->thread_pool_size = min_t(unsigned long,
2275                                           num_online_cpus() + 2, 8);
2276
2277         INIT_LIST_HEAD(&fs_info->ordered_roots);
2278         spin_lock_init(&fs_info->ordered_root_lock);
2279         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2280                                         GFP_NOFS);
2281         if (!fs_info->delayed_root) {
2282                 err = -ENOMEM;
2283                 goto fail_iput;
2284         }
2285         btrfs_init_delayed_root(fs_info->delayed_root);
2286
2287         mutex_init(&fs_info->scrub_lock);
2288         atomic_set(&fs_info->scrubs_running, 0);
2289         atomic_set(&fs_info->scrub_pause_req, 0);
2290         atomic_set(&fs_info->scrubs_paused, 0);
2291         atomic_set(&fs_info->scrub_cancel_req, 0);
2292         init_waitqueue_head(&fs_info->replace_wait);
2293         init_waitqueue_head(&fs_info->scrub_pause_wait);
2294         fs_info->scrub_workers_refcnt = 0;
2295 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2296         fs_info->check_integrity_print_mask = 0;
2297 #endif
2298
2299         spin_lock_init(&fs_info->balance_lock);
2300         mutex_init(&fs_info->balance_mutex);
2301         atomic_set(&fs_info->balance_running, 0);
2302         atomic_set(&fs_info->balance_pause_req, 0);
2303         atomic_set(&fs_info->balance_cancel_req, 0);
2304         fs_info->balance_ctl = NULL;
2305         init_waitqueue_head(&fs_info->balance_wait_q);
2306         btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2307
2308         sb->s_blocksize = 4096;
2309         sb->s_blocksize_bits = blksize_bits(4096);
2310         sb->s_bdi = &fs_info->bdi;
2311
2312         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2313         set_nlink(fs_info->btree_inode, 1);
2314         /*
2315          * we set the i_size on the btree inode to the max possible offset.
2316          * the real end of the address space is determined by all of
2317          * the devices in the system
2318          */
2319         fs_info->btree_inode->i_size = OFFSET_MAX;
2320         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2321         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2322
2323         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2324         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2325                              fs_info->btree_inode->i_mapping);
2326         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2327         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2328
2329         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2330
2331         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2332         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2333                sizeof(struct btrfs_key));
2334         set_bit(BTRFS_INODE_DUMMY,
2335                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2336         btrfs_insert_inode_hash(fs_info->btree_inode);
2337
2338         spin_lock_init(&fs_info->block_group_cache_lock);
2339         fs_info->block_group_cache_tree = RB_ROOT;
2340         fs_info->first_logical_byte = (u64)-1;
2341
2342         extent_io_tree_init(&fs_info->freed_extents[0],
2343                              fs_info->btree_inode->i_mapping);
2344         extent_io_tree_init(&fs_info->freed_extents[1],
2345                              fs_info->btree_inode->i_mapping);
2346         fs_info->pinned_extents = &fs_info->freed_extents[0];
2347         fs_info->do_barriers = 1;
2348
2349
2350         mutex_init(&fs_info->ordered_operations_mutex);
2351         mutex_init(&fs_info->ordered_extent_flush_mutex);
2352         mutex_init(&fs_info->tree_log_mutex);
2353         mutex_init(&fs_info->chunk_mutex);
2354         mutex_init(&fs_info->transaction_kthread_mutex);
2355         mutex_init(&fs_info->cleaner_mutex);
2356         mutex_init(&fs_info->volume_mutex);
2357         init_rwsem(&fs_info->commit_root_sem);
2358         init_rwsem(&fs_info->cleanup_work_sem);
2359         init_rwsem(&fs_info->subvol_sem);
2360         sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2361         fs_info->dev_replace.lock_owner = 0;
2362         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2363         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2364         mutex_init(&fs_info->dev_replace.lock_management_lock);
2365         mutex_init(&fs_info->dev_replace.lock);
2366
2367         spin_lock_init(&fs_info->qgroup_lock);
2368         mutex_init(&fs_info->qgroup_ioctl_lock);
2369         fs_info->qgroup_tree = RB_ROOT;
2370         fs_info->qgroup_op_tree = RB_ROOT;
2371         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2372         fs_info->qgroup_seq = 1;
2373         fs_info->quota_enabled = 0;
2374         fs_info->pending_quota_state = 0;
2375         fs_info->qgroup_ulist = NULL;
2376         mutex_init(&fs_info->qgroup_rescan_lock);
2377
2378         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2379         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2380
2381         init_waitqueue_head(&fs_info->transaction_throttle);
2382         init_waitqueue_head(&fs_info->transaction_wait);
2383         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2384         init_waitqueue_head(&fs_info->async_submit_wait);
2385
2386         ret = btrfs_alloc_stripe_hash_table(fs_info);
2387         if (ret) {
2388                 err = ret;
2389                 goto fail_alloc;
2390         }
2391
2392         __setup_root(4096, 4096, 4096, 4096, tree_root,
2393                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2394
2395         invalidate_bdev(fs_devices->latest_bdev);
2396
2397         /*
2398          * Read super block and check the signature bytes only
2399          */
2400         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2401         if (!bh) {
2402                 err = -EINVAL;
2403                 goto fail_alloc;
2404         }
2405
2406         /*
2407          * We want to check the superblock checksum; the csum type is stored inside.
2408          * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2409          */
2410         if (btrfs_check_super_csum(bh->b_data)) {
2411                 printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
2412                 err = -EINVAL;
2413                 goto fail_alloc;
2414         }
2415
2416         /*
2417          * super_copy is zeroed at allocation time and we never touch the
2418          * following bytes up to INFO_SIZE; the checksum is calculated over
2419          * the whole block of INFO_SIZE
2420          */
2421         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2422         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2423                sizeof(*fs_info->super_for_commit));
2424         brelse(bh);
2425
2426         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2427
2428         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2429         if (ret) {
2430                 printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
2431                 err = -EINVAL;
2432                 goto fail_alloc;
2433         }
2434
2435         disk_super = fs_info->super_copy;
2436         if (!btrfs_super_root(disk_super))
2437                 goto fail_alloc;
2438
2439         /* check FS state, whether FS is broken. */
2440         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2441                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2442
2443         /*
2444          * run through our array of backup supers and setup
2445          * our ring pointer to the oldest one
2446          */
2447         generation = btrfs_super_generation(disk_super);
2448         find_oldest_super_backup(fs_info, generation);
2449
2450         /*
2451          * In the long term, we'll store the compression type in the super
2452          * block, and it'll be used for per file compression control.
2453          */
2454         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2455
2456         ret = btrfs_parse_options(tree_root, options);
2457         if (ret) {
2458                 err = ret;
2459                 goto fail_alloc;
2460         }
2461
2462         features = btrfs_super_incompat_flags(disk_super) &
2463                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2464         if (features) {
2465                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2466                        "unsupported optional features (%Lx).\n",
2467                        features);
2468                 err = -EINVAL;
2469                 goto fail_alloc;
2470         }
2471
2472         if (btrfs_super_leafsize(disk_super) !=
2473             btrfs_super_nodesize(disk_super)) {
2474                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2475                        "blocksizes don't match.  node %d leaf %d\n",
2476                        btrfs_super_nodesize(disk_super),
2477                        btrfs_super_leafsize(disk_super));
2478                 err = -EINVAL;
2479                 goto fail_alloc;
2480         }
2481         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2482                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2483                        "blocksize (%d) was too large\n",
2484                        btrfs_super_leafsize(disk_super));
2485                 err = -EINVAL;
2486                 goto fail_alloc;
2487         }
2488
2489         features = btrfs_super_incompat_flags(disk_super);
2490         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2491         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2492                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2493
2494         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2495                 printk(KERN_INFO "BTRFS: has skinny extents\n");
2496
2497         /*
2498          * flag our filesystem as having big metadata blocks if
2499          * they are bigger than the page size
2500          */
2501         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2502                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2503                         printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
2504                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2505         }
2506
2507         nodesize = btrfs_super_nodesize(disk_super);
2508         leafsize = btrfs_super_leafsize(disk_super);
2509         sectorsize = btrfs_super_sectorsize(disk_super);
2510         stripesize = btrfs_super_stripesize(disk_super);
2511         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2512         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2513
2514         /*
2515          * mixed block groups end up with duplicate but slightly offset
2516          * extent buffers for the same range.  This leads to corruption
2517          */
2518         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2519             (sectorsize != leafsize)) {
2520                 printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
2521                                 "are not allowed for mixed block groups on %s\n",
2522                                 sb->s_id);
2523                 goto fail_alloc;
2524         }
2525
2526         /*
2527          * No need to take the lock because there is no other task that will
2528          * update the flag.
2529          */
2530         btrfs_set_super_incompat_flags(disk_super, features);
2531
2532         features = btrfs_super_compat_ro_flags(disk_super) &
2533                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2534         if (!(sb->s_flags & MS_RDONLY) && features) {
2535                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2536                        "unsupported option features (%Lx).\n",
2537                        features);
2538                 err = -EINVAL;
2539                 goto fail_alloc;
2540         }
2541
2542         max_active = fs_info->thread_pool_size;
2543
2544         fs_info->workers =
2545                 btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
2546                                       max_active, 16);
2547
2548         fs_info->delalloc_workers =
2549                 btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
2550
2551         fs_info->flush_workers =
2552                 btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
2553
2554         fs_info->caching_workers =
2555                 btrfs_alloc_workqueue("cache", flags, max_active, 0);
2556
2557         /*
2558          * a higher idle thresh on the submit workers makes it much more
2559          * likely that bios will be sent down in a sane order to the
2560          * devices
2561          */
2562         fs_info->submit_workers =
2563                 btrfs_alloc_workqueue("submit", flags,
2564                                       min_t(u64, fs_devices->num_devices,
2565                                             max_active), 64);
2566
2567         fs_info->fixup_workers =
2568                 btrfs_alloc_workqueue("fixup", flags, 1, 0);
2569
2570         /*
2571          * endios are largely parallel and should have a very
2572          * low idle thresh
2573          */
2574         fs_info->endio_workers =
2575                 btrfs_alloc_workqueue("endio", flags, max_active, 4);
2576         fs_info->endio_meta_workers =
2577                 btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
2578         fs_info->endio_meta_write_workers =
2579                 btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
2580         fs_info->endio_raid56_workers =
2581                 btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
2582         fs_info->rmw_workers =
2583                 btrfs_alloc_workqueue("rmw", flags, max_active, 2);
2584         fs_info->endio_write_workers =
2585                 btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
2586         fs_info->endio_freespace_worker =
2587                 btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
2588         fs_info->delayed_workers =
2589                 btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
2590         fs_info->readahead_workers =
2591                 btrfs_alloc_workqueue("readahead", flags, max_active, 2);
2592         fs_info->qgroup_rescan_workers =
2593                 btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
2594         fs_info->extent_workers =
2595                 btrfs_alloc_workqueue("extent-refs", flags,
2596                                       min_t(u64, fs_devices->num_devices,
2597                                             max_active), 8);
2598
2599         if (!(fs_info->workers && fs_info->delalloc_workers &&
2600               fs_info->submit_workers && fs_info->flush_workers &&
2601               fs_info->endio_workers && fs_info->endio_meta_workers &&
2602               fs_info->endio_meta_write_workers &&
2603               fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2604               fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2605               fs_info->caching_workers && fs_info->readahead_workers &&
2606               fs_info->fixup_workers && fs_info->delayed_workers &&
2607               fs_info->extent_workers &&
2608               fs_info->qgroup_rescan_workers)) {
2609                 err = -ENOMEM;
2610                 goto fail_sb_buffer;
2611         }
2612
2613         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2614         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2615                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2616
2617         tree_root->nodesize = nodesize;
2618         tree_root->leafsize = leafsize;
2619         tree_root->sectorsize = sectorsize;
2620         tree_root->stripesize = stripesize;
2621
2622         sb->s_blocksize = sectorsize;
2623         sb->s_blocksize_bits = blksize_bits(sectorsize);
2624
2625         if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
2626                 printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
2627                 goto fail_sb_buffer;
2628         }
2629
2630         if (sectorsize != PAGE_SIZE) {
2631                 printk(KERN_WARNING "BTRFS: Incompatible sector size (%lu) "
2632                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2633                 goto fail_sb_buffer;
2634         }
2635
2636         mutex_lock(&fs_info->chunk_mutex);
2637         ret = btrfs_read_sys_array(tree_root);
2638         mutex_unlock(&fs_info->chunk_mutex);
2639         if (ret) {
2640                 printk(KERN_WARNING "BTRFS: failed to read the system "
2641                        "array on %s\n", sb->s_id);
2642                 goto fail_sb_buffer;
2643         }
2644
2645         blocksize = btrfs_level_size(tree_root,
2646                                      btrfs_super_chunk_root_level(disk_super));
2647         generation = btrfs_super_chunk_root_generation(disk_super);
2648
2649         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2650                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2651
2652         chunk_root->node = read_tree_block(chunk_root,
2653                                            btrfs_super_chunk_root(disk_super),
2654                                            blocksize, generation);
2655         if (!chunk_root->node ||
2656             !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2657                 printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
2658                        sb->s_id);
2659                 goto fail_tree_roots;
2660         }
2661         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2662         chunk_root->commit_root = btrfs_root_node(chunk_root);
2663
2664         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2665            btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2666
2667         ret = btrfs_read_chunk_tree(chunk_root);
2668         if (ret) {
2669                 printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
2670                        sb->s_id);
2671                 goto fail_tree_roots;
2672         }
2673
2674         /*
2675          * keep the device that is marked to be the target device for the
2676          * dev_replace procedure
2677          */
2678         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2679
2680         if (!fs_devices->latest_bdev) {
2681                 printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
2682                        sb->s_id);
2683                 goto fail_tree_roots;
2684         }
2685
2686 retry_root_backup:
2687         blocksize = btrfs_level_size(tree_root,
2688                                      btrfs_super_root_level(disk_super));
2689         generation = btrfs_super_generation(disk_super);
2690
2691         tree_root->node = read_tree_block(tree_root,
2692                                           btrfs_super_root(disk_super),
2693                                           blocksize, generation);
2694         if (!tree_root->node ||
2695             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2696                 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2697                        sb->s_id);
2698
2699                 goto recovery_tree_root;
2700         }
2701
2702         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2703         tree_root->commit_root = btrfs_root_node(tree_root);
2704         btrfs_set_root_refs(&tree_root->root_item, 1);
2705
2706         location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2707         location.type = BTRFS_ROOT_ITEM_KEY;
2708         location.offset = 0;
2709
2710         extent_root = btrfs_read_tree_root(tree_root, &location);
2711         if (IS_ERR(extent_root)) {
2712                 ret = PTR_ERR(extent_root);
2713                 goto recovery_tree_root;
2714         }
2715         set_bit(BTRFS_ROOT_TRACK_DIRTY, &extent_root->state);
2716         fs_info->extent_root = extent_root;
2717
2718         location.objectid = BTRFS_DEV_TREE_OBJECTID;
2719         dev_root = btrfs_read_tree_root(tree_root, &location);
2720         if (IS_ERR(dev_root)) {
2721                 ret = PTR_ERR(dev_root);
2722                 goto recovery_tree_root;
2723         }
2724         set_bit(BTRFS_ROOT_TRACK_DIRTY, &dev_root->state);
2725         fs_info->dev_root = dev_root;
2726         btrfs_init_devices_late(fs_info);
2727
2728         location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2729         csum_root = btrfs_read_tree_root(tree_root, &location);
2730         if (IS_ERR(csum_root)) {
2731                 ret = PTR_ERR(csum_root);
2732                 goto recovery_tree_root;
2733         }
2734         set_bit(BTRFS_ROOT_TRACK_DIRTY, &csum_root->state);
2735         fs_info->csum_root = csum_root;
2736
2737         location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2738         quota_root = btrfs_read_tree_root(tree_root, &location);
2739         if (!IS_ERR(quota_root)) {
2740                 set_bit(BTRFS_ROOT_TRACK_DIRTY, &quota_root->state);
2741                 fs_info->quota_enabled = 1;
2742                 fs_info->pending_quota_state = 1;
2743                 fs_info->quota_root = quota_root;
2744         }
2745
2746         location.objectid = BTRFS_UUID_TREE_OBJECTID;
2747         uuid_root = btrfs_read_tree_root(tree_root, &location);
2748         if (IS_ERR(uuid_root)) {
2749                 ret = PTR_ERR(uuid_root);
2750                 if (ret != -ENOENT)
2751                         goto recovery_tree_root;
2752                 create_uuid_tree = true;
2753                 check_uuid_tree = false;
2754         } else {
2755                 set_bit(BTRFS_ROOT_TRACK_DIRTY, &uuid_root->state);
2756                 fs_info->uuid_root = uuid_root;
2757                 create_uuid_tree = false;
2758                 check_uuid_tree =
2759                     generation != btrfs_super_uuid_tree_generation(disk_super);
2760         }
2761
2762         fs_info->generation = generation;
2763         fs_info->last_trans_committed = generation;
2764
2765         ret = btrfs_recover_balance(fs_info);
2766         if (ret) {
2767                 printk(KERN_WARNING "BTRFS: failed to recover balance\n");
2768                 goto fail_block_groups;
2769         }
2770
2771         ret = btrfs_init_dev_stats(fs_info);
2772         if (ret) {
2773                 printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
2774                        ret);
2775                 goto fail_block_groups;
2776         }
2777
2778         ret = btrfs_init_dev_replace(fs_info);
2779         if (ret) {
2780                 pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
2781                 goto fail_block_groups;
2782         }
2783
2784         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2785
2786         ret = btrfs_sysfs_add_one(fs_info);
2787         if (ret) {
2788                 pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
2789                 goto fail_block_groups;
2790         }
2791
2792         ret = btrfs_init_space_info(fs_info);
2793         if (ret) {
2794                 printk(KERN_ERR "BTRFS: Failed to initialize space info: %d\n", ret);
2795                 goto fail_sysfs;
2796         }
2797
2798         ret = btrfs_read_block_groups(extent_root);
2799         if (ret) {
2800                 printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
2801                 goto fail_sysfs;
2802         }
2803         fs_info->num_tolerated_disk_barrier_failures =
2804                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2805         if (fs_info->fs_devices->missing_devices >
2806              fs_info->num_tolerated_disk_barrier_failures &&
2807             !(sb->s_flags & MS_RDONLY)) {
2808                 printk(KERN_WARNING "BTRFS: "
2809                         "too many missing devices, writeable mount is not allowed\n");
2810                 goto fail_sysfs;
2811         }
2812
2813         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2814                                                "btrfs-cleaner");
2815         if (IS_ERR(fs_info->cleaner_kthread))
2816                 goto fail_sysfs;
2817
2818         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2819                                                    tree_root,
2820                                                    "btrfs-transaction");
2821         if (IS_ERR(fs_info->transaction_kthread))
2822                 goto fail_cleaner;
2823
2824         if (!btrfs_test_opt(tree_root, SSD) &&
2825             !btrfs_test_opt(tree_root, NOSSD) &&
2826             !fs_info->fs_devices->rotating) {
2827                 printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
2828                        "mode\n");
2829                 btrfs_set_opt(fs_info->mount_opt, SSD);
2830         }
2831
2832         /* Set the real inode map cache flag */
2833         if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
2834                 btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
2835
2836 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2837         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2838                 ret = btrfsic_mount(tree_root, fs_devices,
2839                                     btrfs_test_opt(tree_root,
2840                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2841                                     1 : 0,
2842                                     fs_info->check_integrity_print_mask);
2843                 if (ret)
2844                         printk(KERN_WARNING "BTRFS: failed to initialize"
2845                                " integrity check module %s\n", sb->s_id);
2846         }
2847 #endif
2848         ret = btrfs_read_qgroup_config(fs_info);
2849         if (ret)
2850                 goto fail_trans_kthread;
2851
2852         /* do not make disk changes in broken FS */
2853         if (btrfs_super_log_root(disk_super) != 0) {
2854                 u64 bytenr = btrfs_super_log_root(disk_super);
2855
2856                 if (fs_devices->rw_devices == 0) {
2857                         printk(KERN_WARNING "BTRFS: log replay required "
2858                                "on RO media\n");
2859                         err = -EIO;
2860                         goto fail_qgroup;
2861                 }
2862                 blocksize =
2863                      btrfs_level_size(tree_root,
2864                                       btrfs_super_log_root_level(disk_super));
2865
2866                 log_tree_root = btrfs_alloc_root(fs_info);
2867                 if (!log_tree_root) {
2868                         err = -ENOMEM;
2869                         goto fail_qgroup;
2870                 }
2871
2872                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2873                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2874
2875                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2876                                                       blocksize,
2877                                                       generation + 1);
2878                 if (!log_tree_root->node ||
2879                     !extent_buffer_uptodate(log_tree_root->node)) {
2880                         printk(KERN_ERR "BTRFS: failed to read log tree\n");
2881                         free_extent_buffer(log_tree_root->node);
2882                         kfree(log_tree_root);
2883                         goto fail_qgroup;
2884                 }
2885                 /* returns with log_tree_root freed on success */
2886                 ret = btrfs_recover_log_trees(log_tree_root);
2887                 if (ret) {
2888                         btrfs_error(tree_root->fs_info, ret,
2889                                     "Failed to recover log tree");
2890                         free_extent_buffer(log_tree_root->node);
2891                         kfree(log_tree_root);
2892                         goto fail_qgroup;
2893                 }
2894
2895                 if (sb->s_flags & MS_RDONLY) {
2896                         ret = btrfs_commit_super(tree_root);
2897                         if (ret)
2898                                 goto fail_qgroup;
2899                 }
2900         }
2901
2902         ret = btrfs_find_orphan_roots(tree_root);
2903         if (ret)
2904                 goto fail_qgroup;
2905
2906         if (!(sb->s_flags & MS_RDONLY)) {
2907                 ret = btrfs_cleanup_fs_roots(fs_info);
2908                 if (ret)
2909                         goto fail_qgroup;
2910
2911                 mutex_lock(&fs_info->cleaner_mutex);
2912                 ret = btrfs_recover_relocation(tree_root);
2913                 mutex_unlock(&fs_info->cleaner_mutex);
2914                 if (ret < 0) {
2915                         printk(KERN_WARNING
2916                                "BTRFS: failed to recover relocation\n");
2917                         err = -EINVAL;
2918                         goto fail_qgroup;
2919                 }
2920         }
2921
2922         location.objectid = BTRFS_FS_TREE_OBJECTID;
2923         location.type = BTRFS_ROOT_ITEM_KEY;
2924         location.offset = 0;
2925
2926         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2927         if (IS_ERR(fs_info->fs_root)) {
2928                 err = PTR_ERR(fs_info->fs_root);
2929                 goto fail_qgroup;
2930         }
2931
2932         if (sb->s_flags & MS_RDONLY)
2933                 return 0;
2934
2935         down_read(&fs_info->cleanup_work_sem);
2936         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2937             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2938                 up_read(&fs_info->cleanup_work_sem);
2939                 close_ctree(tree_root);
2940                 return ret;
2941         }
2942         up_read(&fs_info->cleanup_work_sem);
2943
2944         ret = btrfs_resume_balance_async(fs_info);
2945         if (ret) {
2946                 printk(KERN_WARNING "BTRFS: failed to resume balance\n");
2947                 close_ctree(tree_root);
2948                 return ret;
2949         }
2950
2951         ret = btrfs_resume_dev_replace_async(fs_info);
2952         if (ret) {
2953                 pr_warn("BTRFS: failed to resume dev_replace\n");
2954                 close_ctree(tree_root);
2955                 return ret;
2956         }
2957
2958         btrfs_qgroup_rescan_resume(fs_info);
2959
2960         if (create_uuid_tree) {
2961                 pr_info("BTRFS: creating UUID tree\n");
2962                 ret = btrfs_create_uuid_tree(fs_info);
2963                 if (ret) {
2964                         pr_warn("BTRFS: failed to create the UUID tree %d\n",
2965                                 ret);
2966                         close_ctree(tree_root);
2967                         return ret;
2968                 }
2969         } else if (check_uuid_tree ||
2970                    btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
2971                 pr_info("BTRFS: checking UUID tree\n");
2972                 ret = btrfs_check_uuid_tree(fs_info);
2973                 if (ret) {
2974                         pr_warn("BTRFS: failed to check the UUID tree %d\n",
2975                                 ret);
2976                         close_ctree(tree_root);
2977                         return ret;
2978                 }
2979         } else {
2980                 fs_info->update_uuid_tree_gen = 1;
2981         }
2982
2983         return 0;
2984
2985 fail_qgroup:
2986         btrfs_free_qgroup_config(fs_info);
2987 fail_trans_kthread:
2988         kthread_stop(fs_info->transaction_kthread);
2989         btrfs_cleanup_transaction(fs_info->tree_root);
2990         btrfs_free_fs_roots(fs_info);
2991 fail_cleaner:
2992         kthread_stop(fs_info->cleaner_kthread);
2993
2994         /*
2995          * make sure we're done with the btree inode before we stop our
2996          * kthreads
2997          */
2998         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2999
3000 fail_sysfs:
3001         btrfs_sysfs_remove_one(fs_info);
3002
3003 fail_block_groups:
3004         btrfs_put_block_group_cache(fs_info);
3005         btrfs_free_block_groups(fs_info);
3006
3007 fail_tree_roots:
3008         free_root_pointers(fs_info, 1);
3009         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3010
3011 fail_sb_buffer:
3012         btrfs_stop_all_workers(fs_info);
3013 fail_alloc:
3014 fail_iput:
3015         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3016
3017         iput(fs_info->btree_inode);
3018 fail_bio_counter:
3019         percpu_counter_destroy(&fs_info->bio_counter);
3020 fail_delalloc_bytes:
3021         percpu_counter_destroy(&fs_info->delalloc_bytes);
3022 fail_dirty_metadata_bytes:
3023         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3024 fail_bdi:
3025         bdi_destroy(&fs_info->bdi);
3026 fail_srcu:
3027         cleanup_srcu_struct(&fs_info->subvol_srcu);
3028 fail:
3029         btrfs_free_stripe_hash_table(fs_info);
3030         btrfs_close_devices(fs_info->fs_devices);
3031         return err;
3032
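/*
 * Recovery path for the tree root read above.  This is only a summary of
 * what the labels below already do: when mounted with -o recovery we fall
 * back to the backup root slots stored in the super block.  Each retry
 * zeroes the log root and forces CLEAR_CACHE, since neither the log tree
 * nor the free space cache can be trusted against an older root, and then
 * jumps back to retry_root_backup with the next candidate handed out by
 * next_root_backup().
 */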
3033 recovery_tree_root:
3034         if (!btrfs_test_opt(tree_root, RECOVERY))
3035                 goto fail_tree_roots;
3036
3037         free_root_pointers(fs_info, 0);
3038
3039         /* don't use the log in recovery mode, it won't be valid */
3040         btrfs_set_super_log_root(disk_super, 0);
3041
3042         /* we can't trust the free space cache either */
3043         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3044
3045         ret = next_root_backup(fs_info, fs_info->super_copy,
3046                                &num_backups_tried, &backup_index);
3047         if (ret == -1)
3048                 goto fail_block_groups;
3049         goto retry_root_backup;
3050 }
3051
3052 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3053 {
3054         if (uptodate) {
3055                 set_buffer_uptodate(bh);
3056         } else {
3057                 struct btrfs_device *device = (struct btrfs_device *)
3058                         bh->b_private;
3059
3060                 printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
3061                                           "I/O error on %s\n",
3062                                           rcu_str_deref(device->name));
3063                 /* note, we don't set_buffer_write_io_error because we have
3064                  * our own ways of dealing with the IO errors
3065                  */
3066                 clear_buffer_uptodate(bh);
3067                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3068         }
3069         unlock_buffer(bh);
3070         put_bh(bh);
3071 }
3072
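/*
 * Descriptive note for the helper below: it picks the newest usable super
 * block on @bdev.  Each candidate copy is read with __bread(), checked for
 * the btrfs magic and a matching bytenr, and the copy with the highest
 * generation wins.  The returned buffer_head carries a reference the caller
 * must drop with brelse().  The division by 4096 appears to assume the
 * caller has already set the block device's block size to 4096.
 */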
3073 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3074 {
3075         struct buffer_head *bh;
3076         struct buffer_head *latest = NULL;
3077         struct btrfs_super_block *super;
3078         int i;
3079         u64 transid = 0;
3080         u64 bytenr;
3081
3082         /* we would like to check all the supers, but that would make
3083          * a btrfs mount succeed after a mkfs from a different FS.
3084          * So, we need to add a special mount option to scan for
3085          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3086          */
3087         for (i = 0; i < 1; i++) {
3088                 bytenr = btrfs_sb_offset(i);
3089                 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3090                                         i_size_read(bdev->bd_inode))
3091                         break;
3092                 bh = __bread(bdev, bytenr / 4096,
3093                                         BTRFS_SUPER_INFO_SIZE);
3094                 if (!bh)
3095                         continue;
3096
3097                 super = (struct btrfs_super_block *)bh->b_data;
3098                 if (btrfs_super_bytenr(super) != bytenr ||
3099                     btrfs_super_magic(super) != BTRFS_MAGIC) {
3100                         brelse(bh);
3101                         continue;
3102                 }
3103
3104                 if (!latest || btrfs_super_generation(super) > transid) {
3105                         brelse(latest);
3106                         latest = bh;
3107                         transid = btrfs_super_generation(super);
3108                 } else {
3109                         brelse(bh);
3110                 }
3111         }
3112         return latest;
3113 }
3114
3115 /*
3116  * this should be called twice, once with wait == 0 and
3117  * once with wait == 1.  When wait == 0 is done, all the buffer heads
3118  * we write are pinned.
3119  *
3120  * They are released when wait == 1 is done.
3121  * max_mirrors must be the same for both runs, and it indicates how
3122  * many supers on this one device should be written.
3123  *
3124  * max_mirrors == 0 means to write them all.
3125  */
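/*
 * A minimal sketch of that two pass protocol, as write_all_supers() below
 * uses it (illustrative only, locking and error handling omitted):
 *
 *	list_for_each_entry_rcu(dev, head, dev_list)
 *		write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	...
 *	list_for_each_entry_rcu(dev, head, dev_list)
 *		write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 *
 * The wait == 0 pass leaves every buffer head pinned so the wait == 1 pass
 * can find it again with __find_get_block().
 */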
3126 static int write_dev_supers(struct btrfs_device *device,
3127                             struct btrfs_super_block *sb,
3128                             int do_barriers, int wait, int max_mirrors)
3129 {
3130         struct buffer_head *bh;
3131         int i;
3132         int ret;
3133         int errors = 0;
3134         u32 crc;
3135         u64 bytenr;
3136
3137         if (max_mirrors == 0)
3138                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3139
3140         for (i = 0; i < max_mirrors; i++) {
3141                 bytenr = btrfs_sb_offset(i);
3142                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3143                         break;
3144
3145                 if (wait) {
3146                         bh = __find_get_block(device->bdev, bytenr / 4096,
3147                                               BTRFS_SUPER_INFO_SIZE);
3148                         if (!bh) {
3149                                 errors++;
3150                                 continue;
3151                         }
3152                         wait_on_buffer(bh);
3153                         if (!buffer_uptodate(bh))
3154                                 errors++;
3155
3156                         /* drop our reference */
3157                         brelse(bh);
3158
3159                         /* drop the reference from the wait == 0 run */
3160                         brelse(bh);
3161                         continue;
3162                 } else {
3163                         btrfs_set_super_bytenr(sb, bytenr);
3164
3165                         crc = ~(u32)0;
3166                         crc = btrfs_csum_data((char *)sb +
3167                                               BTRFS_CSUM_SIZE, crc,
3168                                               BTRFS_SUPER_INFO_SIZE -
3169                                               BTRFS_CSUM_SIZE);
3170                         btrfs_csum_final(crc, sb->csum);
3171
3172                         /*
3173                          * one reference for us, and we leave it for the
3174                          * caller
3175                          */
3176                         bh = __getblk(device->bdev, bytenr / 4096,
3177                                       BTRFS_SUPER_INFO_SIZE);
3178                         if (!bh) {
3179                                 printk(KERN_ERR "BTRFS: couldn't get super "
3180                                        "buffer head for bytenr %Lu\n", bytenr);
3181                                 errors++;
3182                                 continue;
3183                         }
3184
3185                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3186
3187                         /* one reference for submit_bh */
3188                         get_bh(bh);
3189
3190                         set_buffer_uptodate(bh);
3191                         lock_buffer(bh);
3192                         bh->b_end_io = btrfs_end_buffer_write_sync;
3193                         bh->b_private = device;
3194                 }
3195
3196                 /*
3197                  * we fua the first super.  The others we allow
3198                  * to go down lazily.
3199                  */
3200                 if (i == 0)
3201                         ret = btrfsic_submit_bh(WRITE_FUA, bh);
3202                 else
3203                         ret = btrfsic_submit_bh(WRITE_SYNC, bh);
3204                 if (ret)
3205                         errors++;
3206         }
3207         return errors < i ? 0 : -1;
3208 }
3209
3210 /*
3211  * endio for the write_dev_flush, this will wake anyone waiting
3212  * for the barrier when it is done
3213  */
3214 static void btrfs_end_empty_barrier(struct bio *bio, int err)
3215 {
3216         if (err) {
3217                 if (err == -EOPNOTSUPP)
3218                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3219                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3220         }
3221         if (bio->bi_private)
3222                 complete(bio->bi_private);
3223         bio_put(bio);
3224 }
3225
3226 /*
3227  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
3228  * sent down.  With wait == 1, it waits for the previous flush.
3229  *
3230  * any device where the flush fails with eopnotsupp is flagged as not-barrier
3231  * capable
3232  */
3233 static int write_dev_flush(struct btrfs_device *device, int wait)
3234 {
3235         struct bio *bio;
3236         int ret = 0;
3237
3238         if (device->nobarriers)
3239                 return 0;
3240
3241         if (wait) {
3242                 bio = device->flush_bio;
3243                 if (!bio)
3244                         return 0;
3245
3246                 wait_for_completion(&device->flush_wait);
3247
3248                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3249                         printk_in_rcu("BTRFS: disabling barriers on dev %s\n",
3250                                       rcu_str_deref(device->name));
3251                         device->nobarriers = 1;
3252                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3253                         ret = -EIO;
3254                         btrfs_dev_stat_inc_and_print(device,
3255                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3256                 }
3257
3258                 /* drop the reference from the wait == 0 run */
3259                 bio_put(bio);
3260                 device->flush_bio = NULL;
3261
3262                 return ret;
3263         }
3264
3265         /*
3266          * one reference for us, and we leave it for the
3267          * caller
3268          */
3269         device->flush_bio = NULL;
3270         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3271         if (!bio)
3272                 return -ENOMEM;
3273
3274         bio->bi_end_io = btrfs_end_empty_barrier;
3275         bio->bi_bdev = device->bdev;
3276         init_completion(&device->flush_wait);
3277         bio->bi_private = &device->flush_wait;
3278         device->flush_bio = bio;
3279
3280         bio_get(bio);
3281         btrfsic_submit_bio(WRITE_FLUSH, bio);
3282
3283         return 0;
3284 }
3285
3286 /*
3287  * send an empty flush down to each device in parallel,
3288  * then wait for them
3289  */
3290 static int barrier_all_devices(struct btrfs_fs_info *info)
3291 {
3292         struct list_head *head;
3293         struct btrfs_device *dev;
3294         int errors_send = 0;
3295         int errors_wait = 0;
3296         int ret;
3297
3298         /* send down all the barriers */
3299         head = &info->fs_devices->devices;
3300         list_for_each_entry_rcu(dev, head, dev_list) {
3301                 if (dev->missing)
3302                         continue;
3303                 if (!dev->bdev) {
3304                         errors_send++;
3305                         continue;
3306                 }
3307                 if (!dev->in_fs_metadata || !dev->writeable)
3308                         continue;
3309
3310                 ret = write_dev_flush(dev, 0);
3311                 if (ret)
3312                         errors_send++;
3313         }
3314
3315         /* wait for all the barriers */
3316         list_for_each_entry_rcu(dev, head, dev_list) {
3317                 if (dev->missing)
3318                         continue;
3319                 if (!dev->bdev) {
3320                         errors_wait++;
3321                         continue;
3322                 }
3323                 if (!dev->in_fs_metadata || !dev->writeable)
3324                         continue;
3325
3326                 ret = write_dev_flush(dev, 1);
3327                 if (ret)
3328                         errors_wait++;
3329         }
3330         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3331             errors_wait > info->num_tolerated_disk_barrier_failures)
3332                 return -EIO;
3333         return 0;
3334 }
3335
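/*
 * Worked example for the helper below (illustrative): with RAID1 metadata
 * and system chunks but RAID0 data chunks the result is forced down to 0,
 * because the loop only ever lowers num_tolerated_disk_barrier_failures and
 * RAID0, DUP and single always map to 0.  With RAID1 everywhere the result
 * is 1, and with RAID6 it is 2.
 */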
3336 int btrfs_calc_num_tolerated_disk_barrier_failures(
3337         struct btrfs_fs_info *fs_info)
3338 {
3339         struct btrfs_ioctl_space_info space;
3340         struct btrfs_space_info *sinfo;
3341         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3342                        BTRFS_BLOCK_GROUP_SYSTEM,
3343                        BTRFS_BLOCK_GROUP_METADATA,
3344                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3345         int num_types = 4;
3346         int i;
3347         int c;
3348         int num_tolerated_disk_barrier_failures =
3349                 (int)fs_info->fs_devices->num_devices;
3350
3351         for (i = 0; i < num_types; i++) {
3352                 struct btrfs_space_info *tmp;
3353
3354                 sinfo = NULL;
3355                 rcu_read_lock();
3356                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3357                         if (tmp->flags == types[i]) {
3358                                 sinfo = tmp;
3359                                 break;
3360                         }
3361                 }
3362                 rcu_read_unlock();
3363
3364                 if (!sinfo)
3365                         continue;
3366
3367                 down_read(&sinfo->groups_sem);
3368                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3369                         if (!list_empty(&sinfo->block_groups[c])) {
3370                                 u64 flags;
3371
3372                                 btrfs_get_block_group_info(
3373                                         &sinfo->block_groups[c], &space);
3374                                 if (space.total_bytes == 0 ||
3375                                     space.used_bytes == 0)
3376                                         continue;
3377                                 flags = space.flags;
3378                                 /*
3379                                  * return
3380                                  * 0: if dup, single or RAID0 is configured for
3381                                  *    any of metadata, system or data, else
3382                                  * 1: if RAID5 is configured, or if RAID1 or
3383                                  *    RAID10 is configured and only two mirrors
3384                                  *    are used, else
3385                                  * 2: if RAID6 is configured, else
3386                                  * num_mirrors - 1: if RAID1 or RAID10 is
3387                                  *                  configured and more than
3388                                  *                  2 mirrors are used.
3389                                  */
3390                                 if (num_tolerated_disk_barrier_failures > 0 &&
3391                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3392                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3393                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3394                                       == 0)))
3395                                         num_tolerated_disk_barrier_failures = 0;
3396                                 else if (num_tolerated_disk_barrier_failures > 1) {
3397                                         if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3398                                             BTRFS_BLOCK_GROUP_RAID5 |
3399                                             BTRFS_BLOCK_GROUP_RAID10)) {
3400                                                 num_tolerated_disk_barrier_failures = 1;
3401                                         } else if (flags &
3402                                                    BTRFS_BLOCK_GROUP_RAID6) {
3403                                                 num_tolerated_disk_barrier_failures = 2;
3404                                         }
3405                                 }
3406                         }
3407                 }
3408                 up_read(&sinfo->groups_sem);
3409         }
3410
3411         return num_tolerated_disk_barrier_failures;
3412 }
3413
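/*
 * Write the super block to every writeable device.  Unless -o nobarrier is
 * set this first flushes all devices via barrier_all_devices(), then runs
 * the two write_dev_supers() passes described above.  total_errors is
 * compared against btrfs_super_num_devices() - 1, so the commit is only
 * aborted with -EIO when more supers than that fail to reach disk.
 */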
3414 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3415 {
3416         struct list_head *head;
3417         struct btrfs_device *dev;
3418         struct btrfs_super_block *sb;
3419         struct btrfs_dev_item *dev_item;
3420         int ret;
3421         int do_barriers;
3422         int max_errors;
3423         int total_errors = 0;
3424         u64 flags;
3425
3426         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3427         backup_super_roots(root->fs_info);
3428
3429         sb = root->fs_info->super_for_commit;
3430         dev_item = &sb->dev_item;
3431
3432         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3433         head = &root->fs_info->fs_devices->devices;
3434         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3435
3436         if (do_barriers) {
3437                 ret = barrier_all_devices(root->fs_info);
3438                 if (ret) {
3439                         mutex_unlock(
3440                                 &root->fs_info->fs_devices->device_list_mutex);
3441                         btrfs_error(root->fs_info, ret,
3442                                     "errors while submitting device barriers.");
3443                         return ret;
3444                 }
3445         }
3446
3447         list_for_each_entry_rcu(dev, head, dev_list) {
3448                 if (!dev->bdev) {
3449                         total_errors++;
3450                         continue;
3451                 }
3452                 if (!dev->in_fs_metadata || !dev->writeable)
3453                         continue;
3454
3455                 btrfs_set_stack_device_generation(dev_item, 0);
3456                 btrfs_set_stack_device_type(dev_item, dev->type);
3457                 btrfs_set_stack_device_id(dev_item, dev->devid);
3458                 btrfs_set_stack_device_total_bytes(dev_item,
3459                                                    dev->disk_total_bytes);
3460                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3461                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3462                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3463                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3464                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3465                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3466
3467                 flags = btrfs_super_flags(sb);
3468                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3469
3470                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3471                 if (ret)
3472                         total_errors++;
3473         }
3474         if (total_errors > max_errors) {
3475                 btrfs_err(root->fs_info, "%d errors while writing supers",
3476                        total_errors);
3477                 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3478
3479                 /* FUA is masked off if unsupported and can't be the reason */
3480                 btrfs_error(root->fs_info, -EIO,
3481                             "%d errors while writing supers", total_errors);
3482                 return -EIO;
3483         }
3484
3485         total_errors = 0;
3486         list_for_each_entry_rcu(dev, head, dev_list) {
3487                 if (!dev->bdev)
3488                         continue;
3489                 if (!dev->in_fs_metadata || !dev->writeable)
3490                         continue;
3491
3492                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3493                 if (ret)
3494                         total_errors++;
3495         }
3496         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3497         if (total_errors > max_errors) {
3498                 btrfs_error(root->fs_info, -EIO,
3499                             "%d errors while writing supers", total_errors);
3500                 return -EIO;
3501         }
3502         return 0;
3503 }
3504
3505 int write_ctree_super(struct btrfs_trans_handle *trans,
3506                       struct btrfs_root *root, int max_mirrors)
3507 {
3508         return write_all_supers(root, max_mirrors);
3509 }
3510
3511 /* Drop a fs root from the radix tree and free it. */
3512 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3513                                   struct btrfs_root *root)
3514 {
3515         spin_lock(&fs_info->fs_roots_radix_lock);
3516         radix_tree_delete(&fs_info->fs_roots_radix,
3517                           (unsigned long)root->root_key.objectid);
3518         spin_unlock(&fs_info->fs_roots_radix_lock);
3519
3520         if (btrfs_root_refs(&root->root_item) == 0)
3521                 synchronize_srcu(&fs_info->subvol_srcu);
3522
3523         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3524                 btrfs_free_log(NULL, root);
3525
3526         if (root->free_ino_pinned)
3527                 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3528         if (root->free_ino_ctl)
3529                 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3530         free_fs_root(root);
3531 }
3532
3533 static void free_fs_root(struct btrfs_root *root)
3534 {
3535         iput(root->cache_inode);
3536         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3537         btrfs_free_block_rsv(root, root->orphan_block_rsv);
3538         root->orphan_block_rsv = NULL;
3539         if (root->anon_dev)
3540                 free_anon_bdev(root->anon_dev);
3541         if (root->subv_writers)
3542                 btrfs_free_subvolume_writers(root->subv_writers);
3543         free_extent_buffer(root->node);
3544         free_extent_buffer(root->commit_root);
3545         kfree(root->free_ino_ctl);
3546         kfree(root->free_ino_pinned);
3547         kfree(root->name);
3548         btrfs_put_fs_root(root);
3549 }
3550
3551 void btrfs_free_fs_root(struct btrfs_root *root)
3552 {
3553         free_fs_root(root);
3554 }
3555
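/*
 * Run orphan cleanup on every fs root currently cached in the radix tree.
 * The pattern below is worth spelling out: roots are found in batches of 8
 * with radix_tree_gang_lookup() under subvol_srcu, a reference is grabbed
 * on each live root, and only after the srcu read section is dropped do we
 * call btrfs_orphan_cleanup(), which may block.
 */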
3556 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3557 {
3558         u64 root_objectid = 0;
3559         struct btrfs_root *gang[8];
3560         int i = 0;
3561         int err = 0;
3562         unsigned int ret = 0;
3563         int index;
3564
3565         while (1) {
3566                 index = srcu_read_lock(&fs_info->subvol_srcu);
3567                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3568                                              (void **)gang, root_objectid,
3569                                              ARRAY_SIZE(gang));
3570                 if (!ret) {
3571                         srcu_read_unlock(&fs_info->subvol_srcu, index);
3572                         break;
3573                 }
3574                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3575
3576                 for (i = 0; i < ret; i++) {
3577                         /* Avoid grabbing roots in dead_roots */
3578                         if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3579                                 gang[i] = NULL;
3580                                 continue;
3581                         }
3582                         /* grab all the search results for later use */
3583                         gang[i] = btrfs_grab_fs_root(gang[i]);
3584                 }
3585                 srcu_read_unlock(&fs_info->subvol_srcu, index);
3586
3587                 for (i = 0; i < ret; i++) {
3588                         if (!gang[i])
3589                                 continue;
3590                         root_objectid = gang[i]->root_key.objectid;
3591                         err = btrfs_orphan_cleanup(gang[i]);
3592                         if (err)
3593                                 break;
3594                         btrfs_put_fs_root(gang[i]);
3595                 }
3596                 root_objectid++;
3597         }
3598
3599         /* release the uncleaned roots due to error */
3600         for (; i < ret; i++) {
3601                 if (gang[i])
3602                         btrfs_put_fs_root(gang[i]);
3603         }
3604         return err;
3605 }
3606
3607 int btrfs_commit_super(struct btrfs_root *root)
3608 {
3609         struct btrfs_trans_handle *trans;
3610
3611         mutex_lock(&root->fs_info->cleaner_mutex);
3612         btrfs_run_delayed_iputs(root);
3613         mutex_unlock(&root->fs_info->cleaner_mutex);
3614         wake_up_process(root->fs_info->cleaner_kthread);
3615
3616         /* wait until ongoing cleanup work is done */
3617         down_write(&root->fs_info->cleanup_work_sem);
3618         up_write(&root->fs_info->cleanup_work_sem);
3619
3620         trans = btrfs_join_transaction(root);
3621         if (IS_ERR(trans))
3622                 return PTR_ERR(trans);
3623         return btrfs_commit_transaction(trans, root);
3624 }
3625
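/*
 * Unmount-time teardown.  Roughly in order: wait for the uuid scan, pause
 * balance and suspend dev-replace so they can resume on the next mount,
 * cancel scrub and defrag, commit the super on read-write mounts, stop the
 * transaction and cleaner kthreads, and only then free the roots, block
 * groups and worker threads.
 */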
3626 int close_ctree(struct btrfs_root *root)
3627 {
3628         struct btrfs_fs_info *fs_info = root->fs_info;
3629         int ret;
3630
3631         fs_info->closing = 1;
3632         smp_mb();
3633
3634         /* wait for the uuid_scan task to finish */
3635         down(&fs_info->uuid_tree_rescan_sem);
3636         /* avoid complaints from lockdep et al.; set sem back to initial state */
3637         up(&fs_info->uuid_tree_rescan_sem);
3638
3639         /* pause restriper - we want to resume on mount */
3640         btrfs_pause_balance(fs_info);
3641
3642         btrfs_dev_replace_suspend_for_unmount(fs_info);
3643
3644         btrfs_scrub_cancel(fs_info);
3645
3646         /* wait for any defraggers to finish */
3647         wait_event(fs_info->transaction_wait,
3648                    (atomic_read(&fs_info->defrag_running) == 0));
3649
3650         /* clear out the rbtree of defraggable inodes */
3651         btrfs_cleanup_defrag_inodes(fs_info);
3652
3653         cancel_work_sync(&fs_info->async_reclaim_work);
3654
3655         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3656                 ret = btrfs_commit_super(root);
3657                 if (ret)
3658                         btrfs_err(root->fs_info, "commit super ret %d", ret);
3659         }
3660
3661         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3662                 btrfs_error_commit_super(root);
3663
3664         kthread_stop(fs_info->transaction_kthread);
3665         kthread_stop(fs_info->cleaner_kthread);
3666
3667         fs_info->closing = 2;
3668         smp_mb();
3669
3670         btrfs_free_qgroup_config(root->fs_info);
3671
3672         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3673                 btrfs_info(root->fs_info, "at unmount delalloc count %lld",
3674                        percpu_counter_sum(&fs_info->delalloc_bytes));
3675         }
3676
3677         btrfs_sysfs_remove_one(fs_info);
3678
3679         btrfs_free_fs_roots(fs_info);
3680
3681         btrfs_put_block_group_cache(fs_info);
3682
3683         btrfs_free_block_groups(fs_info);
3684
3685         /*
3686          * we must make sure there are no read requests to
3687          * submit after we stop all workers.
3688          */
3689         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3690         btrfs_stop_all_workers(fs_info);
3691
3692         free_root_pointers(fs_info, 1);
3693
3694         iput(fs_info->btree_inode);
3695
3696 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3697         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3698                 btrfsic_unmount(root, fs_info->fs_devices);
3699 #endif
3700
3701         btrfs_close_devices(fs_info->fs_devices);
3702         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3703
3704         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3705         percpu_counter_destroy(&fs_info->delalloc_bytes);
3706         percpu_counter_destroy(&fs_info->bio_counter);
3707         bdi_destroy(&fs_info->bdi);
3708         cleanup_srcu_struct(&fs_info->subvol_srcu);
3709
3710         btrfs_free_stripe_hash_table(fs_info);
3711
3712         btrfs_free_block_rsv(root, root->orphan_block_rsv);
3713         root->orphan_block_rsv = NULL;
3714
3715         return 0;
3716 }
3717
3718 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3719                           int atomic)
3720 {
3721         int ret;
3722         struct inode *btree_inode = buf->pages[0]->mapping->host;
3723
3724         ret = extent_buffer_uptodate(buf);
3725         if (!ret)
3726                 return ret;
3727
3728         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3729                                     parent_transid, atomic);
3730         if (ret == -EAGAIN)
3731                 return ret;
3732         return !ret;
3733 }
3734
3735 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3736 {
3737         return set_extent_buffer_uptodate(buf);
3738 }
3739
3740 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3741 {
3742         struct btrfs_root *root;
3743         u64 transid = btrfs_header_generation(buf);
3744         int was_dirty;
3745
3746 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3747         /*
3748          * This is a fast path so only do this check if we have sanity tests
3749          * enabled.  Normal people shouldn't be marking dummy buffers as dirty
3750          * outside of the sanity tests.
3751          */
3752         if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3753                 return;
3754 #endif
3755         root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3756         btrfs_assert_tree_locked(buf);
3757         if (transid != root->fs_info->generation)
3758                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3759                        "found %llu running %llu\n",
3760                         buf->start, transid, root->fs_info->generation);
3761         was_dirty = set_extent_buffer_dirty(buf);
3762         if (!was_dirty)
3763                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3764                                      buf->len,
3765                                      root->fs_info->dirty_metadata_batch);
3766 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3767         if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
3768                 btrfs_print_leaf(root, buf);
3769                 ASSERT(0);
3770         }
3771 #endif
3772 }
3773
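/*
 * Throttle btree writers.  If @flush_delayed is set the delayed item trees
 * are pushed first, and once the dirty metadata counter crosses
 * BTRFS_DIRTY_METADATA_THRESH the caller is diverted into
 * balance_dirty_pages_ratelimited() on the btree inode.  Callers running
 * under PF_MEMALLOC are never throttled here.
 */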
3774 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3775                                         int flush_delayed)
3776 {
3777         /*
3778          * looks as though older kernels can get into trouble with
3779          * this code; they end up stuck in balance_dirty_pages forever
3780          */
3781         int ret;
3782
3783         if (current->flags & PF_MEMALLOC)
3784                 return;
3785
3786         if (flush_delayed)
3787                 btrfs_balance_delayed_items(root);
3788
3789         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3790                                      BTRFS_DIRTY_METADATA_THRESH);
3791         if (ret > 0) {
3792                 balance_dirty_pages_ratelimited(
3793                                    root->fs_info->btree_inode->i_mapping);
3794         }
3795         return;
3796 }
3797
3798 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3799 {
3800         __btrfs_btree_balance_dirty(root, 1);
3801 }
3802
3803 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3804 {
3805         __btrfs_btree_balance_dirty(root, 0);
3806 }
3807
3808 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3809 {
3810         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3811         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3812 }
3813
3814 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3815                               int read_only)
3816 {
3817         /*
3818          * Placeholder for checks
3819          */
3820         return 0;
3821 }
3822
3823 static void btrfs_error_commit_super(struct btrfs_root *root)
3824 {
3825         mutex_lock(&root->fs_info->cleaner_mutex);
3826         btrfs_run_delayed_iputs(root);
3827         mutex_unlock(&root->fs_info->cleaner_mutex);
3828
3829         down_write(&root->fs_info->cleanup_work_sem);
3830         up_write(&root->fs_info->cleanup_work_sem);
3831
3832         /* cleanup FS via transaction */
3833         btrfs_cleanup_transaction(root);
3834 }
3835
3836 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3837 {
3838         struct btrfs_ordered_extent *ordered;
3839
3840         spin_lock(&root->ordered_extent_lock);
3841         /*
3842          * This will just short-circuit the ordered completion code, which will
3843          * make sure the ordered extent gets properly cleaned up.
3844          */
3845         list_for_each_entry(ordered, &root->ordered_extents,
3846                             root_extent_list)
3847                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3848         spin_unlock(&root->ordered_extent_lock);
3849 }
3850
3851 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
3852 {
3853         struct btrfs_root *root;
3854         struct list_head splice;
3855
3856         INIT_LIST_HEAD(&splice);
3857
3858         spin_lock(&fs_info->ordered_root_lock);
3859         list_splice_init(&fs_info->ordered_roots, &splice);
3860         while (!list_empty(&splice)) {
3861                 root = list_first_entry(&splice, struct btrfs_root,
3862                                         ordered_root);
3863                 list_move_tail(&root->ordered_root,
3864                                &fs_info->ordered_roots);
3865
3866                 spin_unlock(&fs_info->ordered_root_lock);
3867                 btrfs_destroy_ordered_extents(root);
3868
3869                 cond_resched();
3870                 spin_lock(&fs_info->ordered_root_lock);
3871         }
3872         spin_unlock(&fs_info->ordered_root_lock);
3873 }
3874
3875 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3876                                       struct btrfs_root *root)
3877 {
3878         struct rb_node *node;
3879         struct btrfs_delayed_ref_root *delayed_refs;
3880         struct btrfs_delayed_ref_node *ref;
3881         int ret = 0;
3882
3883         delayed_refs = &trans->delayed_refs;
3884
3885         spin_lock(&delayed_refs->lock);
3886         if (atomic_read(&delayed_refs->num_entries) == 0) {
3887                 spin_unlock(&delayed_refs->lock);
3888                 btrfs_info(root->fs_info, "delayed_refs has NO entry");
3889                 return ret;
3890         }
3891
3892         while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
3893                 struct btrfs_delayed_ref_head *head;
3894                 bool pin_bytes = false;
3895
3896                 head = rb_entry(node, struct btrfs_delayed_ref_head,
3897                                 href_node);
3898                 if (!mutex_trylock(&head->mutex)) {
3899                         atomic_inc(&head->node.refs);
3900                         spin_unlock(&delayed_refs->lock);
3901
3902                         mutex_lock(&head->mutex);
3903                         mutex_unlock(&head->mutex);
3904                         btrfs_put_delayed_ref(&head->node);
3905                         spin_lock(&delayed_refs->lock);
3906                         continue;
3907                 }
3908                 spin_lock(&head->lock);
3909                 while ((node = rb_first(&head->ref_root)) != NULL) {
3910                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
3911                                        rb_node);
3912                         ref->in_tree = 0;
3913                         rb_erase(&ref->rb_node, &head->ref_root);
3914                         atomic_dec(&delayed_refs->num_entries);
3915                         btrfs_put_delayed_ref(ref);
3916                 }
3917                 if (head->must_insert_reserved)
3918                         pin_bytes = true;
3919                 btrfs_free_delayed_extent_op(head->extent_op);
3920                 delayed_refs->num_heads--;
3921                 if (head->processing == 0)
3922                         delayed_refs->num_heads_ready--;
3923                 atomic_dec(&delayed_refs->num_entries);
3924                 head->node.in_tree = 0;
3925                 rb_erase(&head->href_node, &delayed_refs->href_root);
3926                 spin_unlock(&head->lock);
3927                 spin_unlock(&delayed_refs->lock);
3928                 mutex_unlock(&head->mutex);
3929
3930                 if (pin_bytes)
3931                         btrfs_pin_extent(root, head->node.bytenr,
3932                                          head->node.num_bytes, 1);
3933                 btrfs_put_delayed_ref(&head->node);
3934                 cond_resched();
3935                 spin_lock(&delayed_refs->lock);
3936         }
3937
3938         spin_unlock(&delayed_refs->lock);
3939
3940         return ret;
3941 }
3942
3943 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3944 {
3945         struct btrfs_inode *btrfs_inode;
3946         struct list_head splice;
3947
3948         INIT_LIST_HEAD(&splice);
3949
3950         spin_lock(&root->delalloc_lock);
3951         list_splice_init(&root->delalloc_inodes, &splice);
3952
3953         while (!list_empty(&splice)) {
3954                 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
3955                                                delalloc_inodes);
3956
3957                 list_del_init(&btrfs_inode->delalloc_inodes);
3958                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3959                           &btrfs_inode->runtime_flags);
3960                 spin_unlock(&root->delalloc_lock);
3961
3962                 btrfs_invalidate_inodes(btrfs_inode->root);
3963
3964                 spin_lock(&root->delalloc_lock);
3965         }
3966
3967         spin_unlock(&root->delalloc_lock);
3968 }
3969
3970 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
3971 {
3972         struct btrfs_root *root;
3973         struct list_head splice;
3974
3975         INIT_LIST_HEAD(&splice);
3976
3977         spin_lock(&fs_info->delalloc_root_lock);
3978         list_splice_init(&fs_info->delalloc_roots, &splice);
3979         while (!list_empty(&splice)) {
3980                 root = list_first_entry(&splice, struct btrfs_root,
3981                                          delalloc_root);
3982                 list_del_init(&root->delalloc_root);
3983                 root = btrfs_grab_fs_root(root);
3984                 BUG_ON(!root);
3985                 spin_unlock(&fs_info->delalloc_root_lock);
3986
3987                 btrfs_destroy_delalloc_inodes(root);
3988                 btrfs_put_fs_root(root);
3989
3990                 spin_lock(&fs_info->delalloc_root_lock);
3991         }
3992         spin_unlock(&fs_info->delalloc_root_lock);
3993 }
3994
3995 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3996                                         struct extent_io_tree *dirty_pages,
3997                                         int mark)
3998 {
3999         int ret;
4000         struct extent_buffer *eb;
4001         u64 start = 0;
4002         u64 end;
4003
4004         while (1) {
4005                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4006                                             mark, NULL);
4007                 if (ret)
4008                         break;
4009
4010                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
4011                 while (start <= end) {
4012                         eb = btrfs_find_tree_block(root, start,
4013                                                    root->leafsize);
4014                         start += root->leafsize;
4015                         if (!eb)
4016                                 continue;
4017                         wait_on_extent_buffer_writeback(eb);
4018
4019                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4020                                                &eb->bflags))
4021                                 clear_extent_buffer_dirty(eb);
4022                         free_extent_buffer_stale(eb);
4023                 }
4024         }
4025
4026         return ret;
4027 }
4028
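/*
 * Error-path counterpart of unpinning.  The fs keeps two freed_extents
 * trees and pinned_extents points at whichever one the running transaction
 * is using (that switch happens elsewhere, at commit time), so the loop
 * below simply runs twice, once per tree, to make sure nothing stays
 * pinned.
 */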
4029 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
4030                                        struct extent_io_tree *pinned_extents)
4031 {
4032         struct extent_io_tree *unpin;
4033         u64 start;
4034         u64 end;
4035         int ret;
4036         bool loop = true;
4037
4038         unpin = pinned_extents;
4039 again:
4040         while (1) {
4041                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4042                                             EXTENT_DIRTY, NULL);
4043                 if (ret)
4044                         break;
4045
4046                 /* honor -o discard even on this error path */
4047                 if (btrfs_test_opt(root, DISCARD))
4048                         ret = btrfs_error_discard_extent(root, start,
4049                                                          end + 1 - start,
4050                                                          NULL);
4051
4052                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4053                 btrfs_error_unpin_extent_range(root, start, end);
4054                 cond_resched();
4055         }
4056
4057         if (loop) {
4058                 if (unpin == &root->fs_info->freed_extents[0])
4059                         unpin = &root->fs_info->freed_extents[1];
4060                 else
4061                         unpin = &root->fs_info->freed_extents[0];
4062                 loop = false;
4063                 goto again;
4064         }
4065
4066         return 0;
4067 }
4068
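/*
 * Force a broken transaction through the normal commit state machine so
 * that anything blocked on its wait queues wakes up: delayed refs are
 * thrown away, the state is marched through COMMIT_START, UNBLOCKED and
 * COMPLETED, and dirty and pinned extents are discarded rather than
 * written.
 */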
4069 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4070                                    struct btrfs_root *root)
4071 {
4072         btrfs_destroy_delayed_refs(cur_trans, root);
4073
4074         cur_trans->state = TRANS_STATE_COMMIT_START;
4075         wake_up(&root->fs_info->transaction_blocked_wait);
4076
4077         cur_trans->state = TRANS_STATE_UNBLOCKED;
4078         wake_up(&root->fs_info->transaction_wait);
4079
4080         btrfs_destroy_delayed_inodes(root);
4081         btrfs_assert_delayed_root_empty(root);
4082
4083         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
4084                                      EXTENT_DIRTY);
4085         btrfs_destroy_pinned_extent(root,
4086                                     root->fs_info->pinned_extents);
4087
4088         cur_trans->state = TRANS_STATE_COMPLETED;
4089         wake_up(&cur_trans->commit_wait);
4090
4091         /*
4092         memset(cur_trans, 0, sizeof(*cur_trans));
4093         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
4094         */
4095 }
4096
4097 static int btrfs_cleanup_transaction(struct btrfs_root *root)
4098 {
4099         struct btrfs_transaction *t;
4100
4101         mutex_lock(&root->fs_info->transaction_kthread_mutex);
4102
4103         spin_lock(&root->fs_info->trans_lock);
4104         while (!list_empty(&root->fs_info->trans_list)) {
4105                 t = list_first_entry(&root->fs_info->trans_list,
4106                                      struct btrfs_transaction, list);
4107                 if (t->state >= TRANS_STATE_COMMIT_START) {
4108                         atomic_inc(&t->use_count);
4109                         spin_unlock(&root->fs_info->trans_lock);
4110                         btrfs_wait_for_commit(root, t->transid);
4111                         btrfs_put_transaction(t);
4112                         spin_lock(&root->fs_info->trans_lock);
4113                         continue;
4114                 }
4115                 if (t == root->fs_info->running_transaction) {
4116                         t->state = TRANS_STATE_COMMIT_DOING;
4117                         spin_unlock(&root->fs_info->trans_lock);
4118                         /*
4119                          * We wait for 0 num_writers since we don't hold a trans
4120                          * handle open currently for this transaction.
4121                          */
4122                         wait_event(t->writer_wait,
4123                                    atomic_read(&t->num_writers) == 0);
4124                 } else {
4125                         spin_unlock(&root->fs_info->trans_lock);
4126                 }
4127                 btrfs_cleanup_one_transaction(t, root);
4128
4129                 spin_lock(&root->fs_info->trans_lock);
4130                 if (t == root->fs_info->running_transaction)
4131                         root->fs_info->running_transaction = NULL;
4132                 list_del_init(&t->list);
4133                 spin_unlock(&root->fs_info->trans_lock);
4134
4135                 btrfs_put_transaction(t);
4136                 trace_btrfs_transaction_commit(root);
4137                 spin_lock(&root->fs_info->trans_lock);
4138         }
4139         spin_unlock(&root->fs_info->trans_lock);
4140         btrfs_destroy_all_ordered_extents(root->fs_info);
4141         btrfs_destroy_delayed_inodes(root);
4142         btrfs_assert_delayed_root_empty(root);
4143         btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
4144         btrfs_destroy_all_delalloc_inodes(root->fs_info);
4145         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
4146
4147         return 0;
4148 }
4149
4150 static struct extent_io_ops btree_extent_io_ops = {
4151         .readpage_end_io_hook = btree_readpage_end_io_hook,
4152         .readpage_io_failed_hook = btree_io_failed_hook,
4153         .submit_bio_hook = btree_submit_bio_hook,
4154         /* note we're sharing with inode.c for the merge bio hook */
4155         .merge_bio_hook = btrfs_merge_bio_hook,
4156 };