/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "ref-cache.h"

static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
		printk(KERN_CRIT "buf blocknr %llu does not match header blocknr %llu\n",
		       (unsigned long long)extent_buffer_blocknr(buf),
		       (unsigned long long)btrfs_header_blocknr(buf));
		BUG();
	}
	return 0;
}

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);

struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_hook;
	int rw;
	int mirror_num;
	struct btrfs_work work;
};

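/*
 * extents on the btree inode are pretty simple: a single mapping that
 * covers the whole address space and points at fs_devices->latest_bdev.
 * The helper below finds it in the extent_map tree, or creates and
 * inserts it on first use.
 */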
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
				    int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		spin_unlock(&em_tree->lock);
		goto out;
	}
	spin_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		printk("failed to insert %Lu %Lu -> %Lu into tree\n",
		       em->start, em->len, em->block_start);
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			printk("after failing, found %Lu %Lu %Lu\n",
			       em->start, em->len, em->block_start);
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			if (em) {
				printk("double failure lookup gives us "
				       "%Lu %Lu -> %Lu\n", em->start,
				       em->len, em->block_start);
				free_extent_map(em);
			}
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	spin_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}

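/*
 * compute the csum for a tree block and either verify it against the
 * csum already stored in the block, or write the new csum into it.
 * Everything in the block past BTRFS_CSUM_SIZE is covered.
 */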
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);
		if (err) {
			printk("failed to map extent buffer! %lu\n",
			       offset);
			return 1;
		}
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	btrfs_csum_final(crc, result);

	if (verify) {
		/* FIXME, this is not good */
		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, BTRFS_CRC32_SIZE);

			read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
			printk("btrfs: %s checksum verify failed on %llu "
			       "wanted %X found %X level %d\n",
			       root->fs_info->sb->s_id,
			       (unsigned long long)buf->start, val, found,
			       btrfs_header_level(buf));
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
	}
	return 0;
}

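/*
 * a block can carry a valid checksum and still belong to an older
 * generation; comparing its header generation against the transid
 * recorded in the parent pointer catches stale copies before they
 * are trusted.
 */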
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk("parent transid verify failed on %llu wanted %llu found %llu\n",
	       (unsigned long long)eb->start,
	       (unsigned long long)parent_transid,
	       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(io_tree, eb);
out:
	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
		      GFP_NOFS);
	return ret;
}

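/*
 * read a tree block, retrying against each additional mirror when the
 * read fails or the parent transid check rejects the copy.
 */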
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return 0;
		printk("read extent buffer pages failed with ret %d mirror no %d\n",
		       ret, mirror_num);
		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}

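/*
 * checksum a dirty tree block before IO.  page->private carries the
 * tree block length (shifted by two bits), so the matching extent
 * buffer can be found again at write time.
 */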
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	BUG_ON(ret);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
		       start, found_start, len);
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		printk("csum not up to date page %lu\n", page->index);
		WARN_ON(1);
		goto err;
	}
	found_level = btrfs_header_level(eb);
	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	csum_dirty_buffer(root, page);
	return 0;
}

int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("bad tree block start %llu %llu\n",
		       (unsigned long long)found_start,
		       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		ret = -EIO;
		goto err;
	}
	if (memcmp_extent_buffer(eb, root->fs_info->fsid,
				 (unsigned long)btrfs_header_fsid(eb),
				 BTRFS_FSID_SIZE)) {
		printk("bad fsid on block %Lu\n", eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
err:
	free_extent_buffer(eb);
out:
	return ret;
}

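/*
 * async bio completion; this runs at interrupt time, so it only hands
 * the bio off to the right helper thread pool (separate pools for
 * reads and writes) where the real end_io processing happens.
 */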
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
			     unsigned int bytes_done, int err)
#endif
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;
	if (bio->bi_rw & (1 << BIO_RW))
		btrfs_queue_worker(&fs_info->endio_write_workers,
				   &end_io_wq->work);
	else
		btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
	return atomic_read(&info->nr_async_bios) >
		btrfs_async_submit_limit(info);
}

static void run_one_async_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	atomic_dec(&fs_info->nr_async_submits);

	if (atomic_read(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	async->submit_bio_hook(async->inode, async->rw, async->bio,
			       async->mirror_num);
	kfree(async);
}

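/*
 * queue a bio for async submission by the worker threads, then make
 * the caller wait briefly once too many async submits are in flight,
 * throttling heavy writers without blocking them completely.
 */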
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			extent_submit_bio_hook_t *submit_bio_hook)
{
	struct async_submit_bio *async;
	int limit = btrfs_async_submit_limit(fs_info);

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_hook = submit_bio_hook;
	async->work.func = run_one_async_submit;
	async->work.flags = 0;
	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);

	if (atomic_read(&fs_info->nr_async_submits) > limit) {
		wait_event_timeout(fs_info->async_submit_wait,
			(atomic_read(&fs_info->nr_async_submits) < limit),
			HZ/10);

		wait_event_timeout(fs_info->async_submit_wait,
			(atomic_read(&fs_info->nr_async_bios) < limit),
			HZ/10);
	}
	return 0;
}

static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 offset;
	int ret;

	offset = bio->bi_sector << 9;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	if (rw & (1 << BIO_RW)) {
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 1);
	}

	/*
	 * called for a read, do the setup so that checksum validation
	 * can happen in the async kernel threads
	 */
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
	BUG_ON(ret);

	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	if (!(rw & (1 << BIO_RW))) {
		return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
	}
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   __btree_submit_bio_hook);
}

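/*
 * writepage can be invoked from memory reclaim (PF_MEMALLOC); the
 * redirty-and-bail below appears intended to keep btree IO from being
 * issued out of that context, leaving those pages for normal writeback.
 */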
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		u64 num_dirty;
		u64 start = 0;
		unsigned long thresh = 8 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		num_dirty = count_range_bits(tree, &start, (u64)-1,
					     thresh, EXTENT_DIRTY);
		if (num_dirty < thresh) {
			return 0;
		}
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;

	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if (!ret) {
		return 0;
	}

	ret = try_release_extent_buffer(tree, page);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}

	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk("warning page private not zero on page %Lu\n",
		       page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page	= block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return buf;
}

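/*
 * blocks dirtied inside the currently running transaction can simply
 * have their dirty bits cleared instead of being written back, since
 * nothing from that generation has reached disk yet.
 */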
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		WARN_ON(!btrfs_tree_locked(buf));
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}

int wait_on_tree_block_writeback(struct btrfs_root *root,
				 struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
					buf);
	return 0;
}

static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->ref_tree = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;
	root->name = NULL;
	root->in_sysfs = 0;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->orphan_list);
	INIT_LIST_HEAD(&root->dead_list);
	spin_lock_init(&root->node_lock);
	spin_lock_init(&root->list_lock);
	mutex_init(&root->objectid_mutex);

	btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
	root->ref_tree = &root->ref_tree_struct;

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
	return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);

	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	BUG_ON(!root->node);
	return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 highest_inode;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = path->nodes[0];
	read_extent_buffer(l, &root->root_item,
			   btrfs_item_ptr_offset(l, path->slots[0]),
			   sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	BUG_ON(!root->node);
insert:
	root->ref_cows = 1;
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
	}
	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info, location);
	if (IS_ERR(root))
		return root;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid, root);
	BUG_ON(ret);

	return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;

	if (root->in_sysfs)
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
	root->in_sysfs = 1;
	return root;
}

static int add_hasher(struct btrfs_fs_info *info, char *type)
{
	struct btrfs_hasher *hasher;

	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
	if (!hasher)
		return -ENOMEM;
	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hasher->hash_tfm)) {
		kfree(hasher);
		return -EINVAL;
	}
	spin_lock(&info->hash_lock);
	list_add(&hasher->list, &info->hashers);
	spin_unlock(&info->hash_lock);
	return 0;
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	if ((bdi_bits & (1 << BDI_write_congested)) &&
	    btrfs_congested_async(info, 0))
		return 1;

	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is NULL
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct list_head *cur;
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn) {
			bdi->unplug_io_fn(bdi, page);
		}
	}
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	u64 offset;

	/* the generic O_DIRECT read code does this */
	if (!page) {
		__unplug_io_fn(bdi, page);
		return;
	}

	/*
	 * page->mapping may change at any time.  Get a consistent copy
	 * and use that for everything below
	 */
	smp_mb();
	mapping = page->mapping;
	if (!mapping)
		return;

	inode = mapping->host;
	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);
	if (!em) {
		__unplug_io_fn(bdi, page);
		return;
	}

	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		free_extent_map(em);
		__unplug_io_fn(bdi, page);
		return;
	}
	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_init(bdi);
#endif
	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->capabilities = default_backing_dev_info.capabilities;
	bdi->unplug_io_fn = btrfs_unplug_io_fn;
	bdi->unplug_io_data = info;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bios are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_workers,
				   &end_io_wq->work);
		return;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	bio_endio(bio, bio->bi_size, error);
#else
	bio_endio(bio, error);
#endif
}

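/*
 * background thread that deletes old snapshots whenever the filesystem
 * is otherwise idle; it sleeps between runs and exits once the
 * filesystem starts closing.
 */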
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		smp_mb();
		if (root->fs_info->closing)
			break;

		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->cleaner_mutex);
		btrfs_clean_old_snapshots(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			smp_mb();
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

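/*
 * background thread that commits the running transaction once it is
 * roughly 30 seconds old, then pokes the cleaner.  Younger transactions
 * are left alone and rechecked after a shorter sleep.
 */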
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	unsigned long now;
	unsigned long delay;
	int ret;

	do {
		smp_mb();
		if (root->fs_info->closing)
			break;

		delay = HZ * 30;
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
			printk("btrfs: total reference cache size %Lu\n",
			       root->fs_info->total_ref_cache_size);
		}

		mutex_lock(&root->fs_info->trans_mutex);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			mutex_unlock(&root->fs_info->trans_mutex);
			goto sleep;
		}
		now = get_seconds();
		if (now < cur->start_time || now - cur->start_time < 30) {
			mutex_unlock(&root->fs_info->trans_mutex);
			delay = HZ * 5;
			goto sleep;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_commit_transaction(trans, root);
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

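/*
 * main setup path at mount time: read the super block, start the
 * worker pools and kthreads, then read the chunk, root, extent and
 * device trees.  Errors unwind through the fail_* labels in reverse
 * order of setup.
 */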
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
			      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	int ret;
	int err = -EINVAL;
	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info) {
		err = -ENOMEM;
		goto fail;
	}
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);
	spin_lock_init(&fs_info->ref_cache_lock);

	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->throttles, 0);
	atomic_set(&fs_info->throttle_gen, 0);
	fs_info->sb = sb;
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);

	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping,
			    GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	extent_io_tree_init(&fs_info->free_space_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->block_group_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pinned_extents,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pending_del,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->extent_ins,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->drop_mutex);
	mutex_init(&fs_info->alloc_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	ret = add_hasher(fs_info, "crc32c");
	if (ret) {
		printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
		err = -ENOMEM;
		goto fail_iput;
	}

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	bh = __bread(fs_devices->latest_bdev,
		     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh)
		goto fail_iput;

	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_sb_buffer;

	err = btrfs_parse_options(tree_root, options);
	if (err)
		goto fail_sb_buffer;

	/*
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
	 * cannot dynamically grow.
	 */
	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size));

	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	/* fs_info->workers is responsible for checksumming file data
	 * blocks and metadata.  Using a larger idle thresh allows each
	 * worker thread to operate on things in roughly the order they
	 * were sent by the writeback daemons, improving overall locality
	 * of the IO going down the pipe.
	 */
	fs_info->workers.idle_thresh = 128;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_write_workers.idle_thresh = 4;

	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_write_workers,
			    fs_info->thread_pool_size);

	err = -EINVAL;
	if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->open_devices);
		if (btrfs_test_opt(tree_root, DEGRADED))
			printk("continuing in degraded mode\n");
		else
			goto fail_sb_buffer;
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk("btrfs: failed to read the system array on %s\n",
		       sb->s_id);
		goto fail_sys_array;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, 0);
	BUG_ON(!chunk_root->node);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
		 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
		 BTRFS_UUID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);
	BUG_ON(ret);

	btrfs_close_extra_devices(fs_devices);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, 0);
	if (!tree_root->node)
		goto fail_sb_buffer;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto fail_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;

	if (ret)
		goto fail_extent_root;

	btrfs_read_block_groups(extent_root);

	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	fs_info->data_alloc_profile = (u64)-1;
	fs_info->metadata_alloc_profile = (u64)-1;
	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_extent_root;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	return tree_root;

fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
	free_extent_buffer(extent_root->node);
fail_tree_root:
	free_extent_buffer(tree_root->node);
fail_sys_array:
fail_sb_buffer:
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
	iput(fs_info->btree_inode);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif

	return ERR_PTR(err);
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			printk(KERN_WARNING "lost page write due to "
			       "I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

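/*
 * write the super block to every device in two passes: first submit
 * all the buffers (using barriers where the device still supports
 * them), then wait for completion.  Up to max_errors devices may fail
 * before the commit is considered broken.
 */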
int write_all_supers(struct btrfs_root *root)
{
	struct list_head *cur;
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	struct buffer_head *bh;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u32 crc;
	u64 flags;

	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);

	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata)
			continue;

		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		crc = ~(u32)0;
		crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
			      BTRFS_SUPER_INFO_SIZE);

		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
		dev->pending_io = bh;

		get_bh(bh);
		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;

		if (do_barriers && dev->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				dev->barriers = 0;
				get_bh(bh);
				lock_buffer(bh);
				ret = submit_bh(WRITE, bh);
			}
		} else {
			ret = submit_bh(WRITE, bh);
		}
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}

	total_errors = 0;
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata)
			continue;

		BUG_ON(!dev->pending_io);
		bh = dev->pending_io;
		wait_on_buffer(bh);
		if (!buffer_uptodate(dev->pending_io)) {
			if (do_barriers && dev->barriers) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				get_bh(bh);
				lock_buffer(bh);
				dev->barriers = 0;
				ret = submit_bh(WRITE, bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh))
					total_errors++;
			} else {
				total_errors++;
			}
		}
		dev->pending_io = NULL;
		brelse(bh);
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root)
{
	int ret;

	ret = write_all_supers(root);
	return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (root->in_sysfs)
		btrfs_sysfs_del_root(root);
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
	if (root->name)
		kfree(root->name);
	kfree(root);
	return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}

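/*
 * unmount path: stop the kthreads, commit (twice, so the original
 * snapshot is dropped), write the supers, and then tear down the
 * remaining in-memory state.
 */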
int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	fs_info->closing = 1;
	smp_mb();

	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);

	btrfs_clean_old_snapshots(root);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);

	write_ctree_super(NULL, root);

	if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
		printk("btrfs: at umount reference cache size %Lu\n",
		       fs_info->total_ref_cache_size);
	}

	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);

	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);

	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);

	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);

	btrfs_free_block_groups(root->fs_info);
	fs_info->closing = 2;
	del_fs_roots(fs_info);

	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);

	iput(fs_info->btree_inode);

	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    list);
		list_del(&hasher->list);
		crypto_free_hash(hasher->hash_tfm);
		kfree(hasher);
	}
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif

	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	kfree(fs_info);
	return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;

	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
	if (!ret)
		return ret;
	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid);
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;

	WARN_ON(!btrfs_tree_locked(buf));
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, found %llu running %llu\n",
		       (unsigned long long)buf->start,
		       (unsigned long long)transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	struct extent_io_tree *tree;
	u64 num_dirty;
	u64 start = 0;
	unsigned long thresh = 96 * 1024 * 1024;
	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

	if (current_is_pdflush() || current->flags & PF_MEMALLOC)
		return;

	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;
	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return ret;
}

static struct extent_io_ops btree_extent_io_ops = {
	.writepage_io_hook = btree_writepage_io_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};