Merge git://git.jan-o-sch.net/btrfs-unstable into integration
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index db09f01..94cd3a1 100644
@@ -24,6 +24,7 @@
 #include "ordered-data.h"
 #include "transaction.h"
 #include "backref.h"
+#include "extent_io.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -32,15 +33,12 @@
  * any can be found.
  *
  * Future enhancements:
- *  - To enhance the performance, better read-ahead strategies for the
- *    extent-tree can be employed.
  *  - In case an unrepairable extent is encountered, track which files are
  *    affected and report them
  *  - In case of a read error on files with nodatasum, map the file and read
  *    the extent to trigger a writeback of the good copy
  *  - track and record media errors, throw out bad devices
  *  - add a mode to also read unallocated space
- *  - make the prefetch cancellable
  */
 
 struct scrub_bio;
@@ -208,7 +206,7 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
        atomic_set(&sdev->in_flight, 0);
        atomic_set(&sdev->fixup_cnt, 0);
        atomic_set(&sdev->cancel_req, 0);
-       sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+       sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sdev->csum_list);
 
        spin_lock_init(&sdev->list_lock);
@@ -360,13 +358,13 @@ out:
 
 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
 {
-       struct page *page;
+       struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = ctx;
        int ret;
-       int corrected;
+       int corrected = 0;
        struct btrfs_key key;
-       struct inode *inode;
+       struct inode *inode = NULL;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;
 
@@ -384,34 +382,75 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
-       ret = set_extent_bit(&BTRFS_I(inode)->io_tree, offset, end,
-                               EXTENT_DAMAGED, 0, NULL, NULL, GFP_NOFS);
-
-       /* set_extent_bit should either succeed or give proper error */
-       WARN_ON(ret > 0);
-       if (ret)
-               return ret < 0 ? ret : -EFAULT;
-
        index = offset >> PAGE_CACHE_SHIFT;
 
        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
-       if (!page)
-               return -ENOMEM;
+       if (!page) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
-       ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
-                                       btrfs_get_extent, fixup->mirror_num);
-       wait_on_page_locked(page);
-       corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset, end,
-                                       EXTENT_DAMAGED, 0, NULL);
+       if (PageUptodate(page)) {
+               struct btrfs_mapping_tree *map_tree;
+               if (PageDirty(page)) {
+                       /*
+                        * we need to write the data to the defective sector. the
+                        * data that was in that sector is not in memory,
+                        * because the page was modified. we must not write the
+                        * modified page to that sector.
+                        *
+                        * TODO: what could be done here: wait for the delalloc
+                        *       runner to write out that page (might involve
+                        *       COW) and see whether the sector is still
+                        *       referenced afterwards.
+                        *
+                        * For the meantime, we'll treat this error as
+                        * uncorrectable, although there is a chance that a
+                        * later scrub will find the bad sector again and that
+                        * there's no dirty page in memory, then.
+                        */
+                       ret = -EIO;
+                       goto out;
+               }
+               map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
+               ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
+                                       fixup->logical, page,
+                                       fixup->mirror_num);
+               unlock_page(page);
+               corrected = !ret;
+       } else {
+               /*
+                * we need to get good data first. the general readpage path
+                * will call repair_io_failure for us; we just have to make
+                * sure we read the bad mirror.
+                */
+               ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
+                                       EXTENT_DAMAGED, GFP_NOFS);
+               if (ret) {
+                       /* set_extent_bits should give proper error */
+                       WARN_ON(ret > 0);
+                       if (ret > 0)
+                               ret = -EFAULT;
+                       goto out;
+               }
 
-       if (corrected)
-               WARN_ON(!PageUptodate(page));
-       else
-               clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, end,
-                                       EXTENT_DAMAGED, 0, 0, NULL, GFP_NOFS);
+               ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
+                                               btrfs_get_extent,
+                                               fixup->mirror_num);
+               wait_on_page_locked(page);
+
+               corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
+                                               end, EXTENT_DAMAGED, 0, NULL);
+               if (!corrected)
+                       clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
+                                               EXTENT_DAMAGED, GFP_NOFS);
+       }
 
-       put_page(page);
-       iput(inode);
+out:
+       if (page)
+               put_page(page);
+       if (inode)
+               iput(inode);
 
        if (ret < 0)
                return ret;
@@ -572,7 +611,7 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
        struct scrub_dev *sdev = sbio->sdev;
        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
-       struct btrfs_multi_bio *multi = NULL;
+       struct btrfs_bio *bbio = NULL;
        struct scrub_fixup_nodatasum *fixup;
        u64 logical = sbio->logical + ix * PAGE_SIZE;
        u64 length;
@@ -610,8 +649,8 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
 
        length = PAGE_SIZE;
        ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
-                             &multi, 0);
-       if (ret || !multi || length < PAGE_SIZE) {
+                             &bbio, 0);
+       if (ret || !bbio || length < PAGE_SIZE) {
                printk(KERN_ERR
                       "scrub_fixup: btrfs_map_block failed us for %llu\n",
                       (unsigned long long)logical);
@@ -619,19 +658,19 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
                return;
        }
 
-       if (multi->num_stripes == 1)
+       if (bbio->num_stripes == 1)
                /* there aren't any replicas */
                goto uncorrectable;
 
        /*
         * first find a good copy
         */
-       for (i = 0; i < multi->num_stripes; ++i) {
+       for (i = 0; i < bbio->num_stripes; ++i) {
                if (i + 1 == sbio->spag[ix].mirror_num)
                        continue;
 
-               if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
-                                  multi->stripes[i].physical >> 9,
+               if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
+                                  bbio->stripes[i].physical >> 9,
                                   sbio->bio->bi_io_vec[ix].bv_page)) {
                        /* I/O-error, this is not a good copy */
                        continue;
@@ -640,7 +679,7 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
                if (scrub_fixup_check(sbio, ix) == 0)
                        break;
        }
-       if (i == multi->num_stripes)
+       if (i == bbio->num_stripes)
                goto uncorrectable;
 
        if (!sdev->readonly) {
@@ -655,7 +694,7 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
                }
        }
 
-       kfree(multi);
+       kfree(bbio);
        spin_lock(&sdev->stat_lock);
        ++sdev->stat.corrected_errors;
        spin_unlock(&sdev->stat_lock);
@@ -665,7 +704,7 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
        return;
 
 uncorrectable:
-       kfree(multi);
+       kfree(bbio);
        spin_lock(&sdev->stat_lock);
        ++sdev->stat.uncorrectable_errors;
        spin_unlock(&sdev->stat_lock);
@@ -1088,13 +1127,16 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
        int slot;
        int i;
        u64 nstripes;
-       int start_stripe;
        struct extent_buffer *l;
        struct btrfs_key key;
        u64 physical;
        u64 logical;
        u64 generation;
        int mirror_num;
+       struct reada_control *reada1;
+       struct reada_control *reada2;
+       struct btrfs_key key_start;
+       struct btrfs_key key_end;
 
        u64 increment = map->stripe_len;
        u64 offset;
@@ -1126,81 +1168,67 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
        if (!path)
                return -ENOMEM;
 
-       path->reada = 2;
        path->search_commit_root = 1;
        path->skip_locking = 1;
 
        /*
-        * find all extents for each stripe and just read them to get
-        * them into the page cache
-        * FIXME: we can do better. build a more intelligent prefetching
+        * trigger the readahead for extent tree and csum tree and wait for
+        * completion. During readahead, the scrub is officially paused
+        * to not hold off transaction commits
         */
        logical = base + offset;
-       physical = map->stripes[num].physical;
-       ret = 0;
-       for (i = 0; i < nstripes; ++i) {
-               key.objectid = logical;
-               key.type = BTRFS_EXTENT_ITEM_KEY;
-               key.offset = (u64)0;
-
-               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-               if (ret < 0)
-                       goto out_noplug;
-
-               /*
-                * we might miss half an extent here, but that doesn't matter,
-                * as it's only the prefetch
-                */
-               while (1) {
-                       l = path->nodes[0];
-                       slot = path->slots[0];
-                       if (slot >= btrfs_header_nritems(l)) {
-                               ret = btrfs_next_leaf(root, path);
-                               if (ret == 0)
-                                       continue;
-                               if (ret < 0)
-                                       goto out_noplug;
 
-                               break;
-                       }
-                       btrfs_item_key_to_cpu(l, &key, slot);
+       wait_event(sdev->list_wait,
+                  atomic_read(&sdev->in_flight) == 0);
+       atomic_inc(&fs_info->scrubs_paused);
+       wake_up(&fs_info->scrub_pause_wait);
 
-                       if (key.objectid >= logical + map->stripe_len)
-                               break;
+       /* FIXME it might be better to start readahead at commit root */
+       key_start.objectid = logical;
+       key_start.type = BTRFS_EXTENT_ITEM_KEY;
+       key_start.offset = (u64)0;
+       key_end.objectid = base + offset + nstripes * increment;
+       key_end.type = BTRFS_EXTENT_ITEM_KEY;
+       key_end.offset = (u64)0;
+       reada1 = btrfs_reada_add(root, &key_start, &key_end);
+
+       key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+       key_start.type = BTRFS_EXTENT_CSUM_KEY;
+       key_start.offset = logical;
+       key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+       key_end.type = BTRFS_EXTENT_CSUM_KEY;
+       key_end.offset = base + offset + nstripes * increment;
+       reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
+
+       if (!IS_ERR(reada1))
+               btrfs_reada_wait(reada1);
+       if (!IS_ERR(reada2))
+               btrfs_reada_wait(reada2);
 
-                       path->slots[0]++;
-               }
-               btrfs_release_path(path);
-               logical += increment;
-               physical += map->stripe_len;
-               cond_resched();
+       mutex_lock(&fs_info->scrub_lock);
+       while (atomic_read(&fs_info->scrub_pause_req)) {
+               mutex_unlock(&fs_info->scrub_lock);
+               wait_event(fs_info->scrub_pause_wait,
+                  atomic_read(&fs_info->scrub_pause_req) == 0);
+               mutex_lock(&fs_info->scrub_lock);
        }
+       atomic_dec(&fs_info->scrubs_paused);
+       mutex_unlock(&fs_info->scrub_lock);
+       wake_up(&fs_info->scrub_pause_wait);
 
        /*
         * collect all data csums for the stripe to avoid seeking during
         * the scrub. This might currently (crc32) end up to be about 1MB
         */
-       start_stripe = 0;
        blk_start_plug(&plug);
-again:
-       logical = base + offset + start_stripe * increment;
-       for (i = start_stripe; i < nstripes; ++i) {
-               ret = btrfs_lookup_csums_range(csum_root, logical,
-                                              logical + map->stripe_len - 1,
-                                              &sdev->csum_list, 1);
-               if (ret)
-                       goto out;
 
-               logical += increment;
-               cond_resched();
-       }
        /*
         * now find all extents for each stripe and scrub them
         */
-       logical = base + offset + start_stripe * increment;
-       physical = map->stripes[num].physical + start_stripe * map->stripe_len;
+       logical = base + offset;
+       physical = map->stripes[num].physical;
        ret = 0;
-       for (i = start_stripe; i < nstripes; ++i) {
+       for (i = 0; i < nstripes; ++i) {
                /*
                 * canceled?
                 */
@@ -1229,11 +1257,14 @@ again:
                        atomic_dec(&fs_info->scrubs_paused);
                        mutex_unlock(&fs_info->scrub_lock);
                        wake_up(&fs_info->scrub_pause_wait);
-                       scrub_free_csums(sdev);
-                       start_stripe = i;
-                       goto again;
                }
 
+               ret = btrfs_lookup_csums_range(csum_root, logical,
+                                              logical + map->stripe_len - 1,
+                                              &sdev->csum_list, 1);
+               if (ret)
+                       goto out;
+
                key.objectid = logical;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = (u64)0;
@@ -1329,7 +1360,6 @@ next:
 
 out:
        blk_finish_plug(&plug);
-out_noplug:
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
 }
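
For reference, a condensed sketch of the readahead pattern that replaces the removed btrfs_search_slot() prefetch loop above: one readahead request is queued over the stripe's extent-tree range and one over the matching csum range, and both are waited for before scrubbing starts (the scrub counts itself as paused around the wait so it does not hold off transaction commits, as the hunk shows). The helper name scrub_prefetch_sketch is invented for illustration; the reada calls and key layout are the ones used in the hunk.

/* sketch only: assumes it is built next to fs/btrfs/scrub.c */
#include <linux/err.h>
#include "ctree.h"

static void scrub_prefetch_sketch(struct btrfs_root *extent_root,
                                  struct btrfs_root *csum_root,
                                  u64 start, u64 end)
{
        struct reada_control *rc_extent;
        struct reada_control *rc_csum;
        struct btrfs_key key_start;
        struct btrfs_key key_end;

        /* extent items covering the logical range of this stripe */
        key_start.objectid = start;
        key_start.type = BTRFS_EXTENT_ITEM_KEY;
        key_start.offset = 0;
        key_end.objectid = end;
        key_end.type = BTRFS_EXTENT_ITEM_KEY;
        key_end.offset = 0;
        rc_extent = btrfs_reada_add(extent_root, &key_start, &key_end);

        /* checksum items for the same logical range */
        key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key_start.type = BTRFS_EXTENT_CSUM_KEY;
        key_start.offset = start;
        key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key_end.type = BTRFS_EXTENT_CSUM_KEY;
        key_end.offset = end;
        rc_csum = btrfs_reada_add(csum_root, &key_start, &key_end);

        /* readahead is best effort: only wait for requests that were created */
        if (!IS_ERR(rc_extent))
                btrfs_reada_wait(rc_extent);
        if (!IS_ERR(rc_csum))
                btrfs_reada_wait(rc_csum);
}

With the manual prefetch gone, btrfs_lookup_csums_range() is now called per stripe inside the main loop (see the hunk), so the csum list no longer has to be rebuilt through the removed scrub_free_csums()/goto again path after a scrub pause.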