Merge git://git.jan-o-sch.net/btrfs-unstable into integration
author Chris Mason <chris.mason@oracle.com>
Sun, 6 Nov 2011 08:07:10 +0000 (03:07 -0500)
committer Chris Mason <chris.mason@oracle.com>
Sun, 6 Nov 2011 08:07:10 +0000 (03:07 -0500)
Conflicts:
fs/btrfs/Makefile
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/scrub.c

Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/Makefile
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h

@@@ -7,7 -7,7 +7,7 @@@ btrfs-y += super.o ctree.o extent-tree.
           extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
           extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
           export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 -         compression.o delayed-ref.o relocation.o delayed-inode.o backref.o \
 -         scrub.o
 +         compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-          reada.o
++         reada.o backref.o
  
  btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
@@@ -618,37 -613,6 +618,37 @@@ out
        return ret;
  }
  
-                        struct extent_state *state)
 +static int btree_io_failed_hook(struct bio *failed_bio,
 +                       struct page *page, u64 start, u64 end,
++                       u64 mirror_num, struct extent_state *state)
 +{
 +      struct extent_io_tree *tree;
 +      unsigned long len;
 +      struct extent_buffer *eb;
 +      struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 +
 +      tree = &BTRFS_I(page->mapping->host)->io_tree;
 +      if (page->private == EXTENT_PAGE_PRIVATE)
 +              goto out;
 +      if (!page->private)
 +              goto out;
 +
 +      len = page->private >> 2;
 +      WARN_ON(len == 0);
 +
 +      eb = alloc_extent_buffer(tree, start, len, page);
 +      if (eb == NULL)
 +              goto out;
 +
 +      if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
 +              clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
 +              btree_readahead_hook(root, eb, eb->start, -EIO);
 +      }
 +
 +out:
 +      return -EIO;    /* we fixed nothing */
 +}
 +
  static void end_workqueue_bio(struct bio *bio, int err)
  {
        struct end_io_wq *end_io_wq = bio->bi_private;
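
btree_io_failed_hook() above deliberately returns -EIO even after it has done
its work: it only clears EXTENT_BUFFER_READAHEAD and notifies the readahead
machinery via btree_readahead_hook(), leaving the actual retry to the generic
path in end_bio_extent_readpage(). A hedged sketch of how such a hook is
registered -- the extent_io_ops table in disk-io.c is not part of this hunk,
so the surrounding fields are assumptions:

	static struct extent_io_ops btree_extent_io_ops = {
		/* ... other callbacks elided ... */
		.readpage_io_failed_hook = btree_io_failed_hook,
	};
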
Simple merge
@@@ -1915,11 -2093,19 +2281,19 @@@ static void end_bio_extent_readpage(str
                                                              state);
                        if (ret)
                                uptodate = 0;
+                       else
+                               clean_io_failure(start, page);
                }
-               if (!uptodate && tree->ops &&
-                   tree->ops->readpage_io_failed_hook) {
-                       ret = tree->ops->readpage_io_failed_hook(bio, page,
-                                                        start, end, state);
+               if (!uptodate) {
+                       u64 failed_mirror;
+                       failed_mirror = (u64)bio->bi_bdev;
+                       if (tree->ops && tree->ops->readpage_io_failed_hook)
+                               ret = tree->ops->readpage_io_failed_hook(
+                                               bio, page, start, end,
 -                                              failed_mirror, NULL);
++                                              failed_mirror, state);
+                       else
+                               ret = bio_readpage_error(bio, page, start, end,
+                                                        failed_mirror, NULL);
                        if (ret == 0) {
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
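
The (u64)bio->bi_bdev cast above is intentional: once a read has completed,
the device pointer in the bio is no longer needed, so this series smuggles the
mirror number of the failed read through that field. A minimal sketch of both
ends of the hand-off, assuming the store happens in the bio completion code in
volumes.c as elsewhere in this series:

	/* store side (bio completion): recycle the dead device pointer */
	bio->bi_bdev = (struct block_device *)(unsigned long)mirror_num;

	/* load side, as in end_bio_extent_readpage() above */
	failed_mirror = (u64)bio->bi_bdev;
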
@@@ -17,7 -17,7 +17,8 @@@
  #define EXTENT_NODATASUM (1 << 10)
  #define EXTENT_DO_ACCOUNTING (1 << 11)
  #define EXTENT_FIRST_DELALLOC (1 << 12)
 -#define EXTENT_DAMAGED (1 << 13)
 +#define EXTENT_NEED_WAIT (1 << 13)
++#define EXTENT_DAMAGED (1 << 14)
  #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
  #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
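
Both parents of this merge claimed bit 13 -- EXTENT_NEED_WAIT on the
integration side, EXTENT_DAMAGED in btrfs-unstable -- so the resolution above
moves EXTENT_DAMAGED to bit 14. A hypothetical compile-time guard against this
kind of collision, not part of the merge itself, could read:

	BUILD_BUG_ON(EXTENT_NEED_WAIT & EXTENT_DAMAGED);
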
  
Simple merge
@@@ -2890,6 -2856,144 +2891,144 @@@ static long btrfs_ioctl_scrub_progress(
        return ret;
  }
  
 -      struct btrfs_ioctl_ino_path_args *ipa;
+ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
+ {
+       int ret = 0;
+       int i;
+       unsigned long rel_ptr;
+       int size;
++      struct btrfs_ioctl_ino_path_args *ipa = NULL;
+       struct inode_fs_paths *ipath = NULL;
+       struct btrfs_path *path;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       ipa = memdup_user(arg, sizeof(*ipa));
+       if (IS_ERR(ipa)) {
+               ret = PTR_ERR(ipa);
+               ipa = NULL;
+               goto out;
+       }
+       size = min_t(u32, ipa->size, 4096);
+       ipath = init_ipath(size, root, path);
+       if (IS_ERR(ipath)) {
+               ret = PTR_ERR(ipath);
+               ipath = NULL;
+               goto out;
+       }
+       ret = paths_from_inode(ipa->inum, ipath);
+       if (ret < 0)
+               goto out;
+       for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
+               rel_ptr = ipath->fspath->str[i] - (char *)ipath->fspath->str;
+               ipath->fspath->str[i] = (void *)rel_ptr;
+       }
+       ret = copy_to_user(ipa->fspath, ipath->fspath, size);
+       if (ret) {
+               ret = -EFAULT;
+               goto out;
+       }
+ out:
+       btrfs_free_path(path);
+       free_ipath(ipath);
+       kfree(ipa);
+       return ret;
+ }
+
+ static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
+ {
+       struct btrfs_data_container *inodes = ctx;
+       const size_t c = 3 * sizeof(u64);
+       if (inodes->bytes_left >= c) {
+               inodes->bytes_left -= c;
+               inodes->val[inodes->elem_cnt] = inum;
+               inodes->val[inodes->elem_cnt + 1] = offset;
+               inodes->val[inodes->elem_cnt + 2] = root;
+               inodes->elem_cnt += 3;
+       } else {
+               inodes->bytes_missing += c - inodes->bytes_left;
+               inodes->bytes_left = 0;
+               inodes->elem_missed += 3;
+       }
+       return 0;
+ }
+
+ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
+                                       void __user *arg)
+ {
+       int ret = 0;
+       int size;
+       u64 extent_offset;
+       struct btrfs_ioctl_logical_ino_args *loi;
+       struct btrfs_data_container *inodes = NULL;
+       struct btrfs_path *path = NULL;
+       struct btrfs_key key;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       loi = memdup_user(arg, sizeof(*loi));
+       if (IS_ERR(loi)) {
+               ret = PTR_ERR(loi);
+               loi = NULL;
+               goto out;
+       }
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       size = min_t(u32, loi->size, 4096);
+       inodes = init_data_container(size);
+       if (IS_ERR(inodes)) {
+               ret = PTR_ERR(inodes);
+               inodes = NULL;
+               goto out;
+       }
+       ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+       if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+               ret = -ENOENT;
+       if (ret < 0)
+               goto out;
+       extent_offset = loi->logical - key.objectid;
+       ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
+                                       extent_offset, build_ino_list, inodes);
+       if (ret < 0)
+               goto out;
+       ret = copy_to_user(loi->inodes, inodes, size);
+       if (ret)
+               ret = -EFAULT;
+ out:
+       btrfs_free_path(path);
+       kfree(inodes);
+       kfree(loi);
+       return ret;
+ }
  long btrfs_ioctl(struct file *file, unsigned int
                cmd, unsigned long arg)
  {
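
For reference, a user-space sketch that drives the new BTRFS_IOC_INO_PATHS
ioctl added above. The struct layouts and the ioctl number are hand-copied
from this series' ioctl.h and should be treated as assumptions rather than a
stable ABI reference:

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/stat.h>
	#include <linux/types.h>

	struct btrfs_data_container {
		__u32 bytes_left;	/* out */
		__u32 bytes_missing;	/* out */
		__u32 elem_cnt;		/* out */
		__u32 elem_missed;	/* out */
		__u64 val[0];		/* out */
	};

	struct btrfs_ioctl_ino_path_args {
		__u64 inum;		/* in */
		__u32 size;		/* in */
		__u64 reserved[4];
		__u64 fspath;		/* out: user buffer, cast to __u64 */
	};

	#define BTRFS_IOCTL_MAGIC 0x94
	#define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
					  struct btrfs_ioctl_ino_path_args)

	int main(int argc, char **argv)
	{
		struct btrfs_ioctl_ino_path_args ipa = { 0 };
		struct btrfs_data_container *out = calloc(1, 4096);
		struct stat st;
		__u32 i;
		int fd;

		if (argc < 2 || !out)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || fstat(fd, &st))
			return 1;
		ipa.inum = st.st_ino;
		ipa.size = 4096;
		ipa.fspath = (__u64)(unsigned long)out;
		if (ioctl(fd, BTRFS_IOC_INO_PATHS, &ipa) < 0)
			return 1;
		/* the loop in btrfs_ioctl_ino_to_path() above rewrote each
		 * string pointer as an offset from the start of val[] */
		for (i = 0; i < out->elem_cnt; ++i)
			printf("%s\n", (char *)out->val + out->val[i]);
		return 0;
	}

BTRFS_IOC_LOGICAL_INO works the same way through btrfs_ioctl_logical_to_ino(),
but fills val[] with the (inode, offset, root) u64 triples packed by
build_ino_list() above instead of strings.
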
index 2b701d0,0000000..cd85711
mode 100644,000000..100644
--- /dev/null
@@@ -1,949 -1,0 +1,949 @@@
-                                         struct btrfs_multi_bio *multi)
 +/*
 + * Copyright (C) 2011 STRATO.  All rights reserved.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public
 + * License v2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public
 + * License along with this program; if not, write to the
 + * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 + * Boston, MA 021110-1307, USA.
 + */
 +
 +#include <linux/sched.h>
 +#include <linux/pagemap.h>
 +#include <linux/writeback.h>
 +#include <linux/blkdev.h>
 +#include <linux/rbtree.h>
 +#include <linux/slab.h>
 +#include <linux/workqueue.h>
 +#include "ctree.h"
 +#include "volumes.h"
 +#include "disk-io.h"
 +#include "transaction.h"
 +
 +#undef DEBUG
 +
 +/*
 + * This is the implementation for the generic read ahead framework.
 + *
 + * To trigger a readahead, btrfs_reada_add must be called. It will start
 + * a read ahead for the given range [start, end) on tree root. The returned
 + * handle can either be used to wait on the readahead to finish
 + * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 + *
 + * The read ahead works as follows:
 + * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 + * reada_start_machine will then search for extents to prefetch and trigger
 + * some reads. When a read finishes for a node, all contained node/leaf
 + * pointers that lie in the given range will also be enqueued. The reads will
 + * be triggered in sequential order, thus giving a big win over a naive
 + * enumeration. It will also make use of multi-device layouts. Each disk
 + * will have its own read pointer and all disks will be utilized in parallel.
 + * Also, no two disks will read both sides of a mirror simultaneously, as this
 + * would waste seeking capacity. Instead both disks will read different parts
 + * of the filesystem.
 + * Any number of readaheads can be started in parallel. The read order will be
 + * determined globally, i.e. 2 parallel readaheads will normally finish faster
 + * than the 2 started one after another.
 + */
 +
 +#define MAX_MIRRORS 2
 +#define MAX_IN_FLIGHT 6
 +
 +struct reada_extctl {
 +      struct list_head        list;
 +      struct reada_control    *rc;
 +      u64                     generation;
 +};
 +
 +struct reada_extent {
 +      u64                     logical;
 +      struct btrfs_key        top;
 +      u32                     blocksize;
 +      int                     err;
 +      struct list_head        extctl;
 +      struct kref             refcnt;
 +      spinlock_t              lock;
 +      struct reada_zone       *zones[MAX_MIRRORS];
 +      int                     nzones;
 +      struct btrfs_device     *scheduled_for;
 +};
 +
 +struct reada_zone {
 +      u64                     start;
 +      u64                     end;
 +      u64                     elems;
 +      struct list_head        list;
 +      spinlock_t              lock;
 +      int                     locked;
 +      struct btrfs_device     *device;
 +      struct btrfs_device     *devs[MAX_MIRRORS]; /* full list, incl self */
 +      int                     ndevs;
 +      struct kref             refcnt;
 +};
 +
 +struct reada_machine_work {
 +      struct btrfs_work       work;
 +      struct btrfs_fs_info    *fs_info;
 +};
 +
 +static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
 +static void reada_control_release(struct kref *kref);
 +static void reada_zone_release(struct kref *kref);
 +static void reada_start_machine(struct btrfs_fs_info *fs_info);
 +static void __reada_start_machine(struct btrfs_fs_info *fs_info);
 +
 +static int reada_add_block(struct reada_control *rc, u64 logical,
 +                         struct btrfs_key *top, int level, u64 generation);
 +
 +/* recurses */
 +/* in case of err, eb might be NULL */
 +static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
 +                          u64 start, int err)
 +{
 +      int level = 0;
 +      int nritems;
 +      int i;
 +      u64 bytenr;
 +      u64 generation;
 +      struct reada_extent *re;
 +      struct btrfs_fs_info *fs_info = root->fs_info;
 +      struct list_head list;
 +      unsigned long index = start >> PAGE_CACHE_SHIFT;
 +      struct btrfs_device *for_dev;
 +
 +      if (eb)
 +              level = btrfs_header_level(eb);
 +
 +      /* find extent */
 +      spin_lock(&fs_info->reada_lock);
 +      re = radix_tree_lookup(&fs_info->reada_tree, index);
 +      if (re)
 +              kref_get(&re->refcnt);
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      if (!re)
 +              return -1;
 +
 +      spin_lock(&re->lock);
 +      /*
 +       * just take the full list from the extent. afterwards we
 +       * don't need the lock anymore
 +       */
 +      list_replace_init(&re->extctl, &list);
 +      for_dev = re->scheduled_for;
 +      re->scheduled_for = NULL;
 +      spin_unlock(&re->lock);
 +
 +      if (err == 0) {
 +              nritems = level ? btrfs_header_nritems(eb) : 0;
 +              generation = btrfs_header_generation(eb);
 +              /*
 +               * FIXME: currently we just set nritems to 0 if this is a leaf,
 +               * effectively ignoring the content. As a next step we could
 +               * trigger more readahead depending on the content, e.g.
 +               * fetch the checksums for the extents in the leaf.
 +               */
 +      } else {
 +              /*
 +               * this is the error case, the extent buffer has not been
 +               * read correctly. We won't access anything from it and
 +               * just cleanup our data structures. Effectively this will
 +               * cut the branch below this node from read ahead.
 +               */
 +              nritems = 0;
 +              generation = 0;
 +      }
 +
 +      for (i = 0; i < nritems; i++) {
 +              struct reada_extctl *rec;
 +              u64 n_gen;
 +              struct btrfs_key key;
 +              struct btrfs_key next_key;
 +
 +              btrfs_node_key_to_cpu(eb, &key, i);
 +              if (i + 1 < nritems)
 +                      btrfs_node_key_to_cpu(eb, &next_key, i + 1);
 +              else
 +                      next_key = re->top;
 +              bytenr = btrfs_node_blockptr(eb, i);
 +              n_gen = btrfs_node_ptr_generation(eb, i);
 +
 +              list_for_each_entry(rec, &list, list) {
 +                      struct reada_control *rc = rec->rc;
 +
 +                      /*
 +                       * if the generation doesn't match, just ignore this
 +                       * extctl. This will probably cut off a branch from
 +                       * prefetch. Alternatively one could start a new (sub-)
 +                       * prefetch for this branch, starting again from root.
 +                       * FIXME: move the generation check out of this loop
 +                       */
 +#ifdef DEBUG
 +                      if (rec->generation != generation) {
 +                              printk(KERN_DEBUG "generation mismatch for "
 +                                              "(%llu,%d,%llu) %llu != %llu\n",
 +                                     key.objectid, key.type, key.offset,
 +                                     rec->generation, generation);
 +                      }
 +#endif
 +                      if (rec->generation == generation &&
 +                          btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
 +                          btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
 +                              reada_add_block(rc, bytenr, &next_key,
 +                                              level - 1, n_gen);
 +              }
 +      }
 +      /*
 +       * free extctl records
 +       */
 +      while (!list_empty(&list)) {
 +              struct reada_control *rc;
 +              struct reada_extctl *rec;
 +
 +              rec = list_first_entry(&list, struct reada_extctl, list);
 +              list_del(&rec->list);
 +              rc = rec->rc;
 +              kfree(rec);
 +
 +              kref_get(&rc->refcnt);
 +              if (atomic_dec_and_test(&rc->elems)) {
 +                      kref_put(&rc->refcnt, reada_control_release);
 +                      wake_up(&rc->wait);
 +              }
 +              kref_put(&rc->refcnt, reada_control_release);
 +
 +              reada_extent_put(fs_info, re);  /* one ref for each entry */
 +      }
 +      reada_extent_put(fs_info, re);  /* our ref */
 +      if (for_dev)
 +              atomic_dec(&for_dev->reada_in_flight);
 +
 +      return 0;
 +}
 +
 +/*
 + * start is passed separately in case eb is NULL, which may be the case with
 + * failed I/O
 + */
 +int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
 +                       u64 start, int err)
 +{
 +      int ret;
 +
 +      ret = __readahead_hook(root, eb, start, err);
 +
 +      reada_start_machine(root->fs_info);
 +
 +      return ret;
 +}
 +
 +static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
 +                                        struct btrfs_device *dev, u64 logical,
-       struct btrfs_multi_bio *multi = NULL;
++                                        struct btrfs_bio *multi)
 +{
 +      int ret;
 +      int looped = 0;
 +      struct reada_zone *zone;
 +      struct btrfs_block_group_cache *cache = NULL;
 +      u64 start;
 +      u64 end;
 +      int i;
 +
 +again:
 +      zone = NULL;
 +      spin_lock(&fs_info->reada_lock);
 +      ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
 +                                   logical >> PAGE_CACHE_SHIFT, 1);
 +      if (ret == 1)
 +              kref_get(&zone->refcnt);
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      if (ret == 1) {
 +              if (logical >= zone->start && logical < zone->end)
 +                      return zone;
 +              spin_lock(&fs_info->reada_lock);
 +              kref_put(&zone->refcnt, reada_zone_release);
 +              spin_unlock(&fs_info->reada_lock);
 +      }
 +
 +      if (looped)
 +              return NULL;
 +
 +      cache = btrfs_lookup_block_group(fs_info, logical);
 +      if (!cache)
 +              return NULL;
 +
 +      start = cache->key.objectid;
 +      end = start + cache->key.offset - 1;
 +      btrfs_put_block_group(cache);
 +
 +      zone = kzalloc(sizeof(*zone), GFP_NOFS);
 +      if (!zone)
 +              return NULL;
 +
 +      zone->start = start;
 +      zone->end = end;
 +      INIT_LIST_HEAD(&zone->list);
 +      spin_lock_init(&zone->lock);
 +      zone->locked = 0;
 +      kref_init(&zone->refcnt);
 +      zone->elems = 0;
 +      zone->device = dev; /* our device always sits at index 0 */
 +      for (i = 0; i < multi->num_stripes; ++i) {
 +              /* bounds have already been checked */
 +              zone->devs[i] = multi->stripes[i].dev;
 +      }
 +      zone->ndevs = multi->num_stripes;
 +
 +      spin_lock(&fs_info->reada_lock);
 +      ret = radix_tree_insert(&dev->reada_zones,
 +                              (unsigned long)zone->end >> PAGE_CACHE_SHIFT,
 +                              zone);
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      if (ret) {
 +              kfree(zone);
 +              looped = 1;
 +              goto again;
 +      }
 +
 +      return zone;
 +}
 +
 +static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 +                                            u64 logical,
 +                                            struct btrfs_key *top, int level)
 +{
 +      int ret;
 +      int looped = 0;
 +      struct reada_extent *re = NULL;
 +      struct btrfs_fs_info *fs_info = root->fs_info;
 +      struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
++      struct btrfs_bio *multi = NULL;
 +      struct btrfs_device *dev;
 +      u32 blocksize;
 +      u64 length;
 +      int nzones = 0;
 +      int i;
 +      unsigned long index = logical >> PAGE_CACHE_SHIFT;
 +
 +again:
 +      spin_lock(&fs_info->reada_lock);
 +      re = radix_tree_lookup(&fs_info->reada_tree, index);
 +      if (re)
 +              kref_get(&re->refcnt);
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      if (re || looped)
 +              return re;
 +
 +      re = kzalloc(sizeof(*re), GFP_NOFS);
 +      if (!re)
 +              return NULL;
 +
 +      blocksize = btrfs_level_size(root, level);
 +      re->logical = logical;
 +      re->blocksize = blocksize;
 +      re->top = *top;
 +      INIT_LIST_HEAD(&re->extctl);
 +      spin_lock_init(&re->lock);
 +      kref_init(&re->refcnt);
 +
 +      /*
 +       * map block
 +       */
 +      length = blocksize;
 +      ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &multi, 0);
 +      if (ret || !multi || length < blocksize)
 +              goto error;
 +
 +      if (multi->num_stripes > MAX_MIRRORS) {
 +              printk(KERN_ERR "btrfs readahead: more than %d copies not "
 +                              "supported\n", MAX_MIRRORS);
 +              goto error;
 +      }
 +
 +      for (nzones = 0; nzones < multi->num_stripes; ++nzones) {
 +              struct reada_zone *zone;
 +
 +              dev = multi->stripes[nzones].dev;
 +              zone = reada_find_zone(fs_info, dev, logical, multi);
 +              if (!zone)
 +                      break;
 +
 +              re->zones[nzones] = zone;
 +              spin_lock(&zone->lock);
 +              if (!zone->elems)
 +                      kref_get(&zone->refcnt);
 +              ++zone->elems;
 +              spin_unlock(&zone->lock);
 +              spin_lock(&fs_info->reada_lock);
 +              kref_put(&zone->refcnt, reada_zone_release);
 +              spin_unlock(&fs_info->reada_lock);
 +      }
 +      re->nzones = nzones;
 +      if (nzones == 0) {
 +              /* not a single zone found, error and out */
 +              goto error;
 +      }
 +
 +      /* insert extent in reada_tree + all per-device trees, all or nothing */
 +      spin_lock(&fs_info->reada_lock);
 +      ret = radix_tree_insert(&fs_info->reada_tree, index, re);
 +      if (ret) {
 +              spin_unlock(&fs_info->reada_lock);
 +              if (ret != -ENOMEM) {
 +                      /* someone inserted the extent in the meantime */
 +                      looped = 1;
 +              }
 +              goto error;
 +      }
 +      for (i = 0; i < nzones; ++i) {
 +              dev = multi->stripes[i].dev;
 +              ret = radix_tree_insert(&dev->reada_extents, index, re);
 +              if (ret) {
 +                      while (--i >= 0) {
 +                              dev = multi->stripes[i].dev;
 +                              BUG_ON(dev == NULL);
 +                              radix_tree_delete(&dev->reada_extents, index);
 +                      }
 +                      BUG_ON(fs_info == NULL);
 +                      radix_tree_delete(&fs_info->reada_tree, index);
 +                      spin_unlock(&fs_info->reada_lock);
 +                      goto error;
 +              }
 +      }
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      return re;
 +
 +error:
 +      while (nzones) {
 +              struct reada_zone *zone;
 +
 +              --nzones;
 +              zone = re->zones[nzones];
 +              kref_get(&zone->refcnt);
 +              spin_lock(&zone->lock);
 +              --zone->elems;
 +              if (zone->elems == 0) {
 +                      /*
 +                       * no fs_info->reada_lock needed, as this can't be
 +                       * the last ref
 +                       */
 +                      kref_put(&zone->refcnt, reada_zone_release);
 +              }
 +              spin_unlock(&zone->lock);
 +
 +              spin_lock(&fs_info->reada_lock);
 +              kref_put(&zone->refcnt, reada_zone_release);
 +              spin_unlock(&fs_info->reada_lock);
 +      }
 +      kfree(re);
 +      if (looped)
 +              goto again;
 +      return NULL;
 +}
 +
 +static void reada_kref_dummy(struct kref *kr)
 +{
 +}
 +
 +static void reada_extent_put(struct btrfs_fs_info *fs_info,
 +                           struct reada_extent *re)
 +{
 +      int i;
 +      unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
 +
 +      spin_lock(&fs_info->reada_lock);
 +      if (!kref_put(&re->refcnt, reada_kref_dummy)) {
 +              spin_unlock(&fs_info->reada_lock);
 +              return;
 +      }
 +
 +      radix_tree_delete(&fs_info->reada_tree, index);
 +      for (i = 0; i < re->nzones; ++i) {
 +              struct reada_zone *zone = re->zones[i];
 +
 +              radix_tree_delete(&zone->device->reada_extents, index);
 +      }
 +
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      for (i = 0; i < re->nzones; ++i) {
 +              struct reada_zone *zone = re->zones[i];
 +
 +              kref_get(&zone->refcnt);
 +              spin_lock(&zone->lock);
 +              --zone->elems;
 +              if (zone->elems == 0) {
 +                      /* no fs_info->reada_lock needed, as this can't be
 +                       * the last ref */
 +                      kref_put(&zone->refcnt, reada_zone_release);
 +              }
 +              spin_unlock(&zone->lock);
 +
 +              spin_lock(&fs_info->reada_lock);
 +              kref_put(&zone->refcnt, reada_zone_release);
 +              spin_unlock(&fs_info->reada_lock);
 +      }
 +      if (re->scheduled_for)
 +              atomic_dec(&re->scheduled_for->reada_in_flight);
 +
 +      kfree(re);
 +}
 +
 +static void reada_zone_release(struct kref *kref)
 +{
 +      struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
 +
 +      radix_tree_delete(&zone->device->reada_zones,
 +                        zone->end >> PAGE_CACHE_SHIFT);
 +
 +      kfree(zone);
 +}
 +
 +static void reada_control_release(struct kref *kref)
 +{
 +      struct reada_control *rc = container_of(kref, struct reada_control,
 +                                              refcnt);
 +
 +      kfree(rc);
 +}
 +
 +static int reada_add_block(struct reada_control *rc, u64 logical,
 +                         struct btrfs_key *top, int level, u64 generation)
 +{
 +      struct btrfs_root *root = rc->root;
 +      struct reada_extent *re;
 +      struct reada_extctl *rec;
 +
 +      re = reada_find_extent(root, logical, top, level); /* takes one ref */
 +      if (!re)
 +              return -1;
 +
 +      rec = kzalloc(sizeof(*rec), GFP_NOFS);
 +      if (!rec) {
 +              reada_extent_put(root->fs_info, re);
 +              return -1;
 +      }
 +
 +      rec->rc = rc;
 +      rec->generation = generation;
 +      atomic_inc(&rc->elems);
 +
 +      spin_lock(&re->lock);
 +      list_add_tail(&rec->list, &re->extctl);
 +      spin_unlock(&re->lock);
 +
 +      /* leave the ref on the extent */
 +
 +      return 0;
 +}
 +
 +/*
 + * called with fs_info->reada_lock held
 + */
 +static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
 +{
 +      int i;
 +      unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
 +
 +      for (i = 0; i < zone->ndevs; ++i) {
 +              struct reada_zone *peer;
 +              peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
 +              if (peer && peer->device != zone->device)
 +                      peer->locked = lock;
 +      }
 +}
 +
 +/*
 + * called with fs_info->reada_lock held
 + */
 +static int reada_pick_zone(struct btrfs_device *dev)
 +{
 +      struct reada_zone *top_zone = NULL;
 +      struct reada_zone *top_locked_zone = NULL;
 +      u64 top_elems = 0;
 +      u64 top_locked_elems = 0;
 +      unsigned long index = 0;
 +      int ret;
 +
 +      if (dev->reada_curr_zone) {
 +              reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
 +              kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
 +              dev->reada_curr_zone = NULL;
 +      }
 +      /* pick the zone with the most elements */
 +      while (1) {
 +              struct reada_zone *zone;
 +
 +              ret = radix_tree_gang_lookup(&dev->reada_zones,
 +                                           (void **)&zone, index, 1);
 +              if (ret == 0)
 +                      break;
 +              index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
 +              if (zone->locked) {
 +                      if (zone->elems > top_locked_elems) {
 +                              top_locked_elems = zone->elems;
 +                              top_locked_zone = zone;
 +                      }
 +              } else {
 +                      if (zone->elems > top_elems) {
 +                              top_elems = zone->elems;
 +                              top_zone = zone;
 +                      }
 +              }
 +      }
 +      if (top_zone)
 +              dev->reada_curr_zone = top_zone;
 +      else if (top_locked_zone)
 +              dev->reada_curr_zone = top_locked_zone;
 +      else
 +              return 0;
 +
 +      dev->reada_next = dev->reada_curr_zone->start;
 +      kref_get(&dev->reada_curr_zone->refcnt);
 +      reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
 +
 +      return 1;
 +}
 +
 +static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
 +                                 struct btrfs_device *dev)
 +{
 +      struct reada_extent *re = NULL;
 +      int mirror_num = 0;
 +      struct extent_buffer *eb = NULL;
 +      u64 logical;
 +      u32 blocksize;
 +      int ret;
 +      int i;
 +      int need_kick = 0;
 +
 +      spin_lock(&fs_info->reada_lock);
 +      if (dev->reada_curr_zone == NULL) {
 +              ret = reada_pick_zone(dev);
 +              if (!ret) {
 +                      spin_unlock(&fs_info->reada_lock);
 +                      return 0;
 +              }
 +      }
 +      /*
 +       * FIXME currently we issue the reads one extent at a time. If we have
 +       * a contiguous block of extents, we could also coalesce them or use
 +       * plugging to speed things up
 +       */
 +      ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
 +                                   dev->reada_next >> PAGE_CACHE_SHIFT, 1);
 +      if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
 +              ret = reada_pick_zone(dev);
 +              if (!ret) {
 +                      spin_unlock(&fs_info->reada_lock);
 +                      return 0;
 +              }
 +              re = NULL;
 +              ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
 +                                      dev->reada_next >> PAGE_CACHE_SHIFT, 1);
 +      }
 +      if (ret == 0) {
 +              spin_unlock(&fs_info->reada_lock);
 +              return 0;
 +      }
 +      dev->reada_next = re->logical + re->blocksize;
 +      kref_get(&re->refcnt);
 +
 +      spin_unlock(&fs_info->reada_lock);
 +
 +      /*
 +       * find mirror num
 +       */
 +      for (i = 0; i < re->nzones; ++i) {
 +              if (re->zones[i]->device == dev) {
 +                      mirror_num = i + 1;
 +                      break;
 +              }
 +      }
 +      logical = re->logical;
 +      blocksize = re->blocksize;
 +
 +      spin_lock(&re->lock);
 +      if (re->scheduled_for == NULL) {
 +              re->scheduled_for = dev;
 +              need_kick = 1;
 +      }
 +      spin_unlock(&re->lock);
 +
 +      reada_extent_put(fs_info, re);
 +
 +      if (!need_kick)
 +              return 0;
 +
 +      atomic_inc(&dev->reada_in_flight);
 +      ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
 +                       mirror_num, &eb);
 +      if (ret)
 +              __readahead_hook(fs_info->extent_root, NULL, logical, ret);
 +      else if (eb)
 +              __readahead_hook(fs_info->extent_root, eb, eb->start, ret);
 +
 +      if (eb)
 +              free_extent_buffer(eb);
 +
 +      return 1;
 +
 +}
 +
 +static void reada_start_machine_worker(struct btrfs_work *work)
 +{
 +      struct reada_machine_work *rmw;
 +      struct btrfs_fs_info *fs_info;
 +
 +      rmw = container_of(work, struct reada_machine_work, work);
 +      fs_info = rmw->fs_info;
 +
 +      kfree(rmw);
 +
 +      __reada_start_machine(fs_info);
 +}
 +
 +static void __reada_start_machine(struct btrfs_fs_info *fs_info)
 +{
 +      struct btrfs_device *device;
 +      struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 +      u64 enqueued;
 +      u64 total = 0;
 +      int i;
 +
 +      do {
 +              enqueued = 0;
 +              list_for_each_entry(device, &fs_devices->devices, dev_list) {
 +                      if (atomic_read(&device->reada_in_flight) <
 +                          MAX_IN_FLIGHT)
 +                              enqueued += reada_start_machine_dev(fs_info,
 +                                                                  device);
 +              }
 +              total += enqueued;
 +      } while (enqueued && total < 10000);
 +
 +      if (enqueued == 0)
 +              return;
 +
 +      /*
 +       * If everything is already in the cache, this is effectively single
 +       * threaded. To a) not hold the caller for too long and b) to utilize
 +       * more cores, we broke the loop above after 10000 iterations and now
 +       * enqueue to workers to finish it. This will distribute the load to
 +       * the cores.
 +       */
 +      for (i = 0; i < 2; ++i)
 +              reada_start_machine(fs_info);
 +}
 +
 +static void reada_start_machine(struct btrfs_fs_info *fs_info)
 +{
 +      struct reada_machine_work *rmw;
 +
 +      rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
 +      if (!rmw) {
 +              /* FIXME we cannot handle this properly right now */
 +              BUG();
 +      }
 +      rmw->work.func = reada_start_machine_worker;
 +      rmw->fs_info = fs_info;
 +
 +      btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
 +}
 +
 +#ifdef DEBUG
 +static void dump_devs(struct btrfs_fs_info *fs_info, int all)
 +{
 +      struct btrfs_device *device;
 +      struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 +      unsigned long index;
 +      int ret;
 +      int i;
 +      int j;
 +      int cnt;
 +
 +      spin_lock(&fs_info->reada_lock);
 +      list_for_each_entry(device, &fs_devices->devices, dev_list) {
 +              printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
 +                      atomic_read(&device->reada_in_flight));
 +              index = 0;
 +              while (1) {
 +                      struct reada_zone *zone;
 +                      ret = radix_tree_gang_lookup(&device->reada_zones,
 +                                                   (void **)&zone, index, 1);
 +                      if (ret == 0)
 +                              break;
 +                      printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
 +                              "%d devs", zone->start, zone->end, zone->elems,
 +                              zone->locked);
 +                      for (j = 0; j < zone->ndevs; ++j) {
 +                              printk(KERN_CONT " %lld",
 +                                      zone->devs[j]->devid);
 +                      }
 +                      if (device->reada_curr_zone == zone)
 +                              printk(KERN_CONT " curr off %llu",
 +                                      device->reada_next - zone->start);
 +                      printk(KERN_CONT "\n");
 +                      index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
 +              }
 +              cnt = 0;
 +              index = 0;
 +              while (all) {
 +                      struct reada_extent *re = NULL;
 +
 +                      ret = radix_tree_gang_lookup(&device->reada_extents,
 +                                                   (void **)&re, index, 1);
 +                      if (ret == 0)
 +                              break;
 +                      printk(KERN_DEBUG
 +                              "  re: logical %llu size %u empty %d for %lld",
 +                              re->logical, re->blocksize,
 +                              list_empty(&re->extctl), re->scheduled_for ?
 +                              re->scheduled_for->devid : -1);
 +
 +                      for (i = 0; i < re->nzones; ++i) {
 +                              printk(KERN_CONT " zone %llu-%llu devs",
 +                                      re->zones[i]->start,
 +                                      re->zones[i]->end);
 +                              for (j = 0; j < re->zones[i]->ndevs; ++j) {
 +                                      printk(KERN_CONT " %lld",
 +                                              re->zones[i]->devs[j]->devid);
 +                              }
 +                      }
 +                      printk(KERN_CONT "\n");
 +                      index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
 +                      if (++cnt > 15)
 +                              break;
 +              }
 +      }
 +
 +      index = 0;
 +      cnt = 0;
 +      while (all) {
 +              struct reada_extent *re = NULL;
 +
 +              ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
 +                                           index, 1);
 +              if (ret == 0)
 +                      break;
 +              if (!re->scheduled_for) {
 +                      index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
 +                      continue;
 +              }
 +              printk(KERN_DEBUG
 +                      "re: logical %llu size %u list empty %d for %lld",
 +                      re->logical, re->blocksize, list_empty(&re->extctl),
 +                      re->scheduled_for ? re->scheduled_for->devid : -1);
 +              for (i = 0; i < re->nzones; ++i) {
 +                      printk(KERN_CONT " zone %llu-%llu devs",
 +                              re->zones[i]->start,
 +                              re->zones[i]->end);
 +                      for (j = 0; j < re->zones[i]->ndevs; ++j) {
 +                              printk(KERN_CONT " %lld",
 +                                      re->zones[i]->devs[j]->devid);
 +                      }
 +              }
 +              printk(KERN_CONT "\n");
 +              index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
 +      }
 +      spin_unlock(&fs_info->reada_lock);
 +}
 +#endif
 +
 +/*
 + * interface
 + */
 +struct reada_control *btrfs_reada_add(struct btrfs_root *root,
 +                      struct btrfs_key *key_start, struct btrfs_key *key_end)
 +{
 +      struct reada_control *rc;
 +      u64 start;
 +      u64 generation;
 +      int level;
 +      struct extent_buffer *node;
 +      static struct btrfs_key max_key = {
 +              .objectid = (u64)-1,
 +              .type = (u8)-1,
 +              .offset = (u64)-1
 +      };
 +
 +      rc = kzalloc(sizeof(*rc), GFP_NOFS);
 +      if (!rc)
 +              return ERR_PTR(-ENOMEM);
 +
 +      rc->root = root;
 +      rc->key_start = *key_start;
 +      rc->key_end = *key_end;
 +      atomic_set(&rc->elems, 0);
 +      init_waitqueue_head(&rc->wait);
 +      kref_init(&rc->refcnt);
 +      kref_get(&rc->refcnt); /* one ref for having elements */
 +
 +      node = btrfs_root_node(root);
 +      start = node->start;
 +      level = btrfs_header_level(node);
 +      generation = btrfs_header_generation(node);
 +      free_extent_buffer(node);
 +
 +      reada_add_block(rc, start, &max_key, level, generation);
 +
 +      reada_start_machine(root->fs_info);
 +
 +      return rc;
 +}
 +
 +#ifdef DEBUG
 +int btrfs_reada_wait(void *handle)
 +{
 +      struct reada_control *rc = handle;
 +
 +      while (atomic_read(&rc->elems)) {
 +              wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
 +                                 5 * HZ);
 +              dump_devs(rc->root->fs_info,
 +                        atomic_read(&rc->elems) < 10 ? 1 : 0);
 +      }
 +
 +      dump_devs(rc->root->fs_info,
 +                atomic_read(&rc->elems) < 10 ? 1 : 0);
 +
 +      kref_put(&rc->refcnt, reada_control_release);
 +
 +      return 0;
 +}
 +#else
 +int btrfs_reada_wait(void *handle)
 +{
 +      struct reada_control *rc = handle;
 +
 +      while (atomic_read(&rc->elems)) {
 +              wait_event(rc->wait, atomic_read(&rc->elems) == 0);
 +      }
 +
 +      kref_put(&rc->refcnt, reada_control_release);
 +
 +      return 0;
 +}
 +#endif
 +
 +void btrfs_reada_detach(void *handle)
 +{
 +      struct reada_control *rc = handle;
 +
 +      kref_put(&rc->refcnt, reada_control_release);
 +}
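
A minimal in-kernel usage sketch for the interface documented at the top of
this file: start a readahead over a whole tree, then either wait for it or
push it to the background. The function name is hypothetical and error
handling is pared down:

	static void reada_whole_tree(struct btrfs_root *root)
	{
		struct reada_control *rc;
		struct btrfs_key key_start = { 0 };	/* smallest key */
		struct btrfs_key key_end = {		/* largest key */
			.objectid = (u64)-1,
			.type = (u8)-1,
			.offset = (u64)-1,
		};

		/* kicks off background reads for [key_start, key_end) */
		rc = btrfs_reada_add(root, &key_start, &key_end);
		if (IS_ERR(rc))
			return;

		/* block until all enqueued extents have been read ... */
		btrfs_reada_wait(rc);

		/* ... or instead drop the handle and let the readahead
		 * finish on its own: btrfs_reada_detach(rc); */
	}
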
@@@ -178,8 -207,9 +204,9 @@@ struct scrub_dev *scrub_setup_dev(struc
        sdev->first_free = 0;
        sdev->curr = -1;
        atomic_set(&sdev->in_flight, 0);
+       atomic_set(&sdev->fixup_cnt, 0);
        atomic_set(&sdev->cancel_req, 0);
 -      sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
 +      sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sdev->csum_list);
  
        spin_lock_init(&sdev->list_lock);
@@@ -743,11 -1136,7 +1132,11 @@@ static noinline_for_stack int scrub_str
        u64 physical;
        u64 logical;
        u64 generation;
-       u64 mirror_num;
+       int mirror_num;
 +      struct reada_control *reada1;
 +      struct reada_control *reada2;
 +      struct btrfs_key key_start;
 +      struct btrfs_key key_end;
  
        u64 increment = map->stripe_len;
        u64 offset;
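
The two reada_control handles declared above are how scrub uses the new
readahead code: before a stripe is scrubbed, the extent tree and the csum tree
for the stripe's logical range are prefetched in parallel and then waited on.
A condensed, hedged sketch of that pattern -- the exact key setup in
scrub_stripe(), and the root/csum_root/nstripes names, lie outside this hunk:

	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = 0;
	key_end.objectid = logical + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = 0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logical + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);
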
Simple merge
Simple merge