Btrfs: Add locking around volume management (device add/remove/balance)
[pandora-kernel.git] / fs / btrfs / volumes.c
index aa001b0..5e6ee7a 100644 (file)
@@ -27,6 +27,7 @@
 #include "transaction.h"
 #include "print-tree.h"
 #include "volumes.h"
+#include "async-thread.h"
 
 struct map_lookup {
        u64 type;
@@ -55,6 +56,18 @@ void btrfs_unlock_volumes(void)
        mutex_unlock(&uuid_mutex);
 }
 
+/*
+ * take the chunk allocation mutexes (alloc_mutex then chunk_mutex,
+ * in that order).  Paired with unlock_chunks() below.
+ */
+static void lock_chunks(struct btrfs_root *root)
+{
+       mutex_lock(&root->fs_info->alloc_mutex);
+       mutex_lock(&root->fs_info->chunk_mutex);
+}
+
+/* release the mutexes taken by lock_chunks() */
+static void unlock_chunks(struct btrfs_root *root)
+{
+       mutex_unlock(&root->fs_info->alloc_mutex);
+       mutex_unlock(&root->fs_info->chunk_mutex);
+}
+
 int btrfs_cleanup_fs_uuids(void)
 {
        struct btrfs_fs_devices *fs_devices;
@@ -71,8 +84,10 @@ int btrfs_cleanup_fs_uuids(void)
                                         dev_list);
                        if (dev->bdev) {
                                close_bdev_excl(dev->bdev);
+                               fs_devices->open_devices--;
                        }
                        list_del(&dev->dev_list);
+                       kfree(dev->name);
                        kfree(dev);
                }
        }
@@ -108,6 +123,101 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid)
        return NULL;
 }
 
+/*
+ * we try to collect pending bios for a device so we don't get a large
+ * number of procs sending bios down to the same device.  This greatly
+ * improves the scheduler's ability to collect and merge the bios.
+ *
+ * But, it also turns into a long list of bios to process and that is sure
+ * to eventually make the worker thread block.  The solution here is to
+ * make some progress and then put this work struct back at the end of
+ * the list if the block device is congested.  This way, multiple devices
+ * can make progress from a single worker thread.
+ */
+int run_scheduled_bios(struct btrfs_device *device)
+{
+       struct bio *pending;
+       struct backing_dev_info *bdi;
+       struct bio *tail;
+       struct bio *cur;
+       int again = 0;
+       unsigned long num_run = 0;
+
+       /* congestion is checked per backing device, below */
+       bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+loop:
+       spin_lock(&device->io_lock);
+
+       /* take all the bios off the list at once and process them
+        * later on (without the lock held).  But, remember the
+        * tail and other pointers so the bios can be properly reinserted
+        * into the list if we hit congestion
+        */
+       pending = device->pending_bios;
+       tail = device->pending_bio_tail;
+       WARN_ON(pending && !tail);
+       device->pending_bios = NULL;
+       device->pending_bio_tail = NULL;
+
+       /*
+        * if pending was null this time around, no bios need processing
+        * at all and we can stop.  Otherwise it'll loop back up again
+        * and do an additional check so no bios are missed.
+        *
+        * device->running_pending is used to synchronize with the
+        * schedule_bio code.
+        */
+       if (pending) {
+               again = 1;
+               device->running_pending = 1;
+       } else {
+               again = 0;
+               device->running_pending = 0;
+       }
+       spin_unlock(&device->io_lock);
+
+       while(pending) {
+               cur = pending;
+               pending = pending->bi_next;
+               cur->bi_next = NULL;
+               /* pairs with the atomic_inc done in schedule_bio() */
+               atomic_dec(&device->dev_root->fs_info->nr_async_submits);
+               submit_bio(cur->bi_rw, cur);
+               num_run++;
+
+               /*
+                * we made progress, there is more work to do and the bdi
+                * is now congested.  Back off and let other work structs
+                * run instead
+                */
+               if (pending && num_run && bdi_write_congested(bdi)) {
+                       struct bio *old_head;
+
+                       /* splice our remaining bios back on the front of
+                        * whatever list built up while the lock was dropped
+                        */
+                       spin_lock(&device->io_lock);
+                       old_head = device->pending_bios;
+                       device->pending_bios = pending;
+                       if (device->pending_bio_tail)
+                               tail->bi_next = old_head;
+                       else
+                               device->pending_bio_tail = tail;
+
+                       spin_unlock(&device->io_lock);
+                       btrfs_requeue_work(&device->work);
+                       goto done;
+               }
+       }
+       if (again)
+               goto loop;
+done:
+       return 0;
+}
+
+/* worker thread entry point: drain the pending bio list of the
+ * btrfs_device embedding this work struct
+ */
+void pending_bios_fn(struct btrfs_work *work)
+{
+       struct btrfs_device *device;
+
+       device = container_of(work, struct btrfs_device, work);
+       run_scheduled_bios(device);
+}
+
 static int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
@@ -118,7 +228,7 @@ static int device_list_add(const char *path,
 
        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
-               fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
+               fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
@@ -127,8 +237,6 @@ static int device_list_add(const char *path,
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
-               fs_devices->lowest_devid = (u64)-1;
-               fs_devices->num_devices = 0;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
@@ -141,6 +249,7 @@ static int device_list_add(const char *path,
                        return -ENOMEM;
                }
                device->devid = devid;
+               device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->barriers = 1;
@@ -159,13 +268,41 @@ static int device_list_add(const char *path,
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
-       if (fs_devices->lowest_devid > devid) {
-               fs_devices->lowest_devid = devid;
-       }
        *fs_devices_ret = fs_devices;
        return 0;
 }
 
+/*
+ * remove every device on the list whose in_fs_metadata flag is clear,
+ * closing its bdev (if open) and freeing the struct.  uuid_mutex is
+ * dropped around close_bdev_excl(), so after each removal the scan
+ * restarts from the head of the list via the 'again' label.
+ */
+int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+{
+       struct list_head *head = &fs_devices->devices;
+       struct list_head *cur;
+       struct btrfs_device *device;
+
+       mutex_lock(&uuid_mutex);
+again:
+       list_for_each(cur, head) {
+               device = list_entry(cur, struct btrfs_device, dev_list);
+               if (!device->in_fs_metadata) {
+                       struct block_device *bdev;
+                       list_del(&device->dev_list);
+                       list_del(&device->dev_alloc_list);
+                       fs_devices->num_devices--;
+                       if (device->bdev) {
+                               bdev = device->bdev;
+                               fs_devices->open_devices--;
+                               /* NOTE(review): mutex dropped across the
+                                * close, presumably because it can sleep
+                                */
+                               mutex_unlock(&uuid_mutex);
+                               close_bdev_excl(bdev);
+                               mutex_lock(&uuid_mutex);
+                       }
+                       kfree(device->name);
+                       kfree(device);
+                       goto again;
+               }
+       }
+       mutex_unlock(&uuid_mutex);
+       return 0;
+}
+
 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 {
        struct list_head *head = &fs_devices->devices;
@@ -177,9 +314,12 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (device->bdev) {
                        close_bdev_excl(device->bdev);
+                       fs_devices->open_devices--;
                }
                device->bdev = NULL;
+               device->in_fs_metadata = 0;
        }
+       fs_devices->mounted = 0;
        mutex_unlock(&uuid_mutex);
        return 0;
 }
@@ -191,35 +331,77 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
        struct list_head *head = &fs_devices->devices;
        struct list_head *cur;
        struct btrfs_device *device;
-       int ret;
+       struct block_device *latest_bdev = NULL;
+       struct buffer_head *bh;
+       struct btrfs_super_block *disk_super;
+       u64 latest_devid = 0;
+       u64 latest_transid = 0;
+       u64 transid;
+       u64 devid;
+       int ret = 0;
 
        mutex_lock(&uuid_mutex);
+       if (fs_devices->mounted)
+               goto out;
+
        list_for_each(cur, head) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (device->bdev)
                        continue;
 
+               if (!device->name)
+                       continue;
+
                bdev = open_bdev_excl(device->name, flags, holder);
 
                if (IS_ERR(bdev)) {
                        printk("open %s failed\n", device->name);
-                       ret = PTR_ERR(bdev);
-                       goto fail;
+                       goto error;
                }
                set_blocksize(bdev, 4096);
-               if (device->devid == fs_devices->latest_devid)
-                       fs_devices->latest_bdev = bdev;
-               if (device->devid == fs_devices->lowest_devid) {
-                       fs_devices->lowest_bdev = bdev;
+
+               bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+               if (!bh)
+                       goto error_close;
+
+               disk_super = (struct btrfs_super_block *)bh->b_data;
+               if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
+                   sizeof(disk_super->magic)))
+                       goto error_brelse;
+
+               devid = le64_to_cpu(disk_super->dev_item.devid);
+               if (devid != device->devid)
+                       goto error_brelse;
+
+               transid = btrfs_super_generation(disk_super);
+               if (!latest_transid || transid > latest_transid) {
+                       latest_devid = devid;
+                       latest_transid = transid;
+                       latest_bdev = bdev;
                }
+
                device->bdev = bdev;
+               device->in_fs_metadata = 0;
+               fs_devices->open_devices++;
+               continue;
 
+error_brelse:
+               brelse(bh);
+error_close:
+               close_bdev_excl(bdev);
+error:
+               continue;
        }
+       if (fs_devices->open_devices == 0) {
+               ret = -EIO;
+               goto out;
+       }
+       fs_devices->mounted = 1;
+       fs_devices->latest_bdev = latest_bdev;
+       fs_devices->latest_devid = latest_devid;
+       fs_devices->latest_trans = latest_transid;
+out:
        mutex_unlock(&uuid_mutex);
-       return 0;
-fail:
-       mutex_unlock(&uuid_mutex);
-       btrfs_close_devices(fs_devices);
        return ret;
 }
 
@@ -439,7 +621,8 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
        }
        BUG_ON(ret);
 
-       device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
+       if (device->bytes_used > 0)
+               device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);
 
@@ -460,6 +643,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
        struct btrfs_key key;
 
+       WARN_ON(!device->in_fs_metadata);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -650,6 +834,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
+       lock_chunks(root);
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
@@ -674,22 +859,17 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
 
        next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
                              dev_list);
-       if (bdev == fs_devices->lowest_bdev)
-               fs_devices->lowest_bdev = next_dev->bdev;
        if (bdev == root->fs_info->sb->s_bdev)
                root->fs_info->sb->s_bdev = next_dev->bdev;
        if (bdev == fs_devices->latest_bdev)
                fs_devices->latest_bdev = next_dev->bdev;
 
-       total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
-       btrfs_set_super_total_bytes(&root->fs_info->super_copy,
-                                   total_bytes - device->total_bytes);
-
        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes - 1);
 out:
        btrfs_free_path(path);
+       unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
 }
@@ -698,62 +878,89 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 {
        struct btrfs_device *device;
        struct block_device *bdev;
-       struct buffer_head *bh;
+       struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        u64 all_avail;
        u64 devid;
        int ret = 0;
 
-       mutex_lock(&root->fs_info->fs_mutex);
        mutex_lock(&uuid_mutex);
+       mutex_lock(&root->fs_info->volume_mutex);
 
        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
                root->fs_info->avail_metadata_alloc_bits;
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
-           root->fs_info->fs_devices->num_devices <= 4) {
+           btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
                printk("btrfs: unable to go below four devices on raid10\n");
                ret = -EINVAL;
                goto out;
        }
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
-           root->fs_info->fs_devices->num_devices <= 2) {
+           btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
                printk("btrfs: unable to go below two devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }
 
-       bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
-       if (IS_ERR(bdev)) {
-               ret = PTR_ERR(bdev);
-               goto out;
-       }
+       if (strcmp(device_path, "missing") == 0) {
+               struct list_head *cur;
+               struct list_head *devices;
+               struct btrfs_device *tmp;
 
-       bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
-       if (!bh) {
-               ret = -EIO;
-               goto error_close;
-       }
-       disk_super = (struct btrfs_super_block *)bh->b_data;
-       if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
-           sizeof(disk_super->magic))) {
-               ret = -ENOENT;
-               goto error_brelse;
-       }
-       if (memcmp(disk_super->fsid, root->fs_info->fsid, BTRFS_FSID_SIZE)) {
-               ret = -ENOENT;
-               goto error_brelse;
-       }
-       devid = le64_to_cpu(disk_super->dev_item.devid);
-       device = btrfs_find_device(root, devid, NULL);
-       if (!device) {
-               ret = -ENOENT;
-               goto error_brelse;
-       }
+               device = NULL;
+               devices = &root->fs_info->fs_devices->devices;
+               list_for_each(cur, devices) {
+                       tmp = list_entry(cur, struct btrfs_device, dev_list);
+                       if (tmp->in_fs_metadata && !tmp->bdev) {
+                               device = tmp;
+                               break;
+                       }
+               }
+               bdev = NULL;
+               bh = NULL;
+               disk_super = NULL;
+               if (!device) {
+                       printk("btrfs: no missing devices found to remove\n");
+                       goto out;
+               }
+
+       } else {
+               bdev = open_bdev_excl(device_path, 0,
+                                     root->fs_info->bdev_holder);
+               if (IS_ERR(bdev)) {
+                       ret = PTR_ERR(bdev);
+                       goto out;
+               }
+
+               bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+               if (!bh) {
+                       ret = -EIO;
+                       goto error_close;
+               }
+               disk_super = (struct btrfs_super_block *)bh->b_data;
+               if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
+                   sizeof(disk_super->magic))) {
+                       ret = -ENOENT;
+                       goto error_brelse;
+               }
+               if (memcmp(disk_super->fsid, root->fs_info->fsid,
+                          BTRFS_FSID_SIZE)) {
+                       ret = -ENOENT;
+                       goto error_brelse;
+               }
+               devid = le64_to_cpu(disk_super->dev_item.devid);
+               device = btrfs_find_device(root, devid, NULL);
+               if (!device) {
+                       ret = -ENOENT;
+                       goto error_brelse;
+               }
 
+       }
        root->fs_info->fs_devices->num_devices--;
+       root->fs_info->fs_devices->open_devices--;
 
        ret = btrfs_shrink_device(device, 0);
        if (ret)
@@ -764,19 +971,25 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        if (ret)
                goto error_brelse;
 
-       /* make sure this device isn't detected as part of the FS anymore */
-       memset(&disk_super->magic, 0, sizeof(disk_super->magic));
-       set_buffer_dirty(bh);
-       sync_dirty_buffer(bh);
-
-       brelse(bh);
-
-       /* one close for the device struct or super_block */
-       close_bdev_excl(device->bdev);
+       if (bh) {
+               /* make sure this device isn't detected as part of
+                * the FS anymore
+                */
+               memset(&disk_super->magic, 0, sizeof(disk_super->magic));
+               set_buffer_dirty(bh);
+               sync_dirty_buffer(bh);
 
-       /* one close for us */
-       close_bdev_excl(device->bdev);
+               brelse(bh);
+       }
 
+       if (device->bdev) {
+               /* one close for the device struct or super_block */
+               close_bdev_excl(device->bdev);
+       }
+       if (bdev) {
+               /* one close for us */
+               close_bdev_excl(bdev);
+       }
        kfree(device->name);
        kfree(device);
        ret = 0;
@@ -785,10 +998,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 error_brelse:
        brelse(bh);
 error_close:
-       close_bdev_excl(bdev);
+       if (bdev)
+               close_bdev_excl(bdev);
 out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
-       mutex_unlock(&root->fs_info->fs_mutex);
        return ret;
 }
 
@@ -807,8 +1021,11 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        if (!bdev) {
                return -EIO;
        }
-       mutex_lock(&root->fs_info->fs_mutex);
+
+       mutex_lock(&root->fs_info->volume_mutex);
+
        trans = btrfs_start_transaction(root, 1);
+       lock_chunks(root);
        devices = &root->fs_info->fs_devices->devices;
        list_for_each(cur, devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
@@ -826,6 +1043,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        }
 
        device->barriers = 1;
+       device->work.func = pending_bios_fn;
        generate_random_uuid(device->uuid);
        spin_lock_init(&device->io_lock);
        device->name = kstrdup(device_path, GFP_NOFS);
@@ -839,6 +1057,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        device->total_bytes = i_size_read(bdev->bd_inode);
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
+       device->in_fs_metadata = 1;
 
        ret = btrfs_add_device(trans, root, device);
        if (ret)
@@ -856,9 +1075,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
        root->fs_info->fs_devices->num_devices++;
+       root->fs_info->fs_devices->open_devices++;
 out:
+       unlock_chunks(root);
        btrfs_end_transaction(trans, root);
-       mutex_unlock(&root->fs_info->fs_mutex);
+       mutex_unlock(&root->fs_info->volume_mutex);
+
        return ret;
 
 out_close_bdev:
@@ -912,7 +1134,7 @@ out:
        return ret;
 }
 
-int btrfs_grow_device(struct btrfs_trans_handle *trans,
+static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
 {
        struct btrfs_super_block *super_copy =
@@ -924,6 +1146,16 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
        return btrfs_update_device(trans, device);
 }
 
+/* public wrapper: take the chunk mutexes around __btrfs_grow_device() */
+int btrfs_grow_device(struct btrfs_trans_handle *trans,
+                     struct btrfs_device *device, u64 new_size)
+{
+       int ret;
+       lock_chunks(device->dev_root);
+       ret = __btrfs_grow_device(trans, device, new_size);
+       unlock_chunks(device->dev_root);
+       return ret;
+}
+
 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64 chunk_tree, u64 chunk_objectid,
@@ -1011,6 +1243,8 @@ int btrfs_relocate_chunk(struct btrfs_root *root,
        int ret;
        int i;
 
+       printk("btrfs relocating chunk %llu\n",
+              (unsigned long long)chunk_offset);
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;
@@ -1022,6 +1256,8 @@ int btrfs_relocate_chunk(struct btrfs_root *root,
        trans = btrfs_start_transaction(root, 1);
        BUG_ON(!trans);
 
+       lock_chunks(root);
+
        /*
         * step two, delete the device extents and the
         * chunk tree entries
@@ -1039,8 +1275,10 @@ int btrfs_relocate_chunk(struct btrfs_root *root,
                                            map->stripes[i].physical);
                BUG_ON(ret);
 
-               ret = btrfs_update_device(trans, map->stripes[i].dev);
-               BUG_ON(ret);
+               if (map->stripes[i].dev) {
+                       ret = btrfs_update_device(trans, map->stripes[i].dev);
+                       BUG_ON(ret);
+               }
        }
        ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
                               chunk_offset);
@@ -1064,6 +1302,7 @@ int btrfs_relocate_chunk(struct btrfs_root *root,
        /* once for us */
        free_extent_map(em);
 
+       unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        return 0;
 }
@@ -1094,9 +1333,9 @@ int btrfs_balance(struct btrfs_root *dev_root)
        struct btrfs_key found_key;
 
 
+       mutex_lock(&dev_root->fs_info->volume_mutex);
        dev_root = dev_root->fs_info->dev_root;
 
-       mutex_lock(&dev_root->fs_info->fs_mutex);
        /* step one make some room on all the devices */
        list_for_each(cur, devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
@@ -1140,13 +1379,14 @@ int btrfs_balance(struct btrfs_root *dev_root)
 
                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
-               if (ret) {
+               if (ret)
                        break;
-               }
+
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;
+
                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
@@ -1155,17 +1395,17 @@ int btrfs_balance(struct btrfs_root *dev_root)
                if (key.offset == 0)
                        break;
 
+               btrfs_release_path(chunk_root, path);
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                BUG_ON(ret);
-               btrfs_release_path(chunk_root, path);
        }
        ret = 0;
 error:
        btrfs_free_path(path);
-       mutex_unlock(&dev_root->fs_info->fs_mutex);
+       mutex_unlock(&dev_root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -1205,14 +1445,18 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
        path->reada = 2;
 
+       lock_chunks(root);
+
        device->total_bytes = new_size;
        ret = btrfs_update_device(trans, device);
        if (ret) {
+               unlock_chunks(root);
                btrfs_end_transaction(trans, root);
                goto done;
        }
        WARN_ON(diff > old_total);
        btrfs_set_super_total_bytes(super_copy, old_total - diff);
+       unlock_chunks(root);
        btrfs_end_transaction(trans, root);
 
        key.objectid = device->devid;
@@ -1341,7 +1585,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                return -ENOSPC;
 
        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
-               num_stripes = btrfs_super_num_devices(&info->super_copy);
+               num_stripes = extent_root->fs_info->fs_devices->open_devices;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_DUP)) {
@@ -1350,13 +1594,13 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
                num_stripes = min_t(u64, 2,
-                                 btrfs_super_num_devices(&info->super_copy));
+                           extent_root->fs_info->fs_devices->open_devices);
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-               num_stripes = btrfs_super_num_devices(&info->super_copy);
+               num_stripes = extent_root->fs_info->fs_devices->open_devices;
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
@@ -1413,10 +1657,13 @@ again:
        while(index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_alloc_list);
 
-               avail = device->total_bytes - device->bytes_used;
+               if (device->total_bytes > device->bytes_used)
+                       avail = device->total_bytes - device->bytes_used;
+               else
+                       avail = 0;
                cur = cur->next;
 
-               if (avail >= min_free) {
+               if (device->in_fs_metadata && avail >= min_free) {
                        u64 ignored_start = 0;
                        ret = find_free_dev_extent(trans, device, path,
                                                   min_free,
@@ -1428,7 +1675,7 @@ again:
                                if (type & BTRFS_BLOCK_GROUP_DUP)
                                        index++;
                        }
-               } else if (avail > max_avail)
+               } else if (device->in_fs_metadata && avail > max_avail)
                        max_avail = avail;
                if (cur == dev_list)
                        break;
@@ -1608,6 +1855,22 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
        return ret;
 }
 
+/*
+ * pick a stripe index whose device still has an open bdev.  Prefer
+ * 'optimal'; otherwise scan stripes [first, first + num).  If none of
+ * the candidates has a bdev, fall back to 'optimal' anyway.
+ */
+static int find_live_mirror(struct map_lookup *map, int first, int num,
+                           int optimal)
+{
+       int i;
+       if (map->stripes[optimal].dev->bdev)
+               return optimal;
+       for (i = first; i < first + num; i++) {
+               if (map->stripes[i].dev->bdev)
+                       return i;
+       }
+       /* we couldn't find one that doesn't fail.  Just return something
+        * and the io error handling code will clean up eventually
+        */
+       return optimal;
+}
+
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_multi_bio **multi_ret,
@@ -1710,8 +1973,11 @@ again:
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
-               else
-                       stripe_index = current->pid % map->num_stripes;
+               else {
+                       stripe_index = find_live_mirror(map, 0,
+                                           map->num_stripes,
+                                           current->pid % map->num_stripes);
+               }
 
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                if (rw & (1 << BIO_RW))
@@ -1729,8 +1995,11 @@ again:
                        num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;
-               else
-                       stripe_index += current->pid % map->sub_stripes;
+               else {
+                       stripe_index = find_live_mirror(map, stripe_index,
+                                             map->sub_stripes, stripe_index +
+                                             current->pid % map->sub_stripes);
+               }
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
@@ -1747,9 +2016,11 @@ again:
                        struct backing_dev_info *bdi;
 
                        device = map->stripes[stripe_index].dev;
-                       bdi = blk_get_backing_dev_info(device->bdev);
-                       if (bdi->unplug_io_fn) {
-                               bdi->unplug_io_fn(bdi, unplug_page);
+                       if (device->bdev) {
+                               bdi = blk_get_backing_dev_info(device->bdev);
+                               if (bdi->unplug_io_fn) {
+                                       bdi->unplug_io_fn(bdi, unplug_page);
+                               }
                        }
                } else {
                        multi->stripes[i].physical =
@@ -1805,14 +2076,19 @@ static int end_bio_multi_stripe(struct bio *bio,
        if (atomic_dec_and_test(&multi->stripes_pending)) {
                bio->bi_private = multi->private;
                bio->bi_end_io = multi->end_io;
-
                /* only send an error to the higher layers if it is
                 * beyond the tolerance of the multi-bio
                 */
-               if (atomic_read(&multi->error) > multi->max_errors)
+               if (atomic_read(&multi->error) > multi->max_errors) {
                        err = -EIO;
-               else
+               } else if (err) {
+                       /*
+                        * this bio is actually up to date, we didn't
+                        * go over the max number of errors
+                        */
+                       set_bit(BIO_UPTODATE, &bio->bi_flags);
                        err = 0;
+               }
                kfree(multi);
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
@@ -1828,8 +2104,62 @@ static int end_bio_multi_stripe(struct bio *bio,
 #endif
 }
 
+struct async_sched {
+       struct bio *bio;
+       int rw;
+       struct btrfs_fs_info *info;
+       struct btrfs_work work;
+};
+
+/*
+ * see run_scheduled_bios for a description of why bios are collected for
+ * async submit.
+ *
+ * This will add one bio to the pending list for a device and make sure
+ * the work struct is scheduled.
+ */
+int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
+                int rw, struct bio *bio)
+{
+       int should_queue = 1;
+
+       /* don't bother with additional async steps for reads, right now */
+       if (!(rw & (1 << BIO_RW))) {
+               submit_bio(rw, bio);
+               return 0;
+       }
+
+       /*
+        * nr_async_submits allows us to reliably return congestion to the
+        * higher layers.  Otherwise, the async bio makes it appear we have
+        * made progress against dirty pages when we've really just put it
+        * on a queue for later
+        */
+       atomic_inc(&root->fs_info->nr_async_submits);
+       bio->bi_next = NULL;
+       bio->bi_rw |= rw;
+
+       spin_lock(&device->io_lock);
+
+       if (device->pending_bio_tail)
+               device->pending_bio_tail->bi_next = bio;
+
+       device->pending_bio_tail = bio;
+       if (!device->pending_bios)
+               device->pending_bios = bio;
+       if (device->running_pending)
+               should_queue = 0;
+
+       spin_unlock(&device->io_lock);
+
+       if (should_queue)
+               btrfs_queue_worker(&root->fs_info->submit_workers,
+                                  &device->work);
+       return 0;
+}
+
 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
-                 int mirror_num)
+                 int mirror_num, int async_submit)
 {
        struct btrfs_mapping_tree *map_tree;
        struct btrfs_device *dev;
@@ -1873,12 +2203,21 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                }
                bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
                dev = multi->stripes[dev_nr].dev;
-
-               bio->bi_bdev = dev->bdev;
-               spin_lock(&dev->io_lock);
-               dev->total_ios++;
-               spin_unlock(&dev->io_lock);
-               submit_bio(rw, bio);
+               if (dev && dev->bdev) {
+                       bio->bi_bdev = dev->bdev;
+                       if (async_submit)
+                               schedule_bio(root, dev, rw, bio);
+                       else
+                               submit_bio(rw, bio);
+               } else {
+                       bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
+                       bio->bi_sector = logical >> 9;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
+                       bio_endio(bio, bio->bi_size, -EIO);
+#else
+                       bio_endio(bio, -EIO);
+#endif
+               }
                dev_nr++;
        }
        if (total_devs == 1)
@@ -1894,6 +2233,31 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
        return __find_device(head, devid, uuid);
 }
 
+/* placeholder device for a devid seen in metadata but missing at mount */
+static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
+                                           u64 devid, u8 *dev_uuid)
+{
+       struct btrfs_device *device;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+
+       device = kzalloc(sizeof(*device), GFP_NOFS);
+       if (!device)
+               return NULL;
+       list_add(&device->dev_list,
+                &fs_devices->devices);
+       list_add(&device->dev_alloc_list,
+                &fs_devices->alloc_list);
+       device->barriers = 1;
+       device->dev_root = root->fs_info->dev_root;
+       device->devid = devid;
+       device->work.func = pending_bios_fn;
+       fs_devices->num_devices++;
+       spin_lock_init(&device->io_lock);
+       memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
+       return device;
+}
+
+
 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk)
@@ -1958,11 +2319,22 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
                map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
-               if (!map->stripes[i].dev) {
+
+               if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
                        kfree(map);
                        free_extent_map(em);
                        return -EIO;
                }
+               if (!map->stripes[i].dev) {
+                       map->stripes[i].dev =
+                               add_missing_dev(root, devid, uuid);
+                       if (!map->stripes[i].dev) {
+                               kfree(map);
+                               free_extent_map(em);
+                               return -EIO;
+                       }
+               }
+               map->stripes[i].dev->in_fs_metadata = 1;
        }
 
        spin_lock(&map_tree->map_tree.lock);
@@ -2009,20 +2381,15 @@ static int read_one_dev(struct btrfs_root *root,
                           BTRFS_UUID_SIZE);
        device = btrfs_find_device(root, devid, dev_uuid);
        if (!device) {
-               printk("warning devid %Lu not found already\n", devid);
-               device = kzalloc(sizeof(*device), GFP_NOFS);
+               printk("warning devid %Lu missing\n", devid);
+               device = add_missing_dev(root, devid, dev_uuid);
                if (!device)
                        return -ENOMEM;
-               list_add(&device->dev_list,
-                        &root->fs_info->fs_devices->devices);
-               list_add(&device->dev_alloc_list,
-                        &root->fs_info->fs_devices->alloc_list);
-               device->barriers = 1;
-               spin_lock_init(&device->io_lock);
        }
 
        fill_device_from_item(leaf, dev_item, device);
        device->dev_root = root->fs_info->dev_root;
+       device->in_fs_metadata = 1;
        ret = 0;
 #if 0
        ret = btrfs_open_device(device);