#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
+#include <linux/version.h>
#include <asm/div64.h>
+#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
-
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
(sizeof(struct btrfs_bio_stripe) * (n)))
mutex_unlock(&root->fs_info->chunk_mutex);
}
+/*
+ * Tear down a btrfs_fs_devices and every btrfs_device hanging off its
+ * ->devices list, freeing each device's name string and the device
+ * itself, then the fs_devices container.
+ *
+ * Callers must have already closed the underlying block devices: the
+ * WARN_ON fires if ->opened is still non-zero.
+ */
+static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
+{
+	struct btrfs_device *device;
+	WARN_ON(fs_devices->opened);
+	while (!list_empty(&fs_devices->devices)) {
+		device = list_entry(fs_devices->devices.next,
+				    struct btrfs_device, dev_list);
+		/* unlink before freeing so the list head stays valid */
+		list_del(&device->dev_list);
+		kfree(device->name);
+		kfree(device);
+	}
+	kfree(fs_devices);
+}
+
int btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
- struct btrfs_device *dev;
while (!list_empty(&fs_uuids)) {
fs_devices = list_entry(fs_uuids.next,
struct btrfs_fs_devices, list);
list_del(&fs_devices->list);
- while(!list_empty(&fs_devices->devices)) {
- dev = list_entry(fs_devices->devices.next,
- struct btrfs_device, dev_list);
- if (dev->bdev) {
- close_bdev_excl(dev->bdev);
- fs_devices->open_devices--;
- }
- fs_devices->num_devices--;
- if (dev->writeable)
- fs_devices->rw_devices--;
- list_del(&dev->dev_list);
- list_del(&dev->dev_alloc_list);
- kfree(dev->name);
- kfree(dev);
- }
- WARN_ON(fs_devices->num_devices);
- WARN_ON(fs_devices->open_devices);
- WARN_ON(fs_devices->rw_devices);
- kfree(fs_devices);
+ free_fs_devices(fs_devices);
}
return 0;
}
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
-static int noinline run_scheduled_bios(struct btrfs_device *device)
+static noinline int run_scheduled_bios(struct btrfs_device *device)
{
struct bio *pending;
struct backing_dev_info *bdi;
}
spin_unlock(&device->io_lock);
- while(pending) {
+ while (pending) {
cur = pending;
pending = pending->bi_next;
cur->bi_next = NULL;
return 0;
}
-void pending_bios_fn(struct btrfs_work *work)
+static void pending_bios_fn(struct btrfs_work *work)
{
struct btrfs_device *device;
return 0;
}
+/*
+ * Deep-copy a btrfs_fs_devices: allocate a fresh container carrying the
+ * same fsid/latest_devid/latest_trans and duplicate every device on
+ * orig->devices (devid, uuid, name string).  The clones start closed
+ * (no bdev, opened == 0).  Returns the copy, or ERR_PTR(-ENOMEM); on
+ * failure everything already cloned is released via free_fs_devices().
+ */
+static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
+{
+	struct btrfs_fs_devices *fs_devices;
+	struct btrfs_device *device;
+	struct btrfs_device *orig_dev;
+
+	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+	if (!fs_devices)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&fs_devices->devices);
+	INIT_LIST_HEAD(&fs_devices->alloc_list);
+	INIT_LIST_HEAD(&fs_devices->list);
+	fs_devices->latest_devid = orig->latest_devid;
+	fs_devices->latest_trans = orig->latest_trans;
+	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
+
+	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
+		device = kzalloc(sizeof(*device), GFP_NOFS);
+		if (!device)
+			goto error;
+
+		device->name = kstrdup(orig_dev->name, GFP_NOFS);
+		if (!device->name) {
+			/*
+			 * not yet on fs_devices->devices, so
+			 * free_fs_devices() below would leak it
+			 */
+			kfree(device);
+			goto error;
+		}
+
+		device->devid = orig_dev->devid;
+		device->work.func = pending_bios_fn;
+		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
+		device->barriers = 1;
+		spin_lock_init(&device->io_lock);
+		INIT_LIST_HEAD(&device->dev_list);
+		INIT_LIST_HEAD(&device->dev_alloc_list);
+
+		list_add(&device->dev_list, &fs_devices->devices);
+		device->fs_devices = fs_devices;
+		fs_devices->num_devices++;
+	}
+	return fs_devices;
+error:
+	free_fs_devices(fs_devices);
+	return ERR_PTR(-ENOMEM);
+}
+
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
struct list_head *tmp;
struct list_head *cur;
struct btrfs_device *device;
- int seed_devices = 0;
mutex_lock(&uuid_mutex);
again:
continue;
if (device->bdev) {
- close_bdev_excl(device->bdev);
+ close_bdev_exclusive(device->bdev, device->mode);
device->bdev = NULL;
fs_devices->open_devices--;
}
device->writeable = 0;
fs_devices->rw_devices--;
}
- if (!seed_devices) {
- list_del_init(&device->dev_list);
- fs_devices->num_devices--;
- kfree(device->name);
- kfree(device);
- }
+ list_del_init(&device->dev_list);
+ fs_devices->num_devices--;
+ kfree(device->name);
+ kfree(device);
}
if (fs_devices->seed) {
fs_devices = fs_devices->seed;
- seed_devices = 1;
goto again;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
- struct btrfs_fs_devices *seed_devices;
struct list_head *cur;
struct btrfs_device *device;
-again:
+
if (--fs_devices->opened > 0)
return 0;
list_for_each(cur, &fs_devices->devices) {
device = list_entry(cur, struct btrfs_device, dev_list);
if (device->bdev) {
- close_bdev_excl(device->bdev);
+ close_bdev_exclusive(device->bdev, device->mode);
fs_devices->open_devices--;
}
if (device->writeable) {
device->writeable = 0;
device->in_fs_metadata = 0;
}
+ WARN_ON(fs_devices->open_devices);
+ WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
fs_devices->seeding = 0;
- fs_devices->sprouted = 0;
- seed_devices = fs_devices->seed;
- fs_devices->seed = NULL;
- if (seed_devices) {
- fs_devices = seed_devices;
- goto again;
- }
return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
+ struct btrfs_fs_devices *seed_devices = NULL;
int ret;
mutex_lock(&uuid_mutex);
ret = __btrfs_close_devices(fs_devices);
+ if (!fs_devices->opened) {
+ seed_devices = fs_devices->seed;
+ fs_devices->seed = NULL;
+ }
mutex_unlock(&uuid_mutex);
+
+ while (seed_devices) {
+ fs_devices = seed_devices;
+ seed_devices = fs_devices->seed;
+ __btrfs_close_devices(fs_devices);
+ free_fs_devices(fs_devices);
+ }
return ret;
}
-int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, void *holder)
+static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ fmode_t flags, void *holder)
{
struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
if (!device->name)
continue;
- bdev = open_bdev_excl(device->name, MS_RDONLY, holder);
+ bdev = open_bdev_exclusive(device->name, flags, holder);
if (IS_ERR(bdev)) {
- printk("open %s failed\n", device->name);
+ printk(KERN_INFO "open %s failed\n", device->name);
goto error;
}
set_blocksize(bdev, 4096);
- bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+ bh = btrfs_read_dev_super(bdev);
if (!bh)
goto error_close;
disk_super = (struct btrfs_super_block *)bh->b_data;
- if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
- sizeof(disk_super->magic)))
- goto error_brelse;
-
devid = le64_to_cpu(disk_super->dev_item.devid);
if (devid != device->devid)
goto error_brelse;
device->bdev = bdev;
device->in_fs_metadata = 0;
+ device->mode = flags;
+
fs_devices->open_devices++;
if (device->writeable) {
fs_devices->rw_devices++;
error_brelse:
brelse(bh);
error_close:
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, FMODE_READ);
error:
continue;
}
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
- int flags, void *holder)
+ fmode_t flags, void *holder)
{
int ret;
mutex_lock(&uuid_mutex);
if (fs_devices->opened) {
- if (fs_devices->sprouted) {
- ret = -EBUSY;
- } else {
- fs_devices->opened++;
- ret = 0;
- }
+ fs_devices->opened++;
+ ret = 0;
} else {
- ret = __btrfs_open_devices(fs_devices, holder);
+ ret = __btrfs_open_devices(fs_devices, flags, holder);
}
mutex_unlock(&uuid_mutex);
return ret;
}
-int btrfs_scan_one_device(const char *path, int flags, void *holder,
+int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_super_block *disk_super;
mutex_lock(&uuid_mutex);
- bdev = open_bdev_excl(path, flags, holder);
+ bdev = open_bdev_exclusive(path, flags, holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
ret = set_blocksize(bdev, 4096);
if (ret)
goto error_close;
- bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+ bh = btrfs_read_dev_super(bdev);
if (!bh) {
ret = -EIO;
goto error_close;
}
disk_super = (struct btrfs_super_block *)bh->b_data;
- if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
- sizeof(disk_super->magic))) {
- ret = -EINVAL;
- goto error_brelse;
- }
devid = le64_to_cpu(disk_super->dev_item.devid);
transid = btrfs_super_generation(disk_super);
if (disk_super->label[0])
- printk("device label %s ", disk_super->label);
+ printk(KERN_INFO "device label %s ", disk_super->label);
else {
/* FIXME, make a readl uuid parser */
- printk("device fsid %llx-%llx ",
+ printk(KERN_INFO "device fsid %llx-%llx ",
*(unsigned long long *)disk_super->fsid,
*(unsigned long long *)(disk_super->fsid + 8));
}
- printk("devid %Lu transid %Lu %s\n", devid, transid, path);
+ printk(KERN_INFO "devid %llu transid %llu %s\n",
+ (unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
-error_brelse:
brelse(bh);
error_close:
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, flags);
error:
mutex_unlock(&uuid_mutex);
return ret;
goto check_pending;
}
}
- if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
+ if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
goto next;
- }
start_found = 1;
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
return ret;
}
-int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
+static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start)
{
btrfs_set_device_group(leaf, dev_item, 0);
btrfs_set_device_seek_speed(leaf, dev_item, 0);
btrfs_set_device_bandwidth(leaf, dev_item, 0);
+ btrfs_set_device_start_offset(leaf, dev_item, 0);
ptr = (unsigned long)btrfs_device_uuid(dev_item);
write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
root->fs_info->fs_devices->rw_devices <= 4) {
- printk("btrfs: unable to go below four devices on raid10\n");
+ printk(KERN_ERR "btrfs: unable to go below four devices "
+ "on raid10\n");
ret = -EINVAL;
goto out;
}
if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
root->fs_info->fs_devices->rw_devices <= 2) {
- printk("btrfs: unable to go below two devices on raid1\n");
+ printk(KERN_ERR "btrfs: unable to go below two "
+ "devices on raid1\n");
ret = -EINVAL;
goto out;
}
bh = NULL;
disk_super = NULL;
if (!device) {
- printk("btrfs: no missing devices found to remove\n");
+ printk(KERN_ERR "btrfs: no missing devices found to "
+ "remove\n");
goto out;
}
} else {
- bdev = open_bdev_excl(device_path, MS_RDONLY,
+ bdev = open_bdev_exclusive(device_path, FMODE_READ,
root->fs_info->bdev_holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
}
set_blocksize(bdev, 4096);
- bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+ bh = btrfs_read_dev_super(bdev);
if (!bh) {
ret = -EIO;
goto error_close;
}
disk_super = (struct btrfs_super_block *)bh->b_data;
- if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
- sizeof(disk_super->magic))) {
- ret = -ENOENT;
- goto error_brelse;
- }
devid = le64_to_cpu(disk_super->dev_item.devid);
dev_uuid = disk_super->dev_item.uuid;
device = btrfs_find_device(root, devid, dev_uuid,
}
if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
- printk("btrfs: unable to remove the only writeable device\n");
+ printk(KERN_ERR "btrfs: unable to remove the only writeable "
+ "device\n");
ret = -EINVAL;
goto error_brelse;
}
goto error_brelse;
device->in_fs_metadata = 0;
- if (device->fs_devices == root->fs_info->fs_devices) {
- list_del_init(&device->dev_list);
- root->fs_info->fs_devices->num_devices--;
- if (device->bdev)
- device->fs_devices->open_devices--;
- }
+ list_del_init(&device->dev_list);
+ device->fs_devices->num_devices--;
next_device = list_entry(root->fs_info->fs_devices->devices.next,
struct btrfs_device, dev_list);
if (device->bdev == root->fs_info->fs_devices->latest_bdev)
root->fs_info->fs_devices->latest_bdev = next_device->bdev;
+ if (device->bdev) {
+ close_bdev_exclusive(device->bdev, device->mode);
+ device->bdev = NULL;
+ device->fs_devices->open_devices--;
+ }
+
num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
- if (device->fs_devices != root->fs_info->fs_devices) {
- BUG_ON(device->writeable);
- brelse(bh);
- if (bdev)
- close_bdev_excl(bdev);
-
- if (device->bdev) {
- close_bdev_excl(device->bdev);
- device->bdev = NULL;
- device->fs_devices->open_devices--;
- }
- if (device->fs_devices->open_devices == 0) {
- struct btrfs_fs_devices *fs_devices;
- fs_devices = root->fs_info->fs_devices;
- while (fs_devices) {
- if (fs_devices->seed == device->fs_devices)
- break;
- fs_devices = fs_devices->seed;
- }
- fs_devices->seed = device->fs_devices->seed;
- device->fs_devices->seed = NULL;
- __btrfs_close_devices(device->fs_devices);
+ if (device->fs_devices->open_devices == 0) {
+ struct btrfs_fs_devices *fs_devices;
+ fs_devices = root->fs_info->fs_devices;
+ while (fs_devices) {
+ if (fs_devices->seed == device->fs_devices)
+ break;
+ fs_devices = fs_devices->seed;
}
- ret = 0;
- goto out;
+ fs_devices->seed = device->fs_devices->seed;
+ device->fs_devices->seed = NULL;
+ __btrfs_close_devices(device->fs_devices);
+ free_fs_devices(device->fs_devices);
}
/*
set_buffer_dirty(bh);
sync_dirty_buffer(bh);
}
- brelse(bh);
- if (device->bdev) {
- /* one close for the device struct or super_block */
- close_bdev_excl(device->bdev);
- }
- if (bdev) {
- /* one close for us */
- close_bdev_excl(bdev);
- }
kfree(device->name);
kfree(device);
ret = 0;
- goto out;
error_brelse:
brelse(bh);
error_close:
if (bdev)
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, FMODE_READ);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
{
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
+ struct btrfs_fs_devices *seed_devices;
struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
struct btrfs_device *device;
u64 super_flags;
BUG_ON(!mutex_is_locked(&uuid_mutex));
- if (!fs_devices->seeding || fs_devices->opened != 1)
+ if (!fs_devices->seeding)
return -EINVAL;
- old_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
- if (!old_devices)
+ seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+ if (!seed_devices)
return -ENOMEM;
- memcpy(old_devices, fs_devices, sizeof(*old_devices));
- old_devices->opened = 1;
- old_devices->sprouted = 1;
- INIT_LIST_HEAD(&old_devices->devices);
- INIT_LIST_HEAD(&old_devices->alloc_list);
- list_splice_init(&fs_devices->devices, &old_devices->devices);
- list_splice_init(&fs_devices->alloc_list, &old_devices->alloc_list);
- list_for_each_entry(device, &old_devices->devices, dev_list) {
- device->fs_devices = old_devices;
+ old_devices = clone_fs_devices(fs_devices);
+ if (IS_ERR(old_devices)) {
+ kfree(seed_devices);
+ return PTR_ERR(old_devices);
}
+
list_add(&old_devices->list, &fs_uuids);
+ memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
+ seed_devices->opened = 1;
+ INIT_LIST_HEAD(&seed_devices->devices);
+ INIT_LIST_HEAD(&seed_devices->alloc_list);
+ list_splice_init(&fs_devices->devices, &seed_devices->devices);
+ list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
+ list_for_each_entry(device, &seed_devices->devices, dev_list) {
+ device->fs_devices = seed_devices;
+ }
+
fs_devices->seeding = 0;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
- fs_devices->seed = old_devices;
+ fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
- bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
- if (!bdev) {
+ bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
+ if (!bdev)
return -EIO;
- }
if (root->fs_info->fs_devices->seeding) {
seeding_dev = 1;
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
+ device->mode = 0;
set_blocksize(device->bdev, 4096);
if (seeding_dev) {
mutex_unlock(&root->fs_info->volume_mutex);
return ret;
error:
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, 0);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
goto out;
}
-int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
- struct btrfs_device *device)
+static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
return 0;
}
-int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
+static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
chunk_offset)
{
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
return ret;
}
-int btrfs_relocate_chunk(struct btrfs_root *root,
+static int btrfs_relocate_chunk(struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset)
{
int ret;
int i;
- printk("btrfs relocating chunk %llu\n",
+ printk(KERN_INFO "btrfs relocating chunk %llu\n",
(unsigned long long)chunk_offset);
root = root->fs_info->chunk_root;
extent_root = root->fs_info->extent_root;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
- while(1) {
+ while (1) {
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0)
goto error;
return ret;
}
-int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
+static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
return 0;
}
-static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
int num_stripes, int sub_stripes)
{
if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
min_free += 1024 * 1024;
INIT_LIST_HEAD(&private_devs);
- while(index < num_stripes) {
+ while (index < num_stripes) {
device = list_entry(cur, struct btrfs_device, dev_alloc_list);
BUG_ON(!device->writeable);
if (device->total_bytes > device->bytes_used)
return 0;
}
-static int noinline init_first_rw_device(struct btrfs_trans_handle *trans,
+static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device)
{
{
struct extent_map *em;
- while(1) {
+ while (1) {
spin_lock(&tree->map_tree.lock);
em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
if (em)
int max_errors = 0;
struct btrfs_multi_bio *multi = NULL;
- if (multi_ret && !(rw & (1 << BIO_RW))) {
+ if (multi_ret && !(rw & (1 << BIO_RW)))
stripes_allocated = 1;
- }
again:
if (multi_ret) {
multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
return 0;
if (!em) {
- printk("unable to find logical %Lu len %Lu\n", logical, *length);
+ printk(KERN_CRIT "unable to find logical %llu len %llu\n",
+ (unsigned long long)logical,
+ (unsigned long long)*length);
BUG();
}
device = map->stripes[stripe_index].dev;
if (device->bdev) {
bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn) {
+ if (bdi->unplug_io_fn)
bdi->unplug_io_fn(bdi, unplug_page);
- }
}
} else {
multi->stripes[i].physical =
mirror_num, NULL);
}
+/*
+ * Reverse-map a physical disk address back to logical chunk addresses.
+ *
+ * For the chunk starting at @chunk_start, collect every logical byte
+ * number whose stripe covers @physical on device @devid (devid == 0
+ * matches any device).  On return @logical points to a kzalloc'ed
+ * array the caller must kfree, @naddrs holds the entry count and
+ * @stripe_len the chunk's stripe length.  Always returns 0; missing
+ * mappings are treated as a fatal inconsistency (BUG_ON).
+ */
+int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+		     u64 chunk_start, u64 physical, u64 devid,
+		     u64 **logical, int *naddrs, int *stripe_len)
+{
+	struct extent_map_tree *em_tree = &map_tree->map_tree;
+	struct extent_map *em;
+	struct map_lookup *map;
+	u64 *buf;
+	u64 bytenr;
+	u64 length;
+	u64 stripe_nr;
+	int i, j, nr = 0;
+
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, chunk_start, 1);
+	spin_unlock(&em_tree->lock);
+
+	BUG_ON(!em || em->start != chunk_start);
+	map = (struct map_lookup *)em->bdev;
+
+	/* number of bytes each individual stripe covers on its device */
+	length = em->len;
+	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
+		do_div(length, map->num_stripes / map->sub_stripes);
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+		do_div(length, map->num_stripes);
+
+	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
+	BUG_ON(!buf);
+
+	for (i = 0; i < map->num_stripes; i++) {
+		if (devid && map->stripes[i].dev->devid != devid)
+			continue;
+		if (map->stripes[i].physical > physical ||
+		    map->stripes[i].physical + length <= physical)
+			continue;
+
+		stripe_nr = physical - map->stripes[i].physical;
+		do_div(stripe_nr, map->stripe_len);
+
+		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+			stripe_nr = stripe_nr * map->num_stripes + i;
+			do_div(stripe_nr, map->sub_stripes);
+		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+			stripe_nr = stripe_nr * map->num_stripes + i;
+		}
+		bytenr = chunk_start + stripe_nr * map->stripe_len;
+		WARN_ON(nr >= map->num_stripes);
+		/*
+		 * mirrored profiles map several physical stripes to one
+		 * logical address; record each bytenr only once
+		 */
+		for (j = 0; j < nr; j++) {
+			if (buf[j] == bytenr)
+				break;
+		}
+		if (j == nr) {
+			WARN_ON(nr >= map->num_stripes);
+			buf[nr++] = bytenr;
+		}
+	}
+
+	/*
+	 * NOTE(review): dropped the forward-map verification loop that
+	 * followed here.  Its condition was "i > nr", which is never
+	 * true (nr >= 0), so it was dead code -- and enabling it would
+	 * trip its BUG_ON spuriously, since the inner scan never
+	 * advanced the stripe pointer and compared against length == 1.
+	 */
+	*logical = buf;
+	*naddrs = nr;
+	*stripe_len = map->stripe_len;
+
+	free_extent_map(em);
+	return 0;
+}
+
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
u64 logical, struct page *page)
{
NULL, 0, page);
}
-
static void end_bio_multi_stripe(struct bio *bio, int err)
{
struct btrfs_multi_bio *multi = bio->bi_private;
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-static int noinline schedule_bio(struct btrfs_root *root,
+static noinline int schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio)
{
total_devs = multi->num_stripes;
if (map_length < length) {
- printk("mapping failed logical %Lu bio len %Lu "
- "len %Lu\n", logical, length, map_length);
+ printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
+ "len %llu\n", (unsigned long long)logical,
+ (unsigned long long)length,
+ (unsigned long long)map_length);
BUG();
}
multi->end_io = first_bio->bi_end_io;
multi->orig_bio = first_bio;
atomic_set(&multi->stripes_pending, multi->num_stripes);
- while(dev_nr < total_devs) {
+ while (dev_nr < total_devs) {
if (total_devs > 1) {
if (dev_nr < total_devs - 1) {
bio = bio_clone(first_bio, GFP_NOFS);
device->dev_root = root->fs_info->dev_root;
device->devid = devid;
device->work.func = pending_bios_fn;
+ device->fs_devices = fs_devices;
fs_devices->num_devices++;
spin_lock_init(&device->io_lock);
+ INIT_LIST_HEAD(&device->dev_alloc_list);
memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
return device;
}
ret = -ENOENT;
goto out;
}
- if (fs_devices->opened) {
- ret = -EBUSY;
+
+ fs_devices = clone_fs_devices(fs_devices);
+ if (IS_ERR(fs_devices)) {
+ ret = PTR_ERR(fs_devices);
goto out;
}
- ret = __btrfs_open_devices(fs_devices, root->fs_info->bdev_holder);
+ ret = __btrfs_open_devices(fs_devices, FMODE_READ,
+ root->fs_info->bdev_holder);
if (ret)
goto out;
if (!fs_devices->seeding) {
__btrfs_close_devices(fs_devices);
+ free_fs_devices(fs_devices);
ret = -EINVAL;
goto out;
}
fs_devices->seed = root->fs_info->fs_devices->seed;
root->fs_info->fs_devices->seed = fs_devices;
- fs_devices->sprouted = 1;
out:
mutex_unlock(&uuid_mutex);
return ret;
struct btrfs_device *device;
u64 devid;
int ret;
- int seed_devices = 0;
u8 fs_uuid[BTRFS_UUID_SIZE];
u8 dev_uuid[BTRFS_UUID_SIZE];
if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
ret = open_seed_devices(root, fs_uuid);
- if (ret)
+ if (ret && !btrfs_test_opt(root, DEGRADED))
return ret;
- seed_devices = 1;
}
device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
if (!device || !device->bdev) {
- if (!btrfs_test_opt(root, DEGRADED) || seed_devices)
+ if (!btrfs_test_opt(root, DEGRADED))
return -EIO;
if (!device) {
- printk("warning devid %Lu missing\n", devid);
+ printk(KERN_WARNING "warning devid %llu missing\n",
+ (unsigned long long)devid);
device = add_missing_dev(root, devid, dev_uuid);
if (!device)
return -ENOMEM;
if (device->writeable)
device->fs_devices->total_rw_bytes += device->total_bytes;
ret = 0;
-#if 0
- ret = btrfs_open_device(device);
- if (ret) {
- kfree(device);
- }
-#endif
return ret;
}
key.type = 0;
again:
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- while(1) {
+ while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {