/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
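/*
 * every btrfs filesystem we have ever scanned is tracked on the global
 * fs_uuids list, one btrfs_fs_devices per filesystem UUID.  uuid_mutex
 * protects the list and the device scanning/opening paths below.
 */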
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
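/*
 * called for each device found while scanning: registers a new fsid,
 * adds a previously unseen device to an existing fs_devices, or just
 * refreshes the recorded path of a device we already track.
 */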
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
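/*
 * make a private copy of an fs_devices and all of its devices.  The
 * clone shares no memory with the original, so one copy can stay on
 * the global fs_uuids list while the other is repurposed (see
 * btrfs_prepare_sprout below).
 */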
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
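/*
 * after the device items have been read out of the chunk tree, drop
 * any scanned devices that turned out not to be part of this
 * filesystem's metadata (stale entries left over from earlier scans).
 */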
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
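/*
 * open every device in the set, read each super block and remember the
 * bdev that holds the most recent generation; devices whose super
 * blocks don't match what we scanned earlier are quietly skipped.
 */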
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handle
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 * 		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find suitable
 * free space.  Otherwise it will store the start position of the max free
 * space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = 1024 * 1024;

	if (root->fs_info->alloc_start + num_bytes <= search_end)
		search_start = max(root->fs_info->alloc_start, search_start);

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	hole_size = search_end - search_start;
	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
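/*
 * insert a dev extent item mapping [start, start + num_bytes) of this
 * device to the chunk identified by the chunk_tree/objectid/offset
 * triplet.
 */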
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
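/*
 * find the logical offset one past the highest existing chunk with the
 * given objectid; used to choose the start of the next chunk.
 */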
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
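/*
 * devids are allocated by finding the highest devid recorded in the
 * chunk root's device items and adding one.
 */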
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
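/*
 * delete the device item for this device from the chunk root.  Runs in
 * its own transaction with the chunk mutex held.
 */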
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
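/*
 * remove a device from a mounted filesystem: shrink it to zero so all
 * of its chunks are relocated, delete its items from the chunk tree,
 * unlink it from the in-memory lists and wipe its superblock so it is
 * not scanned as part of this filesystem again.
 */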
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		blkdev_put(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
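/*
 * backend for adding a device to a mounted filesystem; when the mount
 * is a seed filesystem this also sprouts it into a new writable
 * filesystem with a fresh UUID.
 */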
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
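/*
 * write the in-memory sizes back into this device's item in the chunk
 * tree.  Note the item's total_bytes field tracks disk_total_bytes,
 * the size currently recorded on disk.
 */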
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
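/*
 * system chunks are duplicated in the superblock's sys_chunk_array so
 * the chunk tree itself can be read at mount time; deleting one means
 * rewriting the array without that entry.
 */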
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
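/*
 * relocate everything living in a chunk and then delete the chunk
 * itself: the dev extents, the chunk item, the sys_chunk_array copy
 * (for system chunks), the block group and the cached extent mapping.
 */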
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
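/*
 * div_factor scales num in tenths: factor 1 keeps 10% of num, factor 9
 * keeps 90%, and factor 10 returns num unchanged.
 */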
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
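/*
 * convert the per-stripe size into the logical space a chunk provides:
 * mirrored profiles (RAID1/DUP) expose a single stripe's worth, and
 * RAID10 exposes num_stripes / sub_stripes copies.
 */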
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
/* Used to sort the devices by max_avail (descending sort) */
int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
{
	if (((struct btrfs_device_info *)dev_info1)->max_avail >
	    ((struct btrfs_device_info *)dev_info2)->max_avail)
		return -1;
	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
		 ((struct btrfs_device_info *)dev_info2)->max_avail)
		return 1;
	else
		return 0;
}
static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
				 int *num_stripes, int *min_stripes,
				 int *sub_stripes)
{
	*num_stripes = 1;
	*min_stripes = 1;
	*sub_stripes = 0;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		*num_stripes = fs_devices->rw_devices;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		*num_stripes = 2;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		if (fs_devices->rw_devices < 2)
			return -ENOSPC;
		*num_stripes = 2;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		*num_stripes = fs_devices->rw_devices;
		if (*num_stripes < 4)
			return -ENOSPC;
		*num_stripes &= ~(u32)1;
		*sub_stripes = 2;
		*min_stripes = 4;
	}

	return 0;
}
static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
				    u64 proposed_size, u64 type,
				    int num_stripes, int small_stripe)
{
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = proposed_size;
	u64 max_chunk_size = calc_size;
	int ncopies = 1;

	if (type & (BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_DUP |
		    BTRFS_BLOCK_GROUP_RAID10))
		ncopies = 2;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 256 * 1024 * 1024;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	if (calc_size * num_stripes > max_chunk_size * ncopies) {
		calc_size = max_chunk_size * ncopies;
		do_div(calc_size, num_stripes);
		do_div(calc_size, BTRFS_STRIPE_LEN);
		calc_size *= BTRFS_STRIPE_LEN;
	}

	/* we don't want tiny stripes */
	if (!small_stripe)
		calc_size = max_t(u64, min_stripe_size, calc_size);

	/*
	 * we're about to do_div by the BTRFS_STRIPE_LEN so let's make sure
	 * we end up with something bigger than a stripe
	 */
	calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);

	do_div(calc_size, BTRFS_STRIPE_LEN);
	calc_size *= BTRFS_STRIPE_LEN;

	return calc_size;
}
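/*
 * shrink a map_lookup so it describes only num_stripes stripes; used
 * when fewer devices than planned turned out to have room.
 */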
static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
						      int num_stripes)
{
	struct map_lookup *new;
	size_t len = map_lookup_size(num_stripes);

	BUG_ON(map->num_stripes < num_stripes);

	if (map->num_stripes == num_stripes)
		return map;

	new = kmalloc(len, GFP_NOFS);
	if (!new) {
		/* just change map->num_stripes */
		map->num_stripes = num_stripes;
		return map;
	}

	memcpy(new, map, len);
	new->num_stripes = num_stripes;
	kfree(map);
	return new;
}
/*
 * helper to allocate device space from btrfs_device_info, in which we stored
 * max free space information of every device. It is used when we can not
 * allocate chunks by default size.
 *
 * By this helper, we can allocate a new chunk as large as possible.
 */
static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_devices *fs_devices,
				    struct btrfs_device_info *devices,
				    int nr_device, u64 type,
				    struct map_lookup **map_lookup,
				    int min_stripes, u64 *stripe_size)
{
	int i, index, sort_again = 0;
	int min_devices = min_stripes;
	u64 max_avail, min_free;
	struct map_lookup *map = *map_lookup;
	int ret;

	if (nr_device < min_stripes)
		return -ENOSPC;

	btrfs_descending_sort_devices(devices, nr_device);

	max_avail = devices[0].max_avail;
	if (!max_avail)
		return -ENOSPC;

	for (i = 0; i < nr_device; i++) {
		/*
		 * if dev_offset = 0, it means the free space of this device
		 * is less than what we need, and we didn't search max avail
		 * extent on this device, so do it now.
		 */
		if (!devices[i].dev_offset) {
			ret = find_free_dev_extent(trans, devices[i].dev,
						   max_avail,
						   &devices[i].dev_offset,
						   &devices[i].max_avail);
			if (ret != 0 && ret != -ENOSPC)
				return ret;
			sort_again = 1;
		}
	}

	/* we update the max avail free extent of each devices, sort again */
	if (sort_again)
		btrfs_descending_sort_devices(devices, nr_device);

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_devices = 1;

	if (!devices[min_devices - 1].max_avail)
		return -ENOSPC;

	max_avail = devices[min_devices - 1].max_avail;
	if (type & BTRFS_BLOCK_GROUP_DUP)
		do_div(max_avail, 2);

	max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
					     min_stripes, 1);
	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = max_avail * 2;
	else
		min_free = max_avail;

	if (min_free > devices[min_devices - 1].max_avail)
		return -ENOSPC;

	map = __shrink_map_lookup_stripes(map, min_stripes);
	*stripe_size = max_avail;

	index = 0;
	for (i = 0; i < min_stripes; i++) {
		map->stripes[i].dev = devices[index].dev;
		map->stripes[i].physical = devices[index].dev_offset;
		if (type & BTRFS_BLOCK_GROUP_DUP) {
			i++;
			map->stripes[i].dev = devices[index].dev;
			map->stripes[i].physical = devices[index].dev_offset +
						   max_avail;
		}
		index++;
	}
	*map_lookup = map;

	return 0;
}
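/*
 * __btrfs_alloc_chunk walks the allocation list looking for enough
 * devices with free space, carves a dev extent out of each one and
 * records the logical to physical mapping in the extent map tree.
 */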
2464 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2465 struct btrfs_root *extent_root,
2466 struct map_lookup **map_ret,
2467 u64 *num_bytes, u64 *stripe_size,
2468 u64 start, u64 type)
2470 struct btrfs_fs_info *info = extent_root->fs_info;
2471 struct btrfs_device *device = NULL;
2472 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2473 struct list_head *cur;
2474 struct map_lookup *map;
2475 struct extent_map_tree *em_tree;
2476 struct extent_map *em;
2477 struct btrfs_device_info *devices_info;
2478 struct list_head private_devs;
2479 u64 calc_size = 1024 * 1024 * 1024;
2486 int min_devices; /* the min number of devices we need */
2491 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2492 (type & BTRFS_BLOCK_GROUP_DUP)) {
2494 type &= ~BTRFS_BLOCK_GROUP_DUP;
2496 if (list_empty(&fs_devices->alloc_list))
2499 ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
2500 &min_stripes, &sub_stripes);
2504 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2509 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2514 map->num_stripes = num_stripes;
2516 cur = fs_devices->alloc_list.next;
2520 calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
2523 if (type & BTRFS_BLOCK_GROUP_DUP) {
2524 min_free = calc_size * 2;
2527 min_free = calc_size;
2528 min_devices = min_stripes;
2531 INIT_LIST_HEAD(&private_devs);
2532 while (index < num_stripes) {
2533 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2534 BUG_ON(!device->writeable);
2535 if (device->total_bytes > device->bytes_used)
2536 avail = device->total_bytes - device->bytes_used;
2541 if (device->in_fs_metadata && avail >= min_free) {
2542 ret = find_free_dev_extent(trans, device, min_free,
2543 &devices_info[i].dev_offset,
2544 &devices_info[i].max_avail);
2546 list_move_tail(&device->dev_alloc_list,
2548 map->stripes[index].dev = device;
2549 map->stripes[index].physical =
2550 devices_info[i].dev_offset;
2552 if (type & BTRFS_BLOCK_GROUP_DUP) {
2553 map->stripes[index].dev = device;
2554 map->stripes[index].physical =
2555 devices_info[i].dev_offset +
2559 } else if (ret != -ENOSPC)
2562 devices_info[i].dev = device;
2564 } else if (device->in_fs_metadata &&
2565 avail >= BTRFS_STRIPE_LEN) {
2566 devices_info[i].dev = device;
2567 devices_info[i].max_avail = avail;
2571 if (cur == &fs_devices->alloc_list)
2575 list_splice(&private_devs, &fs_devices->alloc_list);
2576 if (index < num_stripes) {
2577 if (index >= min_stripes) {
2578 num_stripes = index;
2579 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2580 num_stripes /= sub_stripes;
2581 num_stripes *= sub_stripes;
2584 map = __shrink_map_lookup_stripes(map, num_stripes);
2585 } else if (i >= min_devices) {
2586 ret = __btrfs_alloc_tiny_space(trans, fs_devices,
2587 devices_info, i, type,
2597 map->sector_size = extent_root->sectorsize;
2598 map->stripe_len = BTRFS_STRIPE_LEN;
2599 map->io_align = BTRFS_STRIPE_LEN;
2600 map->io_width = BTRFS_STRIPE_LEN;
2602 map->sub_stripes = sub_stripes;
2605 *stripe_size = calc_size;
2606 *num_bytes = chunk_bytes_by_type(type, calc_size,
2607 map->num_stripes, sub_stripes);
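/*
 * Worked example (editorial, assuming the usual chunk_bytes_by_type
 * logic of this era): with calc_size = 1GB,
 *
 *   RAID1/DUP:                          num_bytes = 1GB
 *   RAID0 with 4 stripes:               num_bytes = 4GB (calc_size * num_stripes)
 *   RAID10, 4 stripes, sub_stripes = 2: num_bytes = 2GB (calc_size * num_stripes / sub_stripes)
 */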
2609 trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
2611 em = alloc_extent_map(GFP_NOFS);
2616 em->bdev = (struct block_device *)map;
2618 em->len = *num_bytes;
2619 em->block_start = 0;
2620 em->block_len = em->len;
2622 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2623 write_lock(&em_tree->lock);
2624 ret = add_extent_mapping(em_tree, em);
2625 write_unlock(&em_tree->lock);
2627 free_extent_map(em);
2629 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2630 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2635 while (index < map->num_stripes) {
2636 device = map->stripes[index].dev;
2637 dev_offset = map->stripes[index].physical;
2639 ret = btrfs_alloc_dev_extent(trans, device,
2640 info->chunk_root->root_key.objectid,
2641 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2642 start, dev_offset, calc_size);
2647 kfree(devices_info);
2652 kfree(devices_info);
2656 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2657 struct btrfs_root *extent_root,
2658 struct map_lookup *map, u64 chunk_offset,
2659 u64 chunk_size, u64 stripe_size)
2662 struct btrfs_key key;
2663 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2664 struct btrfs_device *device;
2665 struct btrfs_chunk *chunk;
2666 struct btrfs_stripe *stripe;
2667 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2671 chunk = kzalloc(item_size, GFP_NOFS);
2676 while (index < map->num_stripes) {
2677 device = map->stripes[index].dev;
2678 device->bytes_used += stripe_size;
2679 ret = btrfs_update_device(trans, device);
2685 stripe = &chunk->stripe;
2686 while (index < map->num_stripes) {
2687 device = map->stripes[index].dev;
2688 dev_offset = map->stripes[index].physical;
2690 btrfs_set_stack_stripe_devid(stripe, device->devid);
2691 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2692 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2697 btrfs_set_stack_chunk_length(chunk, chunk_size);
2698 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2699 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2700 btrfs_set_stack_chunk_type(chunk, map->type);
2701 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2702 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2703 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2704 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2705 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2707 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2708 key.type = BTRFS_CHUNK_ITEM_KEY;
2709 key.offset = chunk_offset;
2711 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2714 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2715 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2725 * Chunk allocation falls into two parts. The first part does the work
2726 * that makes the newly allocated chunk usable, but does not perform
2727 * any operation that modifies the chunk tree. The second part does
2728 * the work that requires modifying the chunk tree. This division is
2729 * important for the bootstrap process of adding storage to a seed btrfs.
2731 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2732 struct btrfs_root *extent_root, u64 type)
2737 struct map_lookup *map;
2738 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2741 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2746 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2747 &stripe_size, chunk_offset, type);
2751 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2752 chunk_size, stripe_size);
2757 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2758 struct btrfs_root *root,
2759 struct btrfs_device *device)
2762 u64 sys_chunk_offset;
2766 u64 sys_stripe_size;
2768 struct map_lookup *map;
2769 struct map_lookup *sys_map;
2770 struct btrfs_fs_info *fs_info = root->fs_info;
2771 struct btrfs_root *extent_root = fs_info->extent_root;
2774 ret = find_next_chunk(fs_info->chunk_root,
2775 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2778 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2779 (fs_info->metadata_alloc_profile &
2780 fs_info->avail_metadata_alloc_bits);
2781 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2783 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2784 &stripe_size, chunk_offset, alloc_profile);
2787 sys_chunk_offset = chunk_offset + chunk_size;
2789 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2790 (fs_info->system_alloc_profile &
2791 fs_info->avail_system_alloc_bits);
2792 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2794 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2795 &sys_chunk_size, &sys_stripe_size,
2796 sys_chunk_offset, alloc_profile);
2799 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2803 * Modifying the chunk tree requires allocating new blocks from both
2804 * the system block group and the metadata block group. So we can
2805 * only do operations that modify the chunk tree after both block
2806 * groups have been created.
2808 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2809 chunk_size, stripe_size);
2812 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2813 sys_chunk_offset, sys_chunk_size,
2819 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2821 struct extent_map *em;
2822 struct map_lookup *map;
2823 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2827 read_lock(&map_tree->map_tree.lock);
2828 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2829 read_unlock(&map_tree->map_tree.lock);
2833 if (btrfs_test_opt(root, DEGRADED)) {
2834 free_extent_map(em);
2838 map = (struct map_lookup *)em->bdev;
2839 for (i = 0; i < map->num_stripes; i++) {
2840 if (!map->stripes[i].dev->writeable) {
2845 free_extent_map(em);
2849 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2851 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2854 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2856 struct extent_map *em;
2859 write_lock(&tree->map_tree.lock);
2860 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2862 remove_extent_mapping(&tree->map_tree, em);
2863 write_unlock(&tree->map_tree.lock);
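/* once for us */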
2868 free_extent_map(em);
2869 /* once for the tree */
2870 free_extent_map(em);
2874 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2876 struct extent_map *em;
2877 struct map_lookup *map;
2878 struct extent_map_tree *em_tree = &map_tree->map_tree;
2881 read_lock(&em_tree->lock);
2882 em = lookup_extent_mapping(em_tree, logical, len);
2883 read_unlock(&em_tree->lock);
2886 BUG_ON(em->start > logical || em->start + em->len < logical);
2887 map = (struct map_lookup *)em->bdev;
2888 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2889 ret = map->num_stripes;
2890 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2891 ret = map->sub_stripes;
2894 free_extent_map(em);
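/*
 * Editorial note: RAID1 and DUP report num_stripes copies (typically
 * 2), RAID10 reports sub_stripes copies (typically 2), and everything
 * else (single, RAID0) falls back to 1. Read-repair uses this value
 * to know how many mirror_num alternatives it may try.
 */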
2898 static int find_live_mirror(struct map_lookup *map, int first, int num,
2902 if (map->stripes[optimal].dev->bdev)
2904 for (i = first; i < first + num; i++) {
2905 if (map->stripes[i].dev->bdev)
2908 /* we couldn't find a mirror that hasn't failed. Just return something
2909 * and the I/O error handling code will clean up eventually
2914 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2915 u64 logical, u64 *length,
2916 struct btrfs_multi_bio **multi_ret,
2919 struct extent_map *em;
2920 struct map_lookup *map;
2921 struct extent_map_tree *em_tree = &map_tree->map_tree;
2924 u64 stripe_end_offset;
2928 int stripes_allocated = 8;
2929 int stripes_required = 1;
2934 struct btrfs_multi_bio *multi = NULL;
2936 if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
2937 stripes_allocated = 1;
2940 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2945 atomic_set(&multi->error, 0);
2948 read_lock(&em_tree->lock);
2949 em = lookup_extent_mapping(em_tree, logical, *length);
2950 read_unlock(&em_tree->lock);
2953 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2954 (unsigned long long)logical,
2955 (unsigned long long)*length);
2959 BUG_ON(em->start > logical || em->start + em->len < logical);
2960 map = (struct map_lookup *)em->bdev;
2961 offset = logical - em->start;
2963 if (mirror_num > map->num_stripes)
2966 /* if our multi bio struct is too small, back off and try again */
2967 if (rw & REQ_WRITE) {
2968 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2969 BTRFS_BLOCK_GROUP_DUP)) {
2970 stripes_required = map->num_stripes;
2972 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2973 stripes_required = map->sub_stripes;
2977 if (rw & REQ_DISCARD) {
2978 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2979 BTRFS_BLOCK_GROUP_RAID1 |
2980 BTRFS_BLOCK_GROUP_DUP |
2981 BTRFS_BLOCK_GROUP_RAID10)) {
2982 stripes_required = map->num_stripes;
2985 if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
2986 stripes_allocated < stripes_required) {
2987 stripes_allocated = map->num_stripes;
2988 free_extent_map(em);
2994 * stripe_nr counts the total number of stripes we have to stride
2995 * to get to this block
2997 do_div(stripe_nr, map->stripe_len);
2999 stripe_offset = stripe_nr * map->stripe_len;
3000 BUG_ON(offset < stripe_offset);
3002 /* stripe_offset is the offset of this block in its stripe */
3003 stripe_offset = offset - stripe_offset;
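/*
 * Worked example (editorial): with stripe_len = 64K and an offset of
 * 200K into the chunk:
 *
 *   stripe_nr     = 200K / 64K     = 3    (stripes to stride over)
 *   stripe_offset = 200K - 3 * 64K = 8K   (offset within that stripe)
 */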
3005 if (rw & REQ_DISCARD)
3006 *length = min_t(u64, em->len - offset, *length);
3007 else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3008 BTRFS_BLOCK_GROUP_RAID1 |
3009 BTRFS_BLOCK_GROUP_RAID10 |
3010 BTRFS_BLOCK_GROUP_DUP)) {
3011 /* we limit the length of each bio to what fits in a stripe */
3012 *length = min_t(u64, em->len - offset,
3013 map->stripe_len - stripe_offset);
3015 *length = em->len - offset;
3023 stripe_nr_orig = stripe_nr;
3024 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3025 (~(map->stripe_len - 1));
3026 do_div(stripe_nr_end, map->stripe_len);
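/*
 * Editorial example for the discard path: with stripe_len = 64K,
 * offset = 100K and *length = 200K, the range ends at 300K, so
 * stripe_nr_end = roundup(300K, 64K) / 64K = 5; the discard spans
 * stripes 1 through 4, and stripe_end_offset (320K - 300K = 20K) is
 * the unused tail of the last stripe.
 */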
3027 stripe_end_offset = stripe_nr_end * map->stripe_len -
3029 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3030 if (rw & REQ_DISCARD)
3031 num_stripes = min_t(u64, map->num_stripes,
3032 stripe_nr_end - stripe_nr_orig);
3033 stripe_index = do_div(stripe_nr, map->num_stripes);
3034 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3035 if (rw & (REQ_WRITE | REQ_DISCARD))
3036 num_stripes = map->num_stripes;
3037 else if (mirror_num)
3038 stripe_index = mirror_num - 1;
3040 stripe_index = find_live_mirror(map, 0,
3042 current->pid % map->num_stripes);
3045 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3046 if (rw & (REQ_WRITE | REQ_DISCARD))
3047 num_stripes = map->num_stripes;
3048 else if (mirror_num)
3049 stripe_index = mirror_num - 1;
3051 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3052 int factor = map->num_stripes / map->sub_stripes;
3054 stripe_index = do_div(stripe_nr, factor);
3055 stripe_index *= map->sub_stripes;
3058 num_stripes = map->sub_stripes;
3059 else if (rw & REQ_DISCARD)
3060 num_stripes = min_t(u64, map->sub_stripes *
3061 (stripe_nr_end - stripe_nr_orig),
3063 else if (mirror_num)
3064 stripe_index += mirror_num - 1;
3066 stripe_index = find_live_mirror(map, stripe_index,
3067 map->sub_stripes, stripe_index +
3068 current->pid % map->sub_stripes);
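/*
 * Worked example (editorial): RAID10 with num_stripes = 4 and
 * sub_stripes = 2 gives factor = 2. For stripe_nr = 5:
 *
 *   stripe_index = (5 % factor) * sub_stripes = 2
 *   stripe_nr    = 5 / factor               = 2
 *
 * so the data sits on the second mirror pair (stripes 2 and 3), two
 * full stripes down each device.
 */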
3072 * after this do_div call, stripe_nr is the number of stripes
3073 * on this device we have to walk to find the data, and
3074 * stripe_index is the index of our device in the stripe array
3076 stripe_index = do_div(stripe_nr, map->num_stripes);
3078 BUG_ON(stripe_index >= map->num_stripes);
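/*
 * Editorial example for the default single/RAID0 case: with
 * num_stripes = 4 and stripe_nr = 10, do_div leaves
 * stripe_index = 10 % 4 = 2 and stripe_nr = 10 / 4 = 2, so the block
 * lives at map->stripes[2].physical + stripe_offset +
 * 2 * map->stripe_len on the third device.
 */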
3080 if (rw & REQ_DISCARD) {
3081 for (i = 0; i < num_stripes; i++) {
3082 multi->stripes[i].physical =
3083 map->stripes[stripe_index].physical +
3084 stripe_offset + stripe_nr * map->stripe_len;
3085 multi->stripes[i].dev = map->stripes[stripe_index].dev;
3087 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3089 u32 last_stripe = 0;
3092 div_u64_rem(stripe_nr_end - 1,
3096 for (j = 0; j < map->num_stripes; j++) {
3099 div_u64_rem(stripe_nr_end - 1 - j,
3100 map->num_stripes, &test);
3101 if (test == stripe_index)
3104 stripes = stripe_nr_end - 1 - j;
3105 do_div(stripes, map->num_stripes);
3106 multi->stripes[i].length = map->stripe_len *
3107 (stripes - stripe_nr + 1);
3110 multi->stripes[i].length -=
3114 if (stripe_index == last_stripe)
3115 multi->stripes[i].length -=
3117 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3120 int factor = map->num_stripes /
3122 u32 last_stripe = 0;
3124 div_u64_rem(stripe_nr_end - 1,
3125 factor, &last_stripe);
3126 last_stripe *= map->sub_stripes;
3128 for (j = 0; j < factor; j++) {
3131 div_u64_rem(stripe_nr_end - 1 - j,
3135 stripe_index / map->sub_stripes)
3138 stripes = stripe_nr_end - 1 - j;
3139 do_div(stripes, factor);
3140 multi->stripes[i].length = map->stripe_len *
3141 (stripes - stripe_nr + 1);
3143 if (i < map->sub_stripes) {
3144 multi->stripes[i].length -=
3146 if (i == map->sub_stripes - 1)
3149 if (stripe_index >= last_stripe &&
3150 stripe_index <= (last_stripe +
3151 map->sub_stripes - 1)) {
3152 multi->stripes[i].length -=
3156 multi->stripes[i].length = *length;
3159 if (stripe_index == map->num_stripes) {
3160 /* This could only happen for RAID0/10 */
3166 for (i = 0; i < num_stripes; i++) {
3167 multi->stripes[i].physical =
3168 map->stripes[stripe_index].physical +
3170 stripe_nr * map->stripe_len;
3171 multi->stripes[i].dev =
3172 map->stripes[stripe_index].dev;
3178 multi->num_stripes = num_stripes;
3179 multi->max_errors = max_errors;
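/*
 * Editorial note: max_errors is how many per-stripe I/O failures
 * end_bio_multi_stripe may tolerate before failing the whole
 * multi-bio. A replicated write (RAID1/DUP/RAID10) typically gets
 * max_errors = 1; unreplicated profiles get 0.
 */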
3182 free_extent_map(em);
3186 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3187 u64 logical, u64 *length,
3188 struct btrfs_multi_bio **multi_ret, int mirror_num)
3190 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
3194 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3195 u64 chunk_start, u64 physical, u64 devid,
3196 u64 **logical, int *naddrs, int *stripe_len)
3198 struct extent_map_tree *em_tree = &map_tree->map_tree;
3199 struct extent_map *em;
3200 struct map_lookup *map;
3207 read_lock(&em_tree->lock);
3208 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3209 read_unlock(&em_tree->lock);
3211 BUG_ON(!em || em->start != chunk_start);
3212 map = (struct map_lookup *)em->bdev;
3215 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3216 do_div(length, map->num_stripes / map->sub_stripes);
3217 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3218 do_div(length, map->num_stripes);
3220 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3223 for (i = 0; i < map->num_stripes; i++) {
3224 if (devid && map->stripes[i].dev->devid != devid)
3226 if (map->stripes[i].physical > physical ||
3227 map->stripes[i].physical + length <= physical)
3230 stripe_nr = physical - map->stripes[i].physical;
3231 do_div(stripe_nr, map->stripe_len);
3233 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3234 stripe_nr = stripe_nr * map->num_stripes + i;
3235 do_div(stripe_nr, map->sub_stripes);
3236 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3237 stripe_nr = stripe_nr * map->num_stripes + i;
3239 bytenr = chunk_start + stripe_nr * map->stripe_len;
3240 WARN_ON(nr >= map->num_stripes);
3241 for (j = 0; j < nr; j++) {
3242 if (buf[j] == bytenr)
3246 WARN_ON(nr >= map->num_stripes);
3253 *stripe_len = map->stripe_len;
3255 free_extent_map(em);
3259 static void end_bio_multi_stripe(struct bio *bio, int err)
3261 struct btrfs_multi_bio *multi = bio->bi_private;
3262 int is_orig_bio = 0;
3265 atomic_inc(&multi->error);
3267 if (bio == multi->orig_bio)
3270 if (atomic_dec_and_test(&multi->stripes_pending)) {
3273 bio = multi->orig_bio;
3275 bio->bi_private = multi->private;
3276 bio->bi_end_io = multi->end_io;
3277 /* only send an error to the higher layers if it is
3278 * beyond the tolerance of the multi-bio
3280 if (atomic_read(&multi->error) > multi->max_errors) {
3284 * this bio is actually up to date, we didn't
3285 * go over the max number of errors
3287 set_bit(BIO_UPTODATE, &bio->bi_flags);
3292 bio_endio(bio, err);
3293 } else if (!is_orig_bio) {
3298 struct async_sched {
3301 struct btrfs_fs_info *info;
3302 struct btrfs_work work;
3306 * see run_scheduled_bios for a description of why bios are collected for batching.
3309 * This will add one bio to the pending list for a device and make sure
3310 * the work struct is scheduled.
3312 static noinline int schedule_bio(struct btrfs_root *root,
3313 struct btrfs_device *device,
3314 int rw, struct bio *bio)
3316 int should_queue = 1;
3317 struct btrfs_pending_bios *pending_bios;
3319 /* don't bother with additional async steps for reads, right now */
3320 if (!(rw & REQ_WRITE)) {
3322 submit_bio(rw, bio);
3328 * nr_async_bios allows us to reliably return congestion to the
3329 * higher layers. Otherwise, the async bio makes it appear we have
3330 * made progress against dirty pages when we've really just put it
3331 * on a queue for later
3333 atomic_inc(&root->fs_info->nr_async_bios);
3334 WARN_ON(bio->bi_next);
3335 bio->bi_next = NULL;
3338 spin_lock(&device->io_lock);
3339 if (bio->bi_rw & REQ_SYNC)
3340 pending_bios = &device->pending_sync_bios;
3342 pending_bios = &device->pending_bios;
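/*
 * Editorial note: REQ_SYNC bios get their own pending list so the
 * worker thread can service latency-sensitive synchronous writes
 * ahead of the bulk async writeback queued on pending_bios.
 */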
3344 if (pending_bios->tail)
3345 pending_bios->tail->bi_next = bio;
3347 pending_bios->tail = bio;
3348 if (!pending_bios->head)
3349 pending_bios->head = bio;
3350 if (device->running_pending)
3353 spin_unlock(&device->io_lock);
3356 btrfs_queue_worker(&root->fs_info->submit_workers,
3361 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
3362 int mirror_num, int async_submit)
3364 struct btrfs_mapping_tree *map_tree;
3365 struct btrfs_device *dev;
3366 struct bio *first_bio = bio;
3367 u64 logical = (u64)bio->bi_sector << 9;
3370 struct btrfs_multi_bio *multi = NULL;
3375 length = bio->bi_size;
3376 map_tree = &root->fs_info->mapping_tree;
3377 map_length = length;
3379 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
3383 total_devs = multi->num_stripes;
3384 if (map_length < length) {
3385 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3386 "len %llu\n", (unsigned long long)logical,
3387 (unsigned long long)length,
3388 (unsigned long long)map_length);
3391 multi->end_io = first_bio->bi_end_io;
3392 multi->private = first_bio->bi_private;
3393 multi->orig_bio = first_bio;
3394 atomic_set(&multi->stripes_pending, multi->num_stripes);
3396 while (dev_nr < total_devs) {
3397 if (total_devs > 1) {
3398 if (dev_nr < total_devs - 1) {
3399 bio = bio_clone(first_bio, GFP_NOFS);
3404 bio->bi_private = multi;
3405 bio->bi_end_io = end_bio_multi_stripe;
3407 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
3408 dev = multi->stripes[dev_nr].dev;
3409 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
3410 bio->bi_bdev = dev->bdev;
3412 schedule_bio(root, dev, rw, bio);
3414 submit_bio(rw, bio);
3416 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3417 bio->bi_sector = logical >> 9;
3418 bio_endio(bio, -EIO);
3422 if (total_devs == 1)
3427 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3430 struct btrfs_device *device;
3431 struct btrfs_fs_devices *cur_devices;
3433 cur_devices = root->fs_info->fs_devices;
3434 while (cur_devices) {
3436 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3437 device = __find_device(&cur_devices->devices,
3442 cur_devices = cur_devices->seed;
3447 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3448 u64 devid, u8 *dev_uuid)
3450 struct btrfs_device *device;
3451 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3453 device = kzalloc(sizeof(*device), GFP_NOFS);
3456 list_add(&device->dev_list,
3457 &fs_devices->devices);
3458 device->dev_root = root->fs_info->dev_root;
3459 device->devid = devid;
3460 device->work.func = pending_bios_fn;
3461 device->fs_devices = fs_devices;
3462 device->missing = 1;
3463 fs_devices->num_devices++;
3464 fs_devices->missing_devices++;
3465 spin_lock_init(&device->io_lock);
3466 INIT_LIST_HEAD(&device->dev_alloc_list);
3467 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3471 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3472 struct extent_buffer *leaf,
3473 struct btrfs_chunk *chunk)
3475 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3476 struct map_lookup *map;
3477 struct extent_map *em;
3481 u8 uuid[BTRFS_UUID_SIZE];
3486 logical = key->offset;
3487 length = btrfs_chunk_length(leaf, chunk);
3489 read_lock(&map_tree->map_tree.lock);
3490 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3491 read_unlock(&map_tree->map_tree.lock);
3493 /* already mapped? */
3494 if (em && em->start <= logical && em->start + em->len > logical) {
3495 free_extent_map(em);
3498 free_extent_map(em);
3501 em = alloc_extent_map(GFP_NOFS);
3504 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3505 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3507 free_extent_map(em);
3511 em->bdev = (struct block_device *)map;
3512 em->start = logical;
3514 em->block_start = 0;
3515 em->block_len = em->len;
3517 map->num_stripes = num_stripes;
3518 map->io_width = btrfs_chunk_io_width(leaf, chunk);
3519 map->io_align = btrfs_chunk_io_align(leaf, chunk);
3520 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3521 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3522 map->type = btrfs_chunk_type(leaf, chunk);
3523 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3524 for (i = 0; i < num_stripes; i++) {
3525 map->stripes[i].physical =
3526 btrfs_stripe_offset_nr(leaf, chunk, i);
3527 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3528 read_extent_buffer(leaf, uuid, (unsigned long)
3529 btrfs_stripe_dev_uuid_nr(chunk, i),
3531 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3533 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3535 free_extent_map(em);
3538 if (!map->stripes[i].dev) {
3539 map->stripes[i].dev =
3540 add_missing_dev(root, devid, uuid);
3541 if (!map->stripes[i].dev) {
3543 free_extent_map(em);
3547 map->stripes[i].dev->in_fs_metadata = 1;
3550 write_lock(&map_tree->map_tree.lock);
3551 ret = add_extent_mapping(&map_tree->map_tree, em);
3552 write_unlock(&map_tree->map_tree.lock);
3554 free_extent_map(em);
3559 static int fill_device_from_item(struct extent_buffer *leaf,
3560 struct btrfs_dev_item *dev_item,
3561 struct btrfs_device *device)
3565 device->devid = btrfs_device_id(leaf, dev_item);
3566 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3567 device->total_bytes = device->disk_total_bytes;
3568 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3569 device->type = btrfs_device_type(leaf, dev_item);
3570 device->io_align = btrfs_device_io_align(leaf, dev_item);
3571 device->io_width = btrfs_device_io_width(leaf, dev_item);
3572 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3574 ptr = (unsigned long)btrfs_device_uuid(dev_item);
3575 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3580 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3582 struct btrfs_fs_devices *fs_devices;
3585 mutex_lock(&uuid_mutex);
3587 fs_devices = root->fs_info->fs_devices->seed;
3588 while (fs_devices) {
3589 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3593 fs_devices = fs_devices->seed;
3596 fs_devices = find_fsid(fsid);
3602 fs_devices = clone_fs_devices(fs_devices);
3603 if (IS_ERR(fs_devices)) {
3604 ret = PTR_ERR(fs_devices);
3608 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3609 root->fs_info->bdev_holder);
3613 if (!fs_devices->seeding) {
3614 __btrfs_close_devices(fs_devices);
3615 free_fs_devices(fs_devices);
3620 fs_devices->seed = root->fs_info->fs_devices->seed;
3621 root->fs_info->fs_devices->seed = fs_devices;
3623 mutex_unlock(&uuid_mutex);
3627 static int read_one_dev(struct btrfs_root *root,
3628 struct extent_buffer *leaf,
3629 struct btrfs_dev_item *dev_item)
3631 struct btrfs_device *device;
3634 u8 fs_uuid[BTRFS_UUID_SIZE];
3635 u8 dev_uuid[BTRFS_UUID_SIZE];
3637 devid = btrfs_device_id(leaf, dev_item);
3638 read_extent_buffer(leaf, dev_uuid,
3639 (unsigned long)btrfs_device_uuid(dev_item),
3641 read_extent_buffer(leaf, fs_uuid,
3642 (unsigned long)btrfs_device_fsid(dev_item),
3645 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3646 ret = open_seed_devices(root, fs_uuid);
3647 if (ret && !btrfs_test_opt(root, DEGRADED))
3651 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3652 if (!device || !device->bdev) {
3653 if (!btrfs_test_opt(root, DEGRADED))
3657 printk(KERN_WARNING "warning devid %llu missing\n",
3658 (unsigned long long)devid);
3659 device = add_missing_dev(root, devid, dev_uuid);
3662 } else if (!device->missing) {
3664 * this happens when a device that was properly set up
3665 * in the device info lists suddenly goes bad.
3666 * device->bdev is NULL, and so we have to set
3667 * device->missing to one here
3669 root->fs_info->fs_devices->missing_devices++;
3670 device->missing = 1;
3674 if (device->fs_devices != root->fs_info->fs_devices) {
3675 BUG_ON(device->writeable);
3676 if (device->generation !=
3677 btrfs_device_generation(leaf, dev_item))
3681 fill_device_from_item(leaf, dev_item, device);
3682 device->dev_root = root->fs_info->dev_root;
3683 device->in_fs_metadata = 1;
3684 if (device->writeable)
3685 device->fs_devices->total_rw_bytes += device->total_bytes;
3690 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3692 struct btrfs_dev_item *dev_item;
3694 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3696 return read_one_dev(root, buf, dev_item);
3699 int btrfs_read_sys_array(struct btrfs_root *root)
3701 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3702 struct extent_buffer *sb;
3703 struct btrfs_disk_key *disk_key;
3704 struct btrfs_chunk *chunk;
3706 unsigned long sb_ptr;
3712 struct btrfs_key key;
3714 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3715 BTRFS_SUPER_INFO_SIZE);
3718 btrfs_set_buffer_uptodate(sb);
3719 btrfs_set_buffer_lockdep_class(sb, 0);
3721 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3722 array_size = btrfs_super_sys_array_size(super_copy);
3724 ptr = super_copy->sys_chunk_array;
3725 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
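/*
 * Editorial sketch of the sys_chunk_array layout walked below: a
 * packed sequence of (disk_key, chunk item) pairs, where each chunk
 * item's size depends on its stripe count:
 *
 *   [btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][...]
 *
 * cur/ptr/sb_ptr advance by sizeof(*disk_key) and then by
 * btrfs_chunk_item_size(num_stripes) for each entry.
 */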
3728 while (cur < array_size) {
3729 disk_key = (struct btrfs_disk_key *)ptr;
3730 btrfs_disk_key_to_cpu(&key, disk_key);
3732 len = sizeof(*disk_key); ptr += len;
3736 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3737 chunk = (struct btrfs_chunk *)sb_ptr;
3738 ret = read_one_chunk(root, &key, sb, chunk);
3741 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3742 len = btrfs_chunk_item_size(num_stripes);
3751 free_extent_buffer(sb);
3755 int btrfs_read_chunk_tree(struct btrfs_root *root)
3757 struct btrfs_path *path;
3758 struct extent_buffer *leaf;
3759 struct btrfs_key key;
3760 struct btrfs_key found_key;
3764 root = root->fs_info->chunk_root;
3766 path = btrfs_alloc_path();
3770 /* first we search for all of the device items, and then we
3771 * read in all of the chunk items. This way we can create chunk
3772 * mappings that reference all of the devices that are found
3774 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3778 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3782 leaf = path->nodes[0];
3783 slot = path->slots[0];
3784 if (slot >= btrfs_header_nritems(leaf)) {
3785 ret = btrfs_next_leaf(root, path);
3792 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3793 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3794 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3796 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3797 struct btrfs_dev_item *dev_item;
3798 dev_item = btrfs_item_ptr(leaf, slot,
3799 struct btrfs_dev_item);
3800 ret = read_one_dev(root, leaf, dev_item);
3804 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3805 struct btrfs_chunk *chunk;
3806 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3807 ret = read_one_chunk(root, &found_key, leaf, chunk);
3813 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3815 btrfs_release_path(root, path);
3820 btrfs_free_path(path);