/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
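
/*
 * map_lookup mirrors an on-disk chunk item in memory: one
 * btrfs_bio_stripe per stripe, stored in a flexible array member.
 * map_lookup_size(n) is the number of bytes to allocate for an
 * n-stripe mapping.
 */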

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}
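
/*
 * __find_device and find_fsid are the two lookup helpers behind device
 * scanning: the first matches a device by devid (and by uuid, when one
 * is supplied) within a single filesystem, the second matches a
 * filesystem by fsid on the global fs_uuids list.
 */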

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
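
/*
 * btrfs_close_devices and btrfs_open_devices walk every device in a
 * btrfs_fs_devices set under uuid_mutex, so device scans and mounts
 * cannot race with each other.
 */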

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
		}
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			continue;

		bdev = open_bdev_excl(device->name, flags, holder);

		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		set_blocksize(bdev, 4096);
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid) {
			fs_devices->lowest_bdev = bdev;
		}
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}

int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
	    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
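
/*
 * btrfs_free_dev_extent drops the dev extent item covering @start on
 * @device and subtracts the extent's length from device->bytes_used;
 * callers like btrfs_relocate_chunk() then persist the new counts via
 * btrfs_update_device().
 */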

int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret) {
		goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
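
/*
 * find_next_chunk and find_next_devid search back from the largest
 * possible key so that new chunks and new devices are always assigned
 * an offset/devid just past the highest one present in the tree.
 */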

static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
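
/*
 * btrfs_rm_dev_item does the transactional half of device removal:
 * delete the device item from the chunk tree, unlink the device from
 * the in-memory lists, repoint the cached lowest/latest bdev pointers,
 * and shrink the totals recorded in the superblock copy.
 */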

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == fs_devices->lowest_bdev)
		fs_devices->lowest_bdev = next_dev->bdev;
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes - device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}
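
/*
 * btrfs_rm_device is the user-facing entry point.  It refuses to drop
 * below the minimum device count for the raid level in use, empties
 * the victim with btrfs_shrink_device(device, 0), and finally zeroes
 * the superblock magic so the device is no longer detected as part of
 * the filesystem.
 */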

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret = 0;

	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto out;
	}

	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
	    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	if (memcmp(disk_super->fsid, root->fs_info->fsid, BTRFS_FSID_SIZE)) {
		ret = -ENOENT;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	device = btrfs_find_device(root, devid, NULL);
	if (!device) {
		ret = -ENOENT;
		goto error_brelse;
	}

	root->fs_info->fs_devices->num_devices--;

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	/* make sure this device isn't detected as part of the FS anymore */
	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
	set_buffer_dirty(bh);
	sync_dirty_buffer(bh);

	brelse(bh);

	/* one close for the device struct or super_block */
	close_bdev_excl(device->bdev);

	/* one close for us */
	close_bdev_excl(device->bdev);

	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
out:
	mutex_unlock(&uuid_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (!bdev) {
		return -EIO;
	}
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out_close_bdev;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
out:
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);

	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	goto out;
}
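
/*
 * btrfs_update_device rewrites an existing device item in place;
 * btrfs_grow_device bumps device->total_bytes plus the superblock
 * total and then reuses it, so new space becomes visible to the chunk
 * allocator without any extent shuffling.
 */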

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	device->total_bytes = new_size;
	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
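
/*
 * Relocating a chunk is a two phase operation: first move every extent
 * that lives inside the chunk, then delete the device extents, the
 * chunk tree item (and, for system chunks, the sys_chunk_array copy)
 * and the in-memory extent_map so the logical range can be reused.
 */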

int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk("btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		ret = btrfs_update_device(trans, map->stripes[i].dev);
		BUG_ON(ret);
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	btrfs_end_transaction(trans, root);
	return 0;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
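
/*
 * div_factor(num, f) returns num * f / 10, with f == 10 meaning "leave
 * num unchanged".  btrfs_balance uses a factor of 1 below to free
 * roughly 10% of each device (capped at 1MB) before it starts
 * relocating chunks.
 */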

int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	dev_root = dev_root->fs_info->dev_root;

	mutex_lock(&dev_root->fs_info->fs_mutex);
	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while(1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;
		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
		btrfs_release_path(chunk_root, path);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->fs_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}

int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
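
/*
 * btrfs_alloc_chunk picks a stripe count and stripe size for the
 * requested block group type, reserves a dev extent on each chosen
 * device, then inserts the chunk item and a matching extent_map.
 * System chunks are also copied into the superblock's sys_chunk_array
 * so the chunk tree itself can be found at mount time.
 */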

int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				  btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;

		if (avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
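
/*
 * __btrfs_map_block translates a logical byte range into the physical
 * stripes backing it.  Reads are mapped to a single mirror; writes fan
 * out to every copy, so the multi-bio is sized for the full stripe
 * count and max_errors records how many stripe failures are tolerable.
 */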

static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu len %Lu\n", logical, *length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
			      map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = current->pid % map->num_stripes;

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else
			stripe_index += current->pid % map->sub_stripes;
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			bdi = blk_get_backing_dev_info(device->bdev);
			if (bdi->unplug_io_fn) {
				bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		atomic_inc(&multi->error);

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
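
/*
 * btrfs_map_bio clones the incoming bio once per stripe and submits a
 * copy to each device; end_bio_multi_stripe above gathers the
 * completions and only ends the original bio when all copies have
 * finished.
 */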

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while(dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;

		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		dev->total_ios++;
		spin_unlock(&dev->io_lock);
		submit_bio(rw, bio);
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
		if (!map->stripes[i].dev) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu not found already\n", devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;

	ret = btrfs_open_device(device);
	if (ret) {
		return ret;
	}

	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
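
/*
 * btrfs_read_sys_array copies the superblock's sys_chunk_array into a
 * temporary extent buffer so the normal extent-buffer accessors can
 * walk the packed (disk key, chunk item) pairs and prime the mapping
 * tree with the system chunks needed to read the chunk tree itself.
 */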

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}