/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
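
/*
 * example: map_lookup_size(2) is sizeof(struct map_lookup) plus
 * 2 * sizeof(struct btrfs_bio_stripe), i.e. the fixed header plus one
 * btrfs_bio_stripe for each entry in the flexible stripes[] array.
 */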
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
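
/*
 * fs_uuids is the global list of btrfs_fs_devices structures, one per
 * filesystem uuid seen while scanning devices; uuid_mutex protects this
 * list and the per-fs device lists hanging off of it.
 */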
int btrfs_cleanup_fs_uuids(void)
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while (!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			printk("uuid cleanup finds %s\n", dev->name);
			close_bdev_excl(dev->bdev);
			list_del(&dev->dev_list);
static struct btrfs_device *__find_device(struct list_head *head, u64 devid)
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid)
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
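
/*
 * register the device described by disk_super/path under its fsid,
 * creating a new btrfs_fs_devices entry the first time a given fsid is
 * seen, and keep latest_devid/latest_trans and lowest_devid up to date
 * for the caller.
 */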
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
	INIT_LIST_HEAD(&fs_devices->devices);
	list_add(&fs_devices->list, &fs_uuids);
	memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
	fs_devices->latest_devid = devid;
	fs_devices->latest_trans = found_transid;
	fs_devices->lowest_devid = (u64)-1;
	fs_devices->num_devices = 0;
	device = __find_device(&fs_devices->devices, devid);
	device = kzalloc(sizeof(*device), GFP_NOFS);
	/* we can safely leave the fs_devices entry around */
	device->devid = devid;
	device->name = kstrdup(path, GFP_NOFS);
	list_add(&device->dev_list, &fs_devices->devices);
	fs_devices->num_devices++;

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
		printk("lowest devid now %Lu\n", devid);
	*fs_devices_ret = fs_devices;
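
/*
 * close the block device behind every device in the list; serialized
 * against scanning and opening by uuid_mutex.
 */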
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		close_bdev_excl(device->bdev);
		printk("close devices closes %s\n", device->name);
	mutex_unlock(&uuid_mutex);
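
/*
 * open every device in the list with open_bdev_excl and remember which
 * block devices back the latest and lowest devids; if an open fails, the
 * devices opened so far are closed again via btrfs_close_devices.
 */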
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);
		printk("opening %s devid %Lu\n", device->name, device->devid);
		printk("open %s failed\n", device->name);
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid) {
			fs_devices->lowest_bdev = bdev;
			printk("lowest bdev %s\n", device->name);
	mutex_unlock(&uuid_mutex);

	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
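
/*
 * read the superblock from a single device, verify the btrfs magic, and
 * register the device in the global uuid list via device_list_add; the
 * block device is closed again before returning.
 */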
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;

	mutex_lock(&uuid_mutex);

	printk("scan one opens %s\n", path);
	bdev = open_bdev_excl(path, flags, holder);
		printk("open failed\n");
	ret = set_blocksize(bdev, 4096);
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("no btrfs found on %s\n", path);
	devid = le64_to_cpu(disk_super->dev_item.devid);
	printk("found device %Lu on %s\n", devid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	close_bdev_excl(bdev);
	printk("scan one closes bdev %s\n", path);
	mutex_unlock(&uuid_mutex);
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	ret = btrfs_previous_item(root, path, 0, key.type);
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);

	slot = path->slots[0];
	if (slot >= btrfs_header_nritems(l)) {
		ret = btrfs_next_leaf(root, path);
	if (search_start >= search_end) {
	*start = search_start;
	*start = last_byte > search_start ?
		 last_byte : search_start;
	if (search_end <= *start) {

	btrfs_item_key_to_cpu(l, &key, slot);

	if (key.objectid < device->devid)

	if (key.objectid > device->devid)

	if (key.offset >= search_start && key.offset > last_byte &&
		if (last_byte < search_start)
			last_byte = search_start;
		hole_size = key.offset - last_byte;
		if (key.offset > last_byte &&
		    hole_size >= num_bytes) {

	if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {

	dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
	last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);

	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {

	/* check for pending inserts here */

	btrfs_release_path(root, path);
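
/*
 * find a free area of at least num_bytes on the device and record it by
 * inserting a BTRFS_DEV_EXTENT_KEY item keyed on (devid, *start) into the
 * tree at device->dev_root.
 */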
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 owner, u64 num_bytes, u64 *start)
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_owner(leaf, extent, owner);
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = (u64)-1;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	*objectid = found_key.objectid + found_key.offset;
	btrfs_free_path(path);
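
/*
 * walk backwards from the last DEV_ITEM key to find the highest devid
 * currently in use and return one past it in *objectid.
 */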
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	*objectid = found_key.offset + 1;
	btrfs_release_path(root, path);
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	ret = find_next_devid(root, path, &free_devid);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
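
/*
 * write the current in-memory fields of a btrfs_device back into its
 * DEV_ITEM in the chunk tree.
 */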
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
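
/*
 * append a (disk key, chunk item) pair to the sys_chunk_array embedded in
 * the in-memory super block copy and bump sys_array_size to match.  The
 * array is a packed sequence of struct btrfs_disk_key entries, each
 * followed by the chunk item it describes, which is exactly the layout
 * btrfs_read_sys_array() walks at mount time.
 */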
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
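
/*
 * allocate a new chunk of the given type: pick num_stripes devices with
 * enough free space, reserve a dev extent on each of them, insert the
 * chunk item into the chunk tree and cache the logical->physical mapping
 * in the extent map tree so btrfs_map_block() can find it without a tree
 * search.
 */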
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 min_free = calc_size;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list))

	if (type & (BTRFS_BLOCK_GROUP_RAID0))
		num_stripes = btrfs_super_num_devices(&info->super_copy);
	if (type & (BTRFS_BLOCK_GROUP_DUP))
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		if (avail > max_avail)
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			if (type & BTRFS_BLOCK_GROUP_DUP)
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (!looped && max_avail > 0) {
			calc_size = max_avail;

	ret = find_next_chunk(chunk_root, &key.objectid);

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	stripes = &chunk->stripe;

	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		*num_bytes = calc_size;
	*num_bytes = calc_size * num_stripes;

	printk("new chunk type %Lu start %Lu size %Lu\n", type, key.objectid, *num_bytes);
	while (index < num_stripes) {
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
					     calc_size, &dev_offset);
		printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n", key.objectid, calc_size, device->devid, type);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		btrfs_set_stack_stripe_devid(stripes + index, device->devid);
		btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
		physical = dev_offset;
	BUG_ON(!list_empty(&private_devs));

	/* key.objectid was set above */
	key.offset = *num_bytes;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->num_stripes = num_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	*start = key.objectid;

	em = alloc_extent_map(GFP_NOFS);
	em->bdev = (struct block_device *)map;
	em->start = key.objectid;
	em->len = key.offset;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
	struct extent_map *em;

	spin_lock(&tree->map_tree.lock);
	em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
	remove_extent_mapping(&tree->map_tree, em);
	spin_unlock(&tree->map_tree.lock);
	/* once for the tree */
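
/*
 * map a logical byte range of the chunk address space to physical
 * stripes.  Reads get a single copy; writes to RAID1 or DUP chunks get
 * one stripe per mirror, and *length is clamped so a bio never crosses a
 * stripe boundary on striped or mirrored chunks.
 */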
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret)
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int stripes_allocated = 8;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && (rw & (1 << BIO_RW)) &&
	    stripes_allocated < map->num_stripes &&
	    ((map->type & BTRFS_BLOCK_GROUP_RAID1) ||
	     (map->type & BTRFS_BLOCK_GROUP_DUP))) {
		stripes_allocated = map->num_stripes;
		spin_unlock(&em_tree->lock);

	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	*length = em->len - offset;

	multi->num_stripes = 1;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (1 << BIO_RW))
			multi->num_stripes = map->num_stripes;
		struct btrfs_device *cur;

		for (i = 0; i < map->num_stripes; i++) {
			cur = map->stripes[i].dev;
			spin_lock(&cur->io_lock);
			if (cur->total_ios < least) {
				least = cur->total_ios;
			spin_unlock(&cur->io_lock);
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			multi->num_stripes = map->num_stripes;

	/*
	 * after this do_div call, stripe_nr is the number of stripes
	 * on this device we have to walk to find the data, and
	 * stripe_index is the number of our device in the stripe array
	 */
	stripe_index = do_div(stripe_nr, map->num_stripes);

	BUG_ON(stripe_index >= map->num_stripes);
	BUG_ON(stripe_index != 0 && multi->num_stripes > 1);
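
	/*
	 * worked example (RAID0, 64K stripe_len, 2 stripes): an offset of
	 * 192K into the chunk gives stripe_nr = 192K / 64K = 3 and
	 * stripe_offset = 0; the do_div above then leaves stripe_index =
	 * 3 % 2 = 1 and stripe_nr = 3 / 2 = 1, so the physical address
	 * below works out to stripes[1].physical + 1 * 64K.
	 */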
	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;

	spin_unlock(&em_tree->lock);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		if (!err && multi->error)

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
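
/*
 * send a bio to the devices it needs to hit: look up the stripes with
 * btrfs_map_block(), clone the bio once per extra stripe, and point each
 * clone at end_bio_multi_stripe so completion is only reported to the
 * original end_io once every copy has finished.
 */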
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio)
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	struct bio_vec *bvec;
	struct btrfs_multi_bio *multi = NULL;

	bio_for_each_segment(bvec, bio, i) {
		length += bvec->bv_len;

	map_tree = &root->fs_info->mapping_tree;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		spin_unlock(&dev->io_lock);
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid);
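
/*
 * turn an on-disk chunk item into a map_lookup and add it to the mapping
 * tree, unless the logical range is already mapped; every stripe must
 * reference a device that has already been read in.
 */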
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;

	logical = key->objectid;
	length = key->offset;
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		spin_unlock(&map_tree->map_tree.lock);
	free_extent_map(em);
	spin_unlock(&map_tree->map_tree.lock);

	map = kzalloc(sizeof(*map), GFP_NOFS);

	em = alloc_extent_map(GFP_NOFS);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		free_extent_map(em);
	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		map->stripes[i].dev = btrfs_find_device(root, devid);
		if (!map->stripes[i].dev) {
			free_extent_map(em);

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	free_extent_map(em);
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
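
/*
 * read one DEV_ITEM and make sure a matching btrfs_device exists in
 * memory, allocating and registering one if the devid has not been seen
 * yet, then fill it in from the item and open it.
 */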
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
	struct btrfs_device *device;

	devid = btrfs_device_id(leaf, dev_item);
	device = btrfs_find_device(root, devid);
		printk("warning devid %Lu not found already\n", devid);
		device = kmalloc(sizeof(*device), GFP_NOFS);
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		device->total_ios = 0;
		spin_lock_init(&device->io_lock);

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;

	ret = btrfs_open_device(device);
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
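
/*
 * the sys_chunk_array in the super block holds the chunk mappings needed
 * before the chunk tree itself can be read; decode the packed
 * (disk key, chunk) pairs from the in-memory super block copy and add
 * them with read_one_chunk().
 */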
int btrfs_read_sys_array(struct btrfs_root *root)
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	unsigned long sb_ptr;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
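
/*
 * read every DEV_ITEM and CHUNK_ITEM in the chunk tree at mount time so
 * that all devices are known and all logical->physical chunk mappings are
 * cached in the mapping tree.
 */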
int btrfs_read_chunk_tree(struct btrfs_root *root)
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	leaf = path->nodes[0];
	slot = path->slots[0];
	if (slot >= btrfs_header_nritems(leaf)) {
		ret = btrfs_next_leaf(root, path);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
	} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
		struct btrfs_chunk *chunk;
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		ret = read_one_chunk(root, &found_key, leaf, chunk);
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		btrfs_release_path(root, path);

	btrfs_free_path(path);