1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <asm/div64.h>
24 #include "ctree.h"
25 #include "extent_map.h"
26 #include "disk-io.h"
27 #include "transaction.h"
28 #include "print-tree.h"
29 #include "volumes.h"
30 #include "async-thread.h"
31
32 struct map_lookup {
33         u64 type;
34         int io_align;
35         int io_width;
36         int stripe_len;
37         int sector_size;
38         int num_stripes;
39         int sub_stripes;
40         struct btrfs_bio_stripe stripes[];
41 };
42
43 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
44                             (sizeof(struct btrfs_bio_stripe) * (n)))
45
46 static DEFINE_MUTEX(uuid_mutex);
47 static LIST_HEAD(fs_uuids);
48
49 void btrfs_lock_volumes(void)
50 {
51         mutex_lock(&uuid_mutex);
52 }
53
54 void btrfs_unlock_volumes(void)
55 {
56         mutex_unlock(&uuid_mutex);
57 }
58
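/*
 * walk the global list of scanned filesystems and free every cached
 * device: close any block device still held open, then free the
 * btrfs_device struct and its name string.
 */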
59 int btrfs_cleanup_fs_uuids(void)
60 {
61         struct btrfs_fs_devices *fs_devices;
62         struct list_head *uuid_cur;
63         struct list_head *devices_cur;
64         struct btrfs_device *dev;
65
66         list_for_each(uuid_cur, &fs_uuids) {
67                 fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
68                                         list);
69                 while(!list_empty(&fs_devices->devices)) {
70                         devices_cur = fs_devices->devices.next;
71                         dev = list_entry(devices_cur, struct btrfs_device,
72                                          dev_list);
73                         if (dev->bdev) {
74                                 close_bdev_excl(dev->bdev);
75                                 fs_devices->open_devices--;
76                         }
77                         list_del(&dev->dev_list);
78                         kfree(dev->name);
79                         kfree(dev);
80                 }
81         }
82         return 0;
83 }
84
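/*
 * find a btrfs_device on @head with a matching devid (and a matching
 * uuid, when one is supplied).  Returns NULL if nothing matches.
 */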
85 static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
86                                           u8 *uuid)
87 {
88         struct btrfs_device *dev;
89         struct list_head *cur;
90
91         list_for_each(cur, head) {
92                 dev = list_entry(cur, struct btrfs_device, dev_list);
93                 if (dev->devid == devid &&
94                     (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
95                         return dev;
96                 }
97         }
98         return NULL;
99 }
100
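/* look up the btrfs_fs_devices entry whose fsid matches @fsid */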
101 static struct btrfs_fs_devices *find_fsid(u8 *fsid)
102 {
103         struct list_head *cur;
104         struct btrfs_fs_devices *fs_devices;
105
106         list_for_each(cur, &fs_uuids) {
107                 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
108                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
109                         return fs_devices;
110         }
111         return NULL;
112 }
113
114 /*
115  * we try to collect pending bios for a device so we don't get a large
116  * number of procs sending bios down to the same device.  This greatly
117  * improves the scheduler's ability to collect and merge the bios.
118  *
119  * But, it also turns into a long list of bios to process and that is sure
120  * to eventually make the worker thread block.  The solution here is to
121  * make some progress and then put this work struct back at the end of
122  * the list if the block device is congested.  This way, multiple devices
123  * can make progress from a single worker thread.
124  */
125 int run_scheduled_bios(struct btrfs_device *device)
126 {
127         struct bio *pending;
128         struct backing_dev_info *bdi;
129         struct bio *tail;
130         struct bio *cur;
131         int again = 0;
132         unsigned long num_run = 0;
133
134         bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
135 loop:
136         spin_lock(&device->io_lock);
137
138         /* take all the bios off the list at once and process them
139          * later on (without the lock held).  But, remember the
140          * tail and other pointers so the bios can be properly reinserted
141          * into the list if we hit congestion
142          */
143         pending = device->pending_bios;
144         tail = device->pending_bio_tail;
145         WARN_ON(pending && !tail);
146         device->pending_bios = NULL;
147         device->pending_bio_tail = NULL;
148
149         /*
150          * if pending was null this time around, no bios need processing
151          * at all and we can stop.  Otherwise it'll loop back up again
152          * and do an additional check so no bios are missed.
153          *
154          * device->running_pending is used to synchronize with the
155          * schedule_bio code.
156          */
157         if (pending) {
158                 again = 1;
159                 device->running_pending = 1;
160         } else {
161                 again = 0;
162                 device->running_pending = 0;
163         }
164         spin_unlock(&device->io_lock);
165
166         while(pending) {
167                 cur = pending;
168                 pending = pending->bi_next;
169                 cur->bi_next = NULL;
170                 atomic_dec(&device->dev_root->fs_info->nr_async_submits);
171                 submit_bio(cur->bi_rw, cur);
172                 num_run++;
173
174                 /*
175                  * we made progress, there is more work to do and the bdi
176                  * is now congested.  Back off and let other work structs
177                  * run instead
178                  */
179                 if (pending && num_run && bdi_write_congested(bdi)) {
180                         struct bio *old_head;
181
182                         spin_lock(&device->io_lock);
183                         old_head = device->pending_bios;
184                         device->pending_bios = pending;
185                         if (device->pending_bio_tail)
186                                 tail->bi_next = old_head;
187                         else
188                                 device->pending_bio_tail = tail;
189
190                         spin_unlock(&device->io_lock);
191                         btrfs_requeue_work(&device->work);
192                         goto done;
193                 }
194         }
195         if (again)
196                 goto loop;
197 done:
198         return 0;
199 }
200
201 void pending_bios_fn(struct btrfs_work *work)
202 {
203         struct btrfs_device *device;
204
205         device = container_of(work, struct btrfs_device, work);
206         run_scheduled_bios(device);
207 }
208
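/*
 * note a device found by a scan.  The first device seen for a new fsid
 * allocates the btrfs_fs_devices entry; later devices are added to its
 * list, or simply refreshed when the devid/uuid is already known.
 * latest_devid and latest_trans track the device that carried the
 * highest superblock generation.
 */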
209 static int device_list_add(const char *path,
210                            struct btrfs_super_block *disk_super,
211                            u64 devid, struct btrfs_fs_devices **fs_devices_ret)
212 {
213         struct btrfs_device *device;
214         struct btrfs_fs_devices *fs_devices;
215         u64 found_transid = btrfs_super_generation(disk_super);
216
217         fs_devices = find_fsid(disk_super->fsid);
218         if (!fs_devices) {
219                 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
220                 if (!fs_devices)
221                         return -ENOMEM;
222                 INIT_LIST_HEAD(&fs_devices->devices);
223                 INIT_LIST_HEAD(&fs_devices->alloc_list);
224                 list_add(&fs_devices->list, &fs_uuids);
225                 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
226                 fs_devices->latest_devid = devid;
227                 fs_devices->latest_trans = found_transid;
228                 device = NULL;
229         } else {
230                 device = __find_device(&fs_devices->devices, devid,
231                                        disk_super->dev_item.uuid);
232         }
233         if (!device) {
234                 device = kzalloc(sizeof(*device), GFP_NOFS);
235                 if (!device) {
236                         /* we can safely leave the fs_devices entry around */
237                         return -ENOMEM;
238                 }
239                 device->devid = devid;
240                 device->work.func = pending_bios_fn;
241                 memcpy(device->uuid, disk_super->dev_item.uuid,
242                        BTRFS_UUID_SIZE);
243                 device->barriers = 1;
244                 spin_lock_init(&device->io_lock);
245                 device->name = kstrdup(path, GFP_NOFS);
246                 if (!device->name) {
247                         kfree(device);
248                         return -ENOMEM;
249                 }
250                 list_add(&device->dev_list, &fs_devices->devices);
251                 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
252                 fs_devices->num_devices++;
253         }
254
255         if (found_transid > fs_devices->latest_trans) {
256                 fs_devices->latest_devid = devid;
257                 fs_devices->latest_trans = found_transid;
258         }
259         *fs_devices_ret = fs_devices;
260         return 0;
261 }
262
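/*
 * close and forget any scanned device that never got marked
 * in_fs_metadata, i.e. one the filesystem's device items do not
 * actually reference.
 */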
263 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
264 {
265         struct list_head *head = &fs_devices->devices;
266         struct list_head *cur;
267         struct btrfs_device *device;
268
269         mutex_lock(&uuid_mutex);
270 again:
271         list_for_each(cur, head) {
272                 device = list_entry(cur, struct btrfs_device, dev_list);
273                 if (!device->in_fs_metadata) {
274                         struct block_device *bdev;
275                         list_del(&device->dev_list);
276                         list_del(&device->dev_alloc_list);
277                         fs_devices->num_devices--;
278                         if (device->bdev) {
279                                 bdev = device->bdev;
280                                 fs_devices->open_devices--;
281                                 mutex_unlock(&uuid_mutex);
282                                 close_bdev_excl(bdev);
283                                 mutex_lock(&uuid_mutex);
284                         }
285                         kfree(device->name);
286                         kfree(device);
287                         goto again;
288                 }
289         }
290         mutex_unlock(&uuid_mutex);
291         return 0;
292 }
293
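/*
 * close every open block device belonging to this filesystem and mark
 * the whole fs_devices struct as unmounted.
 */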
294 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
295 {
296         struct list_head *head = &fs_devices->devices;
297         struct list_head *cur;
298         struct btrfs_device *device;
299
300         mutex_lock(&uuid_mutex);
301         list_for_each(cur, head) {
302                 device = list_entry(cur, struct btrfs_device, dev_list);
303                 if (device->bdev) {
304                         close_bdev_excl(device->bdev);
305                         fs_devices->open_devices--;
306                 }
307                 device->bdev = NULL;
308                 device->in_fs_metadata = 0;
309         }
310         fs_devices->mounted = 0;
311         mutex_unlock(&uuid_mutex);
312         return 0;
313 }
314
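/*
 * open every device we know about for this filesystem, verify that the
 * superblock on disk still matches the expected devid, and remember
 * which device carries the newest generation as latest_bdev.  Fails
 * with -EIO if no device could be opened.
 */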
315 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
316                        int flags, void *holder)
317 {
318         struct block_device *bdev;
319         struct list_head *head = &fs_devices->devices;
320         struct list_head *cur;
321         struct btrfs_device *device;
322         struct block_device *latest_bdev = NULL;
323         struct buffer_head *bh;
324         struct btrfs_super_block *disk_super;
325         u64 latest_devid = 0;
326         u64 latest_transid = 0;
327         u64 transid;
328         u64 devid;
329         int ret = 0;
330
331         mutex_lock(&uuid_mutex);
332         if (fs_devices->mounted)
333                 goto out;
334
335         list_for_each(cur, head) {
336                 device = list_entry(cur, struct btrfs_device, dev_list);
337                 if (device->bdev)
338                         continue;
339
340                 if (!device->name)
341                         continue;
342
343                 bdev = open_bdev_excl(device->name, flags, holder);
344
345                 if (IS_ERR(bdev)) {
346                         printk("open %s failed\n", device->name);
347                         goto error;
348                 }
349                 set_blocksize(bdev, 4096);
350
351                 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
352                 if (!bh)
353                         goto error_close;
354
355                 disk_super = (struct btrfs_super_block *)bh->b_data;
356                 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
357                     sizeof(disk_super->magic)))
358                         goto error_brelse;
359
360                 devid = le64_to_cpu(disk_super->dev_item.devid);
361                 if (devid != device->devid)
362                         goto error_brelse;
363
364                 transid = btrfs_super_generation(disk_super);
365                 if (!latest_transid || transid > latest_transid) {
366                         latest_devid = devid;
367                         latest_transid = transid;
368                         latest_bdev = bdev;
369                 }
370
371                 device->bdev = bdev;
372                 device->in_fs_metadata = 0;
373                 fs_devices->open_devices++;
374                 continue;
375
376 error_brelse:
377                 brelse(bh);
378 error_close:
379                 close_bdev_excl(bdev);
380 error:
381                 continue;
382         }
383         if (fs_devices->open_devices == 0) {
384                 ret = -EIO;
385                 goto out;
386         }
387         fs_devices->mounted = 1;
388         fs_devices->latest_bdev = latest_bdev;
389         fs_devices->latest_devid = latest_devid;
390         fs_devices->latest_trans = latest_transid;
391 out:
392         mutex_unlock(&uuid_mutex);
393         return ret;
394 }
395
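/*
 * read the superblock from @path, verify the btrfs magic, print the
 * label (or fsid) and register the device via device_list_add().
 */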
396 int btrfs_scan_one_device(const char *path, int flags, void *holder,
397                           struct btrfs_fs_devices **fs_devices_ret)
398 {
399         struct btrfs_super_block *disk_super;
400         struct block_device *bdev;
401         struct buffer_head *bh;
402         int ret;
403         u64 devid;
404         u64 transid;
405
406         mutex_lock(&uuid_mutex);
407
408         bdev = open_bdev_excl(path, flags, holder);
409
410         if (IS_ERR(bdev)) {
411                 ret = PTR_ERR(bdev);
412                 goto error;
413         }
414
415         ret = set_blocksize(bdev, 4096);
416         if (ret)
417                 goto error_close;
418         bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
419         if (!bh) {
420                 ret = -EIO;
421                 goto error_close;
422         }
423         disk_super = (struct btrfs_super_block *)bh->b_data;
424         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
425             sizeof(disk_super->magic))) {
426                 ret = -EINVAL;
427                 goto error_brelse;
428         }
429         devid = le64_to_cpu(disk_super->dev_item.devid);
430         transid = btrfs_super_generation(disk_super);
431         if (disk_super->label[0])
432                 printk("device label %s ", disk_super->label);
433         else {
434                 /* FIXME, make a real uuid parser */
435                 printk("device fsid %llx-%llx ",
436                        *(unsigned long long *)disk_super->fsid,
437                        *(unsigned long long *)(disk_super->fsid + 8));
438         }
439         printk("devid %Lu transid %Lu %s\n", devid, transid, path);
440         ret = device_list_add(path, disk_super, devid, fs_devices_ret);
441
442 error_brelse:
443         brelse(bh);
444 error_close:
445         close_bdev_excl(bdev);
446 error:
447         mutex_unlock(&uuid_mutex);
448         return ret;
449 }
450
451 /*
452  * this uses a pretty simple search, the expectation is that it is
453  * called very infrequently and that a given device has a small number
454  * of extents
455  */
456 static int find_free_dev_extent(struct btrfs_trans_handle *trans,
457                                 struct btrfs_device *device,
458                                 struct btrfs_path *path,
459                                 u64 num_bytes, u64 *start)
460 {
461         struct btrfs_key key;
462         struct btrfs_root *root = device->dev_root;
463         struct btrfs_dev_extent *dev_extent = NULL;
464         u64 hole_size = 0;
465         u64 last_byte = 0;
466         u64 search_start = 0;
467         u64 search_end = device->total_bytes;
468         int ret;
469         int slot = 0;
470         int start_found;
471         struct extent_buffer *l;
472
473         start_found = 0;
474         path->reada = 2;
475
476         /* FIXME use last free of some kind */
477
478         /* we don't want to overwrite the superblock on the drive,
479          * so we make sure to start at an offset of at least 1MB
480          */
481         search_start = max((u64)1024 * 1024, search_start);
482
483         if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
484                 search_start = max(root->fs_info->alloc_start, search_start);
485
486         key.objectid = device->devid;
487         key.offset = search_start;
488         key.type = BTRFS_DEV_EXTENT_KEY;
489         ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
490         if (ret < 0)
491                 goto error;
492         ret = btrfs_previous_item(root, path, 0, key.type);
493         if (ret < 0)
494                 goto error;
495         l = path->nodes[0];
496         btrfs_item_key_to_cpu(l, &key, path->slots[0]);
497         while (1) {
498                 l = path->nodes[0];
499                 slot = path->slots[0];
500                 if (slot >= btrfs_header_nritems(l)) {
501                         ret = btrfs_next_leaf(root, path);
502                         if (ret == 0)
503                                 continue;
504                         if (ret < 0)
505                                 goto error;
506 no_more_items:
507                         if (!start_found) {
508                                 if (search_start >= search_end) {
509                                         ret = -ENOSPC;
510                                         goto error;
511                                 }
512                                 *start = search_start;
513                                 start_found = 1;
514                                 goto check_pending;
515                         }
516                         *start = last_byte > search_start ?
517                                 last_byte : search_start;
518                         if (search_end <= *start) {
519                                 ret = -ENOSPC;
520                                 goto error;
521                         }
522                         goto check_pending;
523                 }
524                 btrfs_item_key_to_cpu(l, &key, slot);
525
526                 if (key.objectid < device->devid)
527                         goto next;
528
529                 if (key.objectid > device->devid)
530                         goto no_more_items;
531
532                 if (key.offset >= search_start && key.offset > last_byte &&
533                     start_found) {
534                         if (last_byte < search_start)
535                                 last_byte = search_start;
536                         hole_size = key.offset - last_byte;
537                         if (key.offset > last_byte &&
538                             hole_size >= num_bytes) {
539                                 *start = last_byte;
540                                 goto check_pending;
541                         }
542                 }
543                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
544                         goto next;
545                 }
546
547                 start_found = 1;
548                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
549                 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
550 next:
551                 path->slots[0]++;
552                 cond_resched();
553         }
554 check_pending:
555         /* we have to make sure we didn't find an extent that has already
556          * been allocated by the map tree or the original allocation
557          */
558         btrfs_release_path(root, path);
559         BUG_ON(*start < search_start);
560
561         if (*start + num_bytes > search_end) {
562                 ret = -ENOSPC;
563                 goto error;
564         }
565         /* check for pending inserts here */
566         return 0;
567
568 error:
569         btrfs_release_path(root, path);
570         return ret;
571 }
572
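/*
 * remove the dev extent item that covers @start on @device and trim
 * the device's bytes_used by the length of that extent.
 */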
573 int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
574                           struct btrfs_device *device,
575                           u64 start)
576 {
577         int ret;
578         struct btrfs_path *path;
579         struct btrfs_root *root = device->dev_root;
580         struct btrfs_key key;
581         struct btrfs_key found_key;
582         struct extent_buffer *leaf = NULL;
583         struct btrfs_dev_extent *extent = NULL;
584
585         path = btrfs_alloc_path();
586         if (!path)
587                 return -ENOMEM;
588
589         key.objectid = device->devid;
590         key.offset = start;
591         key.type = BTRFS_DEV_EXTENT_KEY;
592
593         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
594         if (ret > 0) {
595                 ret = btrfs_previous_item(root, path, key.objectid,
596                                           BTRFS_DEV_EXTENT_KEY);
597                 BUG_ON(ret);
598                 leaf = path->nodes[0];
599                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
600                 extent = btrfs_item_ptr(leaf, path->slots[0],
601                                         struct btrfs_dev_extent);
602                 BUG_ON(found_key.offset > start || found_key.offset +
603                        btrfs_dev_extent_length(leaf, extent) < start);
604                 ret = 0;
605         } else if (ret == 0) {
606                 leaf = path->nodes[0];
607                 extent = btrfs_item_ptr(leaf, path->slots[0],
608                                         struct btrfs_dev_extent);
609         }
610         BUG_ON(ret);
611
612         if (device->bytes_used > 0)
613                 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
614         ret = btrfs_del_item(trans, root, path);
615         BUG_ON(ret);
616
617         btrfs_free_path(path);
618         return ret;
619 }
620
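/*
 * find a free area of @num_bytes on @device and insert a dev extent
 * item for it that points back at the owning chunk.  The chosen offset
 * is returned in *start.
 */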
621 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
622                            struct btrfs_device *device,
623                            u64 chunk_tree, u64 chunk_objectid,
624                            u64 chunk_offset,
625                            u64 num_bytes, u64 *start)
626 {
627         int ret;
628         struct btrfs_path *path;
629         struct btrfs_root *root = device->dev_root;
630         struct btrfs_dev_extent *extent;
631         struct extent_buffer *leaf;
632         struct btrfs_key key;
633
634         WARN_ON(!device->in_fs_metadata);
635         path = btrfs_alloc_path();
636         if (!path)
637                 return -ENOMEM;
638
639         ret = find_free_dev_extent(trans, device, path, num_bytes, start);
640         if (ret) {
641                 goto err;
642         }
643
644         key.objectid = device->devid;
645         key.offset = *start;
646         key.type = BTRFS_DEV_EXTENT_KEY;
647         ret = btrfs_insert_empty_item(trans, root, path, &key,
648                                       sizeof(*extent));
649         BUG_ON(ret);
650
651         leaf = path->nodes[0];
652         extent = btrfs_item_ptr(leaf, path->slots[0],
653                                 struct btrfs_dev_extent);
654         btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
655         btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
656         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
657
658         write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
659                     (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
660                     BTRFS_UUID_SIZE);
661
662         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
663         btrfs_mark_buffer_dirty(leaf);
664 err:
665         btrfs_free_path(path);
666         return ret;
667 }
668
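/*
 * return in *offset the logical address where the next chunk may be
 * placed: the end of the highest existing chunk owned by @objectid,
 * or 0 when there is none yet.
 */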
669 static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
670 {
671         struct btrfs_path *path;
672         int ret;
673         struct btrfs_key key;
674         struct btrfs_chunk *chunk;
675         struct btrfs_key found_key;
676
677         path = btrfs_alloc_path();
678         BUG_ON(!path);
679
680         key.objectid = objectid;
681         key.offset = (u64)-1;
682         key.type = BTRFS_CHUNK_ITEM_KEY;
683
684         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
685         if (ret < 0)
686                 goto error;
687
688         BUG_ON(ret == 0);
689
690         ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
691         if (ret) {
692                 *offset = 0;
693         } else {
694                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
695                                       path->slots[0]);
696                 if (found_key.objectid != objectid)
697                         *offset = 0;
698                 else {
699                         chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
700                                                struct btrfs_chunk);
701                         *offset = found_key.offset +
702                                 btrfs_chunk_length(path->nodes[0], chunk);
703                 }
704         }
705         ret = 0;
706 error:
707         btrfs_free_path(path);
708         return ret;
709 }
710
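/* the next free devid is one past the highest existing DEV_ITEM offset */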
711 static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
712                            u64 *objectid)
713 {
714         int ret;
715         struct btrfs_key key;
716         struct btrfs_key found_key;
717
718         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
719         key.type = BTRFS_DEV_ITEM_KEY;
720         key.offset = (u64)-1;
721
722         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
723         if (ret < 0)
724                 goto error;
725
726         BUG_ON(ret == 0);
727
728         ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
729                                   BTRFS_DEV_ITEM_KEY);
730         if (ret) {
731                 *objectid = 1;
732         } else {
733                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
734                                       path->slots[0]);
735                 *objectid = found_key.offset + 1;
736         }
737         ret = 0;
738 error:
739         btrfs_release_path(root, path);
740         return ret;
741 }
742
743 /*
744  * the device information is stored in the chunk root
745  * the btrfs_device struct should be fully filled in
746  */
747 int btrfs_add_device(struct btrfs_trans_handle *trans,
748                      struct btrfs_root *root,
749                      struct btrfs_device *device)
750 {
751         int ret;
752         struct btrfs_path *path;
753         struct btrfs_dev_item *dev_item;
754         struct extent_buffer *leaf;
755         struct btrfs_key key;
756         unsigned long ptr;
757         u64 free_devid = 0;
758
759         root = root->fs_info->chunk_root;
760
761         path = btrfs_alloc_path();
762         if (!path)
763                 return -ENOMEM;
764
765         ret = find_next_devid(root, path, &free_devid);
766         if (ret)
767                 goto out;
768
769         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
770         key.type = BTRFS_DEV_ITEM_KEY;
771         key.offset = free_devid;
772
773         ret = btrfs_insert_empty_item(trans, root, path, &key,
774                                       sizeof(*dev_item));
775         if (ret)
776                 goto out;
777
778         leaf = path->nodes[0];
779         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
780
781         device->devid = free_devid;
782         btrfs_set_device_id(leaf, dev_item, device->devid);
783         btrfs_set_device_type(leaf, dev_item, device->type);
784         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
785         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
786         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
787         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
788         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
789         btrfs_set_device_group(leaf, dev_item, 0);
790         btrfs_set_device_seek_speed(leaf, dev_item, 0);
791         btrfs_set_device_bandwidth(leaf, dev_item, 0);
792
793         ptr = (unsigned long)btrfs_device_uuid(dev_item);
794         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
795         btrfs_mark_buffer_dirty(leaf);
796         ret = 0;
797
798 out:
799         btrfs_free_path(path);
800         return ret;
801 }
802
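/*
 * delete the DEV_ITEM for @device from the chunk tree, switch the
 * latest bdev pointers away from it if needed and drop the device
 * count kept in the superblock copy.
 */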
803 static int btrfs_rm_dev_item(struct btrfs_root *root,
804                              struct btrfs_device *device)
805 {
806         int ret;
807         struct btrfs_path *path;
808         struct block_device *bdev = device->bdev;
809         struct btrfs_device *next_dev;
810         struct btrfs_key key;
811         u64 total_bytes;
812         struct btrfs_fs_devices *fs_devices;
813         struct btrfs_trans_handle *trans;
814
815         root = root->fs_info->chunk_root;
816
817         path = btrfs_alloc_path();
818         if (!path)
819                 return -ENOMEM;
820
821         trans = btrfs_start_transaction(root, 1);
822         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
823         key.type = BTRFS_DEV_ITEM_KEY;
824         key.offset = device->devid;
825
826         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
827         if (ret < 0)
828                 goto out;
829
830         if (ret > 0) {
831                 ret = -ENOENT;
832                 goto out;
833         }
834
835         ret = btrfs_del_item(trans, root, path);
836         if (ret)
837                 goto out;
838
839         /*
840          * at this point, the device is zero sized.  We want to
841          * remove it from the devices list and zero out the old super
842          */
843         list_del_init(&device->dev_list);
844         list_del_init(&device->dev_alloc_list);
845         fs_devices = root->fs_info->fs_devices;
846
847         next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
848                               dev_list);
849         if (bdev == root->fs_info->sb->s_bdev)
850                 root->fs_info->sb->s_bdev = next_dev->bdev;
851         if (bdev == fs_devices->latest_bdev)
852                 fs_devices->latest_bdev = next_dev->bdev;
853
854         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
855         btrfs_set_super_num_devices(&root->fs_info->super_copy,
856                                     total_bytes - 1);
857 out:
858         btrfs_free_path(path);
859         btrfs_commit_transaction(trans, root);
860         return ret;
861 }
862
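/*
 * remove a device from a mounted filesystem.  A path of "missing"
 * selects a device that is referenced by the metadata but has no open
 * bdev; otherwise the named device is opened and looked up by devid.
 * The device is shrunk to zero (relocating its chunks), its DEV_ITEM
 * is deleted and its superblock magic is wiped so it is no longer
 * detected as part of the FS.  Refuses to go below the minimum device
 * count for raid1 and raid10.
 */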
863 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
864 {
865         struct btrfs_device *device;
866         struct block_device *bdev;
867         struct buffer_head *bh = NULL;
868         struct btrfs_super_block *disk_super;
869         u64 all_avail;
870         u64 devid;
871         int ret = 0;
872
873         mutex_lock(&root->fs_info->alloc_mutex);
874         mutex_lock(&root->fs_info->chunk_mutex);
875         mutex_lock(&uuid_mutex);
876
877         all_avail = root->fs_info->avail_data_alloc_bits |
878                 root->fs_info->avail_system_alloc_bits |
879                 root->fs_info->avail_metadata_alloc_bits;
880
881         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
882             btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
883                 printk("btrfs: unable to go below four devices on raid10\n");
884                 ret = -EINVAL;
885                 goto out;
886         }
887
888         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
889             btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
890                 printk("btrfs: unable to go below two devices on raid1\n");
891                 ret = -EINVAL;
892                 goto out;
893         }
894
895         if (strcmp(device_path, "missing") == 0) {
896                 struct list_head *cur;
897                 struct list_head *devices;
898                 struct btrfs_device *tmp;
899
900                 device = NULL;
901                 devices = &root->fs_info->fs_devices->devices;
902                 list_for_each(cur, devices) {
903                         tmp = list_entry(cur, struct btrfs_device, dev_list);
904                         if (tmp->in_fs_metadata && !tmp->bdev) {
905                                 device = tmp;
906                                 break;
907                         }
908                 }
909                 bdev = NULL;
910                 bh = NULL;
911                 disk_super = NULL;
912                 if (!device) {
913                         printk("btrfs: no missing devices found to remove\n");
914                         goto out;
915                 }
916
917         } else {
918                 bdev = open_bdev_excl(device_path, 0,
919                                       root->fs_info->bdev_holder);
920                 if (IS_ERR(bdev)) {
921                         ret = PTR_ERR(bdev);
922                         goto out;
923                 }
924
925                 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
926                 if (!bh) {
927                         ret = -EIO;
928                         goto error_close;
929                 }
930                 disk_super = (struct btrfs_super_block *)bh->b_data;
931                 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
932                     sizeof(disk_super->magic))) {
933                         ret = -ENOENT;
934                         goto error_brelse;
935                 }
936                 if (memcmp(disk_super->fsid, root->fs_info->fsid,
937                            BTRFS_FSID_SIZE)) {
938                         ret = -ENOENT;
939                         goto error_brelse;
940                 }
941                 devid = le64_to_cpu(disk_super->dev_item.devid);
942                 device = btrfs_find_device(root, devid, NULL);
943                 if (!device) {
944                         ret = -ENOENT;
945                         goto error_brelse;
946                 }
947
948         }
949         root->fs_info->fs_devices->num_devices--;
950         root->fs_info->fs_devices->open_devices--;
951
952         ret = btrfs_shrink_device(device, 0);
953         if (ret)
954                 goto error_brelse;
955
956
957         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
958         if (ret)
959                 goto error_brelse;
960
961         if (bh) {
962                 /* make sure this device isn't detected as part of
963                  * the FS anymore
964                  */
965                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
966                 set_buffer_dirty(bh);
967                 sync_dirty_buffer(bh);
968
969                 brelse(bh);
970         }
971
972         if (device->bdev) {
973                 /* one close for the device struct or super_block */
974                 close_bdev_excl(device->bdev);
975         }
976         if (bdev) {
977                 /* one close for us */
978                 close_bdev_excl(bdev);
979         }
980         kfree(device->name);
981         kfree(device);
982         ret = 0;
983         goto out;
984
985 error_brelse:
986         brelse(bh);
987 error_close:
988         if (bdev)
989                 close_bdev_excl(bdev);
990 out:
991         mutex_unlock(&uuid_mutex);
992         mutex_unlock(&root->fs_info->chunk_mutex);
993         mutex_unlock(&root->fs_info->alloc_mutex);
994         return ret;
995 }
996
997 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
998 {
999         struct btrfs_trans_handle *trans;
1000         struct btrfs_device *device;
1001         struct block_device *bdev;
1002         struct list_head *cur;
1003         struct list_head *devices;
1004         u64 total_bytes;
1005         int ret = 0;
1006
1007
1008         bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
1009         if (IS_ERR(bdev)) {
1010                 return PTR_ERR(bdev);
1011         }
1012
1013         mutex_lock(&root->fs_info->alloc_mutex);
1014         mutex_lock(&root->fs_info->chunk_mutex);
1015
1016         trans = btrfs_start_transaction(root, 1);
1017         devices = &root->fs_info->fs_devices->devices;
1018         list_for_each(cur, devices) {
1019                 device = list_entry(cur, struct btrfs_device, dev_list);
1020                 if (device->bdev == bdev) {
1021                         ret = -EEXIST;
1022                         goto out;
1023                 }
1024         }
1025
1026         device = kzalloc(sizeof(*device), GFP_NOFS);
1027         if (!device) {
1028                 /* we can safely leave the fs_devices entry around */
1029                 ret = -ENOMEM;
1030                 goto out_close_bdev;
1031         }
1032
1033         device->barriers = 1;
1034         device->work.func = pending_bios_fn;
1035         generate_random_uuid(device->uuid);
1036         spin_lock_init(&device->io_lock);
1037         device->name = kstrdup(device_path, GFP_NOFS);
1038         if (!device->name) {
1039                 kfree(device);
                     ret = -ENOMEM;
1040                 goto out_close_bdev;
1041         }
1042         device->io_width = root->sectorsize;
1043         device->io_align = root->sectorsize;
1044         device->sector_size = root->sectorsize;
1045         device->total_bytes = i_size_read(bdev->bd_inode);
1046         device->dev_root = root->fs_info->dev_root;
1047         device->bdev = bdev;
1048         device->in_fs_metadata = 1;
1049
1050         ret = btrfs_add_device(trans, root, device);
1051         if (ret)
1052                 goto out_close_bdev;
1053
1054         total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1055         btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1056                                     total_bytes + device->total_bytes);
1057
1058         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1059         btrfs_set_super_num_devices(&root->fs_info->super_copy,
1060                                     total_bytes + 1);
1061
1062         list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1063         list_add(&device->dev_alloc_list,
1064                  &root->fs_info->fs_devices->alloc_list);
1065         root->fs_info->fs_devices->num_devices++;
1066         root->fs_info->fs_devices->open_devices++;
1067 out:
1068         btrfs_end_transaction(trans, root);
1069         mutex_unlock(&root->fs_info->chunk_mutex);
1070         mutex_unlock(&root->fs_info->alloc_mutex);
1071
1072         return ret;
1073
1074 out_close_bdev:
1075         close_bdev_excl(bdev);
1076         goto out;
1077 }
1078
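/* write the current in-memory fields of @device back into its DEV_ITEM */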
1079 int btrfs_update_device(struct btrfs_trans_handle *trans,
1080                         struct btrfs_device *device)
1081 {
1082         int ret;
1083         struct btrfs_path *path;
1084         struct btrfs_root *root;
1085         struct btrfs_dev_item *dev_item;
1086         struct extent_buffer *leaf;
1087         struct btrfs_key key;
1088
1089         root = device->dev_root->fs_info->chunk_root;
1090
1091         path = btrfs_alloc_path();
1092         if (!path)
1093                 return -ENOMEM;
1094
1095         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1096         key.type = BTRFS_DEV_ITEM_KEY;
1097         key.offset = device->devid;
1098
1099         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1100         if (ret < 0)
1101                 goto out;
1102
1103         if (ret > 0) {
1104                 ret = -ENOENT;
1105                 goto out;
1106         }
1107
1108         leaf = path->nodes[0];
1109         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1110
1111         btrfs_set_device_id(leaf, dev_item, device->devid);
1112         btrfs_set_device_type(leaf, dev_item, device->type);
1113         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1114         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1115         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1116         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1117         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1118         btrfs_mark_buffer_dirty(leaf);
1119
1120 out:
1121         btrfs_free_path(path);
1122         return ret;
1123 }
1124
1125 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1126                       struct btrfs_device *device, u64 new_size)
1127 {
1128         struct btrfs_super_block *super_copy =
1129                 &device->dev_root->fs_info->super_copy;
1130         u64 old_total = btrfs_super_total_bytes(super_copy);
1131         u64 diff = new_size - device->total_bytes;
1132
1133         btrfs_set_super_total_bytes(super_copy, old_total + diff);
1134         return btrfs_update_device(trans, device);
1135 }
1136
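/* delete the chunk item for (chunk_objectid, chunk_offset) from the chunk tree */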
1137 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1138                             struct btrfs_root *root,
1139                             u64 chunk_tree, u64 chunk_objectid,
1140                             u64 chunk_offset)
1141 {
1142         int ret;
1143         struct btrfs_path *path;
1144         struct btrfs_key key;
1145
1146         root = root->fs_info->chunk_root;
1147         path = btrfs_alloc_path();
1148         if (!path)
1149                 return -ENOMEM;
1150
1151         key.objectid = chunk_objectid;
1152         key.offset = chunk_offset;
1153         key.type = BTRFS_CHUNK_ITEM_KEY;
1154
1155         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1156         BUG_ON(ret);
1157
1158         ret = btrfs_del_item(trans, root, path);
1159         BUG_ON(ret);
1160
1161         btrfs_free_path(path);
1162         return 0;
1163 }
1164
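/*
 * remove the matching chunk from the superblock's sys_chunk_array by
 * sliding the rest of the array down over it and shrinking the
 * recorded array size.
 */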
1165 int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1166                         chunk_offset)
1167 {
1168         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1169         struct btrfs_disk_key *disk_key;
1170         struct btrfs_chunk *chunk;
1171         u8 *ptr;
1172         int ret = 0;
1173         u32 num_stripes;
1174         u32 array_size;
1175         u32 len = 0;
1176         u32 cur;
1177         struct btrfs_key key;
1178
1179         array_size = btrfs_super_sys_array_size(super_copy);
1180
1181         ptr = super_copy->sys_chunk_array;
1182         cur = 0;
1183
1184         while (cur < array_size) {
1185                 disk_key = (struct btrfs_disk_key *)ptr;
1186                 btrfs_disk_key_to_cpu(&key, disk_key);
1187
1188                 len = sizeof(*disk_key);
1189
1190                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1191                         chunk = (struct btrfs_chunk *)(ptr + len);
1192                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1193                         len += btrfs_chunk_item_size(num_stripes);
1194                 } else {
1195                         ret = -EIO;
1196                         break;
1197                 }
1198                 if (key.objectid == chunk_objectid &&
1199                     key.offset == chunk_offset) {
1200                         memmove(ptr, ptr + len, array_size - (cur + len));
1201                         array_size -= len;
1202                         btrfs_set_super_sys_array_size(super_copy, array_size);
1203                 } else {
1204                         ptr += len;
1205                         cur += len;
1206                 }
1207         }
1208         return ret;
1209 }
1210
1211
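/*
 * empty a chunk and delete it: relocate the extents it holds, free the
 * per-device extents that back it, remove the chunk item (and its
 * sys_chunk_array copy for system chunks) and drop the extent mapping.
 */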
1212 int btrfs_relocate_chunk(struct btrfs_root *root,
1213                          u64 chunk_tree, u64 chunk_objectid,
1214                          u64 chunk_offset)
1215 {
1216         struct extent_map_tree *em_tree;
1217         struct btrfs_root *extent_root;
1218         struct btrfs_trans_handle *trans;
1219         struct extent_map *em;
1220         struct map_lookup *map;
1221         int ret;
1222         int i;
1223
1224         printk("btrfs relocating chunk %llu\n",
1225                (unsigned long long)chunk_offset);
1226         root = root->fs_info->chunk_root;
1227         extent_root = root->fs_info->extent_root;
1228         em_tree = &root->fs_info->mapping_tree.map_tree;
1229
1230         /* step one, relocate all the extents inside this chunk */
1231         ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
1232         BUG_ON(ret);
1233
1234         trans = btrfs_start_transaction(root, 1);
1235         BUG_ON(!trans);
1236
1237         /*
1238          * step two, delete the device extents and the
1239          * chunk tree entries
1240          */
1241         spin_lock(&em_tree->lock);
1242         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1243         spin_unlock(&em_tree->lock);
1244
1245         BUG_ON(em->start > chunk_offset ||
1246                em->start + em->len < chunk_offset);
1247         map = (struct map_lookup *)em->bdev;
1248
1249         for (i = 0; i < map->num_stripes; i++) {
1250                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1251                                             map->stripes[i].physical);
1252                 BUG_ON(ret);
1253
1254                 if (map->stripes[i].dev) {
1255                         ret = btrfs_update_device(trans, map->stripes[i].dev);
1256                         BUG_ON(ret);
1257                 }
1258         }
1259         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1260                                chunk_offset);
1261
1262         BUG_ON(ret);
1263
1264         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1265                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1266                 BUG_ON(ret);
1267         }
1268
1269         spin_lock(&em_tree->lock);
1270         remove_extent_mapping(em_tree, em);
1271         kfree(map);
1272         em->bdev = NULL;
1273
1274         /* once for the tree */
1275         free_extent_map(em);
1276         spin_unlock(&em_tree->lock);
1277
1278         /* once for us */
1279         free_extent_map(em);
1280
1281         btrfs_end_transaction(trans, root);
1282         return 0;
1283 }
1284
1285 static u64 div_factor(u64 num, int factor)
1286 {
1287         if (factor == 10)
1288                 return num;
1289         num *= factor;
1290         do_div(num, 10);
1291         return num;
1292 }
1293
1294
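/*
 * very simple balance: first make some room on every device by briefly
 * shrinking it and growing it back (which relocates chunks off the
 * tail), then walk the chunk tree from the end and relocate every
 * chunk except chunk zero.
 */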
1295 int btrfs_balance(struct btrfs_root *dev_root)
1296 {
1297         int ret;
1298         struct list_head *cur;
1299         struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1300         struct btrfs_device *device;
1301         u64 old_size;
1302         u64 size_to_free;
1303         struct btrfs_path *path;
1304         struct btrfs_key key;
1305         struct btrfs_chunk *chunk;
1306         struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1307         struct btrfs_trans_handle *trans;
1308         struct btrfs_key found_key;
1309
1310
1311         BUG(); /* FIXME, needs locking */
1312
1313         dev_root = dev_root->fs_info->dev_root;
1314
1315         /* step one make some room on all the devices */
1316         list_for_each(cur, devices) {
1317                 device = list_entry(cur, struct btrfs_device, dev_list);
1318                 old_size = device->total_bytes;
1319                 size_to_free = div_factor(old_size, 1);
1320                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1321                 if (device->total_bytes - device->bytes_used > size_to_free)
1322                         continue;
1323
1324                 ret = btrfs_shrink_device(device, old_size - size_to_free);
1325                 BUG_ON(ret);
1326
1327                 trans = btrfs_start_transaction(dev_root, 1);
1328                 BUG_ON(!trans);
1329
1330                 ret = btrfs_grow_device(trans, device, old_size);
1331                 BUG_ON(ret);
1332
1333                 btrfs_end_transaction(trans, dev_root);
1334         }
1335
1336         /* step two, relocate all the chunks */
1337         path = btrfs_alloc_path();
1338         BUG_ON(!path);
1339
1340         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1341         key.offset = (u64)-1;
1342         key.type = BTRFS_CHUNK_ITEM_KEY;
1343
1344         while(1) {
1345                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1346                 if (ret < 0)
1347                         goto error;
1348
1349                 /*
1350                  * this shouldn't happen, it means the last relocate
1351                  * failed
1352                  */
1353                 if (ret == 0)
1354                         break;
1355
1356                 ret = btrfs_previous_item(chunk_root, path, 0,
1357                                           BTRFS_CHUNK_ITEM_KEY);
1358                 if (ret) {
1359                         break;
1360                 }
1361                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1362                                       path->slots[0]);
1363                 if (found_key.objectid != key.objectid)
1364                         break;
1365                 chunk = btrfs_item_ptr(path->nodes[0],
1366                                        path->slots[0],
1367                                        struct btrfs_chunk);
1368                 key.offset = found_key.offset;
1369                 /* chunk zero is special */
1370                 if (key.offset == 0)
1371                         break;
1372
1373                 ret = btrfs_relocate_chunk(chunk_root,
1374                                            chunk_root->root_key.objectid,
1375                                            found_key.objectid,
1376                                            found_key.offset);
1377                 BUG_ON(ret);
1378                 btrfs_release_path(chunk_root, path);
1379         }
1380         ret = 0;
1381 error:
1382         btrfs_free_path(path);
1383         return ret;
1384 }
1385
1386 /*
1387  * shrinking a device means finding all of the device extents past
1388  * the new size, and then following the back refs to the chunks.
1389  * The chunk relocation code actually frees the device extent
1390  */
1391 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1392 {
1393         struct btrfs_trans_handle *trans;
1394         struct btrfs_root *root = device->dev_root;
1395         struct btrfs_dev_extent *dev_extent = NULL;
1396         struct btrfs_path *path;
1397         u64 length;
1398         u64 chunk_tree;
1399         u64 chunk_objectid;
1400         u64 chunk_offset;
1401         int ret;
1402         int slot;
1403         struct extent_buffer *l;
1404         struct btrfs_key key;
1405         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1406         u64 old_total = btrfs_super_total_bytes(super_copy);
1407         u64 diff = device->total_bytes - new_size;
1408
1409
1410         path = btrfs_alloc_path();
1411         if (!path)
1412                 return -ENOMEM;
1413
1414         trans = btrfs_start_transaction(root, 1);
1415         if (!trans) {
1416                 ret = -ENOMEM;
1417                 goto done;
1418         }
1419
1420         path->reada = 2;
1421
1422         device->total_bytes = new_size;
1423         ret = btrfs_update_device(trans, device);
1424         if (ret) {
1425                 btrfs_end_transaction(trans, root);
1426                 goto done;
1427         }
1428         WARN_ON(diff > old_total);
1429         btrfs_set_super_total_bytes(super_copy, old_total - diff);
1430         btrfs_end_transaction(trans, root);
1431
1432         key.objectid = device->devid;
1433         key.offset = (u64)-1;
1434         key.type = BTRFS_DEV_EXTENT_KEY;
1435
1436         while (1) {
1437                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1438                 if (ret < 0)
1439                         goto done;
1440
1441                 ret = btrfs_previous_item(root, path, 0, key.type);
1442                 if (ret < 0)
1443                         goto done;
1444                 if (ret) {
1445                         ret = 0;
1446                         goto done;
1447                 }
1448
1449                 l = path->nodes[0];
1450                 slot = path->slots[0];
1451                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1452
1453                 if (key.objectid != device->devid)
1454                         goto done;
1455
1456                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1457                 length = btrfs_dev_extent_length(l, dev_extent);
1458
1459                 if (key.offset + length <= new_size)
1460                         goto done;
1461
1462                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1463                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1464                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1465                 btrfs_release_path(root, path);
1466
1467                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1468                                            chunk_offset);
1469                 if (ret)
1470                         goto done;
1471         }
1472
1473 done:
1474         btrfs_free_path(path);
1475         return ret;
1476 }
1477
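/*
 * append @key and @chunk to the superblock's sys_chunk_array so that
 * system chunks can be found before the chunk tree itself is readable.
 */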
1478 int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1479                            struct btrfs_root *root,
1480                            struct btrfs_key *key,
1481                            struct btrfs_chunk *chunk, int item_size)
1482 {
1483         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1484         struct btrfs_disk_key disk_key;
1485         u32 array_size;
1486         u8 *ptr;
1487
1488         array_size = btrfs_super_sys_array_size(super_copy);
1489         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1490                 return -EFBIG;
1491
1492         ptr = super_copy->sys_chunk_array + array_size;
1493         btrfs_cpu_key_to_disk(&disk_key, key);
1494         memcpy(ptr, &disk_key, sizeof(disk_key));
1495         ptr += sizeof(disk_key);
1496         memcpy(ptr, chunk, item_size);
1497         item_size += sizeof(disk_key);
1498         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1499         return 0;
1500 }
1501
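/*
 * how many bytes of usable space a chunk of @type provides for a given
 * per-stripe size: mirrored profiles (raid1/dup) give one stripe's
 * worth, raid10 gives num_stripes / sub_stripes of them, everything
 * else gives num_stripes of them.
 */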
1502 static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
1503                                int sub_stripes)
1504 {
1505         if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1506                 return calc_size;
1507         else if (type & BTRFS_BLOCK_GROUP_RAID10)
1508                 return calc_size * (num_stripes / sub_stripes);
1509         else
1510                 return calc_size * num_stripes;
1511 }
1512
1513
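     /*
      * allocate a new chunk of the given type: pick a stripe size, build a
      * list of devices with enough free room, reserve a dev extent on each
      * one and insert the matching chunk item and in-memory mapping
      */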
1514 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1515                       struct btrfs_root *extent_root, u64 *start,
1516                       u64 *num_bytes, u64 type)
1517 {
1518         u64 dev_offset;
1519         struct btrfs_fs_info *info = extent_root->fs_info;
1520         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
1521         struct btrfs_path *path;
1522         struct btrfs_stripe *stripes;
1523         struct btrfs_device *device = NULL;
1524         struct btrfs_chunk *chunk;
1525         struct list_head private_devs;
1526         struct list_head *dev_list;
1527         struct list_head *cur;
1528         struct extent_map_tree *em_tree;
1529         struct map_lookup *map;
1530         struct extent_map *em;
1531         int min_stripe_size = 1 * 1024 * 1024;
1532         u64 physical;
1533         u64 calc_size = 1024 * 1024 * 1024;
1534         u64 max_chunk_size = calc_size;
1535         u64 min_free;
1536         u64 avail;
1537         u64 max_avail = 0;
1538         u64 percent_max;
1539         int num_stripes = 1;
1540         int min_stripes = 1;
1541         int sub_stripes = 0;
1542         int looped = 0;
1543         int ret;
1544         int index;
1545         int stripe_len = 64 * 1024;
1546         struct btrfs_key key;
1547
1548         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1549             (type & BTRFS_BLOCK_GROUP_DUP)) {
1550                 WARN_ON(1);
1551                 type &= ~BTRFS_BLOCK_GROUP_DUP;
1552         }
1553         dev_list = &extent_root->fs_info->fs_devices->alloc_list;
1554         if (list_empty(dev_list))
1555                 return -ENOSPC;
1556
1557         if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1558                 num_stripes = extent_root->fs_info->fs_devices->open_devices;
1559                 min_stripes = 2;
1560         }
1561         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1562                 num_stripes = 2;
1563                 min_stripes = 2;
1564         }
1565         if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1566                 num_stripes = min_t(u64, 2,
1567                             extent_root->fs_info->fs_devices->open_devices);
1568                 if (num_stripes < 2)
1569                         return -ENOSPC;
1570                 min_stripes = 2;
1571         }
1572         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1573                 num_stripes = extent_root->fs_info->fs_devices->open_devices;
1574                 if (num_stripes < 4)
1575                         return -ENOSPC;
1576                 num_stripes &= ~(u32)1;
1577                 sub_stripes = 2;
1578                 min_stripes = 4;
1579         }
1580
1581         if (type & BTRFS_BLOCK_GROUP_DATA) {
1582                 max_chunk_size = 10 * calc_size;
1583                 min_stripe_size = 64 * 1024 * 1024;
1584         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1585                 max_chunk_size = 4 * calc_size;
1586                 min_stripe_size = 32 * 1024 * 1024;
1587         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1588                 calc_size = 8 * 1024 * 1024;
1589                 max_chunk_size = calc_size * 2;
1590                 min_stripe_size = 1 * 1024 * 1024;
1591         }
1592
1593         path = btrfs_alloc_path();
1594         if (!path)
1595                 return -ENOMEM;
1596
1597         /* we don't want a chunk larger than 10% of the FS */
1598         percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
1599         max_chunk_size = min(percent_max, max_chunk_size);
1600
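             /*
              * we come back here if the devices can't hold num_stripes
              * stripes of calc_size bytes: either with fewer stripes or
              * with a smaller calc_size
              */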
1601 again:
1602         if (calc_size * num_stripes > max_chunk_size) {
1603                 calc_size = max_chunk_size;
1604                 do_div(calc_size, num_stripes);
1605                 do_div(calc_size, stripe_len);
1606                 calc_size *= stripe_len;
1607         }
1608         /* we don't want tiny stripes */
1609         calc_size = max_t(u64, min_stripe_size, calc_size);
1610
1611         do_div(calc_size, stripe_len);
1612         calc_size *= stripe_len;
1613
1614         INIT_LIST_HEAD(&private_devs);
1615         cur = dev_list->next;
1616         index = 0;
1617
1618         if (type & BTRFS_BLOCK_GROUP_DUP)
1619                 min_free = calc_size * 2;
1620         else
1621                 min_free = calc_size;
1622
1623         /* we add 1MB because we never use the first 1MB of the device */
1624         min_free += 1024 * 1024;
1625
1626         /* build a private list of devices we will allocate from */
1627         while(index < num_stripes) {
1628                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1629
1630                 if (device->total_bytes > device->bytes_used)
1631                         avail = device->total_bytes - device->bytes_used;
1632                 else
1633                         avail = 0;
1634                 cur = cur->next;
1635
1636                 if (device->in_fs_metadata && avail >= min_free) {
1637                         u64 ignored_start = 0;
1638                         ret = find_free_dev_extent(trans, device, path,
1639                                                    min_free,
1640                                                    &ignored_start);
1641                         if (ret == 0) {
1642                                 list_move_tail(&device->dev_alloc_list,
1643                                                &private_devs);
1644                                 index++;
1645                                 if (type & BTRFS_BLOCK_GROUP_DUP)
1646                                         index++;
1647                         }
1648                 } else if (device->in_fs_metadata && avail > max_avail)
1649                         max_avail = avail;
1650                 if (cur == dev_list)
1651                         break;
1652         }
1653         if (index < num_stripes) {
1654                 list_splice(&private_devs, dev_list);
1655                 if (index >= min_stripes) {
1656                         num_stripes = index;
1657                         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1658                                 num_stripes /= sub_stripes;
1659                                 num_stripes *= sub_stripes;
1660                         }
1661                         looped = 1;
1662                         goto again;
1663                 }
1664                 if (!looped && max_avail > 0) {
1665                         looped = 1;
1666                         calc_size = max_avail;
1667                         goto again;
1668                 }
1669                 btrfs_free_path(path);
1670                 return -ENOSPC;
1671         }
1672         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1673         key.type = BTRFS_CHUNK_ITEM_KEY;
1674         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1675                               &key.offset);
1676         if (ret) {
1677                 btrfs_free_path(path);
1678                 return ret;
1679         }
1680
1681         chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1682         if (!chunk) {
1683                 btrfs_free_path(path);
1684                 return -ENOMEM;
1685         }
1686
1687         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1688         if (!map) {
1689                 kfree(chunk);
1690                 btrfs_free_path(path);
1691                 return -ENOMEM;
1692         }
1693         btrfs_free_path(path);
1694         path = NULL;
1695
1696         stripes = &chunk->stripe;
1697         *num_bytes = chunk_bytes_by_type(type, calc_size,
1698                                          num_stripes, sub_stripes);
1699
1700         index = 0;
1701         while(index < num_stripes) {
1702                 struct btrfs_stripe *stripe;
1703                 BUG_ON(list_empty(&private_devs));
1704                 cur = private_devs.next;
1705                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1706
1707                 /* loop over this device again if we're doing a dup group */
1708                 if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
1709                     (index == num_stripes - 1))
1710                         list_move_tail(&device->dev_alloc_list, dev_list);
1711
1712                 ret = btrfs_alloc_dev_extent(trans, device,
1713                              info->chunk_root->root_key.objectid,
1714                              BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
1715                              calc_size, &dev_offset);
1716                 BUG_ON(ret);
1717                 device->bytes_used += calc_size;
1718                 ret = btrfs_update_device(trans, device);
1719                 BUG_ON(ret);
1720
1721                 map->stripes[index].dev = device;
1722                 map->stripes[index].physical = dev_offset;
1723                 stripe = stripes + index;
1724                 btrfs_set_stack_stripe_devid(stripe, device->devid);
1725                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1726                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1727                 physical = dev_offset;
1728                 index++;
1729         }
1730         BUG_ON(!list_empty(&private_devs));
1731
1732         /* key was set above */
1733         btrfs_set_stack_chunk_length(chunk, *num_bytes);
1734         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1735         btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1736         btrfs_set_stack_chunk_type(chunk, type);
1737         btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1738         btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1739         btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1740         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
1741         btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1742         map->sector_size = extent_root->sectorsize;
1743         map->stripe_len = stripe_len;
1744         map->io_align = stripe_len;
1745         map->io_width = stripe_len;
1746         map->type = type;
1747         map->num_stripes = num_stripes;
1748         map->sub_stripes = sub_stripes;
1749
1750         ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1751                                 btrfs_chunk_item_size(num_stripes));
1752         BUG_ON(ret);
1753         *start = key.offset;
1754
1755         em = alloc_extent_map(GFP_NOFS);
1756         if (!em) {
                     kfree(map);
                     kfree(chunk);
1757                 return -ENOMEM;
             }
1758         em->bdev = (struct block_device *)map;
1759         em->start = key.offset;
1760         em->len = *num_bytes;
1761         em->block_start = 0;
1762
1763         if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1764                 ret = btrfs_add_system_chunk(trans, chunk_root, &key,
1765                                     chunk, btrfs_chunk_item_size(num_stripes));
1766                 BUG_ON(ret);
1767         }
1768         kfree(chunk);
1769
1770         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
1771         spin_lock(&em_tree->lock);
1772         ret = add_extent_mapping(em_tree, em);
1773         spin_unlock(&em_tree->lock);
1774         BUG_ON(ret);
1775         free_extent_map(em);
1776         return ret;
1777 }
1778
1779 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
1780 {
1781         extent_map_tree_init(&tree->map_tree, GFP_NOFS);
1782 }
1783
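     /* drop every mapping (and the map_lookup behind em->bdev) from the tree */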
1784 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
1785 {
1786         struct extent_map *em;
1787
1788         while(1) {
1789                 spin_lock(&tree->map_tree.lock);
1790                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
1791                 if (em)
1792                         remove_extent_mapping(&tree->map_tree, em);
1793                 spin_unlock(&tree->map_tree.lock);
1794                 if (!em)
1795                         break;
1796                 kfree(em->bdev);
1797                 /* once for us */
1798                 free_extent_map(em);
1799                 /* once for the tree */
1800                 free_extent_map(em);
1801         }
1802 }
1803
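     /*
      * return how many copies of the data in [logical, logical + len) exist,
      * based on the raid type of the chunk that covers it
      */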
1804 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
1805 {
1806         struct extent_map *em;
1807         struct map_lookup *map;
1808         struct extent_map_tree *em_tree = &map_tree->map_tree;
1809         int ret;
1810
1811         spin_lock(&em_tree->lock);
1812         em = lookup_extent_mapping(em_tree, logical, len);
1813         spin_unlock(&em_tree->lock);
1814         BUG_ON(!em);
1815
1816         BUG_ON(em->start > logical || em->start + em->len < logical);
1817         map = (struct map_lookup *)em->bdev;
1818         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
1819                 ret = map->num_stripes;
1820         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1821                 ret = map->sub_stripes;
1822         else
1823                 ret = 1;
1824         free_extent_map(em);
1825         return ret;
1826 }
1827
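     /*
      * pick a mirror to read from: use the optimal stripe if its device is
      * present, otherwise the first stripe in [first, first + num) that is
      */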
1828 static int find_live_mirror(struct map_lookup *map, int first, int num,
1829                             int optimal)
1830 {
1831         int i;
1832         if (map->stripes[optimal].dev->bdev)
1833                 return optimal;
1834         for (i = first; i < first + num; i++) {
1835                 if (map->stripes[i].dev->bdev)
1836                         return i;
1837         }
1838         /* we couldn't find a live mirror.  Just return something
1839          * and the io error handling code will clean up eventually
1840          */
1841         return optimal;
1842 }
1843
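     /*
      * map a logical range onto the physical stripes that back it.  Reads
      * are sent to a single mirror, writes get every copy in *multi_ret,
      * and *length is trimmed to what fits inside one stripe for striped
      * profiles.  When unplug_page is set the backing queues are unplugged
      * instead of building a multi-bio
      */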
1844 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1845                              u64 logical, u64 *length,
1846                              struct btrfs_multi_bio **multi_ret,
1847                              int mirror_num, struct page *unplug_page)
1848 {
1849         struct extent_map *em;
1850         struct map_lookup *map;
1851         struct extent_map_tree *em_tree = &map_tree->map_tree;
1852         u64 offset;
1853         u64 stripe_offset;
1854         u64 stripe_nr;
1855         int stripes_allocated = 8;
1856         int stripes_required = 1;
1857         int stripe_index;
1858         int i;
1859         int num_stripes;
1860         int max_errors = 0;
1861         struct btrfs_multi_bio *multi = NULL;
1862
1863         if (multi_ret && !(rw & (1 << BIO_RW))) {
1864                 stripes_allocated = 1;
1865         }
1866 again:
1867         if (multi_ret) {
1868                 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1869                                 GFP_NOFS);
1870                 if (!multi)
1871                         return -ENOMEM;
1872
1873                 atomic_set(&multi->error, 0);
1874         }
1875
1876         spin_lock(&em_tree->lock);
1877         em = lookup_extent_mapping(em_tree, logical, *length);
1878         spin_unlock(&em_tree->lock);
1879
1880         if (!em && unplug_page)
1881                 return 0;
1882
1883         if (!em) {
1884                 printk("unable to find logical %Lu len %Lu\n", logical, *length);
1885                 BUG();
1886         }
1887
1888         BUG_ON(em->start > logical || em->start + em->len < logical);
1889         map = (struct map_lookup *)em->bdev;
1890         offset = logical - em->start;
1891
1892         if (mirror_num > map->num_stripes)
1893                 mirror_num = 0;
1894
1895         /* if our multi bio struct is too small, back off and try again */
1896         if (rw & (1 << BIO_RW)) {
1897                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1898                                  BTRFS_BLOCK_GROUP_DUP)) {
1899                         stripes_required = map->num_stripes;
1900                         max_errors = 1;
1901                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1902                         stripes_required = map->sub_stripes;
1903                         max_errors = 1;
1904                 }
1905         }
1906         if (multi_ret && rw == WRITE &&
1907             stripes_allocated < stripes_required) {
1908                 stripes_allocated = map->num_stripes;
1909                 free_extent_map(em);
1910                 kfree(multi);
1911                 goto again;
1912         }
1913         stripe_nr = offset;
1914         /*
1915          * stripe_nr counts the total number of stripes we have to stride
1916          * to get to this block
1917          */
1918         do_div(stripe_nr, map->stripe_len);
1919
1920         stripe_offset = stripe_nr * map->stripe_len;
1921         BUG_ON(offset < stripe_offset);
1922
1923         /* stripe_offset is the offset of this block in its stripe */
1924         stripe_offset = offset - stripe_offset;
1925
1926         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1927                          BTRFS_BLOCK_GROUP_RAID10 |
1928                          BTRFS_BLOCK_GROUP_DUP)) {
1929                 /* we limit the length of each bio to what fits in a stripe */
1930                 *length = min_t(u64, em->len - offset,
1931                               map->stripe_len - stripe_offset);
1932         } else {
1933                 *length = em->len - offset;
1934         }
1935
1936         if (!multi_ret && !unplug_page)
1937                 goto out;
1938
1939         num_stripes = 1;
1940         stripe_index = 0;
1941         if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1942                 if (unplug_page || (rw & (1 << BIO_RW)))
1943                         num_stripes = map->num_stripes;
1944                 else if (mirror_num)
1945                         stripe_index = mirror_num - 1;
1946                 else {
1947                         stripe_index = find_live_mirror(map, 0,
1948                                             map->num_stripes,
1949                                             current->pid % map->num_stripes);
1950                 }
1951
1952         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1953                 if (rw & (1 << BIO_RW))
1954                         num_stripes = map->num_stripes;
1955                 else if (mirror_num)
1956                         stripe_index = mirror_num - 1;
1957
1958         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1959                 int factor = map->num_stripes / map->sub_stripes;
1960
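                     /*
                      * each group of sub_stripes stripes holds a copy of the
                      * same data; pick the group, then the first stripe in it
                      */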
1961                 stripe_index = do_div(stripe_nr, factor);
1962                 stripe_index *= map->sub_stripes;
1963
1964                 if (unplug_page || (rw & (1 << BIO_RW)))
1965                         num_stripes = map->sub_stripes;
1966                 else if (mirror_num)
1967                         stripe_index += mirror_num - 1;
1968                 else {
1969                         stripe_index = find_live_mirror(map, stripe_index,
1970                                               map->sub_stripes, stripe_index +
1971                                               current->pid % map->sub_stripes);
1972                 }
1973         } else {
1974                 /*
1975                  * after this do_div call, stripe_nr is the number of stripes
1976                  * on this device we have to walk to find the data, and
1977                  * stripe_index is the number of our device in the stripe array
1978                  */
1979                 stripe_index = do_div(stripe_nr, map->num_stripes);
1980         }
1981         BUG_ON(stripe_index >= map->num_stripes);
1982
1983         for (i = 0; i < num_stripes; i++) {
1984                 if (unplug_page) {
1985                         struct btrfs_device *device;
1986                         struct backing_dev_info *bdi;
1987
1988                         device = map->stripes[stripe_index].dev;
1989                         if (device->bdev) {
1990                                 bdi = blk_get_backing_dev_info(device->bdev);
1991                                 if (bdi->unplug_io_fn) {
1992                                         bdi->unplug_io_fn(bdi, unplug_page);
1993                                 }
1994                         }
1995                 } else {
1996                         multi->stripes[i].physical =
1997                                 map->stripes[stripe_index].physical +
1998                                 stripe_offset + stripe_nr * map->stripe_len;
1999                         multi->stripes[i].dev = map->stripes[stripe_index].dev;
2000                 }
2001                 stripe_index++;
2002         }
2003         if (multi_ret) {
2004                 *multi_ret = multi;
2005                 multi->num_stripes = num_stripes;
2006                 multi->max_errors = max_errors;
2007         }
2008 out:
2009         free_extent_map(em);
2010         return 0;
2011 }
2012
2013 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2014                       u64 logical, u64 *length,
2015                       struct btrfs_multi_bio **multi_ret, int mirror_num)
2016 {
2017         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2018                                  mirror_num, NULL);
2019 }
2020
2021 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2022                       u64 logical, struct page *page)
2023 {
2024         u64 length = PAGE_CACHE_SIZE;
2025         return __btrfs_map_block(map_tree, READ, logical, &length,
2026                                  NULL, 0, page);
2027 }
2028
2029
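     /*
      * completion handler for one stripe of a multi-stripe bio.  The original
      * end_io is only run once every stripe has finished, and an error is
      * passed up only if more stripes failed than max_errors allows
      */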
2030 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2031 static void end_bio_multi_stripe(struct bio *bio, int err)
2032 #else
2033 static int end_bio_multi_stripe(struct bio *bio,
2034                                    unsigned int bytes_done, int err)
2035 #endif
2036 {
2037         struct btrfs_multi_bio *multi = bio->bi_private;
2038
2039 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2040         if (bio->bi_size)
2041                 return 1;
2042 #endif
2043         if (err)
2044                 atomic_inc(&multi->error);
2045
2046         if (atomic_dec_and_test(&multi->stripes_pending)) {
2047                 bio->bi_private = multi->private;
2048                 bio->bi_end_io = multi->end_io;
2049                 /* only send an error to the higher layers if it is
2050                  * beyond the tolerance of the multi-bio
2051                  */
2052                 if (atomic_read(&multi->error) > multi->max_errors) {
2053                         err = -EIO;
2054                 } else if (err) {
2055                         /*
2056                          * this bio is actually up to date, we didn't
2057                          * go over the max number of errors
2058                          */
2059                         set_bit(BIO_UPTODATE, &bio->bi_flags);
2060                         err = 0;
2061                 }
2062                 kfree(multi);
2063
2064 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2065                 bio_endio(bio, bio->bi_size, err);
2066 #else
2067                 bio_endio(bio, err);
2068 #endif
2069         } else {
2070                 bio_put(bio);
2071         }
2072 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2073         return 0;
2074 #endif
2075 }
2076
2077 struct async_sched {
2078         struct bio *bio;
2079         int rw;
2080         struct btrfs_fs_info *info;
2081         struct btrfs_work work;
2082 };
2083
2084 /*
2085  * see run_scheduled_bios for a description of why bios are collected for
2086  * async submit.
2087  *
2088  * This will add one bio to the pending list for a device and make sure
2089  * the work struct is scheduled.
2090  */
2091 int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
2092                  int rw, struct bio *bio)
2093 {
2094         int should_queue = 1;
2095
2096         /* don't bother with additional async steps for reads, right now */
2097         if (!(rw & (1 << BIO_RW))) {
2098                 submit_bio(rw, bio);
2099                 return 0;
2100         }
2101
2102         /*
2103          * nr_async_submits allows us to reliably return congestion to the
2104          * higher layers.  Otherwise, the async bio makes it appear we have
2105          * made progress against dirty pages when we've really just put it
2106          * on a queue for later
2107          */
2108         atomic_inc(&root->fs_info->nr_async_submits);
2109         bio->bi_next = NULL;
2110         bio->bi_rw |= rw;
2111
2112         spin_lock(&device->io_lock);
2113
2114         if (device->pending_bio_tail)
2115                 device->pending_bio_tail->bi_next = bio;
2116
2117         device->pending_bio_tail = bio;
2118         if (!device->pending_bios)
2119                 device->pending_bios = bio;
2120         if (device->running_pending)
2121                 should_queue = 0;
2122
2123         spin_unlock(&device->io_lock);
2124
2125         if (should_queue)
2126                 btrfs_queue_worker(&root->fs_info->submit_workers,
2127                                    &device->work);
2128         return 0;
2129 }
2130
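     /*
      * map a bio onto the devices that back its logical address and submit
      * it.  When the mapping returns more than one stripe the bio is cloned
      * once per stripe and completed through end_bio_multi_stripe
      */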
2131 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2132                   int mirror_num, int async_submit)
2133 {
2134         struct btrfs_mapping_tree *map_tree;
2135         struct btrfs_device *dev;
2136         struct bio *first_bio = bio;
2137         u64 logical = (u64)bio->bi_sector << 9;
2138         u64 length = 0;
2139         u64 map_length;
2140         struct btrfs_multi_bio *multi = NULL;
2141         int ret;
2142         int dev_nr = 0;
2143         int total_devs = 1;
2144
2145         length = bio->bi_size;
2146         map_tree = &root->fs_info->mapping_tree;
2147         map_length = length;
2148
2149         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2150                               mirror_num);
2151         BUG_ON(ret);
2152
2153         total_devs = multi->num_stripes;
2154         if (map_length < length) {
2155                 printk("mapping failed logical %Lu bio len %Lu "
2156                        "len %Lu\n", logical, length, map_length);
2157                 BUG();
2158         }
2159         multi->end_io = first_bio->bi_end_io;
2160         multi->private = first_bio->bi_private;
2161         atomic_set(&multi->stripes_pending, multi->num_stripes);
2162
2163         while(dev_nr < total_devs) {
2164                 if (total_devs > 1) {
2165                         if (dev_nr < total_devs - 1) {
2166                                 bio = bio_clone(first_bio, GFP_NOFS);
2167                                 BUG_ON(!bio);
2168                         } else {
2169                                 bio = first_bio;
2170                         }
2171                         bio->bi_private = multi;
2172                         bio->bi_end_io = end_bio_multi_stripe;
2173                 }
2174                 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
2175                 dev = multi->stripes[dev_nr].dev;
2176                 if (dev && dev->bdev) {
2177                         bio->bi_bdev = dev->bdev;
2178                         if (async_submit)
2179                                 schedule_bio(root, dev, rw, bio);
2180                         else
2181                                 submit_bio(rw, bio);
2182                 } else {
2183                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
2184                         bio->bi_sector = logical >> 9;
2185 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2186                         bio_endio(bio, bio->bi_size, -EIO);
2187 #else
2188                         bio_endio(bio, -EIO);
2189 #endif
2190                 }
2191                 dev_nr++;
2192         }
2193         if (total_devs == 1)
2194                 kfree(multi);
2195         return 0;
2196 }
2197
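     /* find a device by devid (and optionally uuid) among the known devices */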
2198 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
2199                                        u8 *uuid)
2200 {
2201         struct list_head *head = &root->fs_info->fs_devices->devices;
2202
2203         return __find_device(head, devid, uuid);
2204 }
2205
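     /*
      * create a placeholder btrfs_device for a devid that is referenced by
      * the metadata but not currently present, so degraded mounts can go on
      */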
2206 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2207                                             u64 devid, u8 *dev_uuid)
2208 {
2209         struct btrfs_device *device;
2210         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2211
2212         device = kzalloc(sizeof(*device), GFP_NOFS);
             if (!device)
                     return NULL;
2213         list_add(&device->dev_list,
2214                  &fs_devices->devices);
2215         list_add(&device->dev_alloc_list,
2216                  &fs_devices->alloc_list);
2217         device->barriers = 1;
2218         device->dev_root = root->fs_info->dev_root;
2219         device->devid = devid;
2220         device->work.func = pending_bios_fn;
2221         fs_devices->num_devices++;
2222         spin_lock_init(&device->io_lock);
2223         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2224         return device;
2225 }
2226
2227
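     /*
      * build the in-memory map_lookup and extent_map for one chunk item and
      * add it to the mapping tree
      */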
2228 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2229                           struct extent_buffer *leaf,
2230                           struct btrfs_chunk *chunk)
2231 {
2232         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2233         struct map_lookup *map;
2234         struct extent_map *em;
2235         u64 logical;
2236         u64 length;
2237         u64 devid;
2238         u8 uuid[BTRFS_UUID_SIZE];
2239         int num_stripes;
2240         int ret;
2241         int i;
2242
2243         logical = key->offset;
2244         length = btrfs_chunk_length(leaf, chunk);
2245
2246         spin_lock(&map_tree->map_tree.lock);
2247         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2248         spin_unlock(&map_tree->map_tree.lock);
2249
2250         /* already mapped? */
2251         if (em && em->start <= logical && em->start + em->len > logical) {
2252                 free_extent_map(em);
2253                 return 0;
2254         } else if (em) {
2255                 free_extent_map(em);
2256         }
2257
2262         em = alloc_extent_map(GFP_NOFS);
2263         if (!em)
2264                 return -ENOMEM;
2265         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2266         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2267         if (!map) {
2268                 free_extent_map(em);
2269                 return -ENOMEM;
2270         }
2271
2272         em->bdev = (struct block_device *)map;
2273         em->start = logical;
2274         em->len = length;
2275         em->block_start = 0;
2276
2277         map->num_stripes = num_stripes;
2278         map->io_width = btrfs_chunk_io_width(leaf, chunk);
2279         map->io_align = btrfs_chunk_io_align(leaf, chunk);
2280         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2281         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2282         map->type = btrfs_chunk_type(leaf, chunk);
2283         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2284         for (i = 0; i < num_stripes; i++) {
2285                 map->stripes[i].physical =
2286                         btrfs_stripe_offset_nr(leaf, chunk, i);
2287                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2288                 read_extent_buffer(leaf, uuid, (unsigned long)
2289                                    btrfs_stripe_dev_uuid_nr(chunk, i),
2290                                    BTRFS_UUID_SIZE);
2291                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
2292
2293                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2294                         kfree(map);
2295                         free_extent_map(em);
2296                         return -EIO;
2297                 }
2298                 if (!map->stripes[i].dev) {
2299                         map->stripes[i].dev =
2300                                 add_missing_dev(root, devid, uuid);
2301                         if (!map->stripes[i].dev) {
2302                                 kfree(map);
2303                                 free_extent_map(em);
2304                                 return -EIO;
2305                         }
2306                 }
2307                 map->stripes[i].dev->in_fs_metadata = 1;
2308         }
2309
2310         spin_lock(&map_tree->map_tree.lock);
2311         ret = add_extent_mapping(&map_tree->map_tree, em);
2312         spin_unlock(&map_tree->map_tree.lock);
2313         BUG_ON(ret);
2314         free_extent_map(em);
2315
2316         return 0;
2317 }
2318
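     /* copy the on-disk fields of a dev item into the in-memory device */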
2319 static int fill_device_from_item(struct extent_buffer *leaf,
2320                                  struct btrfs_dev_item *dev_item,
2321                                  struct btrfs_device *device)
2322 {
2323         unsigned long ptr;
2324
2325         device->devid = btrfs_device_id(leaf, dev_item);
2326         device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2327         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2328         device->type = btrfs_device_type(leaf, dev_item);
2329         device->io_align = btrfs_device_io_align(leaf, dev_item);
2330         device->io_width = btrfs_device_io_width(leaf, dev_item);
2331         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2332
2333         ptr = (unsigned long)btrfs_device_uuid(dev_item);
2334         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2335
2336         return 0;
2337 }
2338
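     /*
      * find (or create a placeholder for) the device described by a dev item
      * and fill it in from the on-disk fields
      */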
2339 static int read_one_dev(struct btrfs_root *root,
2340                         struct extent_buffer *leaf,
2341                         struct btrfs_dev_item *dev_item)
2342 {
2343         struct btrfs_device *device;
2344         u64 devid;
2345         int ret;
2346         u8 dev_uuid[BTRFS_UUID_SIZE];
2347
2348         devid = btrfs_device_id(leaf, dev_item);
2349         read_extent_buffer(leaf, dev_uuid,
2350                            (unsigned long)btrfs_device_uuid(dev_item),
2351                            BTRFS_UUID_SIZE);
2352         device = btrfs_find_device(root, devid, dev_uuid);
2353         if (!device) {
2354                 printk("warning devid %Lu missing\n", devid);
2355                 device = add_missing_dev(root, devid, dev_uuid);
2356                 if (!device)
2357                         return -ENOMEM;
2358         }
2359
2360         fill_device_from_item(leaf, dev_item, device);
2361         device->dev_root = root->fs_info->dev_root;
2362         device->in_fs_metadata = 1;
2363         ret = 0;
2364 #if 0
2365         ret = btrfs_open_device(device);
2366         if (ret) {
2367                 kfree(device);
2368         }
2369 #endif
2370         return ret;
2371 }
2372
2373 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
2374 {
2375         struct btrfs_dev_item *dev_item;
2376
2377         dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
2378                                                      dev_item);
2379         return read_one_dev(root, buf, dev_item);
2380 }
2381
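     /*
      * walk the sys_chunk_array embedded in the super block and set up the
      * system chunk mappings; this has to happen before the chunk tree can
      * be read
      */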
2382 int btrfs_read_sys_array(struct btrfs_root *root)
2383 {
2384         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2385         struct extent_buffer *sb;
2386         struct btrfs_disk_key *disk_key;
2387         struct btrfs_chunk *chunk;
2388         u8 *ptr;
2389         unsigned long sb_ptr;
2390         int ret = 0;
2391         u32 num_stripes;
2392         u32 array_size;
2393         u32 len = 0;
2394         u32 cur;
2395         struct btrfs_key key;
2396
2397         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
2398                                           BTRFS_SUPER_INFO_SIZE);
2399         if (!sb)
2400                 return -ENOMEM;
2401         btrfs_set_buffer_uptodate(sb);
2402         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
2403         array_size = btrfs_super_sys_array_size(super_copy);
2404
2405         ptr = super_copy->sys_chunk_array;
2406         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
2407         cur = 0;
2408
2409         while (cur < array_size) {
2410                 disk_key = (struct btrfs_disk_key *)ptr;
2411                 btrfs_disk_key_to_cpu(&key, disk_key);
2412
2413                 len = sizeof(*disk_key); ptr += len;
2414                 sb_ptr += len;
2415                 cur += len;
2416
2417                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2418                         chunk = (struct btrfs_chunk *)sb_ptr;
2419                         ret = read_one_chunk(root, &key, sb, chunk);
2420                         if (ret)
2421                                 break;
2422                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2423                         len = btrfs_chunk_item_size(num_stripes);
2424                 } else {
2425                         ret = -EIO;
2426                         break;
2427                 }
2428                 ptr += len;
2429                 sb_ptr += len;
2430                 cur += len;
2431         }
2432         free_extent_buffer(sb);
2433         return ret;
2434 }
2435
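     /*
      * read every device item and chunk item out of the chunk tree and build
      * the mapping tree used to translate logical addresses
      */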
2436 int btrfs_read_chunk_tree(struct btrfs_root *root)
2437 {
2438         struct btrfs_path *path;
2439         struct extent_buffer *leaf;
2440         struct btrfs_key key;
2441         struct btrfs_key found_key;
2442         int ret;
2443         int slot;
2444
2445         root = root->fs_info->chunk_root;
2446
2447         path = btrfs_alloc_path();
2448         if (!path)
2449                 return -ENOMEM;
2450
2451         /* first we search for all of the device items, and then we
2452          * read in all of the chunk items.  This way we can create chunk
2453          * mappings that reference all of the devices that are found
2454          */
2455         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2456         key.offset = 0;
2457         key.type = 0;
2458 again:
2459         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2460         while(1) {
2461                 leaf = path->nodes[0];
2462                 slot = path->slots[0];
2463                 if (slot >= btrfs_header_nritems(leaf)) {
2464                         ret = btrfs_next_leaf(root, path);
2465                         if (ret == 0)
2466                                 continue;
2467                         if (ret < 0)
2468                                 goto error;
2469                         break;
2470                 }
2471                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2472                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2473                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
2474                                 break;
2475                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2476                                 struct btrfs_dev_item *dev_item;
2477                                 dev_item = btrfs_item_ptr(leaf, slot,
2478                                                   struct btrfs_dev_item);
2479                                 ret = read_one_dev(root, leaf, dev_item);
2480                                 BUG_ON(ret);
2481                         }
2482                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2483                         struct btrfs_chunk *chunk;
2484                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2485                         ret = read_one_chunk(root, &found_key, leaf, chunk);
2486                 }
2487                 path->slots[0]++;
2488         }
2489         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2490                 key.objectid = 0;
2491                 btrfs_release_path(root, path);
2492                 goto again;
2493         }
2494
2495         btrfs_free_path(path);
2496         ret = 0;
2497 error:
2498         return ret;
2499 }
2500