Btrfs: check cache->caching_ctl before returning if caching has started
[pandora-kernel.git] fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

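/*
 * Return non-zero once free space caching for this block group has
 * finished.  The smp_mb() pairs with the stores that update ->cached
 * so readers see a consistent state.
 */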
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

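/*
 * Block group caches are reference counted: btrfs_get_block_group() takes
 * a reference and btrfs_put_block_group() drops it, freeing the cache
 * when the last reference goes away.  A freed cache must have no pinned
 * or reserved bytes left.
 */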
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

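/*
 * Mark [start, start + num_bytes) as excluded from allocation by setting
 * EXTENT_UPTODATE in both freed_extents trees.  Used for ranges such as
 * super block stripes that must never be handed out as free space.
 */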
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

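/*
 * Exclude the portions of this block group that hold super block copies,
 * accounting the excluded bytes in cache->bytes_super so they are never
 * reported as allocatable free space.
 */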
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

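/*
 * Grab a reference on the caching control for this block group, or NULL
 * if caching hasn't started or the group was populated via the fast
 * free space cache path (which never allocates a caching_ctl).  Callers
 * must drop the reference with put_caching_control().
 */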
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for anything that can't be
 * used yet: its free space will be released as soon as the transaction
 * commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

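/*
 * Worker thread that walks the extent tree of a block group and records
 * every hole between allocated extents as free space.  It periodically
 * drops its locks (and wakes waiters) so allocators can make progress
 * while caching is still underway.
 */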
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        exclude_super_stripes(extent_root, block_group);
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->bytes_readonly += block_group->bytes_super;
        spin_unlock(&block_group->space_info->lock);

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        caching_ctl->progress = last;
                        btrfs_release_path(extent_root, path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
                                schedule_timeout(1);
                        else
                                cond_resched();
                        goto again;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

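/*
 * Start caching the free space of a block group.  We first try to load
 * it from the on-disk free space cache; if that fails (or we're in the
 * middle of a commit and can't take tree locks), and load_cache_only is
 * not set, we fall back to spawning caching_kthread() to rebuild the
 * free space information from the extent tree.
 */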
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             int load_cache_only)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.
         */
        if (!trans->transaction->in_commit) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
                        return 0;
                }
                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);

                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
                if (ret == 1)
                        return 0;
        }

        if (load_cache_only)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

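/*
 * Find the space_info tracking the given type flags (data, metadata or
 * system).  Returns NULL if no allocation of that type has created one
 * yet.
 */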
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
                 BTRFS_BLOCK_GROUP_METADATA;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

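/*
 * Scale num by factor tenths: div_factor(n, 9) returns 90% of n.  A
 * factor of 10 returns num unchanged.
 */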
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

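/*
 * Hint-based search for a metadata block group that still has a healthy
 * amount of free space.  The scan starts at the hint, wraps around to
 * search_start, and finally retries including read-only groups with a
 * full (factor = 10) threshold before giving up.
 */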
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the queued delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(root->fs_info->extent_root, path);

                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}


/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind record the block's owner tree and the pointer's
 * key, which allows us to find the block by b-tree searching.  Full back
 * refs are for pointers in tree blocks not referenced by their owner
 * trees; the location of the tree block is recorded in the back ref.
 * Full back refs are actually generic and could be used everywhere
 * implicit back refs are, but their major shortcoming is overhead: every
 * time a tree block is COWed, we have to update the back ref entries for
 * all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree-related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it, so we can detect
 * the event that a tree block loses its owner tree's reference and do
 * the back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in
 * the block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block and increase the lower level extents' reference
 * counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back ref and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

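/*
 * Hash the (root objectid, inode objectid, file offset) triple that
 * identifies an implicit data back reference.  The result becomes the
 * key offset of the EXTENT_DATA_REF item, as described in the big
 * comment above.
 */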
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

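/*
 * Find an existing data back reference item for the given extent.
 * Shared (full) refs are keyed by parent block, implicit refs by the
 * hash of (root, owner, offset); hash collisions are resolved by
 * scanning forward and matching the ref contents.
 */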
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

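/*
 * Insert (or bump the count of) a data back reference.  For implicit
 * refs a hash collision is handled by advancing key.offset until a
 * matching or empty slot is found.
 */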
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

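/*
 * Drop refs_to_drop references from the data back ref item at the
 * current path position, deleting the item entirely when its count
 * reaches zero.
 */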
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

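/*
 * Return the reference count stored in a data back ref, whether it is
 * inline (iref != NULL) or a standalone keyed item.
 */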
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

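/*
 * Tree block back refs carry no payload beyond their key: shared refs
 * are keyed by parent block, implicit refs by the owning root's
 * objectid.  These helpers look up and insert such keyed items.
 */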
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

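/*
 * Pick the back reference key type for an extent: tree blocks get
 * shared/tree-block ref keys, data extents get shared/extent-data ref
 * keys, with the shared variants used whenever a parent block is known.
 */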
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

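/*
 * Walk up the path to find the key that immediately follows the current
 * slot.  Returns 0 and fills *key on success, or 1 when the path is
 * already at the last key of the tree.
 */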
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;
        } else
                extra_size = -1;
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        BUG_ON(ret);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                if (!insert) {
                        err = -ENOENT;
                        goto out;
                }
                ret = convert_extent_item_v0(trans, root, path, owner,
                                             extra_size);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        WARN_ON(ptr > end);
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
1465                                         break;
1466                                 }
1467                                 if (ref_offset < root_objectid)
1468                                         break;
1469                         }
1470                 }
1471                 ptr += btrfs_extent_inline_ref_size(type);
1472         }
1473         if (err == -ENOENT && insert) {
1474                 if (item_size + extra_size >=
1475                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1476                         err = -EAGAIN;
1477                         goto out;
1478                 }
1479                 /*
1480                  * To add new inline back ref, we have to make sure
1481                  * there is no corresponding back ref item.
1482                  * For simplicity, we just do not add new inline back
1483                  * ref if there is any kind of item for this block
1484                  */
1485                 if (find_next_key(path, 0, &key) == 0 &&
1486                     key.objectid == bytenr &&
1487                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1488                         err = -EAGAIN;
1489                         goto out;
1490                 }
1491         }
1492         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1493 out:
1494         if (insert) {
1495                 path->keep_locks = 0;
1496                 btrfs_unlock_up_safe(path, 1);
1497         }
1498         return err;
1499 }
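
/*
 * User-space sketch of the scan above: inline refs are packed,
 * variable-size records sorted by type, so the cursor can stop as soon
 * as the current type exceeds the wanted one, and a miss still leaves
 * *pos at the correct insertion offset.  The record layout here (one
 * type byte plus a type-dependent payload) is hypothetical.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

static size_t rec_size(unsigned char type)
{
	return 1 + type;	/* stand-in for btrfs_extent_inline_ref_size() */
}

/* returns 1 with *pos at the match, or 0 with *pos at the insert point */
static int scan(const unsigned char *buf, size_t len, unsigned char want,
		size_t *pos)
{
	size_t off = 0;

	while (off < len) {
		unsigned char type = buf[off];

		if (want < type)
			break;		/* sorted: no match can follow */
		if (want == type) {
			*pos = off;
			return 1;
		}
		off += rec_size(type);
	}
	*pos = off;
	return 0;
}

int main(void)
{
	/* one record of type 1 (2 bytes), one of type 3 (4 bytes) */
	unsigned char buf[] = { 1, 0xaa, 3, 0xbb, 0xcc, 0xdd };
	size_t pos;
	int hit = scan(buf, sizeof(buf), 2, &pos);

	printf("hit=%d insert at offset %zu\n", hit, pos);	/* hit=0, 2 */
	return 0;
}
#endif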
1500
1501 /*
1502  * helper to add new inline back ref
1503  */
1504 static noinline_for_stack
1505 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1506                                 struct btrfs_root *root,
1507                                 struct btrfs_path *path,
1508                                 struct btrfs_extent_inline_ref *iref,
1509                                 u64 parent, u64 root_objectid,
1510                                 u64 owner, u64 offset, int refs_to_add,
1511                                 struct btrfs_delayed_extent_op *extent_op)
1512 {
1513         struct extent_buffer *leaf;
1514         struct btrfs_extent_item *ei;
1515         unsigned long ptr;
1516         unsigned long end;
1517         unsigned long item_offset;
1518         u64 refs;
1519         int size;
1520         int type;
1521         int ret;
1522
1523         leaf = path->nodes[0];
1524         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1525         item_offset = (unsigned long)iref - (unsigned long)ei;
1526
1527         type = extent_ref_type(parent, owner);
1528         size = btrfs_extent_inline_ref_size(type);
1529
1530         ret = btrfs_extend_item(trans, root, path, size);
1531         BUG_ON(ret);
1532
1533         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1534         refs = btrfs_extent_refs(leaf, ei);
1535         refs += refs_to_add;
1536         btrfs_set_extent_refs(leaf, ei, refs);
1537         if (extent_op)
1538                 __run_delayed_extent_op(extent_op, leaf, ei);
1539
1540         ptr = (unsigned long)ei + item_offset;
1541         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1542         if (ptr < end - size)
1543                 memmove_extent_buffer(leaf, ptr + size, ptr,
1544                                       end - size - ptr);
1545
1546         iref = (struct btrfs_extent_inline_ref *)ptr;
1547         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1548         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1549                 struct btrfs_extent_data_ref *dref;
1550                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1551                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1552                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1553                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1554                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1555         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1556                 struct btrfs_shared_data_ref *sref;
1557                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1558                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1559                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1560         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1561                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1562         } else {
1563                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1564         }
1565         btrfs_mark_buffer_dirty(leaf);
1566         return 0;
1567 }
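
/*
 * Sketch of the memmove above, in user space: the item has already been
 * extended by `size` bytes at its tail, so everything from the insert
 * offset to the old end slides right and the new record is written into
 * the hole.  Buffer contents are hypothetical.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* item grown from 4 to 6 bytes; insert "XY" at offset 2 */
	char item[6] = { 'a', 'b', 'c', 'd' };
	size_t end = 4, off = 2, size = 2;

	if (off < end)	/* not appending at the very end */
		memmove(item + off + size, item + off, end - off);
	memcpy(item + off, "XY", size);

	fwrite(item, 1, sizeof(item), stdout);	/* prints abXYcd */
	putchar('\n');
	return 0;
}
#endif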
1568
1569 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1570                                  struct btrfs_root *root,
1571                                  struct btrfs_path *path,
1572                                  struct btrfs_extent_inline_ref **ref_ret,
1573                                  u64 bytenr, u64 num_bytes, u64 parent,
1574                                  u64 root_objectid, u64 owner, u64 offset)
1575 {
1576         int ret;
1577
1578         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1579                                            bytenr, num_bytes, parent,
1580                                            root_objectid, owner, offset, 0);
1581         if (ret != -ENOENT)
1582                 return ret;
1583
1584         btrfs_release_path(root, path);
1585         *ref_ret = NULL;
1586
1587         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1588                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1589                                             root_objectid);
1590         } else {
1591                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1592                                              root_objectid, owner, offset);
1593         }
1594         return ret;
1595 }
1596
1597 /*
1598  * helper to update/remove inline back ref
1599  */
1600 static noinline_for_stack
1601 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1602                                  struct btrfs_root *root,
1603                                  struct btrfs_path *path,
1604                                  struct btrfs_extent_inline_ref *iref,
1605                                  int refs_to_mod,
1606                                  struct btrfs_delayed_extent_op *extent_op)
1607 {
1608         struct extent_buffer *leaf;
1609         struct btrfs_extent_item *ei;
1610         struct btrfs_extent_data_ref *dref = NULL;
1611         struct btrfs_shared_data_ref *sref = NULL;
1612         unsigned long ptr;
1613         unsigned long end;
1614         u32 item_size;
1615         int size;
1616         int type;
1617         int ret;
1618         u64 refs;
1619
1620         leaf = path->nodes[0];
1621         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1622         refs = btrfs_extent_refs(leaf, ei);
1623         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1624         refs += refs_to_mod;
1625         btrfs_set_extent_refs(leaf, ei, refs);
1626         if (extent_op)
1627                 __run_delayed_extent_op(extent_op, leaf, ei);
1628
1629         type = btrfs_extent_inline_ref_type(leaf, iref);
1630
1631         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1632                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1633                 refs = btrfs_extent_data_ref_count(leaf, dref);
1634         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1635                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1636                 refs = btrfs_shared_data_ref_count(leaf, sref);
1637         } else {
1638                 refs = 1;
1639                 BUG_ON(refs_to_mod != -1);
1640         }
1641
1642         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1643         refs += refs_to_mod;
1644
1645         if (refs > 0) {
1646                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1647                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1648                 else
1649                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1650         } else {
1651                 size = btrfs_extent_inline_ref_size(type);
1652                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1653                 ptr = (unsigned long)iref;
1654                 end = (unsigned long)ei + item_size;
1655                 if (ptr + size < end)
1656                         memmove_extent_buffer(leaf, ptr, ptr + size,
1657                                               end - ptr - size);
1658                 item_size -= size;
1659                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1660                 BUG_ON(ret);
1661         }
1662         btrfs_mark_buffer_dirty(leaf);
1663         return 0;
1664 }
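
/*
 * Companion sketch for the removal path above: when an inline ref's
 * count reaches zero, the record is cut out by sliding the tail of the
 * item left over it, then the item shrinks by the record's size (the
 * btrfs_truncate_item() step).
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char item[] = "abXYcd";		/* remove the 2-byte "XY" at offset 2 */
	size_t end = 6, off = 2, size = 2;

	if (off + size < end)		/* record is not the last one */
		memmove(item + off, item + off + size, end - off - size);
	end -= size;			/* truncated item is 4 bytes */

	fwrite(item, 1, end, stdout);	/* prints abcd */
	putchar('\n');
	return 0;
}
#endif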
1665
1666 static noinline_for_stack
1667 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1668                                  struct btrfs_root *root,
1669                                  struct btrfs_path *path,
1670                                  u64 bytenr, u64 num_bytes, u64 parent,
1671                                  u64 root_objectid, u64 owner,
1672                                  u64 offset, int refs_to_add,
1673                                  struct btrfs_delayed_extent_op *extent_op)
1674 {
1675         struct btrfs_extent_inline_ref *iref;
1676         int ret;
1677
1678         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1679                                            bytenr, num_bytes, parent,
1680                                            root_objectid, owner, offset, 1);
1681         if (ret == 0) {
1682                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1683                 ret = update_inline_extent_backref(trans, root, path, iref,
1684                                                    refs_to_add, extent_op);
1685         } else if (ret == -ENOENT) {
1686                 ret = setup_inline_extent_backref(trans, root, path, iref,
1687                                                   parent, root_objectid,
1688                                                   owner, offset, refs_to_add,
1689                                                   extent_op);
1690         }
1691         return ret;
1692 }
1693
1694 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1695                                  struct btrfs_root *root,
1696                                  struct btrfs_path *path,
1697                                  u64 bytenr, u64 parent, u64 root_objectid,
1698                                  u64 owner, u64 offset, int refs_to_add)
1699 {
1700         int ret;
1701         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1702                 BUG_ON(refs_to_add != 1);
1703                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1704                                             parent, root_objectid);
1705         } else {
1706                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1707                                              parent, root_objectid,
1708                                              owner, offset, refs_to_add);
1709         }
1710         return ret;
1711 }
1712
1713 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1714                                  struct btrfs_root *root,
1715                                  struct btrfs_path *path,
1716                                  struct btrfs_extent_inline_ref *iref,
1717                                  int refs_to_drop, int is_data)
1718 {
1719         int ret;
1720
1721         BUG_ON(!is_data && refs_to_drop != 1);
1722         if (iref) {
1723                 ret = update_inline_extent_backref(trans, root, path, iref,
1724                                                    -refs_to_drop, NULL);
1725         } else if (is_data) {
1726                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1727         } else {
1728                 ret = btrfs_del_item(trans, root, path);
1729         }
1730         return ret;
1731 }
1732
1733 static void btrfs_issue_discard(struct block_device *bdev,
1734                                 u64 start, u64 len)
1735 {
1736         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
1737                         BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
1738 }
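
/*
 * The >> 9 above converts bytes to the 512-byte sectors that
 * blkdev_issue_discard() expects: for example, a 1 MiB discard at byte
 * offset 64 MiB becomes sector 131072 with a length of 2048 sectors.
 */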
1739
1740 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1741                                 u64 num_bytes)
1742 {
1743         int ret;
1744         u64 map_length = num_bytes;
1745         struct btrfs_multi_bio *multi = NULL;
1746
1747         if (!btrfs_test_opt(root, DISCARD))
1748                 return 0;
1749
1750         /* Tell the block device(s) that the sectors can be discarded */
1751         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1752                               bytenr, &map_length, &multi, 0);
1753         if (!ret) {
1754                 struct btrfs_bio_stripe *stripe = multi->stripes;
1755                 int i;
1756
1757                 if (map_length > num_bytes)
1758                         map_length = num_bytes;
1759
1760                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1761                         btrfs_issue_discard(stripe->dev->bdev,
1762                                             stripe->physical,
1763                                             map_length);
1764                 }
1765                 kfree(multi);
1766         }
1767
1768         return ret;
1769 }
1770
1771 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1772                          struct btrfs_root *root,
1773                          u64 bytenr, u64 num_bytes, u64 parent,
1774                          u64 root_objectid, u64 owner, u64 offset)
1775 {
1776         int ret;
1777         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1778                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1779
1780         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1781                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1782                                         parent, root_objectid, (int)owner,
1783                                         BTRFS_ADD_DELAYED_REF, NULL);
1784         } else {
1785                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1786                                         parent, root_objectid, owner, offset,
1787                                         BTRFS_ADD_DELAYED_REF, NULL);
1788         }
1789         return ret;
1790 }
1791
1792 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1793                                   struct btrfs_root *root,
1794                                   u64 bytenr, u64 num_bytes,
1795                                   u64 parent, u64 root_objectid,
1796                                   u64 owner, u64 offset, int refs_to_add,
1797                                   struct btrfs_delayed_extent_op *extent_op)
1798 {
1799         struct btrfs_path *path;
1800         struct extent_buffer *leaf;
1801         struct btrfs_extent_item *item;
1802         u64 refs;
1803         int ret;
1804         int err = 0;
1805
1806         path = btrfs_alloc_path();
1807         if (!path)
1808                 return -ENOMEM;
1809
1810         path->reada = 1;
1811         path->leave_spinning = 1;
1812         /* this will set up the path even if it fails to insert the back ref */
1813         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1814                                            path, bytenr, num_bytes, parent,
1815                                            root_objectid, owner, offset,
1816                                            refs_to_add, extent_op);
1817         if (ret == 0)
1818                 goto out;
1819
1820         if (ret != -EAGAIN) {
1821                 err = ret;
1822                 goto out;
1823         }
1824
1825         leaf = path->nodes[0];
1826         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1827         refs = btrfs_extent_refs(leaf, item);
1828         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1829         if (extent_op)
1830                 __run_delayed_extent_op(extent_op, leaf, item);
1831
1832         btrfs_mark_buffer_dirty(leaf);
1833         btrfs_release_path(root->fs_info->extent_root, path);
1834
1835         path->reada = 1;
1836         path->leave_spinning = 1;
1837
1838         /* now insert the actual backref */
1839         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1840                                     path, bytenr, parent, root_objectid,
1841                                     owner, offset, refs_to_add);
1842         BUG_ON(ret);
1843 out:
1844         btrfs_free_path(path);
1845         return err;
1846 }
1847
1848 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1849                                 struct btrfs_root *root,
1850                                 struct btrfs_delayed_ref_node *node,
1851                                 struct btrfs_delayed_extent_op *extent_op,
1852                                 int insert_reserved)
1853 {
1854         int ret = 0;
1855         struct btrfs_delayed_data_ref *ref;
1856         struct btrfs_key ins;
1857         u64 parent = 0;
1858         u64 ref_root = 0;
1859         u64 flags = 0;
1860
1861         ins.objectid = node->bytenr;
1862         ins.offset = node->num_bytes;
1863         ins.type = BTRFS_EXTENT_ITEM_KEY;
1864
1865         ref = btrfs_delayed_node_to_data_ref(node);
1866         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1867                 parent = ref->parent;
1868         else
1869                 ref_root = ref->root;
1870
1871         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1872                 if (extent_op) {
1873                         BUG_ON(extent_op->update_key);
1874                         flags |= extent_op->flags_to_set;
1875                 }
1876                 ret = alloc_reserved_file_extent(trans, root,
1877                                                  parent, ref_root, flags,
1878                                                  ref->objectid, ref->offset,
1879                                                  &ins, node->ref_mod);
1880         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1881                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1882                                              node->num_bytes, parent,
1883                                              ref_root, ref->objectid,
1884                                              ref->offset, node->ref_mod,
1885                                              extent_op);
1886         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1887                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1888                                           node->num_bytes, parent,
1889                                           ref_root, ref->objectid,
1890                                           ref->offset, node->ref_mod,
1891                                           extent_op);
1892         } else {
1893                 BUG();
1894         }
1895         return ret;
1896 }
1897
1898 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1899                                     struct extent_buffer *leaf,
1900                                     struct btrfs_extent_item *ei)
1901 {
1902         u64 flags = btrfs_extent_flags(leaf, ei);
1903         if (extent_op->update_flags) {
1904                 flags |= extent_op->flags_to_set;
1905                 btrfs_set_extent_flags(leaf, ei, flags);
1906         }
1907
1908         if (extent_op->update_key) {
1909                 struct btrfs_tree_block_info *bi;
1910                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1911                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1912                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1913         }
1914 }
1915
1916 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1917                                  struct btrfs_root *root,
1918                                  struct btrfs_delayed_ref_node *node,
1919                                  struct btrfs_delayed_extent_op *extent_op)
1920 {
1921         struct btrfs_key key;
1922         struct btrfs_path *path;
1923         struct btrfs_extent_item *ei;
1924         struct extent_buffer *leaf;
1925         u32 item_size;
1926         int ret;
1927         int err = 0;
1928
1929         path = btrfs_alloc_path();
1930         if (!path)
1931                 return -ENOMEM;
1932
1933         key.objectid = node->bytenr;
1934         key.type = BTRFS_EXTENT_ITEM_KEY;
1935         key.offset = node->num_bytes;
1936
1937         path->reada = 1;
1938         path->leave_spinning = 1;
1939         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1940                                 path, 0, 1);
1941         if (ret < 0) {
1942                 err = ret;
1943                 goto out;
1944         }
1945         if (ret > 0) {
1946                 err = -EIO;
1947                 goto out;
1948         }
1949
1950         leaf = path->nodes[0];
1951         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1952 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1953         if (item_size < sizeof(*ei)) {
1954                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1955                                              path, (u64)-1, 0);
1956                 if (ret < 0) {
1957                         err = ret;
1958                         goto out;
1959                 }
1960                 leaf = path->nodes[0];
1961                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1962         }
1963 #endif
1964         BUG_ON(item_size < sizeof(*ei));
1965         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1966         __run_delayed_extent_op(extent_op, leaf, ei);
1967
1968         btrfs_mark_buffer_dirty(leaf);
1969 out:
1970         btrfs_free_path(path);
1971         return err;
1972 }
1973
1974 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1975                                 struct btrfs_root *root,
1976                                 struct btrfs_delayed_ref_node *node,
1977                                 struct btrfs_delayed_extent_op *extent_op,
1978                                 int insert_reserved)
1979 {
1980         int ret = 0;
1981         struct btrfs_delayed_tree_ref *ref;
1982         struct btrfs_key ins;
1983         u64 parent = 0;
1984         u64 ref_root = 0;
1985
1986         ins.objectid = node->bytenr;
1987         ins.offset = node->num_bytes;
1988         ins.type = BTRFS_EXTENT_ITEM_KEY;
1989
1990         ref = btrfs_delayed_node_to_tree_ref(node);
1991         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1992                 parent = ref->parent;
1993         else
1994                 ref_root = ref->root;
1995
1996         BUG_ON(node->ref_mod != 1);
1997         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1998                 BUG_ON(!extent_op || !extent_op->update_flags ||
1999                        !extent_op->update_key);
2000                 ret = alloc_reserved_tree_block(trans, root,
2001                                                 parent, ref_root,
2002                                                 extent_op->flags_to_set,
2003                                                 &extent_op->key,
2004                                                 ref->level, &ins);
2005         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2006                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2007                                              node->num_bytes, parent, ref_root,
2008                                              ref->level, 0, 1, extent_op);
2009         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2010                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2011                                           node->num_bytes, parent, ref_root,
2012                                           ref->level, 0, 1, extent_op);
2013         } else {
2014                 BUG();
2015         }
2016         return ret;
2017 }
2018
2019 /* helper function to actually process a single delayed ref entry */
2020 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2021                                struct btrfs_root *root,
2022                                struct btrfs_delayed_ref_node *node,
2023                                struct btrfs_delayed_extent_op *extent_op,
2024                                int insert_reserved)
2025 {
2026         int ret;
2027         if (btrfs_delayed_ref_is_head(node)) {
2028                 struct btrfs_delayed_ref_head *head;
2029                 /*
2030                  * we've hit the end of the chain and we were supposed
2031                  * to insert this extent into the tree.  But, it got
2032                  * deleted before we ever needed to insert it, so all
2033                  * we have to do is clean up the accounting
2034                  */
2035                 BUG_ON(extent_op);
2036                 head = btrfs_delayed_node_to_head(node);
2037                 if (insert_reserved) {
2038                         btrfs_pin_extent(root, node->bytenr,
2039                                          node->num_bytes, 1);
2040                         if (head->is_data) {
2041                                 ret = btrfs_del_csums(trans, root,
2042                                                       node->bytenr,
2043                                                       node->num_bytes);
2044                                 BUG_ON(ret);
2045                         }
2046                 }
2047                 mutex_unlock(&head->mutex);
2048                 return 0;
2049         }
2050
2051         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2052             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2053                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2054                                            insert_reserved);
2055         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2056                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2057                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2058                                            insert_reserved);
2059         else
2060                 BUG();
2061         return ret;
2062 }
2063
2064 static noinline struct btrfs_delayed_ref_node *
2065 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2066 {
2067         struct rb_node *node;
2068         struct btrfs_delayed_ref_node *ref;
2069         int action = BTRFS_ADD_DELAYED_REF;
2070 again:
2071         /*
2072          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2073          * this prevents the ref count from going down to zero when
2074          * there are still pending delayed refs.
2075          */
2076         node = rb_prev(&head->node.rb_node);
2077         while (1) {
2078                 if (!node)
2079                         break;
2080                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2081                                 rb_node);
2082                 if (ref->bytenr != head->node.bytenr)
2083                         break;
2084                 if (ref->action == action)
2085                         return ref;
2086                 node = rb_prev(node);
2087         }
2088         if (action == BTRFS_ADD_DELAYED_REF) {
2089                 action = BTRFS_DROP_DELAYED_REF;
2090                 goto again;
2091         }
2092         return NULL;
2093 }
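
/*
 * User-space sketch of the selection policy above: given mixed pending
 * updates, every ADD is consumed before any DROP, so an extent whose
 * updates net out above zero never passes through a zero refcount
 * mid-run.  A flat array stands in for the rbtree walk.
 */
#if 0
#include <stdio.h>

enum { ADD, DROP };

static int select_ref(const int *actions, int n, int *taken)
{
	int pass, i;

	for (pass = ADD; pass <= DROP; pass++)
		for (i = 0; i < n; i++)
			if (!taken[i] && actions[i] == pass) {
				taken[i] = 1;
				return pass;
			}
	return -1;
}

int main(void)
{
	int actions[4] = { DROP, ADD, DROP, ADD };
	int taken[4] = { 0 };
	int a;

	/* prints add, add, drop, drop */
	while ((a = select_ref(actions, 4, taken)) != -1)
		printf("%s\n", a == ADD ? "add" : "drop");
	return 0;
}
#endif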
2094
2095 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2096                                        struct btrfs_root *root,
2097                                        struct list_head *cluster)
2098 {
2099         struct btrfs_delayed_ref_root *delayed_refs;
2100         struct btrfs_delayed_ref_node *ref;
2101         struct btrfs_delayed_ref_head *locked_ref = NULL;
2102         struct btrfs_delayed_extent_op *extent_op;
2103         int ret;
2104         int count = 0;
2105         int must_insert_reserved = 0;
2106
2107         delayed_refs = &trans->transaction->delayed_refs;
2108         while (1) {
2109                 if (!locked_ref) {
2110                         /* pick a new head ref from the cluster list */
2111                         if (list_empty(cluster))
2112                                 break;
2113
2114                         locked_ref = list_entry(cluster->next,
2115                                      struct btrfs_delayed_ref_head, cluster);
2116
2117                         /* grab the lock that says we are going to process
2118                          * all the refs for this head */
2119                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2120
2121                         /*
2122                          * we may have dropped the spin lock to get the head
2123                          * mutex lock, and that might have given someone else
2124                          * time to free the head.  If that's true, it has been
2125                          * removed from our list and we can move on.
2126                          */
2127                         if (ret == -EAGAIN) {
2128                                 locked_ref = NULL;
2129                                 count++;
2130                                 continue;
2131                         }
2132                 }
2133
2134                 /*
2135                  * record the must insert reserved flag before we
2136                  * drop the spin lock.
2137                  */
2138                 must_insert_reserved = locked_ref->must_insert_reserved;
2139                 locked_ref->must_insert_reserved = 0;
2140
2141                 extent_op = locked_ref->extent_op;
2142                 locked_ref->extent_op = NULL;
2143
2144                 /*
2145                  * locked_ref is the head node, so we have to go one
2146                  * node back for any delayed ref updates
2147                  */
2148                 ref = select_delayed_ref(locked_ref);
2149                 if (!ref) {
2150                         /* All delayed refs have been processed, go ahead
2151                          * and send the head node to run_one_delayed_ref,
2152                          * so that any accounting fixes can happen
2153                          */
2154                         ref = &locked_ref->node;
2155
2156                         if (extent_op && must_insert_reserved) {
2157                                 kfree(extent_op);
2158                                 extent_op = NULL;
2159                         }
2160
2161                         if (extent_op) {
2162                                 spin_unlock(&delayed_refs->lock);
2163
2164                                 ret = run_delayed_extent_op(trans, root,
2165                                                             ref, extent_op);
2166                                 BUG_ON(ret);
2167                                 kfree(extent_op);
2168
2169                                 cond_resched();
2170                                 spin_lock(&delayed_refs->lock);
2171                                 continue;
2172                         }
2173
2174                         list_del_init(&locked_ref->cluster);
2175                         locked_ref = NULL;
2176                 }
2177
2178                 ref->in_tree = 0;
2179                 rb_erase(&ref->rb_node, &delayed_refs->root);
2180                 delayed_refs->num_entries--;
2181
2182                 spin_unlock(&delayed_refs->lock);
2183
2184                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2185                                           must_insert_reserved);
2186                 BUG_ON(ret);
2187
2188                 btrfs_put_delayed_ref(ref);
2189                 kfree(extent_op);
2190                 count++;
2191
2192                 cond_resched();
2193                 spin_lock(&delayed_refs->lock);
2194         }
2195         return count;
2196 }
2197
2198 /*
2199  * this starts processing the delayed reference count updates and
2200  * extent insertions we have queued up so far.  count can be
2201  * 0, which means to process everything in the tree at the start
2202  * of the run (but not newly added entries), or it can be some target
2203  * number you'd like to process.
2204  */
2205 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2206                            struct btrfs_root *root, unsigned long count)
2207 {
2208         struct rb_node *node;
2209         struct btrfs_delayed_ref_root *delayed_refs;
2210         struct btrfs_delayed_ref_node *ref;
2211         struct list_head cluster;
2212         int ret;
2213         int run_all = count == (unsigned long)-1;
2214         int run_most = 0;
2215
2216         if (root == root->fs_info->extent_root)
2217                 root = root->fs_info->tree_root;
2218
2219         delayed_refs = &trans->transaction->delayed_refs;
2220         INIT_LIST_HEAD(&cluster);
2221 again:
2222         spin_lock(&delayed_refs->lock);
2223         if (count == 0) {
2224                 count = delayed_refs->num_entries * 2;
2225                 run_most = 1;
2226         }
2227         while (1) {
2228                 if (!(run_all || run_most) &&
2229                     delayed_refs->num_heads_ready < 64)
2230                         break;
2231
2232                 /*
2233                  * go find something we can process in the rbtree.  We start at
2234                  * the beginning of the tree, and then build a cluster
2235                  * of refs to process starting at the first one we are able to
2236                  * lock
2237                  */
2238                 ret = btrfs_find_ref_cluster(trans, &cluster,
2239                                              delayed_refs->run_delayed_start);
2240                 if (ret)
2241                         break;
2242
2243                 ret = run_clustered_refs(trans, root, &cluster);
2244                 BUG_ON(ret < 0);
2245
2246                 count -= min_t(unsigned long, ret, count);
2247
2248                 if (count == 0)
2249                         break;
2250         }
2251
2252         if (run_all) {
2253                 node = rb_first(&delayed_refs->root);
2254                 if (!node)
2255                         goto out;
2256                 count = (unsigned long)-1;
2257
2258                 while (node) {
2259                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2260                                        rb_node);
2261                         if (btrfs_delayed_ref_is_head(ref)) {
2262                                 struct btrfs_delayed_ref_head *head;
2263
2264                                 head = btrfs_delayed_node_to_head(ref);
2265                                 atomic_inc(&ref->refs);
2266
2267                                 spin_unlock(&delayed_refs->lock);
2268                                 mutex_lock(&head->mutex);
2269                                 mutex_unlock(&head->mutex);
2270
2271                                 btrfs_put_delayed_ref(ref);
2272                                 cond_resched();
2273                                 goto again;
2274                         }
2275                         node = rb_next(node);
2276                 }
2277                 spin_unlock(&delayed_refs->lock);
2278                 schedule_timeout(1);
2279                 goto again;
2280         }
2281 out:
2282         spin_unlock(&delayed_refs->lock);
2283         return 0;
2284 }
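
/*
 * Typical invocations, per the contract above (a sketch; error handling
 * elided).  A count of 0 flushes roughly the entries queued when the
 * call starts, while (unsigned long)-1 loops until the whole tree is
 * drained, including heads added while it runs:
 *
 *	btrfs_run_delayed_refs(trans, root, 0);
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 */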
2285
2286 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2287                                 struct btrfs_root *root,
2288                                 u64 bytenr, u64 num_bytes, u64 flags,
2289                                 int is_data)
2290 {
2291         struct btrfs_delayed_extent_op *extent_op;
2292         int ret;
2293
2294         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2295         if (!extent_op)
2296                 return -ENOMEM;
2297
2298         extent_op->flags_to_set = flags;
2299         extent_op->update_flags = 1;
2300         extent_op->update_key = 0;
2301         extent_op->is_data = is_data ? 1 : 0;
2302
2303         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2304         if (ret)
2305                 kfree(extent_op);
2306         return ret;
2307 }
2308
2309 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2310                                       struct btrfs_root *root,
2311                                       struct btrfs_path *path,
2312                                       u64 objectid, u64 offset, u64 bytenr)
2313 {
2314         struct btrfs_delayed_ref_head *head;
2315         struct btrfs_delayed_ref_node *ref;
2316         struct btrfs_delayed_data_ref *data_ref;
2317         struct btrfs_delayed_ref_root *delayed_refs;
2318         struct rb_node *node;
2319         int ret = 0;
2320
2321         ret = -ENOENT;
2322         delayed_refs = &trans->transaction->delayed_refs;
2323         spin_lock(&delayed_refs->lock);
2324         head = btrfs_find_delayed_ref_head(trans, bytenr);
2325         if (!head)
2326                 goto out;
2327
2328         if (!mutex_trylock(&head->mutex)) {
2329                 atomic_inc(&head->node.refs);
2330                 spin_unlock(&delayed_refs->lock);
2331
2332                 btrfs_release_path(root->fs_info->extent_root, path);
2333
2334                 mutex_lock(&head->mutex);
2335                 mutex_unlock(&head->mutex);
2336                 btrfs_put_delayed_ref(&head->node);
2337                 return -EAGAIN;
2338         }
2339
2340         node = rb_prev(&head->node.rb_node);
2341         if (!node)
2342                 goto out_unlock;
2343
2344         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2345
2346         if (ref->bytenr != bytenr)
2347                 goto out_unlock;
2348
2349         ret = 1;
2350         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2351                 goto out_unlock;
2352
2353         data_ref = btrfs_delayed_node_to_data_ref(ref);
2354
2355         node = rb_prev(node);
2356         if (node) {
2357                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2358                 if (ref->bytenr == bytenr)
2359                         goto out_unlock;
2360         }
2361
2362         if (data_ref->root != root->root_key.objectid ||
2363             data_ref->objectid != objectid || data_ref->offset != offset)
2364                 goto out_unlock;
2365
2366         ret = 0;
2367 out_unlock:
2368         mutex_unlock(&head->mutex);
2369 out:
2370         spin_unlock(&delayed_refs->lock);
2371         return ret;
2372 }
2373
2374 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2375                                         struct btrfs_root *root,
2376                                         struct btrfs_path *path,
2377                                         u64 objectid, u64 offset, u64 bytenr)
2378 {
2379         struct btrfs_root *extent_root = root->fs_info->extent_root;
2380         struct extent_buffer *leaf;
2381         struct btrfs_extent_data_ref *ref;
2382         struct btrfs_extent_inline_ref *iref;
2383         struct btrfs_extent_item *ei;
2384         struct btrfs_key key;
2385         u32 item_size;
2386         int ret;
2387
2388         key.objectid = bytenr;
2389         key.offset = (u64)-1;
2390         key.type = BTRFS_EXTENT_ITEM_KEY;
2391
2392         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2393         if (ret < 0)
2394                 goto out;
2395         BUG_ON(ret == 0);
2396
2397         ret = -ENOENT;
2398         if (path->slots[0] == 0)
2399                 goto out;
2400
2401         path->slots[0]--;
2402         leaf = path->nodes[0];
2403         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2404
2405         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2406                 goto out;
2407
2408         ret = 1;
2409         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2410 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2411         if (item_size < sizeof(*ei)) {
2412                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2413                 goto out;
2414         }
2415 #endif
2416         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2417
2418         if (item_size != sizeof(*ei) +
2419             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2420                 goto out;
2421
2422         if (btrfs_extent_generation(leaf, ei) <=
2423             btrfs_root_last_snapshot(&root->root_item))
2424                 goto out;
2425
2426         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2427         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2428             BTRFS_EXTENT_DATA_REF_KEY)
2429                 goto out;
2430
2431         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2432         if (btrfs_extent_refs(leaf, ei) !=
2433             btrfs_extent_data_ref_count(leaf, ref) ||
2434             btrfs_extent_data_ref_root(leaf, ref) !=
2435             root->root_key.objectid ||
2436             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2437             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2438                 goto out;
2439
2440         ret = 0;
2441 out:
2442         return ret;
2443 }
2444
2445 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2446                           struct btrfs_root *root,
2447                           u64 objectid, u64 offset, u64 bytenr)
2448 {
2449         struct btrfs_path *path;
2450         int ret;
2451         int ret2;
2452
2453         path = btrfs_alloc_path();
2454         if (!path)
2455                 return -ENOMEM;
2456
2457         do {
2458                 ret = check_committed_ref(trans, root, path, objectid,
2459                                           offset, bytenr);
2460                 if (ret && ret != -ENOENT)
2461                         goto out;
2462
2463                 ret2 = check_delayed_ref(trans, root, path, objectid,
2464                                          offset, bytenr);
2465         } while (ret2 == -EAGAIN);
2466
2467         if (ret2 && ret2 != -ENOENT) {
2468                 ret = ret2;
2469                 goto out;
2470         }
2471
2472         if (ret != -ENOENT || ret2 != -ENOENT)
2473                 ret = 0;
2474 out:
2475         btrfs_free_path(path);
2476         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2477                 WARN_ON(ret > 0);
2478         return ret;
2479 }
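
/*
 * Usage sketch, inferred from the logic above: 0 means neither the
 * committed extent tree nor the delayed-ref queue shows any reference
 * besides the caller's own, so an in-place (nocow) write is safe; any
 * nonzero result, including -ENOENT, forces the copy-on-write path.
 *
 *	if (btrfs_cross_ref_exist(trans, root, ino, offset, bytenr) == 0)
 *		... overwrite in place ...
 *	else
 *		... fall back to COW ...
 */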
2480
2481 #if 0
2482 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2483                     struct extent_buffer *buf, u32 nr_extents)
2484 {
2485         struct btrfs_key key;
2486         struct btrfs_file_extent_item *fi;
2487         u64 root_gen;
2488         u32 nritems;
2489         int i;
2490         int level;
2491         int ret = 0;
2492         int shared = 0;
2493
2494         if (!root->ref_cows)
2495                 return 0;
2496
2497         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2498                 shared = 0;
2499                 root_gen = root->root_key.offset;
2500         } else {
2501                 shared = 1;
2502                 root_gen = trans->transid - 1;
2503         }
2504
2505         level = btrfs_header_level(buf);
2506         nritems = btrfs_header_nritems(buf);
2507
2508         if (level == 0) {
2509                 struct btrfs_leaf_ref *ref;
2510                 struct btrfs_extent_info *info;
2511
2512                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2513                 if (!ref) {
2514                         ret = -ENOMEM;
2515                         goto out;
2516                 }
2517
2518                 ref->root_gen = root_gen;
2519                 ref->bytenr = buf->start;
2520                 ref->owner = btrfs_header_owner(buf);
2521                 ref->generation = btrfs_header_generation(buf);
2522                 ref->nritems = nr_extents;
2523                 info = ref->extents;
2524
2525                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2526                         u64 disk_bytenr;
2527                         btrfs_item_key_to_cpu(buf, &key, i);
2528                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2529                                 continue;
2530                         fi = btrfs_item_ptr(buf, i,
2531                                             struct btrfs_file_extent_item);
2532                         if (btrfs_file_extent_type(buf, fi) ==
2533                             BTRFS_FILE_EXTENT_INLINE)
2534                                 continue;
2535                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2536                         if (disk_bytenr == 0)
2537                                 continue;
2538
2539                         info->bytenr = disk_bytenr;
2540                         info->num_bytes =
2541                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2542                         info->objectid = key.objectid;
2543                         info->offset = key.offset;
2544                         info++;
2545                 }
2546
2547                 ret = btrfs_add_leaf_ref(root, ref, shared);
2548                 if (ret == -EEXIST && shared) {
2549                         struct btrfs_leaf_ref *old;
2550                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2551                         BUG_ON(!old);
2552                         btrfs_remove_leaf_ref(root, old);
2553                         btrfs_free_leaf_ref(root, old);
2554                         ret = btrfs_add_leaf_ref(root, ref, shared);
2555                 }
2556                 WARN_ON(ret);
2557                 btrfs_free_leaf_ref(root, ref);
2558         }
2559 out:
2560         return ret;
2561 }
2562
2563 /* when a block goes through cow, we update the reference counts of
2564  * everything that block points to.  The internal pointers of the block
2565  * can be in just about any order, and it is likely to have clusters of
2566  * things that are close together and clusters of things that are not.
2567  *
2568  * To help reduce the seeks that come with updating all of these reference
2569  * counts, sort them by byte number before actual updates are done.
2570  *
2571  * struct refsort is used to match byte number to slot in the btree block.
2572  * we sort based on the byte number and then use the slot to actually
2573  * find the item.
2574  *
2575  * struct refsort is smaller than struct btrfs_item and smaller than
2576  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2577  * for a btree block, there's no way for a kmalloc of refsorts for a
2578  * single node to be bigger than a page.
2579  */
2580 struct refsort {
2581         u64 bytenr;
2582         u32 slot;
2583 };
2584
2585 /*
2586  * for passing into sort()
2587  */
2588 static int refsort_cmp(const void *a_void, const void *b_void)
2589 {
2590         const struct refsort *a = a_void;
2591         const struct refsort *b = b_void;
2592
2593         if (a->bytenr < b->bytenr)
2594                 return -1;
2595         if (a->bytenr > b->bytenr)
2596                 return 1;
2597         return 0;
2598 }
2599 #endif
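
/*
 * Sketch of how the comparator above would be driven (the kernel uses
 * sort() from linux/sort.h; user-space qsort() stands in here): sorting
 * a refsort array by bytenr turns scattered ref updates into a single
 * ascending pass over the disk, while slot remembers which item to
 * touch.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct refsort { unsigned long long bytenr; unsigned int slot; };

static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void, *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	return a->bytenr > b->bytenr;
}

int main(void)
{
	struct refsort refs[] = {
		{ 4096 * 9, 0 }, { 4096 * 2, 1 }, { 4096 * 5, 2 },
	};
	int i;

	qsort(refs, 3, sizeof(refs[0]), refsort_cmp);
	for (i = 0; i < 3; i++)		/* slots come out as 1, 2, 0 */
		printf("bytenr %llu slot %u\n", refs[i].bytenr, refs[i].slot);
	return 0;
}
#endif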
2600
2601 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2602                            struct btrfs_root *root,
2603                            struct extent_buffer *buf,
2604                            int full_backref, int inc)
2605 {
2606         u64 bytenr;
2607         u64 num_bytes;
2608         u64 parent;
2609         u64 ref_root;
2610         u32 nritems;
2611         struct btrfs_key key;
2612         struct btrfs_file_extent_item *fi;
2613         int i;
2614         int level;
2615         int ret = 0;
2616         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2617                             u64, u64, u64, u64, u64, u64);
2618
2619         ref_root = btrfs_header_owner(buf);
2620         nritems = btrfs_header_nritems(buf);
2621         level = btrfs_header_level(buf);
2622
2623         if (!root->ref_cows && level == 0)
2624                 return 0;
2625
2626         if (inc)
2627                 process_func = btrfs_inc_extent_ref;
2628         else
2629                 process_func = btrfs_free_extent;
2630
2631         if (full_backref)
2632                 parent = buf->start;
2633         else
2634                 parent = 0;
2635
2636         for (i = 0; i < nritems; i++) {
2637                 if (level == 0) {
2638                         btrfs_item_key_to_cpu(buf, &key, i);
2639                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2640                                 continue;
2641                         fi = btrfs_item_ptr(buf, i,
2642                                             struct btrfs_file_extent_item);
2643                         if (btrfs_file_extent_type(buf, fi) ==
2644                             BTRFS_FILE_EXTENT_INLINE)
2645                                 continue;
2646                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2647                         if (bytenr == 0)
2648                                 continue;
2649
2650                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2651                         key.offset -= btrfs_file_extent_offset(buf, fi);
2652                         ret = process_func(trans, root, bytenr, num_bytes,
2653                                            parent, ref_root, key.objectid,
2654                                            key.offset);
2655                         if (ret)
2656                                 goto fail;
2657                 } else {
2658                         bytenr = btrfs_node_blockptr(buf, i);
2659                         num_bytes = btrfs_level_size(root, level - 1);
2660                         ret = process_func(trans, root, bytenr, num_bytes,
2661                                            parent, ref_root, level - 1, 0);
2662                         if (ret)
2663                                 goto fail;
2664                 }
2665         }
2666         return 0;
2667 fail:
2668         BUG();
2669         return ret;
2670 }
2671
2672 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2673                   struct extent_buffer *buf, int full_backref)
2674 {
2675         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2676 }
2677
2678 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2679                   struct extent_buffer *buf, int full_backref)
2680 {
2681         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2682 }
2683
2684 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2685                                  struct btrfs_root *root,
2686                                  struct btrfs_path *path,
2687                                  struct btrfs_block_group_cache *cache)
2688 {
2689         int ret;
2690         struct btrfs_root *extent_root = root->fs_info->extent_root;
2691         unsigned long bi;
2692         struct extent_buffer *leaf;
2693
2694         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2695         if (ret < 0)
2696                 goto fail;
2697         BUG_ON(ret);
2698
2699         leaf = path->nodes[0];
2700         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2701         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2702         btrfs_mark_buffer_dirty(leaf);
2703         btrfs_release_path(extent_root, path);
2704 fail:
2705         if (ret)
2706                 return ret;
2707         return 0;
2708
2709 }
2710
2711 static struct btrfs_block_group_cache *
2712 next_block_group(struct btrfs_root *root,
2713                  struct btrfs_block_group_cache *cache)
2714 {
2715         struct rb_node *node;
2716         spin_lock(&root->fs_info->block_group_cache_lock);
2717         node = rb_next(&cache->cache_node);
2718         btrfs_put_block_group(cache);
2719         if (node) {
2720                 cache = rb_entry(node, struct btrfs_block_group_cache,
2721                                  cache_node);
2722                 btrfs_get_block_group(cache);
2723         } else
2724                 cache = NULL;
2725         spin_unlock(&root->fs_info->block_group_cache_lock);
2726         return cache;
2727 }
2728
2729 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2730                             struct btrfs_trans_handle *trans,
2731                             struct btrfs_path *path)
2732 {
2733         struct btrfs_root *root = block_group->fs_info->tree_root;
2734         struct inode *inode = NULL;
2735         u64 alloc_hint = 0;
2736         int num_pages = 0;
2737         int retries = 0;
2738         int ret = 0;
2739
2740         /*
2741          * If this block group is smaller than 100 megs, don't bother caching
2742          * the block group.
2743          */
2744         if (block_group->key.offset < (100 * 1024 * 1024)) {
2745                 spin_lock(&block_group->lock);
2746                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2747                 spin_unlock(&block_group->lock);
2748                 return 0;
2749         }
2750
2751 again:
2752         inode = lookup_free_space_inode(root, block_group, path);
2753         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2754                 ret = PTR_ERR(inode);
2755                 btrfs_release_path(root, path);
2756                 goto out;
2757         }
2758
2759         if (IS_ERR(inode)) {
2760                 BUG_ON(retries);
2761                 retries++;
2762
2763                 if (block_group->ro)
2764                         goto out_free;
2765
2766                 ret = create_free_space_inode(root, trans, block_group, path);
2767                 if (ret)
2768                         goto out_free;
2769                 goto again;
2770         }
2771
2772         /*
2773          * We want to set the generation to 0 so that if anything goes wrong
2774          * from here on out we know not to trust this cache when we load it up
2775          * next time.
2776          */
2777         BTRFS_I(inode)->generation = 0;
2778         ret = btrfs_update_inode(trans, root, inode);
2779         WARN_ON(ret);
2780
2781         if (i_size_read(inode) > 0) {
2782                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2783                                                       inode);
2784                 if (ret)
2785                         goto out_put;
2786         }
2787
2788         spin_lock(&block_group->lock);
2789         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2790                 spin_unlock(&block_group->lock);
2791                 goto out_put;
2792         }
2793         spin_unlock(&block_group->lock);
2794
2795         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2796         if (!num_pages)
2797                 num_pages = 1;
2798
2799         /*
2800          * Just to make absolutely sure we have enough space, we're going to
2801          * preallocate 16 pages worth of space for each block group.  In
2802          * practice we ought to use at most 8, but we need extra space so we can
2803          * add our header and have a terminator between the extents and the
2804          * bitmaps.
2805          */
2806         num_pages *= 16;
2807         num_pages *= PAGE_CACHE_SIZE;
2808
2809         ret = btrfs_check_data_free_space(inode, num_pages);
2810         if (ret)
2811                 goto out_put;
2812
2813         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2814                                               num_pages, num_pages,
2815                                               &alloc_hint);
2816         btrfs_free_reserved_data_space(inode, num_pages);
2817 out_put:
2818         iput(inode);
2819 out_free:
2820         btrfs_release_path(root, path);
2821 out:
2822         spin_lock(&block_group->lock);
2823         if (ret)
2824                 block_group->disk_cache_state = BTRFS_DC_ERROR;
2825         else
2826                 block_group->disk_cache_state = BTRFS_DC_SETUP;
2827         spin_unlock(&block_group->lock);
2828
2829         return ret;
2830 }
2831
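/*
 * A sketch of the flow below: the first pass runs cache_save_setup() on
 * every group still in BTRFS_DC_CLEAR, leaving it in BTRFS_DC_SETUP (or
 * BTRFS_DC_ERROR on failure); the second pass writes the dirty block
 * group items and promotes BTRFS_DC_SETUP to BTRFS_DC_NEED_WRITE; the
 * third pass writes out the free space cache for the BTRFS_DC_NEED_WRITE
 * groups and marks them BTRFS_DC_WRITTEN.  Each pass restarts from the
 * top whenever it finds a group that regressed to an earlier state.
 */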
2832 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2833                                    struct btrfs_root *root)
2834 {
2835         struct btrfs_block_group_cache *cache;
2836         int err = 0;
2837         struct btrfs_path *path;
2838         u64 last = 0;
2839
2840         path = btrfs_alloc_path();
2841         if (!path)
2842                 return -ENOMEM;
2843
2844 again:
2845         while (1) {
2846                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2847                 while (cache) {
2848                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2849                                 break;
2850                         cache = next_block_group(root, cache);
2851                 }
2852                 if (!cache) {
2853                         if (last == 0)
2854                                 break;
2855                         last = 0;
2856                         continue;
2857                 }
2858                 err = cache_save_setup(cache, trans, path);
2859                 last = cache->key.objectid + cache->key.offset;
2860                 btrfs_put_block_group(cache);
2861         }
2862
2863         while (1) {
2864                 if (last == 0) {
2865                         err = btrfs_run_delayed_refs(trans, root,
2866                                                      (unsigned long)-1);
2867                         BUG_ON(err);
2868                 }
2869
2870                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2871                 while (cache) {
2872                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2873                                 btrfs_put_block_group(cache);
2874                                 goto again;
2875                         }
2876
2877                         if (cache->dirty)
2878                                 break;
2879                         cache = next_block_group(root, cache);
2880                 }
2881                 if (!cache) {
2882                         if (last == 0)
2883                                 break;
2884                         last = 0;
2885                         continue;
2886                 }
2887
2888                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2889                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2890                 cache->dirty = 0;
2891                 last = cache->key.objectid + cache->key.offset;
2892
2893                 err = write_one_cache_group(trans, root, path, cache);
2894                 BUG_ON(err);
2895                 btrfs_put_block_group(cache);
2896         }
2897
2898         while (1) {
2899                 /*
2900                  * I don't think this is needed since we're just marking our
2901                  * preallocated extent as written, but it can't hurt to do it
2902                  * just in case.
2903                  */
2904                 if (last == 0) {
2905                         err = btrfs_run_delayed_refs(trans, root,
2906                                                      (unsigned long)-1);
2907                         BUG_ON(err);
2908                 }
2909
2910                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2911                 while (cache) {
2912                         /*
2913                          * Really this shouldn't happen, but it could if we
2914                          * couldn't write the entire preallocated extent and
2915                          * splitting the extent resulted in a new block.
2916                          */
2917                         if (cache->dirty) {
2918                                 btrfs_put_block_group(cache);
2919                                 goto again;
2920                         }
2921                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2922                                 break;
2923                         cache = next_block_group(root, cache);
2924                 }
2925                 if (!cache) {
2926                         if (last == 0)
2927                                 break;
2928                         last = 0;
2929                         continue;
2930                 }
2931
2932                 btrfs_write_out_cache(root, trans, cache, path);
2933
2934                 /*
2935                  * If we didn't have an error then the cache state is still
2936                  * NEED_WRITE, so we can set it to WRITTEN.
2937                  */
2938                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2939                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
2940                 last = cache->key.objectid + cache->key.offset;
2941                 btrfs_put_block_group(cache);
2942         }
2943
2944         btrfs_free_path(path);
2945         return 0;
2946 }
2947
2948 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2949 {
2950         struct btrfs_block_group_cache *block_group;
2951         int readonly = 0;
2952
2953         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2954         if (!block_group || block_group->ro)
2955                 readonly = 1;
2956         if (block_group)
2957                 btrfs_put_block_group(block_group);
2958         return readonly;
2959 }
2960
2961 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2962                              u64 total_bytes, u64 bytes_used,
2963                              struct btrfs_space_info **space_info)
2964 {
2965         struct btrfs_space_info *found;
2966         int i;
2967         int factor;
2968
2969         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2970                      BTRFS_BLOCK_GROUP_RAID10))
2971                 factor = 2;
2972         else
2973                 factor = 1;
2974
2975         found = __find_space_info(info, flags);
2976         if (found) {
2977                 spin_lock(&found->lock);
2978                 found->total_bytes += total_bytes;
2979                 found->bytes_used += bytes_used;
2980                 found->disk_used += bytes_used * factor;
2981                 found->full = 0;
2982                 spin_unlock(&found->lock);
2983                 *space_info = found;
2984                 return 0;
2985         }
2986         found = kzalloc(sizeof(*found), GFP_NOFS);
2987         if (!found)
2988                 return -ENOMEM;
2989
2990         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2991                 INIT_LIST_HEAD(&found->block_groups[i]);
2992         init_rwsem(&found->groups_sem);
2993         spin_lock_init(&found->lock);
2994         found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2995                                 BTRFS_BLOCK_GROUP_SYSTEM |
2996                                 BTRFS_BLOCK_GROUP_METADATA);
2997         found->total_bytes = total_bytes;
2998         found->bytes_used = bytes_used;
2999         found->disk_used = bytes_used * factor;
3000         found->bytes_pinned = 0;
3001         found->bytes_reserved = 0;
3002         found->bytes_readonly = 0;
3003         found->bytes_may_use = 0;
3004         found->full = 0;
3005         found->force_alloc = 0;
3006         *space_info = found;
3007         list_add_rcu(&found->list, &info->space_info);
3008         atomic_set(&found->caching_threads, 0);
3009         return 0;
3010 }
3011
3012 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3013 {
3014         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
3015                                    BTRFS_BLOCK_GROUP_RAID1 |
3016                                    BTRFS_BLOCK_GROUP_RAID10 |
3017                                    BTRFS_BLOCK_GROUP_DUP);
3018         if (extra_flags) {
3019                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3020                         fs_info->avail_data_alloc_bits |= extra_flags;
3021                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3022                         fs_info->avail_metadata_alloc_bits |= extra_flags;
3023                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3024                         fs_info->avail_system_alloc_bits |= extra_flags;
3025         }
3026 }
3027
3028 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3029 {
3030         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3031
3032         if (num_devices == 1)
3033                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3034         if (num_devices < 4)
3035                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3036
3037         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3038             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3039                       BTRFS_BLOCK_GROUP_RAID10))) {
3040                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3041         }
3042
3043         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3044             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3045                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3046         }
3047
3048         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3049             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3050                       BTRFS_BLOCK_GROUP_RAID10 |
3051                       BTRFS_BLOCK_GROUP_DUP)))
3052                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3053         return flags;
3054 }
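
/*
 * Two illustrative reductions of the above: on a one-device filesystem
 * RAID1|DUP loses RAID1 (too few devices) and reduces to DUP, while on a
 * two-device filesystem RAID1 survives, so the same RAID1|DUP input drops
 * DUP and reduces to RAID1.
 */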
3055
3056 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3057 {
3058         if (flags & BTRFS_BLOCK_GROUP_DATA)
3059                 flags |= root->fs_info->avail_data_alloc_bits &
3060                          root->fs_info->data_alloc_profile;
3061         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3062                 flags |= root->fs_info->avail_system_alloc_bits &
3063                          root->fs_info->system_alloc_profile;
3064         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3065                 flags |= root->fs_info->avail_metadata_alloc_bits &
3066                          root->fs_info->metadata_alloc_profile;
3067         return btrfs_reduce_alloc_profile(root, flags);
3068 }
3069
3070 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3071 {
3072         u64 flags;
3073
3074         if (data)
3075                 flags = BTRFS_BLOCK_GROUP_DATA;
3076         else if (root == root->fs_info->chunk_root)
3077                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3078         else
3079                 flags = BTRFS_BLOCK_GROUP_METADATA;
3080
3081         return get_alloc_profile(root, flags);
3082 }
3083
3084 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3085 {
3086         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3087                                                        BTRFS_BLOCK_GROUP_DATA);
3088 }
3089
3090 /*
3091  * This checks the space info that the inode allocates from to make sure we
3092  * have enough space for the requested number of bytes.
3093  */
3094 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3095 {
3096         struct btrfs_space_info *data_sinfo;
3097         struct btrfs_root *root = BTRFS_I(inode)->root;
3098         u64 used;
3099         int ret = 0, committed = 0, alloc_chunk = 1;
3100
3101         /* make sure bytes are sectorsize aligned */
3102         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3103
3104         if (root == root->fs_info->tree_root) {
3105                 alloc_chunk = 0;
3106                 committed = 1;
3107         }
3108
3109         data_sinfo = BTRFS_I(inode)->space_info;
3110         if (!data_sinfo)
3111                 goto alloc;
3112
3113 again:
3114         /* make sure we have enough space to handle the data first */
3115         spin_lock(&data_sinfo->lock);
3116         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3117                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3118                 data_sinfo->bytes_may_use;
3119
3120         if (used + bytes > data_sinfo->total_bytes) {
3121                 struct btrfs_trans_handle *trans;
3122
3123                 /*
3124                  * if we don't have enough free bytes in this space then we need
3125                  * to alloc a new chunk.
3126                  */
3127                 if (!data_sinfo->full && alloc_chunk) {
3128                         u64 alloc_target;
3129
3130                         data_sinfo->force_alloc = 1;
3131                         spin_unlock(&data_sinfo->lock);
3132 alloc:
3133                         alloc_target = btrfs_get_alloc_profile(root, 1);
3134                         trans = btrfs_join_transaction(root, 1);
3135                         if (IS_ERR(trans))
3136                                 return PTR_ERR(trans);
3137
3138                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3139                                              bytes + 2 * 1024 * 1024,
3140                                              alloc_target, 0);
3141                         btrfs_end_transaction(trans, root);
3142                         if (ret < 0)
3143                                 return ret;
3144
3145                         if (!data_sinfo) {
3146                                 btrfs_set_inode_space_info(root, inode);
3147                                 data_sinfo = BTRFS_I(inode)->space_info;
3148                         }
3149                         goto again;
3150                 }
3151                 spin_unlock(&data_sinfo->lock);
3152
3153                 /* commit the current transaction and try again */
3154                 if (!committed && !root->fs_info->open_ioctl_trans) {
3155                         committed = 1;
3156                         trans = btrfs_join_transaction(root, 1);
3157                         if (IS_ERR(trans))
3158                                 return PTR_ERR(trans);
3159                         ret = btrfs_commit_transaction(trans, root);
3160                         if (ret)
3161                                 return ret;
3162                         goto again;
3163                 }
3164
3165 #if 0 /* I hope we never need this code again, just in case */
3166                 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3167                        "%llu bytes_reserved, " "%llu bytes_pinned, "
3168                        "%llu bytes_readonly, %llu may use %llu total\n",
3169                        (unsigned long long)bytes,
3170                        (unsigned long long)data_sinfo->bytes_used,
3171                        (unsigned long long)data_sinfo->bytes_reserved,
3172                        (unsigned long long)data_sinfo->bytes_pinned,
3173                        (unsigned long long)data_sinfo->bytes_readonly,
3174                        (unsigned long long)data_sinfo->bytes_may_use,
3175                        (unsigned long long)data_sinfo->total_bytes);
3176 #endif
3177                 return -ENOSPC;
3178         }
3179         data_sinfo->bytes_may_use += bytes;
3180         BTRFS_I(inode)->reserved_bytes += bytes;
3181         spin_unlock(&data_sinfo->lock);
3182
3183         return 0;
3184 }
3185
3186 /*
3187  * Called when we are clearing a delalloc extent from the inode's
3188  * io_tree, or when an error occurred for whatever reason after
3189  * calling btrfs_check_data_free_space.
3190  */
3191 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3192 {
3193         struct btrfs_root *root = BTRFS_I(inode)->root;
3194         struct btrfs_space_info *data_sinfo;
3195
3196         /* make sure bytes are sectorsize aligned */
3197         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3198
3199         data_sinfo = BTRFS_I(inode)->space_info;
3200         spin_lock(&data_sinfo->lock);
3201         data_sinfo->bytes_may_use -= bytes;
3202         BTRFS_I(inode)->reserved_bytes -= bytes;
3203         spin_unlock(&data_sinfo->lock);
3204 }
3205
3206 static void force_metadata_allocation(struct btrfs_fs_info *info)
3207 {
3208         struct list_head *head = &info->space_info;
3209         struct btrfs_space_info *found;
3210
3211         rcu_read_lock();
3212         list_for_each_entry_rcu(found, head, list) {
3213                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3214                         found->force_alloc = 1;
3215         }
3216         rcu_read_unlock();
3217 }
3218
3219 static int should_alloc_chunk(struct btrfs_space_info *sinfo,
3220                               u64 alloc_bytes)
3221 {
3222         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3223
3224         if (sinfo->bytes_used + sinfo->bytes_reserved +
3225             alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3226                 return 0;
3227
3228         if (sinfo->bytes_used + sinfo->bytes_reserved +
3229             alloc_bytes < div_factor(num_bytes, 8))
3230                 return 0;
3231
3232         return 1;
3233 }
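
/*
 * Illustrative numbers for the check above: with 1GiB of writable space
 * in the space_info, a 10MiB allocation only forces a new chunk once
 * roughly 900MiB is used plus reserved -- at that point the request is
 * within 256MiB of the total and past the div_factor(num_bytes, 8),
 * i.e. 80%, threshold.
 */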
3234
3235 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3236                           struct btrfs_root *extent_root, u64 alloc_bytes,
3237                           u64 flags, int force)
3238 {
3239         struct btrfs_space_info *space_info;
3240         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3241         int ret = 0;
3242
3243         mutex_lock(&fs_info->chunk_mutex);
3244
3245         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3246
3247         space_info = __find_space_info(extent_root->fs_info, flags);
3248         if (!space_info) {
3249                 ret = update_space_info(extent_root->fs_info, flags,
3250                                         0, 0, &space_info);
3251                 BUG_ON(ret);
3252         }
3253         BUG_ON(!space_info);
3254
3255         spin_lock(&space_info->lock);
3256         if (space_info->force_alloc)
3257                 force = 1;
3258         if (space_info->full) {
3259                 spin_unlock(&space_info->lock);
3260                 goto out;
3261         }
3262
3263         if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
3264                 spin_unlock(&space_info->lock);
3265                 goto out;
3266         }
3267         spin_unlock(&space_info->lock);
3268
3269         /*
3270          * if we're doing a data chunk, go ahead and make sure that
3271          * we keep a reasonable number of metadata chunks allocated in the
3272          * FS as well.
3273          */
3274         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3275                 fs_info->data_chunk_allocations++;
3276                 if (!(fs_info->data_chunk_allocations %
3277                       fs_info->metadata_ratio))
3278                         force_metadata_allocation(fs_info);
3279         }
3280
3281         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3282         spin_lock(&space_info->lock);
3283         if (ret)
3284                 space_info->full = 1;
3285         else
3286                 ret = 1;
3287         space_info->force_alloc = 0;
3288         spin_unlock(&space_info->lock);
3289 out:
3290         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3291         return ret;
3292 }
3293
3294 static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
3295                                 struct btrfs_root *root,
3296                                 struct btrfs_space_info *sinfo, u64 num_bytes)
3297 {
3298         int ret;
3299         int end_trans = 0;
3300
3301         if (sinfo->full)
3302                 return 0;
3303
3304         spin_lock(&sinfo->lock);
3305         ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
3306         spin_unlock(&sinfo->lock);
3307         if (!ret)
3308                 return 0;
3309
3310         if (!trans) {
3311                 trans = btrfs_join_transaction(root, 1);
3312                 BUG_ON(IS_ERR(trans));
3313                 end_trans = 1;
3314         }
3315
3316         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3317                              num_bytes + 2 * 1024 * 1024,
3318                              get_alloc_profile(root, sinfo->flags), 0);
3319
3320         if (end_trans)
3321                 btrfs_end_transaction(trans, root);
3322
3323         return ret == 1 ? 1 : 0;
3324 }
3325
3326 /*
3327  * shrink metadata reservation for delalloc
3328  */
3329 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3330                            struct btrfs_root *root, u64 to_reclaim)
3331 {
3332         struct btrfs_block_rsv *block_rsv;
3333         u64 reserved;
3334         u64 max_reclaim;
3335         u64 reclaimed = 0;
3336         int pause = 1;
3337         int ret;
3338
3339         block_rsv = &root->fs_info->delalloc_block_rsv;
3340         spin_lock(&block_rsv->lock);
3341         reserved = block_rsv->reserved;
3342         spin_unlock(&block_rsv->lock);
3343
3344         if (reserved == 0)
3345                 return 0;
3346
3347         max_reclaim = min(reserved, to_reclaim);
3348
3349         while (1) {
3350                 ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
3351                 if (!ret) {
3352                         __set_current_state(TASK_INTERRUPTIBLE);
3353                         schedule_timeout(pause);
3354                         pause <<= 1;
3355                         if (pause > HZ / 10)
3356                                 pause = HZ / 10;
3357                 } else {
3358                         pause = 1;
3359                 }
3360
3361                 spin_lock(&block_rsv->lock);
3362                 if (reserved > block_rsv->reserved)
3363                         reclaimed = reserved - block_rsv->reserved;
3364                 reserved = block_rsv->reserved;
3365                 spin_unlock(&block_rsv->lock);
3366
3367                 if (reserved == 0 || reclaimed >= max_reclaim)
3368                         break;
3369
3370                 if (trans && trans->transaction->blocked)
3371                         return -EAGAIN;
3372         }
3373         return reclaimed >= to_reclaim;
3374 }
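
/*
 * The pause above backs off exponentially: it doubles from one jiffy up
 * to a cap of HZ / 10 while btrfs_start_one_delalloc_inode() finds
 * nothing to flush, and resets to one jiffy as soon as progress is made
 * again.
 */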
3375
3376 static int should_retry_reserve(struct btrfs_trans_handle *trans,
3377                                 struct btrfs_root *root,
3378                                 struct btrfs_block_rsv *block_rsv,
3379                                 u64 num_bytes, int *retries)
3380 {
3381         struct btrfs_space_info *space_info = block_rsv->space_info;
3382         int ret;
3383
3384         if ((*retries) > 2)
3385                 return -ENOSPC;
3386
3387         ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
3388         if (ret)
3389                 return 1;
3390
3391         if (trans && trans->transaction->in_commit)
3392                 return -ENOSPC;
3393
3394         ret = shrink_delalloc(trans, root, num_bytes);
3395         if (ret)
3396                 return ret;
3397
3398         spin_lock(&space_info->lock);
3399         if (space_info->bytes_pinned < num_bytes)
3400                 ret = 1;
3401         spin_unlock(&space_info->lock);
3402         if (ret)
3403                 return -ENOSPC;
3404
3405         (*retries)++;
3406
3407         if (trans)
3408                 return -EAGAIN;
3409
3410         trans = btrfs_join_transaction(root, 1);
3411         BUG_ON(IS_ERR(trans));
3412         ret = btrfs_commit_transaction(trans, root);
3413         BUG_ON(ret);
3414
3415         return 1;
3416 }
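
/*
 * Return protocol of the helper above, for reference: 1 asks the caller
 * to retry the reservation, -ENOSPC means give up (more than two
 * retries, a committing transaction, or too little pinned space to be
 * worth waiting for), and -EAGAIN tells a caller that already holds a
 * transaction handle to drop it and retry from a clean context.
 */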
3417
3418 static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
3419                                   u64 num_bytes)
3420 {
3421         struct btrfs_space_info *space_info = block_rsv->space_info;
3422         u64 unused;
3423         int ret = -ENOSPC;
3424
3425         spin_lock(&space_info->lock);
3426         unused = space_info->bytes_used + space_info->bytes_reserved +
3427                  space_info->bytes_pinned + space_info->bytes_readonly;
3428
3429         if (unused < space_info->total_bytes)
3430                 unused = space_info->total_bytes - unused;
3431         else
3432                 unused = 0;
3433
3434         if (unused >= num_bytes) {
3435                 if (block_rsv->priority >= 10) {
3436                         space_info->bytes_reserved += num_bytes;
3437                         ret = 0;
3438                 } else {
3439                         if ((unused + block_rsv->reserved) *
3440                             block_rsv->priority >=
3441                             (num_bytes + block_rsv->reserved) * 10) {
3442                                 space_info->bytes_reserved += num_bytes;
3443                                 ret = 0;
3444                         }
3445                 }
3446         }
3447         spin_unlock(&space_info->lock);
3448
3449         return ret;
3450 }
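
/*
 * A worked example of the priority check above: with the default
 * priority of 6 and nothing yet reserved in the rsv, a request is
 * granted only while unused * 6 >= num_bytes * 10, i.e. while the
 * space_info keeps roughly 1.67x the requested bytes unused.
 */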
3451
3452 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3453                                              struct btrfs_root *root)
3454 {
3455         struct btrfs_block_rsv *block_rsv;
3456         if (root->ref_cows)
3457                 block_rsv = trans->block_rsv;
3458         else
3459                 block_rsv = root->block_rsv;
3460
3461         if (!block_rsv)
3462                 block_rsv = &root->fs_info->empty_block_rsv;
3463
3464         return block_rsv;
3465 }
3466
3467 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3468                                u64 num_bytes)
3469 {
3470         int ret = -ENOSPC;
3471         spin_lock(&block_rsv->lock);
3472         if (block_rsv->reserved >= num_bytes) {
3473                 block_rsv->reserved -= num_bytes;
3474                 if (block_rsv->reserved < block_rsv->size)
3475                         block_rsv->full = 0;
3476                 ret = 0;
3477         }
3478         spin_unlock(&block_rsv->lock);
3479         return ret;
3480 }
3481
3482 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3483                                 u64 num_bytes, int update_size)
3484 {
3485         spin_lock(&block_rsv->lock);
3486         block_rsv->reserved += num_bytes;
3487         if (update_size)
3488                 block_rsv->size += num_bytes;
3489         else if (block_rsv->reserved >= block_rsv->size)
3490                 block_rsv->full = 1;
3491         spin_unlock(&block_rsv->lock);
3492 }
3493
3494 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3495                              struct btrfs_block_rsv *dest, u64 num_bytes)
3496 {
3497         struct btrfs_space_info *space_info = block_rsv->space_info;
3498
3499         spin_lock(&block_rsv->lock);
3500         if (num_bytes == (u64)-1)
3501                 num_bytes = block_rsv->size;
3502         block_rsv->size -= num_bytes;
3503         if (block_rsv->reserved >= block_rsv->size) {
3504                 num_bytes = block_rsv->reserved - block_rsv->size;
3505                 block_rsv->reserved = block_rsv->size;
3506                 block_rsv->full = 1;
3507         } else {
3508                 num_bytes = 0;
3509         }
3510         spin_unlock(&block_rsv->lock);
3511
3512         if (num_bytes > 0) {
3513                 if (dest) {
3514                         block_rsv_add_bytes(dest, num_bytes, 0);
3515                 } else {
3516                         spin_lock(&space_info->lock);
3517                         space_info->bytes_reserved -= num_bytes;
3518                         spin_unlock(&space_info->lock);
3519                 }
3520         }
3521 }
3522
3523 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3524                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3525 {
3526         int ret;
3527
3528         ret = block_rsv_use_bytes(src, num_bytes);
3529         if (ret)
3530                 return ret;
3531
3532         block_rsv_add_bytes(dst, num_bytes, 1);
3533         return 0;
3534 }
3535
3536 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3537 {
3538         memset(rsv, 0, sizeof(*rsv));
3539         spin_lock_init(&rsv->lock);
3540         atomic_set(&rsv->usage, 1);
3541         rsv->priority = 6;
3542         INIT_LIST_HEAD(&rsv->list);
3543 }
3544
3545 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3546 {
3547         struct btrfs_block_rsv *block_rsv;
3548         struct btrfs_fs_info *fs_info = root->fs_info;
3549         u64 alloc_target;
3550
3551         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3552         if (!block_rsv)
3553                 return NULL;
3554
3555         btrfs_init_block_rsv(block_rsv);
3556
3557         alloc_target = btrfs_get_alloc_profile(root, 0);
3558         block_rsv->space_info = __find_space_info(fs_info,
3559                                                   BTRFS_BLOCK_GROUP_METADATA);
3560
3561         return block_rsv;
3562 }
3563
3564 void btrfs_free_block_rsv(struct btrfs_root *root,
3565                           struct btrfs_block_rsv *rsv)
3566 {
3567         if (rsv && atomic_dec_and_test(&rsv->usage)) {
3568                 btrfs_block_rsv_release(root, rsv, (u64)-1);
3569                 if (!rsv->durable)
3570                         kfree(rsv);
3571         }
3572 }
3573
3574 /*
3575  * Make the block_rsv struct able to capture freed space.
3576  * The captured space will be re-added to the block_rsv struct
3577  * after the transaction commits.
3578  */
3579 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3580                                  struct btrfs_block_rsv *block_rsv)
3581 {
3582         block_rsv->durable = 1;
3583         mutex_lock(&fs_info->durable_block_rsv_mutex);
3584         list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3585         mutex_unlock(&fs_info->durable_block_rsv_mutex);
3586 }
3587
3588 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3589                         struct btrfs_root *root,
3590                         struct btrfs_block_rsv *block_rsv,
3591                         u64 num_bytes, int *retries)
3592 {
3593         int ret;
3594
3595         if (num_bytes == 0)
3596                 return 0;
3597 again:
3598         ret = reserve_metadata_bytes(block_rsv, num_bytes);
3599         if (!ret) {
3600                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3601                 return 0;
3602         }
3603
3604         ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries);
3605         if (ret > 0)
3606                 goto again;
3607
3608         return ret;
3609 }
3610
3611 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3612                           struct btrfs_root *root,
3613                           struct btrfs_block_rsv *block_rsv,
3614                           u64 min_reserved, int min_factor)
3615 {
3616         u64 num_bytes = 0;
3617         int commit_trans = 0;
3618         int ret = -ENOSPC;
3619
3620         if (!block_rsv)
3621                 return 0;
3622
3623         spin_lock(&block_rsv->lock);
3624         if (min_factor > 0)
3625                 num_bytes = div_factor(block_rsv->size, min_factor);
3626         if (min_reserved > num_bytes)
3627                 num_bytes = min_reserved;
3628
3629         if (block_rsv->reserved >= num_bytes) {
3630                 ret = 0;
3631         } else {
3632                 num_bytes -= block_rsv->reserved;
3633                 if (block_rsv->durable &&
3634                     block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3635                         commit_trans = 1;
3636         }
3637         spin_unlock(&block_rsv->lock);
3638         if (!ret)
3639                 return 0;
3640
3641         if (block_rsv->refill_used) {
3642                 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3643                 if (!ret) {
3644                         block_rsv_add_bytes(block_rsv, num_bytes, 0);
3645                         return 0;
3646                 }
3647         }
3648
3649         if (commit_trans) {
3650                 if (trans)
3651                         return -EAGAIN;
3652
3653                 trans = btrfs_join_transaction(root, 1);
3654                 BUG_ON(IS_ERR(trans));
3655                 ret = btrfs_commit_transaction(trans, root);
3656                 return 0;
3657         }
3658
3659         WARN_ON(1);
3660         printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
3661                 block_rsv->size, block_rsv->reserved,
3662                 block_rsv->freed[0], block_rsv->freed[1]);
3663
3664         return -ENOSPC;
3665 }
3666
3667 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3668                             struct btrfs_block_rsv *dst_rsv,
3669                             u64 num_bytes)
3670 {
3671         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3672 }
3673
3674 void btrfs_block_rsv_release(struct btrfs_root *root,
3675                              struct btrfs_block_rsv *block_rsv,
3676                              u64 num_bytes)
3677 {
3678         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3679         if (global_rsv->full || global_rsv == block_rsv ||
3680             block_rsv->space_info != global_rsv->space_info)
3681                 global_rsv = NULL;
3682         block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3683 }
3684
3685 /*
3686  * Helper to calculate the size of the global block reservation.
3687  * The desired value is the sum of the space used by the extent tree,
3688  * the checksum tree and the root tree.
3689  */
3690 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3691 {
3692         struct btrfs_space_info *sinfo;
3693         u64 num_bytes;
3694         u64 meta_used;
3695         u64 data_used;
3696         int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3697 #if 0
3698         /*
3699          * per-tree used space accounting can be inaccurate, so we
3700          * can't rely on it.
3701          */
3702         spin_lock(&fs_info->extent_root->accounting_lock);
3703         num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3704         spin_unlock(&fs_info->extent_root->accounting_lock);
3705
3706         spin_lock(&fs_info->csum_root->accounting_lock);
3707         num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3708         spin_unlock(&fs_info->csum_root->accounting_lock);
3709
3710         spin_lock(&fs_info->tree_root->accounting_lock);
3711         num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3712         spin_unlock(&fs_info->tree_root->accounting_lock);
3713 #endif
3714         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3715         spin_lock(&sinfo->lock);
3716         data_used = sinfo->bytes_used;
3717         spin_unlock(&sinfo->lock);
3718
3719         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3720         spin_lock(&sinfo->lock);
3721         meta_used = sinfo->bytes_used;
3722         spin_unlock(&sinfo->lock);
3723
3724         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3725                     csum_size * 2;
3726         num_bytes += div64_u64(data_used + meta_used, 50);
3727
3728         if (num_bytes * 3 > meta_used)
3729                 num_bytes = div64_u64(meta_used, 3);
3730
3731         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3732 }
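
/*
 * A worked example of the sizing above, assuming 4K blocks and the
 * 4-byte crc32c checksum: 100GiB of data contributes
 * (100GiB / 4096) * 4 * 2 = 200MiB for checksums, plus 1/50th of
 * data + metadata.  With 3GiB of metadata in use that comes to roughly
 * 200MiB + 2.06GiB = ~2.26GiB, which trips the meta_used / 3 cap and
 * leaves a 1GiB reservation before the final leafsize alignment.
 */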
3733
3734 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3735 {
3736         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3737         struct btrfs_space_info *sinfo = block_rsv->space_info;
3738         u64 num_bytes;
3739
3740         num_bytes = calc_global_metadata_size(fs_info);
3741
3742         spin_lock(&block_rsv->lock);
3743         spin_lock(&sinfo->lock);
3744
3745         block_rsv->size = num_bytes;
3746
3747         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3748                     sinfo->bytes_reserved + sinfo->bytes_readonly;
3749
3750         if (sinfo->total_bytes > num_bytes) {
3751                 num_bytes = sinfo->total_bytes - num_bytes;
3752                 block_rsv->reserved += num_bytes;
3753                 sinfo->bytes_reserved += num_bytes;
3754         }
3755
3756         if (block_rsv->reserved >= block_rsv->size) {
3757                 num_bytes = block_rsv->reserved - block_rsv->size;
3758                 sinfo->bytes_reserved -= num_bytes;
3759                 block_rsv->reserved = block_rsv->size;
3760                 block_rsv->full = 1;
3761         }
3762 #if 0
3763         printk(KERN_INFO "global block rsv size %llu reserved %llu\n",
3764                 block_rsv->size, block_rsv->reserved);
3765 #endif
3766         spin_unlock(&sinfo->lock);
3767         spin_unlock(&block_rsv->lock);
3768 }
3769
3770 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3771 {
3772         struct btrfs_space_info *space_info;
3773
3774         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3775         fs_info->chunk_block_rsv.space_info = space_info;
3776         fs_info->chunk_block_rsv.priority = 10;
3777
3778         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3779         fs_info->global_block_rsv.space_info = space_info;
3780         fs_info->global_block_rsv.priority = 10;
3781         fs_info->global_block_rsv.refill_used = 1;
3782         fs_info->delalloc_block_rsv.space_info = space_info;
3783         fs_info->trans_block_rsv.space_info = space_info;
3784         fs_info->empty_block_rsv.space_info = space_info;
3785         fs_info->empty_block_rsv.priority = 10;
3786
3787         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3788         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3789         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3790         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3791         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3792
3793         btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3794
3795         btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3796
3797         update_global_block_rsv(fs_info);
3798 }
3799
3800 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3801 {
3802         block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3803         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3804         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3805         WARN_ON(fs_info->trans_block_rsv.size > 0);
3806         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3807         WARN_ON(fs_info->chunk_block_rsv.size > 0);
3808         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3809 }
3810
3811 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3812 {
3813         return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3814                 3 * num_items;
3815 }
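
/*
 * Illustrative arithmetic for the above, assuming 4K leaves and nodes:
 * with BTRFS_MAX_LEVEL == 8, one item costs (4096 + 4096 * 7) * 3 =
 * 96KiB, i.e. enough to CoW a full-height path in up to three trees.
 */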
3816
3817 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3818                                  struct btrfs_root *root,
3819                                  int num_items, int *retries)
3820 {
3821         u64 num_bytes;
3822         int ret;
3823
3824         if (num_items == 0 || root->fs_info->chunk_root == root)
3825                 return 0;
3826
3827         num_bytes = calc_trans_metadata_size(root, num_items);
3828         ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3829                                   num_bytes, retries);
3830         if (!ret) {
3831                 trans->bytes_reserved += num_bytes;
3832                 trans->block_rsv = &root->fs_info->trans_block_rsv;
3833         }
3834         return ret;
3835 }
3836
3837 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3838                                   struct btrfs_root *root)
3839 {
3840         if (!trans->bytes_reserved)
3841                 return;
3842
3843         BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3844         btrfs_block_rsv_release(root, trans->block_rsv,
3845                                 trans->bytes_reserved);
3846         trans->bytes_reserved = 0;
3847 }
3848
3849 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3850                                   struct inode *inode)
3851 {
3852         struct btrfs_root *root = BTRFS_I(inode)->root;
3853         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3854         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3855
3856         /*
3857          * One unit for deleting the orphan item, one for updating the inode
3858          * and two for calling btrfs_truncate_inode_items.
3859          *
3860          * btrfs_truncate_inode_items is a delete operation; in most cases it
3861          * frees more space than it uses, so two units of metadata space
3862          * should be enough to call it many times.  If all of the metadata
3863          * space is used up, we can commit the transaction and use the space
3864          * it freed.
3865          */
3866         u64 num_bytes = calc_trans_metadata_size(root, 4);
3867         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3868 }
3869
3870 void btrfs_orphan_release_metadata(struct inode *inode)
3871 {
3872         struct btrfs_root *root = BTRFS_I(inode)->root;
3873         u64 num_bytes = calc_trans_metadata_size(root, 4);
3874         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3875 }
3876
3877 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3878                                 struct btrfs_pending_snapshot *pending)
3879 {
3880         struct btrfs_root *root = pending->root;
3881         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3882         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3883         /*
3884          * Two units for the root back/forward refs, two for the directory
3885          * entries and one for the root of the snapshot.
3886          */
3887         u64 num_bytes = calc_trans_metadata_size(root, 5);
3888         dst_rsv->space_info = src_rsv->space_info;
3889         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3890 }
3891
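/*
 * Roughly one eighth of the data bytes is set aside for checksum
 * metadata below: a 1MiB delalloc reservation, for example, adds 128KiB
 * on top of the per-extent metadata cost.
 */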
3892 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3893 {
3894         return num_bytes >> 3;
3895 }
3896
3897 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3898 {
3899         struct btrfs_root *root = BTRFS_I(inode)->root;
3900         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3901         u64 to_reserve;
3902         int nr_extents;
3903         int retries = 0;
3904         int ret;
3905
3906         if (btrfs_transaction_in_commit(root->fs_info))
3907                 schedule_timeout(1);
3908
3909         num_bytes = ALIGN(num_bytes, root->sectorsize);
3910 again:
3911         spin_lock(&BTRFS_I(inode)->accounting_lock);
3912         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3913         if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3914                 nr_extents -= BTRFS_I(inode)->reserved_extents;
3915                 to_reserve = calc_trans_metadata_size(root, nr_extents);
3916         } else {
3917                 nr_extents = 0;
3918                 to_reserve = 0;
3919         }
3920
3921         to_reserve += calc_csum_metadata_size(inode, num_bytes);
3922         ret = reserve_metadata_bytes(block_rsv, to_reserve);
3923         if (ret) {
3924                 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3925                 ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
3926                                            &retries);
3927                 if (ret > 0)
3928                         goto again;
3929                 return ret;
3930         }
3931
3932         BTRFS_I(inode)->reserved_extents += nr_extents;
3933         atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3934         spin_unlock(&BTRFS_I(inode)->accounting_lock);
3935
3936         block_rsv_add_bytes(block_rsv, to_reserve, 1);
3937
3938         if (block_rsv->size > 512 * 1024 * 1024)
3939                 shrink_delalloc(NULL, root, to_reserve);
3940
3941         return 0;
3942 }
3943
3944 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3945 {
3946         struct btrfs_root *root = BTRFS_I(inode)->root;
3947         u64 to_free;
3948         int nr_extents;
3949
3950         num_bytes = ALIGN(num_bytes, root->sectorsize);
3951         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
3952
3953         spin_lock(&BTRFS_I(inode)->accounting_lock);
3954         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
3955         if (nr_extents < BTRFS_I(inode)->reserved_extents) {
3956                 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
3957                 BTRFS_I(inode)->reserved_extents -= nr_extents;
3958         } else {
3959                 nr_extents = 0;
3960         }
3961         spin_unlock(&BTRFS_I(inode)->accounting_lock);
3962
3963         to_free = calc_csum_metadata_size(inode, num_bytes);
3964         if (nr_extents > 0)
3965                 to_free += calc_trans_metadata_size(root, nr_extents);
3966
3967         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
3968                                 to_free);
3969 }
3970
3971 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
3972 {
3973         int ret;
3974
3975         ret = btrfs_check_data_free_space(inode, num_bytes);
3976         if (ret)
3977                 return ret;
3978
3979         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
3980         if (ret) {
3981                 btrfs_free_reserved_data_space(inode, num_bytes);
3982                 return ret;
3983         }
3984
3985         return 0;
3986 }
3987
3988 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
3989 {
3990         btrfs_delalloc_release_metadata(inode, num_bytes);
3991         btrfs_free_reserved_data_space(inode, num_bytes);
3992 }
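
/*
 * An illustrative caller pattern for the data+metadata pair above, with
 * prepare_write_pages() standing in for a hypothetical write path:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = prepare_write_pages(inode, pos, num_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *	return ret;
 */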
3993
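/*
 * In the allocation case below, bytes move from reserved to used in both
 * the block group and its space_info; in the free case they move from
 * used to pinned, and the range is marked dirty in pinned_extents so it
 * can be returned to the free space cache once the transaction commits.
 */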
3994 static int update_block_group(struct btrfs_trans_handle *trans,
3995                               struct btrfs_root *root,
3996                               u64 bytenr, u64 num_bytes, int alloc)
3997 {
3998         struct btrfs_block_group_cache *cache = NULL;
3999         struct btrfs_fs_info *info = root->fs_info;
4000         u64 total = num_bytes;
4001         u64 old_val;
4002         u64 byte_in_group;
4003         int factor;
4004
4005         /* block accounting for super block */
4006         spin_lock(&info->delalloc_lock);
4007         old_val = btrfs_super_bytes_used(&info->super_copy);
4008         if (alloc)
4009                 old_val += num_bytes;
4010         else
4011                 old_val -= num_bytes;
4012         btrfs_set_super_bytes_used(&info->super_copy, old_val);
4013         spin_unlock(&info->delalloc_lock);
4014
4015         while (total) {
4016                 cache = btrfs_lookup_block_group(info, bytenr);
4017                 if (!cache)
4018                         return -1;
4019                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4020                                     BTRFS_BLOCK_GROUP_RAID1 |
4021                                     BTRFS_BLOCK_GROUP_RAID10))
4022                         factor = 2;
4023                 else
4024                         factor = 1;
4025                 /*
4026                  * If this block group has free space cache written out, we
4027                  * need to make sure to load it if we are removing space.  This
4028                  * is because we need the unpinning stage to actually add the
4029                  * space back to the block group, otherwise we will leak space.
4030                  */
4031                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4032                         cache_block_group(cache, trans, 1);
4033
4034                 byte_in_group = bytenr - cache->key.objectid;
4035                 WARN_ON(byte_in_group > cache->key.offset);
4036
4037                 spin_lock(&cache->space_info->lock);
4038                 spin_lock(&cache->lock);
4039
4040                 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4041                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4042                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4043
4044                 cache->dirty = 1;
4045                 old_val = btrfs_block_group_used(&cache->item);
4046                 num_bytes = min(total, cache->key.offset - byte_in_group);
4047                 if (alloc) {
4048                         old_val += num_bytes;
4049                         btrfs_set_block_group_used(&cache->item, old_val);
4050                         cache->reserved -= num_bytes;
4051                         cache->space_info->bytes_reserved -= num_bytes;
4052                         cache->space_info->bytes_used += num_bytes;
4053                         cache->space_info->disk_used += num_bytes * factor;
4054                         spin_unlock(&cache->lock);
4055                         spin_unlock(&cache->space_info->lock);
4056                 } else {
4057                         old_val -= num_bytes;
4058                         btrfs_set_block_group_used(&cache->item, old_val);
4059                         cache->pinned += num_bytes;
4060                         cache->space_info->bytes_pinned += num_bytes;
4061                         cache->space_info->bytes_used -= num_bytes;
4062                         cache->space_info->disk_used -= num_bytes * factor;
4063                         spin_unlock(&cache->lock);
4064                         spin_unlock(&cache->space_info->lock);
4065
4066                         set_extent_dirty(info->pinned_extents,
4067                                          bytenr, bytenr + num_bytes - 1,
4068                                          GFP_NOFS | __GFP_NOFAIL);
4069                 }
4070                 btrfs_put_block_group(cache);
4071                 total -= num_bytes;
4072                 bytenr += num_bytes;
4073         }
4074         return 0;
4075 }
4076
4077 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4078 {
4079         struct btrfs_block_group_cache *cache;
4080         u64 bytenr;
4081
4082         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4083         if (!cache)
4084                 return 0;
4085
4086         bytenr = cache->key.objectid;
4087         btrfs_put_block_group(cache);
4088
4089         return bytenr;
4090 }
4091
4092 static int pin_down_extent(struct btrfs_root *root,
4093                            struct btrfs_block_group_cache *cache,
4094                            u64 bytenr, u64 num_bytes, int reserved)
4095 {
4096         spin_lock(&cache->space_info->lock);
4097         spin_lock(&cache->lock);
4098         cache->pinned += num_bytes;
4099         cache->space_info->bytes_pinned += num_bytes;
4100         if (reserved) {
4101                 cache->reserved -= num_bytes;
4102                 cache->space_info->bytes_reserved -= num_bytes;
4103         }
4104         spin_unlock(&cache->lock);
4105         spin_unlock(&cache->space_info->lock);
4106
4107         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4108                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4109         return 0;
4110 }
4111
4112 /*
4113  * This function must be called within a transaction.
4114  */
4115 int btrfs_pin_extent(struct btrfs_root *root,
4116                      u64 bytenr, u64 num_bytes, int reserved)
4117 {
4118         struct btrfs_block_group_cache *cache;
4119
4120         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4121         BUG_ON(!cache);
4122
4123         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4124
4125         btrfs_put_block_group(cache);
4126         return 0;
4127 }
4128
4129 /*
4130  * Update the size of the reserved extents.  This function may return
4131  * -EAGAIN if 'reserve' is true or 'sinfo' is false.
4132  */
4133 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4134                                  u64 num_bytes, int reserve, int sinfo)
4135 {
4136         int ret = 0;
4137         if (sinfo) {
4138                 struct btrfs_space_info *space_info = cache->space_info;
4139                 spin_lock(&space_info->lock);
4140                 spin_lock(&cache->lock);
4141                 if (reserve) {
4142                         if (cache->ro) {
4143                                 ret = -EAGAIN;
4144                         } else {
4145                                 cache->reserved += num_bytes;
4146                                 space_info->bytes_reserved += num_bytes;
4147                         }
4148                 } else {
4149                         if (cache->ro)
4150                                 space_info->bytes_readonly += num_bytes;
4151                         cache->reserved -= num_bytes;
4152                         space_info->bytes_reserved -= num_bytes;
4153                 }
4154                 spin_unlock(&cache->lock);
4155                 spin_unlock(&space_info->lock);
4156         } else {
4157                 spin_lock(&cache->lock);
4158                 if (cache->ro) {
4159                         ret = -EAGAIN;
4160                 } else {
4161                         if (reserve)
4162                                 cache->reserved += num_bytes;
4163                         else
4164                                 cache->reserved -= num_bytes;
4165                 }
4166                 spin_unlock(&cache->lock);
4167         }
4168         return ret;
4169 }
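/*
 * Hedged usage sketch (added): a caller handling the -EAGAIN described
 * above by retrying through the space_info-aware path, mirroring what
 * btrfs_free_tree_block() does further down.  demo_unreserve() is a
 * hypothetical name, shown only for illustration:
 */
#if 0
static void demo_unreserve(struct btrfs_block_group_cache *cache, u64 len)
{
        if (update_reserved_bytes(cache, len, 0, 0) == -EAGAIN) {
                /* group went read-only; redo via the sinfo branch */
                update_reserved_bytes(cache, len, 0, 1);
        }
}
#endif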
4170
4171 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4172                                 struct btrfs_root *root)
4173 {
4174         struct btrfs_fs_info *fs_info = root->fs_info;
4175         struct btrfs_caching_control *next;
4176         struct btrfs_caching_control *caching_ctl;
4177         struct btrfs_block_group_cache *cache;
4178
4179         down_write(&fs_info->extent_commit_sem);
4180
4181         list_for_each_entry_safe(caching_ctl, next,
4182                                  &fs_info->caching_block_groups, list) {
4183                 cache = caching_ctl->block_group;
4184                 if (block_group_cache_done(cache)) {
4185                         cache->last_byte_to_unpin = (u64)-1;
4186                         list_del_init(&caching_ctl->list);
4187                         put_caching_control(caching_ctl);
4188                 } else {
4189                         cache->last_byte_to_unpin = caching_ctl->progress;
4190                 }
4191         }
4192
4193         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4194                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4195         else
4196                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4197
4198         up_write(&fs_info->extent_commit_sem);
4199
4200         update_global_block_rsv(fs_info);
4201         return 0;
4202 }
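/*
 * Illustrative model (added) of the flip above: extents pinned during one
 * transaction land in one of the two freed_extents trees and are unpinned
 * at the next commit while the other tree fills up.  A tiny userspace
 * analogue of the pointer swap, all names hypothetical:
 */
#if 0
struct demo_fs {
        int freed[2];   /* stand-ins for freed_extents[0..1] */
        int *pinned;    /* stand-in for fs_info->pinned_extents */
};

static void demo_swap_pinned(struct demo_fs *fs)
{
        if (fs->pinned == &fs->freed[0])
                fs->pinned = &fs->freed[1];
        else
                fs->pinned = &fs->freed[0];
}
#endif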
4203
4204 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4205 {
4206         struct btrfs_fs_info *fs_info = root->fs_info;
4207         struct btrfs_block_group_cache *cache = NULL;
4208         u64 len;
4209
4210         while (start <= end) {
4211                 if (!cache ||
4212                     start >= cache->key.objectid + cache->key.offset) {
4213                         if (cache)
4214                                 btrfs_put_block_group(cache);
4215                         cache = btrfs_lookup_block_group(fs_info, start);
4216                         BUG_ON(!cache);
4217                 }
4218
4219                 len = cache->key.objectid + cache->key.offset - start;
4220                 len = min(len, end + 1 - start);
4221
4222                 if (start < cache->last_byte_to_unpin) {
4223                         len = min(len, cache->last_byte_to_unpin - start);
4224                         btrfs_add_free_space(cache, start, len);
4225                 }
4226
4227                 start += len;
4228
4229                 spin_lock(&cache->space_info->lock);
4230                 spin_lock(&cache->lock);
4231                 cache->pinned -= len;
4232                 cache->space_info->bytes_pinned -= len;
4233                 if (cache->ro) {
4234                         cache->space_info->bytes_readonly += len;
4235                 } else if (cache->reserved_pinned > 0) {
4236                         len = min(len, cache->reserved_pinned);
4237                         cache->reserved_pinned -= len;
4238                         cache->space_info->bytes_reserved += len;
4239                 }
4240                 spin_unlock(&cache->lock);
4241                 spin_unlock(&cache->space_info->lock);
4242         }
4243
4244         if (cache)
4245                 btrfs_put_block_group(cache);
4246         return 0;
4247 }
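/*
 * A minimal sketch (added) of the range walk above: each step is clamped
 * to both the end of the current block group and the end of the requested
 * range, then 'start' advances by that length.  Names below are
 * hypothetical:
 */
#if 0
#include <stdint.h>

struct demo_group { uint64_t objectid, offset; };   /* start, length */

static uint64_t demo_step_len(const struct demo_group *g,
                              uint64_t start, uint64_t end)
{
        uint64_t len = g->objectid + g->offset - start;
        if (len > end + 1 - start)
                len = end + 1 - start;          /* min(), as in the loop */
        return len;
}
#endif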
4248
4249 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4250                                struct btrfs_root *root)
4251 {
4252         struct btrfs_fs_info *fs_info = root->fs_info;
4253         struct extent_io_tree *unpin;
4254         struct btrfs_block_rsv *block_rsv;
4255         struct btrfs_block_rsv *next_rsv;
4256         u64 start;
4257         u64 end;
4258         int idx;
4259         int ret;
4260
4261         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4262                 unpin = &fs_info->freed_extents[1];
4263         else
4264                 unpin = &fs_info->freed_extents[0];
4265
4266         while (1) {
4267                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4268                                             EXTENT_DIRTY);
4269                 if (ret)
4270                         break;
4271
4272                 ret = btrfs_discard_extent(root, start, end + 1 - start);
4273
4274                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4275                 unpin_extent_range(root, start, end);
4276                 cond_resched();
4277         }
4278
4279         mutex_lock(&fs_info->durable_block_rsv_mutex);
4280         list_for_each_entry_safe(block_rsv, next_rsv,
4281                                  &fs_info->durable_block_rsv_list, list) {
4282
4283                 idx = trans->transid & 0x1;
4284                 if (block_rsv->freed[idx] > 0) {
4285                         block_rsv_add_bytes(block_rsv,
4286                                             block_rsv->freed[idx], 0);
4287                         block_rsv->freed[idx] = 0;
4288                 }
4289                 if (atomic_read(&block_rsv->usage) == 0) {
4290                         btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4291
4292                         if (block_rsv->freed[0] == 0 &&
4293                             block_rsv->freed[1] == 0) {
4294                                 list_del_init(&block_rsv->list);
4295                                 kfree(block_rsv);
4296                         }
4297                 } else {
4298                         btrfs_block_rsv_release(root, block_rsv, 0);
4299                 }
4300         }
4301         mutex_unlock(&fs_info->durable_block_rsv_mutex);
4302
4303         return 0;
4304 }
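/*
 * Illustrative analogue (added) of the find/clear loop above: walk every
 * contiguous run of set bits until the lookup reports nothing left,
 * exactly the shape of the find_first_extent_bit() loop.  A userspace
 * model over a 32-bit word, names hypothetical:
 */
#if 0
#include <stdint.h>

static int demo_find_first(uint32_t bits, int *start, int *end)
{
        int i = 0;

        while (i < 32 && !(bits & (1u << i)))
                i++;
        if (i == 32)
                return 1;               /* nothing left: loop breaks */
        *start = i;
        while (i < 32 && (bits & (1u << i)))
                i++;
        *end = i - 1;
        return 0;
}
#endif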
4305
4306 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4307                                 struct btrfs_root *root,
4308                                 u64 bytenr, u64 num_bytes, u64 parent,
4309                                 u64 root_objectid, u64 owner_objectid,
4310                                 u64 owner_offset, int refs_to_drop,
4311                                 struct btrfs_delayed_extent_op *extent_op)
4312 {
4313         struct btrfs_key key;
4314         struct btrfs_path *path;
4315         struct btrfs_fs_info *info = root->fs_info;
4316         struct btrfs_root *extent_root = info->extent_root;
4317         struct extent_buffer *leaf;
4318         struct btrfs_extent_item *ei;
4319         struct btrfs_extent_inline_ref *iref;
4320         int ret;
4321         int is_data;
4322         int extent_slot = 0;
4323         int found_extent = 0;
4324         int num_to_del = 1;
4325         u32 item_size;
4326         u64 refs;
4327
4328         path = btrfs_alloc_path();
4329         if (!path)
4330                 return -ENOMEM;
4331
4332         path->reada = 1;
4333         path->leave_spinning = 1;
4334
4335         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4336         BUG_ON(!is_data && refs_to_drop != 1);
4337
4338         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4339                                     bytenr, num_bytes, parent,
4340                                     root_objectid, owner_objectid,
4341                                     owner_offset);
4342         if (ret == 0) {
4343                 extent_slot = path->slots[0];
4344                 while (extent_slot >= 0) {
4345                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4346                                               extent_slot);
4347                         if (key.objectid != bytenr)
4348                                 break;
4349                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4350                             key.offset == num_bytes) {
4351                                 found_extent = 1;
4352                                 break;
4353                         }
4354                         if (path->slots[0] - extent_slot > 5)
4355                                 break;
4356                         extent_slot--;
4357                 }
4358 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4359                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4360                 if (found_extent && item_size < sizeof(*ei))
4361                         found_extent = 0;
4362 #endif
4363                 if (!found_extent) {
4364                         BUG_ON(iref);
4365                         ret = remove_extent_backref(trans, extent_root, path,
4366                                                     NULL, refs_to_drop,
4367                                                     is_data);
4368                         BUG_ON(ret);
4369                         btrfs_release_path(extent_root, path);
4370                         path->leave_spinning = 1;
4371
4372                         key.objectid = bytenr;
4373                         key.type = BTRFS_EXTENT_ITEM_KEY;
4374                         key.offset = num_bytes;
4375
4376                         ret = btrfs_search_slot(trans, extent_root,
4377                                                 &key, path, -1, 1);
4378                         if (ret) {
4379                                 printk(KERN_ERR "umm, got %d back from search"
4380                                        ", was looking for %llu\n", ret,
4381                                        (unsigned long long)bytenr);
4382                                 btrfs_print_leaf(extent_root, path->nodes[0]);
4383                         }
4384                         BUG_ON(ret);
4385                         extent_slot = path->slots[0];
4386                 }
4387         } else {
4388                 btrfs_print_leaf(extent_root, path->nodes[0]);
4389                 WARN_ON(1);
4390                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4391                        "parent %llu root %llu  owner %llu offset %llu\n",
4392                        (unsigned long long)bytenr,
4393                        (unsigned long long)parent,
4394                        (unsigned long long)root_objectid,
4395                        (unsigned long long)owner_objectid,
4396                        (unsigned long long)owner_offset);
4397         }
4398
4399         leaf = path->nodes[0];
4400         item_size = btrfs_item_size_nr(leaf, extent_slot);
4401 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4402         if (item_size < sizeof(*ei)) {
4403                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4404                 ret = convert_extent_item_v0(trans, extent_root, path,
4405                                              owner_objectid, 0);
4406                 BUG_ON(ret < 0);
4407
4408                 btrfs_release_path(extent_root, path);
4409                 path->leave_spinning = 1;
4410
4411                 key.objectid = bytenr;
4412                 key.type = BTRFS_EXTENT_ITEM_KEY;
4413                 key.offset = num_bytes;
4414
4415                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4416                                         -1, 1);
4417                 if (ret) {
4418                         printk(KERN_ERR "umm, got %d back from search"
4419                                ", was looking for %llu\n", ret,
4420                                (unsigned long long)bytenr);
4421                         btrfs_print_leaf(extent_root, path->nodes[0]);
4422                 }
4423                 BUG_ON(ret);
4424                 extent_slot = path->slots[0];
4425                 leaf = path->nodes[0];
4426                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4427         }
4428 #endif
4429         BUG_ON(item_size < sizeof(*ei));
4430         ei = btrfs_item_ptr(leaf, extent_slot,
4431                             struct btrfs_extent_item);
4432         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4433                 struct btrfs_tree_block_info *bi;
4434                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4435                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4436                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4437         }
4438
4439         refs = btrfs_extent_refs(leaf, ei);
4440         BUG_ON(refs < refs_to_drop);
4441         refs -= refs_to_drop;
4442
4443         if (refs > 0) {
4444                 if (extent_op)
4445                         __run_delayed_extent_op(extent_op, leaf, ei);
4446                 /*
4447                  * In the case of an inline back ref, the reference count
4448                  * will be updated by remove_extent_backref
4449                  */
4450                 if (iref) {
4451                         BUG_ON(!found_extent);
4452                 } else {
4453                         btrfs_set_extent_refs(leaf, ei, refs);
4454                         btrfs_mark_buffer_dirty(leaf);
4455                 }
4456                 if (found_extent) {
4457                         ret = remove_extent_backref(trans, extent_root, path,
4458                                                     iref, refs_to_drop,
4459                                                     is_data);
4460                         BUG_ON(ret);
4461                 }
4462         } else {
4463                 if (found_extent) {
4464                         BUG_ON(is_data && refs_to_drop !=
4465                                extent_data_ref_count(root, path, iref));
4466                         if (iref) {
4467                                 BUG_ON(path->slots[0] != extent_slot);
4468                         } else {
4469                                 BUG_ON(path->slots[0] != extent_slot + 1);
4470                                 path->slots[0] = extent_slot;
4471                                 num_to_del = 2;
4472                         }
4473                 }
4474
4475                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4476                                       num_to_del);
4477                 BUG_ON(ret);
4478                 btrfs_release_path(extent_root, path);
4479
4480                 if (is_data) {
4481                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4482                         BUG_ON(ret);
4483                 } else {
4484                         invalidate_mapping_pages(info->btree_inode->i_mapping,
4485                              bytenr >> PAGE_CACHE_SHIFT,
4486                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4487                 }
4488
4489                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4490                 BUG_ON(ret);
4491         }
4492         btrfs_free_path(path);
4493         return ret;
4494 }
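/*
 * Leaf layout assumed by the extent_slot scan above (descriptive note,
 * added):
 *
 *   slot n-1: (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes)  <- extent_slot
 *   slot n:   (bytenr, *_REF_KEY, ...)                    <- backref found
 *                                                            by the lookup
 *
 * The extent item sorts just before its keyed backrefs, which is why,
 * when the last reference is dropped through a keyed backref,
 * path->slots[0] is stepped back to extent_slot and num_to_del becomes 2
 * so both items are deleted in one btrfs_del_items() call.
 */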
4495
4496 /*
4497  * when we free a block, it is possible (and likely) that we free the last
4498  * delayed ref for that extent as well.  This searches the delayed ref tree for
4499  * a given extent, and if there are no other delayed refs to be processed, it
4500  * removes it from the tree.
4501  */
4502 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4503                                       struct btrfs_root *root, u64 bytenr)
4504 {
4505         struct btrfs_delayed_ref_head *head;
4506         struct btrfs_delayed_ref_root *delayed_refs;
4507         struct btrfs_delayed_ref_node *ref;
4508         struct rb_node *node;
4509         int ret = 0;
4510
4511         delayed_refs = &trans->transaction->delayed_refs;
4512         spin_lock(&delayed_refs->lock);
4513         head = btrfs_find_delayed_ref_head(trans, bytenr);
4514         if (!head)
4515                 goto out;
4516
4517         node = rb_prev(&head->node.rb_node);
4518         if (!node)
4519                 goto out;
4520
4521         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4522
4523         /* there are still entries for this ref, we can't drop it */
4524         if (ref->bytenr == bytenr)
4525                 goto out;
4526
4527         if (head->extent_op) {
4528                 if (!head->must_insert_reserved)
4529                         goto out;
4530                 kfree(head->extent_op);
4531                 head->extent_op = NULL;
4532         }
4533
4534         /*
4535          * waiting for the lock here would deadlock.  If someone else has it
4536          * locked, they are already in the process of dropping it anyway.
4537          */
4538         if (!mutex_trylock(&head->mutex))
4539                 goto out;
4540
4541         /*
4542          * at this point we have a head with no other entries.  Go
4543          * ahead and process it.
4544          */
4545         head->node.in_tree = 0;
4546         rb_erase(&head->node.rb_node, &delayed_refs->root);
4547
4548         delayed_refs->num_entries--;
4549
4550         /*
4551          * we don't take a ref on the node because we're removing it from the
4552          * tree, so we just steal the ref the tree was holding.
4553          */
4554         delayed_refs->num_heads--;
4555         if (list_empty(&head->cluster))
4556                 delayed_refs->num_heads_ready--;
4557
4558         list_del_init(&head->cluster);
4559         spin_unlock(&delayed_refs->lock);
4560
4561         BUG_ON(head->extent_op);
4562         if (head->must_insert_reserved)
4563                 ret = 1;
4564
4565         mutex_unlock(&head->mutex);
4566         btrfs_put_delayed_ref(&head->node);
4567         return ret;
4568 out:
4569         spin_unlock(&delayed_refs->lock);
4570         return 0;
4571 }
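/*
 * Hedged note (added) on the rb_prev() test above: delayed refs for a
 * given bytenr sort together in the rbtree with the head node after them,
 * so if the node just before the head still carries the same bytenr there
 * are unprocessed refs and the head must stay in the tree.
 */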
4572
4573 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4574                            struct btrfs_root *root,
4575                            struct extent_buffer *buf,
4576                            u64 parent, int last_ref)
4577 {
4578         struct btrfs_block_rsv *block_rsv;
4579         struct btrfs_block_group_cache *cache = NULL;
4580         int ret;
4581
4582         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4583                 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4584                                                 parent, root->root_key.objectid,
4585                                                 btrfs_header_level(buf),
4586                                                 BTRFS_DROP_DELAYED_REF, NULL);
4587                 BUG_ON(ret);
4588         }
4589
4590         if (!last_ref)
4591                 return;
4592
4593         block_rsv = get_block_rsv(trans, root);
4594         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4595         if (block_rsv->space_info != cache->space_info)
4596                 goto out;
4597
4598         if (btrfs_header_generation(buf) == trans->transid) {
4599                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4600                         ret = check_ref_cleanup(trans, root, buf->start);
4601                         if (!ret)
4602                                 goto pin;
4603                 }
4604
4605                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4606                         pin_down_extent(root, cache, buf->start, buf->len, 1);
4607                         goto pin;
4608                 }
4609
4610                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4611
4612                 btrfs_add_free_space(cache, buf->start, buf->len);
4613                 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4614                 if (ret == -EAGAIN) {
4615                         /* block group became read-only */
4616                         update_reserved_bytes(cache, buf->len, 0, 1);
4617                         goto out;
4618                 }
4619
4620                 ret = 1;
4621                 spin_lock(&block_rsv->lock);
4622                 if (block_rsv->reserved < block_rsv->size) {
4623                         block_rsv->reserved += buf->len;
4624                         ret = 0;
4625                 }
4626                 spin_unlock(&block_rsv->lock);
4627
4628                 if (ret) {
4629                         spin_lock(&cache->space_info->lock);
4630                         cache->space_info->bytes_reserved -= buf->len;
4631                         spin_unlock(&cache->space_info->lock);
4632                 }
4633                 goto out;
4634         }
4635 pin:
4636         if (block_rsv->durable && !cache->ro) {
4637                 ret = 0;
4638                 spin_lock(&cache->lock);
4639                 if (!cache->ro) {
4640                         cache->reserved_pinned += buf->len;
4641                         ret = 1;
4642                 }
4643                 spin_unlock(&cache->lock);
4644
4645                 if (ret) {
4646                         spin_lock(&block_rsv->lock);
4647                         block_rsv->freed[trans->transid & 0x1] += buf->len;
4648                         spin_unlock(&block_rsv->lock);
4649                 }
4650         }
4651 out:
4652         btrfs_put_block_group(cache);
4653 }
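/*
 * Descriptive summary (added): blocks born and dying inside the current
 * transaction that were never written are returned straight to free space
 * and re-credited to the block reservation; anything already written, or
 * from an earlier transaction, goes through the "pin" path so its space
 * only becomes reusable after the commit.
 */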
4654
4655 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4656                       struct btrfs_root *root,
4657                       u64 bytenr, u64 num_bytes, u64 parent,
4658                       u64 root_objectid, u64 owner, u64 offset)
4659 {
4660         int ret;
4661
4662         /*
4663          * tree log blocks never actually go into the extent allocation
4664          * tree, just update pinning info and exit early.
4665          */
4666         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4667                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4668                 /* unlocks the pinned mutex */
4669                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4670                 ret = 0;
4671         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4672                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4673                                         parent, root_objectid, (int)owner,
4674                                         BTRFS_DROP_DELAYED_REF, NULL);
4675                 BUG_ON(ret);
4676         } else {
4677                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4678                                         parent, root_objectid, owner,
4679                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
4680                 BUG_ON(ret);
4681         }
4682         return ret;
4683 }
4684
4685 static u64 stripe_align(struct btrfs_root *root, u64 val)
4686 {
4687         u64 mask = ((u64)root->stripesize - 1);
4688         u64 ret = (val + mask) & ~mask;
4689         return ret;
4690 }
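/*
 * Worked example (added): stripe_align() is the classic power-of-two
 * round-up.  A self-contained userspace check, assuming stripesize is a
 * power of two; demo names are hypothetical:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t demo_stripe_align(uint64_t stripesize, uint64_t val)
{
        uint64_t mask = stripesize - 1;
        return (val + mask) & ~mask;
}

int main(void)
{
        /* with a 64KiB stripe, 100000 rounds up to 131072 (2 * 65536) */
        assert(demo_stripe_align(65536, 100000) == 131072);
        /* already-aligned values are unchanged */
        assert(demo_stripe_align(65536, 65536) == 65536);
        return 0;
}
#endif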
4691
4692 /*
4693  * when we wait for progress in the block group caching, it's because
4694  * our allocation attempt failed at least once.  So, we must sleep
4695  * and let some progress happen before we try again.
4696  *
4697  * This function will sleep at least once waiting for new free space to
4698  * show up, and then it will check the block group free space numbers
4699  * for our min num_bytes.  Another option is to have it go ahead
4700  * and look in the rbtree for a free extent of a given size, but this
4701  * is a good start.
4702  */
4703 static noinline int
4704 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4705                                 u64 num_bytes)
4706 {
4707         struct btrfs_caching_control *caching_ctl;
4708         DEFINE_WAIT(wait);
4709
4710         caching_ctl = get_caching_control(cache);
4711         if (!caching_ctl)
4712                 return 0;
4713
4714         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4715                    (cache->free_space >= num_bytes));
4716
4717         put_caching_control(caching_ctl);
4718         return 0;
4719 }
4720
4721 static noinline int
4722 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4723 {
4724         struct btrfs_caching_control *caching_ctl;
4725         DEFINE_WAIT(wait);
4726
4727         caching_ctl = get_caching_control(cache);
4728         if (!caching_ctl)
4729                 return 0;
4730
4731         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4732
4733         put_caching_control(caching_ctl);
4734         return 0;
4735 }
4736
4737 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4738 {
4739         int index;
4740         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4741                 index = 0;
4742         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4743                 index = 1;
4744         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4745                 index = 2;
4746         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4747                 index = 3;
4748         else
4749                 index = 4;
4750         return index;
4751 }
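/*
 * Descriptive note (added): the index returned above matches the order of
 * space_info->block_groups[], so the search in find_free_extent() below
 * tries RAID10 groups first, then RAID1, DUP, RAID0, and finally single
 * (index 4) as it increments the index.
 */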
4752
4753 enum btrfs_loop_type {
4754         LOOP_FIND_IDEAL = 0,
4755         LOOP_CACHING_NOWAIT = 1,
4756         LOOP_CACHING_WAIT = 2,
4757         LOOP_ALLOC_CHUNK = 3,
4758         LOOP_NO_EMPTY_SIZE = 4,
4759 };
4760
4761 /*
4762  * walks the btree of allocated extents and finds a hole of a given size.
4763  * The key ins is changed to record the hole:
4764  * ins->objectid == block start
4765  * ins->flags == BTRFS_EXTENT_ITEM_KEY
4766  * ins->offset == number of blocks
4767  * Any available blocks before search_start are skipped.
4768  */
4769 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4770                                      struct btrfs_root *orig_root,
4771                                      u64 num_bytes, u64 empty_size,
4772                                      u64 search_start, u64 search_end,
4773                                      u64 hint_byte, struct btrfs_key *ins,
4774                                      int data)
4775 {
4776         int ret = 0;
4777         struct btrfs_root *root = orig_root->fs_info->extent_root;
4778         struct btrfs_free_cluster *last_ptr = NULL;
4779         struct btrfs_block_group_cache *block_group = NULL;
4780         int empty_cluster = 2 * 1024 * 1024;
4781         int allowed_chunk_alloc = 0;
4782         int done_chunk_alloc = 0;
4783         struct btrfs_space_info *space_info;
4784         int last_ptr_loop = 0;
4785         int loop = 0;
4786         int index = 0;
4787         bool found_uncached_bg = false;
4788         bool failed_cluster_refill = false;
4789         bool failed_alloc = false;
4790         u64 ideal_cache_percent = 0;
4791         u64 ideal_cache_offset = 0;
4792
4793         WARN_ON(num_bytes < root->sectorsize);
4794         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4795         ins->objectid = 0;
4796         ins->offset = 0;
4797
4798         space_info = __find_space_info(root->fs_info, data);
4799         if (!space_info) {
4800                 printk(KERN_ERR "No space info for %d\n", data);
4801                 return -ENOSPC;
4802         }
4803
4804         if (orig_root->ref_cows || empty_size)
4805                 allowed_chunk_alloc = 1;
4806
4807         if (data & BTRFS_BLOCK_GROUP_METADATA) {
4808                 last_ptr = &root->fs_info->meta_alloc_cluster;
4809                 if (!btrfs_test_opt(root, SSD))
4810                         empty_cluster = 64 * 1024;
4811         }
4812
4813         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4814                 last_ptr = &root->fs_info->data_alloc_cluster;
4815         }
4816
4817         if (last_ptr) {
4818                 spin_lock(&last_ptr->lock);
4819                 if (last_ptr->block_group)
4820                         hint_byte = last_ptr->window_start;
4821                 spin_unlock(&last_ptr->lock);
4822         }
4823
4824         search_start = max(search_start, first_logical_byte(root, 0));
4825         search_start = max(search_start, hint_byte);
4826
4827         if (!last_ptr)
4828                 empty_cluster = 0;
4829
4830         if (search_start == hint_byte) {
4831 ideal_cache:
4832                 block_group = btrfs_lookup_block_group(root->fs_info,
4833                                                        search_start);
4834                 /*
4835                  * we don't want to use the block group if it doesn't match our
4836                  * allocation bits, or if it's not cached.
4837                  *
4838                  * However if we are re-searching with an ideal block group
4839                  * picked out then we don't care that the block group is cached.
4840                  */
4841                 if (block_group && block_group_bits(block_group, data) &&
4842                     (block_group->cached != BTRFS_CACHE_NO ||
4843                      search_start == ideal_cache_offset)) {
4844                         down_read(&space_info->groups_sem);
4845                         if (list_empty(&block_group->list) ||
4846                             block_group->ro) {
4847                                 /*
4848                                  * someone is removing this block group,
4849                                  * we can't jump into the have_block_group
4850                                  * target because our list pointers are not
4851                                  * valid
4852                                  */
4853                                 btrfs_put_block_group(block_group);
4854                                 up_read(&space_info->groups_sem);
4855                         } else {
4856                                 index = get_block_group_index(block_group);
4857                                 goto have_block_group;
4858                         }
4859                 } else if (block_group) {
4860                         btrfs_put_block_group(block_group);
4861                 }
4862         }
4863 search:
4864         down_read(&space_info->groups_sem);
4865         list_for_each_entry(block_group, &space_info->block_groups[index],
4866                             list) {
4867                 u64 offset;
4868                 int cached;
4869
4870                 btrfs_get_block_group(block_group);
4871                 search_start = block_group->key.objectid;
4872
4873 have_block_group:
4874                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4875                         u64 free_percent;
4876
4877                         ret = cache_block_group(block_group, trans, 1);
4878                         if (block_group->cached == BTRFS_CACHE_FINISHED)
4879                                 goto have_block_group;
4880
4881                         free_percent = btrfs_block_group_used(&block_group->item);
4882                         free_percent *= 100;
4883                         free_percent = div64_u64(free_percent,
4884                                                  block_group->key.offset);
4885                         free_percent = 100 - free_percent;
4886                         if (free_percent > ideal_cache_percent &&
4887                             likely(!block_group->ro)) {
4888                                 ideal_cache_offset = block_group->key.objectid;
4889                                 ideal_cache_percent = free_percent;
4890                         }
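                        /*
                         * Worked example (added for clarity): a 1 GiB block
                         * group with 256 MiB used gives 256Mi * 100 / 1Gi
                         * = 25, then 100 - 25 = 75, i.e. 75% free.  Despite
                         * its name, free_percent briefly holds the used
                         * percentage before the subtraction.
                         */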
4891
4892                         /*
4893                          * We only want to start kthread caching if we are at
4894                          * the point where we will wait for caching to make
4895                          * progress, or if our ideal search is over and we've
4896                          * found somebody to start caching.
4897                          */
4898                         if (loop > LOOP_CACHING_NOWAIT ||
4899                             (loop > LOOP_FIND_IDEAL &&
4900                              atomic_read(&space_info->caching_threads) < 2)) {
4901                                 ret = cache_block_group(block_group, trans, 0);
4902                                 BUG_ON(ret);
4903                         }
4904                         found_uncached_bg = true;
4905
4906                         /*
4907                          * If loop is set for cached only, try the next block
4908                          * group.
4909                          */
4910                         if (loop == LOOP_FIND_IDEAL)
4911                                 goto loop;
4912                 }
4913
4914                 cached = block_group_cache_done(block_group);
4915                 if (unlikely(!cached))
4916                         found_uncached_bg = true;
4917
4918                 if (unlikely(block_group->ro))
4919                         goto loop;
4920
4921                 /*
4922                  * Ok, we want to try and use the cluster allocator, so let's
4923                  * look there, unless we are on LOOP_NO_EMPTY_SIZE.  By that
4924                  * point we will have tried the cluster allocator plenty of
4925                  * times and found nothing, so we are likely way too
4926                  * fragmented for the clustering code to find anything; just
4927                  * skip it and let the allocator find whatever block it can
4928                  * find
4929                  */
4930                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4931                         /*
4932                          * the refill lock keeps out other
4933                          * people trying to start a new cluster
4934                          */
4935                         spin_lock(&last_ptr->refill_lock);
4936                         if (last_ptr->block_group &&
4937                             (last_ptr->block_group->ro ||
4938                             !block_group_bits(last_ptr->block_group, data))) {
4939                                 offset = 0;
4940                                 goto refill_cluster;
4941                         }
4942
4943                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4944                                                  num_bytes, search_start);
4945                         if (offset) {
4946                                 /* we have a block, we're done */
4947                                 spin_unlock(&last_ptr->refill_lock);
4948                                 goto checks;
4949                         }
4950
4951                         spin_lock(&last_ptr->lock);
4952                         /*
4953                          * whoops, this cluster doesn't actually point to
4954                          * this block group.  Get a ref on the block
4955                          * group it does point to and try again
4956                          */
4957                         if (!last_ptr_loop && last_ptr->block_group &&
4958                             last_ptr->block_group != block_group) {
4959
4960                                 btrfs_put_block_group(block_group);
4961                                 block_group = last_ptr->block_group;
4962                                 btrfs_get_block_group(block_group);
4963                                 spin_unlock(&last_ptr->lock);
4964                                 spin_unlock(&last_ptr->refill_lock);
4965
4966                                 last_ptr_loop = 1;
4967                                 search_start = block_group->key.objectid;
4968                                 /*
4969                                  * we know this block group is properly
4970                                  * in the list because
4971                                  * btrfs_remove_block_group drops the
4972                                  * cluster before it removes the block
4973                                  * group from the list
4974                                  */
4975                                 goto have_block_group;
4976                         }
4977                         spin_unlock(&last_ptr->lock);
4978 refill_cluster:
4979                         /*
4980                          * this cluster didn't work out, free it and
4981                          * start over
4982                          */
4983                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
4984
4985                         last_ptr_loop = 0;
4986
4987                         /* allocate a cluster in this block group */
4988                         ret = btrfs_find_space_cluster(trans, root,
4989                                                block_group, last_ptr,
4990                                                offset, num_bytes,
4991                                                empty_cluster + empty_size);
4992                         if (ret == 0) {
4993                                 /*
4994                                  * now pull our allocation out of this
4995                                  * cluster
4996                                  */
4997                                 offset = btrfs_alloc_from_cluster(block_group,
4998                                                   last_ptr, num_bytes,
4999                                                   search_start);
5000                                 if (offset) {
5001                                         /* we found one, proceed */
5002                                         spin_unlock(&last_ptr->refill_lock);
5003                                         goto checks;
5004                                 }
5005                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5006                                    && !failed_cluster_refill) {
5007                                 spin_unlock(&last_ptr->refill_lock);
5008
5009                                 failed_cluster_refill = true;
5010                                 wait_block_group_cache_progress(block_group,
5011                                        num_bytes + empty_cluster + empty_size);
5012                                 goto have_block_group;
5013                         }
5014
5015                         /*
5016                          * at this point we either didn't find a cluster
5017                          * or we weren't able to allocate a block from our
5018                          * cluster.  Free the cluster we've been trying
5019                          * to use, and go to the next block group
5020                          */
5021                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5022                         spin_unlock(&last_ptr->refill_lock);
5023                         goto loop;
5024                 }
5025
5026                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5027                                                     num_bytes, empty_size);
5028                 /*
5029                  * If we didn't find a chunk, and we haven't failed on this
5030                  * block group before, and this block group is in the middle of
5031                  * caching and we are ok with waiting, then go ahead and wait
5032                  * for progress to be made, and set failed_alloc to true.
5033                  *
5034                  * If failed_alloc is true then we've already waited on this
5035                  * block group once and should move on to the next block group.
5036                  */
5037                 if (!offset && !failed_alloc && !cached &&
5038                     loop > LOOP_CACHING_NOWAIT) {
5039                         wait_block_group_cache_progress(block_group,
5040                                                 num_bytes + empty_size);
5041                         failed_alloc = true;
5042                         goto have_block_group;
5043                 } else if (!offset) {
5044                         goto loop;
5045                 }
5046 checks:
5047                 search_start = stripe_align(root, offset);
5048                 /* move on to the next group */
5049                 if (search_start + num_bytes >= search_end) {
5050                         btrfs_add_free_space(block_group, offset, num_bytes);
5051                         goto loop;
5052                 }
5053
5054                 /* move on to the next group */
5055                 if (search_start + num_bytes >
5056                     block_group->key.objectid + block_group->key.offset) {
5057                         btrfs_add_free_space(block_group, offset, num_bytes);
5058                         goto loop;
5059                 }
5060
5061                 ins->objectid = search_start;
5062                 ins->offset = num_bytes;
5063
5064                 if (offset < search_start)
5065                         btrfs_add_free_space(block_group, offset,
5066                                              search_start - offset);
5067                 BUG_ON(offset > search_start);
5068
5069                 ret = update_reserved_bytes(block_group, num_bytes, 1,
5070                                             (data & BTRFS_BLOCK_GROUP_DATA));
5071                 if (ret == -EAGAIN) {
5072                         btrfs_add_free_space(block_group, offset, num_bytes);
5073                         goto loop;
5074                 }
5075
5076                 /* we are all good, lets return */
5077                 ins->objectid = search_start;
5078                 ins->offset = num_bytes;
5079
5080                 if (offset < search_start)
5081                         btrfs_add_free_space(block_group, offset,
5082                                              search_start - offset);
5083                 BUG_ON(offset > search_start);
5084                 break;
5085 loop:
5086                 failed_cluster_refill = false;
5087                 failed_alloc = false;
5088                 BUG_ON(index != get_block_group_index(block_group));
5089                 btrfs_put_block_group(block_group);
5090         }
5091         up_read(&space_info->groups_sem);
5092
5093         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5094                 goto search;
5095
5096         /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5097          *                      for them to make caching progress.  Also
5098          *                      determine the best possible bg to cache
5099          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5100          *                      caching kthreads as we move along
5101          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5102          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5103          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5104          *                      again
5105          */
5106         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5107             (found_uncached_bg || empty_size || empty_cluster ||
5108              allowed_chunk_alloc)) {
5109                 index = 0;
5110                 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5111                         found_uncached_bg = false;
5112                         loop++;
5113                         if (!ideal_cache_percent &&
5114                             atomic_read(&space_info->caching_threads))
5115                                 goto search;
5116
5117                         /*
5118                          * One of the following two things has happened so far:
5119                          *
5120                          * 1) We found an ideal block group for caching that
5121                          * is mostly full and will cache quickly, so we might
5122                          * as well wait for it.
5123                          *
5124                          * 2) We searched for cached only and we didn't find
5125                          * anything, and we didn't start any caching kthreads
5126                          * either, so chances are we will loop through and
5127                          * start a couple caching kthreads, and then come back
5128                          * around and just wait for them.  This will be slower
5129                          * because we will have 2 caching kthreads reading at
5130                          * the same time when we could have just started one
5131                          * and waited for it to get far enough to give us an
5132                          * allocation, so go ahead and go to the wait caching
5133                          * loop.
5134                          */
5135                         loop = LOOP_CACHING_WAIT;
5136                         search_start = ideal_cache_offset;
5137                         ideal_cache_percent = 0;
5138                         goto ideal_cache;
5139                 } else if (loop == LOOP_FIND_IDEAL) {
5140                         /*
5141                          * Didn't find an uncached bg, wait on anything we find
5142                          * next.
5143                          */
5144                         loop = LOOP_CACHING_WAIT;
5145                         goto search;
5146                 }
5147
5148                 if (loop < LOOP_CACHING_WAIT) {
5149                         loop++;
5150                         goto search;
5151                 }
5152
5153                 if (loop == LOOP_ALLOC_CHUNK) {
5154                         empty_size = 0;
5155                         empty_cluster = 0;
5156                 }
5157
5158                 if (allowed_chunk_alloc) {
5159                         ret = do_chunk_alloc(trans, root, num_bytes +
5160                                              2 * 1024 * 1024, data, 1);
5161                         allowed_chunk_alloc = 0;
5162                         done_chunk_alloc = 1;
5163                 } else if (!done_chunk_alloc) {
5164                         space_info->force_alloc = 1;
5165                 }
5166
5167                 if (loop < LOOP_NO_EMPTY_SIZE) {
5168                         loop++;
5169                         goto search;
5170                 }
5171                 ret = -ENOSPC;
5172         } else if (!ins->objectid) {
5173                 ret = -ENOSPC;
5174         }
5175
5176         /* we found what we needed */
5177         if (ins->objectid) {
5178                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5179                         trans->block_group = block_group->key.objectid;
5180
5181                 btrfs_put_block_group(block_group);
5182                 ret = 0;
5183         }
5184
5185         return ret;
5186 }
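/*
 * Minimal sketch (added) of the loop escalation implemented above,
 * leaving out the special LOOP_FIND_IDEAL -> LOOP_CACHING_WAIT jump;
 * purely illustrative, with hypothetical names:
 */
#if 0
enum demo_loop { FIND_IDEAL, CACHING_NOWAIT, CACHING_WAIT,
                 ALLOC_CHUNK, NO_EMPTY_SIZE };

static int demo_next_pass(enum demo_loop *loop, int *empty_size,
                          int *empty_cluster)
{
        if (*loop >= NO_EMPTY_SIZE)
                return 0;               /* out of options: -ENOSPC */
        (*loop)++;
        if (*loop == NO_EMPTY_SIZE) {
                /* final pass asks for no slack at all */
                *empty_size = 0;
                *empty_cluster = 0;
        }
        return 1;                       /* retry the search */
}
#endif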
5187
5188 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5189                             int dump_block_groups)
5190 {
5191         struct btrfs_block_group_cache *cache;
5192         int index = 0;
5193
5194         spin_lock(&info->lock);
5195         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5196                (unsigned long long)(info->total_bytes - info->bytes_used -
5197                                     info->bytes_pinned - info->bytes_reserved -
5198                                     info->bytes_readonly),
5199                (info->full) ? "" : "not ");
5200         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5201                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5202                (unsigned long long)info->total_bytes,
5203                (unsigned long long)info->bytes_used,
5204                (unsigned long long)info->bytes_pinned,
5205                (unsigned long long)info->bytes_reserved,
5206                (unsigned long long)info->bytes_may_use,
5207                (unsigned long long)info->bytes_readonly);
5208         spin_unlock(&info->lock);
5209
5210         if (!dump_block_groups)
5211                 return;
5212
5213         down_read(&info->groups_sem);
5214 again:
5215         list_for_each_entry(cache, &info->block_groups[index], list) {
5216                 spin_lock(&cache->lock);
5217                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5218                        "%llu pinned %llu reserved\n",
5219                        (unsigned long long)cache->key.objectid,
5220                        (unsigned long long)cache->key.offset,
5221                        (unsigned long long)btrfs_block_group_used(&cache->item),
5222                        (unsigned long long)cache->pinned,
5223                        (unsigned long long)cache->reserved);
5224                 btrfs_dump_free_space(cache, bytes);
5225                 spin_unlock(&cache->lock);
5226         }
5227         if (++index < BTRFS_NR_RAID_TYPES)
5228                 goto again;
5229         up_read(&info->groups_sem);
5230 }
5231
5232 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5233                          struct btrfs_root *root,
5234                          u64 num_bytes, u64 min_alloc_size,
5235                          u64 empty_size, u64 hint_byte,
5236                          u64 search_end, struct btrfs_key *ins,
5237                          u64 data)
5238 {
5239         int ret;
5240         u64 search_start = 0;
5241
5242         data = btrfs_get_alloc_profile(root, data);
5243 again:
5244         /*
5245          * the only place that sets empty_size is btrfs_realloc_node, which
5246          * is not called recursively on allocations
5247          */
5248         if (empty_size || root->ref_cows)
5249                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5250                                      num_bytes + 2 * 1024 * 1024, data, 0);
5251
5252         WARN_ON(num_bytes < root->sectorsize);
5253         ret = find_free_extent(trans, root, num_bytes, empty_size,
5254                                search_start, search_end, hint_byte,
5255                                ins, data);
5256
5257         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5258                 num_bytes = num_bytes >> 1;
5259                 num_bytes = num_bytes & ~(root->sectorsize - 1);
5260                 num_bytes = max(num_bytes, min_alloc_size);
5261                 do_chunk_alloc(trans, root->fs_info->extent_root,
5262                                num_bytes, data, 1);
5263                 goto again;
5264         }
5265         if (ret == -ENOSPC) {
5266                 struct btrfs_space_info *sinfo;
5267
5268                 sinfo = __find_space_info(root->fs_info, data);
5269                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5270                        "wanted %llu\n", (unsigned long long)data,
5271                        (unsigned long long)num_bytes);
5272                 dump_space_info(sinfo, num_bytes, 1);
5273         }
5274
5275         return ret;
5276 }
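/*
 * A minimal userspace sketch (added) of the fallback sizing above: halve
 * the request, round down to a sector boundary, and never go below
 * min_alloc_size.  Assumes sectorsize is a power of two; names are
 * hypothetical:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t demo_shrink(uint64_t num_bytes, uint64_t min_alloc_size,
                            uint64_t sectorsize)
{
        num_bytes >>= 1;
        num_bytes &= ~(sectorsize - 1);
        return num_bytes > min_alloc_size ? num_bytes : min_alloc_size;
}

int main(void)
{
        /* 12000 bytes halves to 6000, rounds down to one 4K sector */
        assert(demo_shrink(12000, 4096, 4096) == 4096);
        return 0;
}
#endif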
5277
5278 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5279 {
5280         struct btrfs_block_group_cache *cache;
5281         int ret = 0;
5282
5283         cache = btrfs_lookup_block_group(root->fs_info, start);
5284         if (!cache) {
5285                 printk(KERN_ERR "Unable to find block group for %llu\n",
5286                        (unsigned long long)start);
5287                 return -ENOSPC;
5288         }
5289
5290         ret = btrfs_discard_extent(root, start, len);
5291
5292         btrfs_add_free_space(cache, start, len);
5293         update_reserved_bytes(cache, len, 0, 1);
5294         btrfs_put_block_group(cache);
5295
5296         return ret;
5297 }
5298
5299 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5300                                       struct btrfs_root *root,
5301                                       u64 parent, u64 root_objectid,
5302                                       u64 flags, u64 owner, u64 offset,
5303                                       struct btrfs_key *ins, int ref_mod)
5304 {
5305         int ret;
5306         struct btrfs_fs_info *fs_info = root->fs_info;
5307         struct btrfs_extent_item *extent_item;
5308         struct btrfs_extent_inline_ref *iref;
5309         struct btrfs_path *path;
5310         struct extent_buffer *leaf;
5311         int type;
5312         u32 size;
5313
5314         if (parent > 0)
5315                 type = BTRFS_SHARED_DATA_REF_KEY;
5316         else
5317                 type = BTRFS_EXTENT_DATA_REF_KEY;
5318
5319         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5320
5321         path = btrfs_alloc_path();
5322         BUG_ON(!path);
5323
5324         path->leave_spinning = 1;
5325         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5326                                       ins, size);
5327         BUG_ON(ret);
5328
5329         leaf = path->nodes[0];
5330         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5331                                      struct btrfs_extent_item);
5332         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5333         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5334         btrfs_set_extent_flags(leaf, extent_item,
5335                                flags | BTRFS_EXTENT_FLAG_DATA);
5336
5337         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5338         btrfs_set_extent_inline_ref_type(leaf, iref, type);
5339         if (parent > 0) {
5340                 struct btrfs_shared_data_ref *ref;
5341                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5342                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5343                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5344         } else {
5345                 struct btrfs_extent_data_ref *ref;
5346                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5347                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5348                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5349                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5350                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5351         }
5352
5353         btrfs_mark_buffer_dirty(path->nodes[0]);
5354         btrfs_free_path(path);
5355
5356         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5357         if (ret) {
5358                 printk(KERN_ERR "btrfs update block group failed for %llu "
5359                        "%llu\n", (unsigned long long)ins->objectid,
5360                        (unsigned long long)ins->offset);
5361                 BUG();
5362         }
5363         return ret;
5364 }
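/*
 * Descriptive note (added) on the item built above: a single extent tree
 * item holds the btrfs_extent_item followed by one inline ref.  For a
 * shared data ref the iref offset carries the parent bytenr and a
 * btrfs_shared_data_ref (count) follows; for a keyed data ref a
 * btrfs_extent_data_ref (root, objectid, offset, count) is laid out
 * starting at iref->offset.
 */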
5365
5366 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5367                                      struct btrfs_root *root,
5368                                      u64 parent, u64 root_objectid,
5369                                      u64 flags, struct btrfs_disk_key *key,
5370                                      int level, struct btrfs_key *ins)
5371 {
5372         int ret;
5373         struct btrfs_fs_info *fs_info = root->fs_info;
5374         struct btrfs_extent_item *extent_item;
5375         struct btrfs_tree_block_info *block_info;
5376         struct btrfs_extent_inline_ref *iref;
5377         struct btrfs_path *path;
5378         struct extent_buffer *leaf;
5379         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5380
5381         path = btrfs_alloc_path();
5382         BUG_ON(!path);
5383
5384         path->leave_spinning = 1;
5385         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5386                                       ins, size);
5387         BUG_ON(ret);
5388
5389         leaf = path->nodes[0];
5390         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5391                                      struct btrfs_extent_item);
5392         btrfs_set_extent_refs(leaf, extent_item, 1);
5393         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5394         btrfs_set_extent_flags(leaf, extent_item,
5395                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5396         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5397
5398         btrfs_set_tree_block_key(leaf, block_info, key);
5399         btrfs_set_tree_block_level(leaf, block_info, level);
5400
5401         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5402         if (parent > 0) {
5403                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5404                 btrfs_set_extent_inline_ref_type(leaf, iref,
5405                                                  BTRFS_SHARED_BLOCK_REF_KEY);
5406                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5407         } else {
5408                 btrfs_set_extent_inline_ref_type(leaf, iref,
5409                                                  BTRFS_TREE_BLOCK_REF_KEY);
5410                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5411         }
5412
5413         btrfs_mark_buffer_dirty(leaf);
5414         btrfs_free_path(path);
5415
5416         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5417         if (ret) {
5418                 printk(KERN_ERR "btrfs update block group failed for %llu "
5419                        "%llu\n", (unsigned long long)ins->objectid,
5420                        (unsigned long long)ins->offset);
5421                 BUG();
5422         }
5423         return ret;
5424 }
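
/*
 * Layout sketch (illustrative, not part of the original source): tree block
 * extent items carry an extra tree_block_info between the extent item and
 * the inline ref:
 *
 *   [ btrfs_extent_item | tree_block_info (key, level) | inline ref ]
 *
 * The inline ref is SHARED_BLOCK_REF with offset = parent bytenr when the
 * block uses full backrefs, or TREE_BLOCK_REF with offset = root objectid
 * otherwise.
 */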
5425
5426 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5427                                      struct btrfs_root *root,
5428                                      u64 root_objectid, u64 owner,
5429                                      u64 offset, struct btrfs_key *ins)
5430 {
5431         int ret;
5432
5433         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5434
5435         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5436                                          0, root_objectid, owner, offset,
5437                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
5438         return ret;
5439 }
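
/*
 * Note (summary, not part of the original source): this helper only queues
 * a delayed ref; the extent item itself is inserted later, when the delayed
 * refs are run and alloc_reserved_file_extent() above is invoked with the
 * accumulated ref count.
 */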
5440
5441 /*
5442  * this is used by the tree logging recovery code.  It records that
5443  * an extent has been allocated and makes sure to clear the free
5444  * space cache bits as well
5445  */
5446 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5447                                    struct btrfs_root *root,
5448                                    u64 root_objectid, u64 owner, u64 offset,
5449                                    struct btrfs_key *ins)
5450 {
5451         int ret;
5452         struct btrfs_block_group_cache *block_group;
5453         struct btrfs_caching_control *caching_ctl;
5454         u64 start = ins->objectid;
5455         u64 num_bytes = ins->offset;
5456
5457         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5458         cache_block_group(block_group, trans, 0);
5459         caching_ctl = get_caching_control(block_group);
5460
5461         if (!caching_ctl) {
5462                 BUG_ON(!block_group_cache_done(block_group));
5463                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5464                 BUG_ON(ret);
5465         } else {
5466                 mutex_lock(&caching_ctl->mutex);
5467
5468                 if (start >= caching_ctl->progress) {
5469                         ret = add_excluded_extent(root, start, num_bytes);
5470                         BUG_ON(ret);
5471                 } else if (start + num_bytes <= caching_ctl->progress) {
5472                         ret = btrfs_remove_free_space(block_group,
5473                                                       start, num_bytes);
5474                         BUG_ON(ret);
5475                 } else {
5476                         num_bytes = caching_ctl->progress - start;
5477                         ret = btrfs_remove_free_space(block_group,
5478                                                       start, num_bytes);
5479                         BUG_ON(ret);
5480
5481                         start = caching_ctl->progress;
5482                         num_bytes = ins->objectid + ins->offset -
5483                                     caching_ctl->progress;
5484                         ret = add_excluded_extent(root, start, num_bytes);
5485                         BUG_ON(ret);
5486                 }
5487
5488                 mutex_unlock(&caching_ctl->mutex);
5489                 put_caching_control(caching_ctl);
5490         }
5491
5492         ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5493         BUG_ON(ret);
5494         btrfs_put_block_group(block_group);
5495         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5496                                          0, owner, offset, ins, 1);
5497         return ret;
5498 }
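
/*
 * Sketch (illustrative, not part of the original source) of the three cases
 * handled above, relative to the caching thread's progress pointer:
 *
 *            caching_ctl->progress
 *                     v
 *   | cached ........ | not yet cached ............ |
 *     [ A ]                     [ B ]
 *             [------ C ------]
 *
 *   A: start + num_bytes <= progress -> already cached; just remove the
 *      range from the free space cache.
 *   B: start >= progress -> not cached yet; exclude the range so the
 *      caching thread skips it.
 *   C: straddles progress -> remove the cached half, exclude the uncached
 *      half.
 */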
5499
5500 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5501                                             struct btrfs_root *root,
5502                                             u64 bytenr, u32 blocksize,
5503                                             int level)
5504 {
5505         struct extent_buffer *buf;
5506
5507         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5508         if (!buf)
5509                 return ERR_PTR(-ENOMEM);
5510         btrfs_set_header_generation(buf, trans->transid);
5511         btrfs_set_buffer_lockdep_class(buf, level);
5512         btrfs_tree_lock(buf);
5513         clean_tree_block(trans, root, buf);
5514
5515         btrfs_set_lock_blocking(buf);
5516         btrfs_set_buffer_uptodate(buf);
5517
5518         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5519                 /*
5520                  * we allow two log transactions at a time, use different
5521                  * EXTENT bits to differentiate dirty pages.
5522                  */
5523                 if (root->log_transid % 2 == 0)
5524                         set_extent_dirty(&root->dirty_log_pages, buf->start,
5525                                         buf->start + buf->len - 1, GFP_NOFS);
5526                 else
5527                         set_extent_new(&root->dirty_log_pages, buf->start,
5528                                         buf->start + buf->len - 1, GFP_NOFS);
5529         } else {
5530                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5531                          buf->start + buf->len - 1, GFP_NOFS);
5532         }
5533         trans->blocks_used++;
5534         /* this returns a buffer locked for blocking */
5535         return buf;
5536 }
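
/*
 * Example (illustrative, not part of the original source): with two log
 * transactions allowed in flight, pages of log_transid N and N + 1 must be
 * distinguishable at log commit time; tagging even transids with
 * EXTENT_DIRTY and odd ones with EXTENT_NEW lets each commit walk only its
 * own pages.
 */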
5537
5538 static struct btrfs_block_rsv *
5539 use_block_rsv(struct btrfs_trans_handle *trans,
5540               struct btrfs_root *root, u32 blocksize)
5541 {
5542         struct btrfs_block_rsv *block_rsv;
5543         int ret;
5544
5545         block_rsv = get_block_rsv(trans, root);
5546
5547         if (block_rsv->size == 0) {
5548                 ret = reserve_metadata_bytes(block_rsv, blocksize);
5549                 if (ret)
5550                         return ERR_PTR(ret);
5551                 return block_rsv;
5552         }
5553
5554         ret = block_rsv_use_bytes(block_rsv, blocksize);
5555         if (!ret)
5556                 return block_rsv;
5557
5558         WARN_ON(1);
5559         printk(KERN_INFO "block_rsv size %llu reserved %llu freed %llu %llu\n",
5560                 (unsigned long long)block_rsv->size, (unsigned long long)block_rsv->reserved,
5561                 (unsigned long long)block_rsv->freed[0], (unsigned long long)block_rsv->freed[1]);
5562
5563         return ERR_PTR(-ENOSPC);
5564 }
5565
5566 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5567 {
5568         block_rsv_add_bytes(block_rsv, blocksize, 0);
5569         block_rsv_release_bytes(block_rsv, NULL, 0);
5570 }
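
/*
 * Note (summary, not part of the original source): use_block_rsv() and
 * unuse_block_rsv() must pair on the error path; btrfs_alloc_free_block()
 * below shows the intended pattern.
 */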
5571
5572 /*
5573  * finds a free extent and does all the dirty work required for allocation
5574  * returns the key for the extent through ins, and a locked tree buffer
5575  * for the first block of the extent.
5576  *
5577  * returns the tree buffer on success, or an ERR_PTR on failure.
5578  */
5579 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5580                                         struct btrfs_root *root, u32 blocksize,
5581                                         u64 parent, u64 root_objectid,
5582                                         struct btrfs_disk_key *key, int level,
5583                                         u64 hint, u64 empty_size)
5584 {
5585         struct btrfs_key ins;
5586         struct btrfs_block_rsv *block_rsv;
5587         struct extent_buffer *buf;
5588         u64 flags = 0;
5589         int ret;
5590
5592         block_rsv = use_block_rsv(trans, root, blocksize);
5593         if (IS_ERR(block_rsv))
5594                 return ERR_CAST(block_rsv);
5595
5596         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5597                                    empty_size, hint, (u64)-1, &ins, 0);
5598         if (ret) {
5599                 unuse_block_rsv(block_rsv, blocksize);
5600                 return ERR_PTR(ret);
5601         }
5602
5603         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5604                                     blocksize, level);
5605         BUG_ON(IS_ERR(buf));
5606
5607         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5608                 if (parent == 0)
5609                         parent = ins.objectid;
5610                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5611         } else
5612                 BUG_ON(parent > 0);
5613
5614         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5615                 struct btrfs_delayed_extent_op *extent_op;
5616                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5617                 BUG_ON(!extent_op);
5618                 if (key)
5619                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
5620                 else
5621                         memset(&extent_op->key, 0, sizeof(extent_op->key));
5622                 extent_op->flags_to_set = flags;
5623                 extent_op->update_key = 1;
5624                 extent_op->update_flags = 1;
5625                 extent_op->is_data = 0;
5626
5627                 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5628                                         ins.offset, parent, root_objectid,
5629                                         level, BTRFS_ADD_DELAYED_EXTENT,
5630                                         extent_op);
5631                 BUG_ON(ret);
5632         }
5633         return buf;
5634 }
5635
5636 struct walk_control {
5637         u64 refs[BTRFS_MAX_LEVEL];
5638         u64 flags[BTRFS_MAX_LEVEL];
5639         struct btrfs_key update_progress;
5640         int stage;
5641         int level;
5642         int shared_level;
5643         int update_ref;
5644         int keep_locks;
5645         int reada_slot;
5646         int reada_count;
5647 };
5648
5649 #define DROP_REFERENCE  1
5650 #define UPDATE_BACKREF  2
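
/*
 * Overview (illustrative, not part of the original source): the tree walk
 * below is a two stage state machine.  In DROP_REFERENCE we walk down
 * dropping references; when do_walk_down() meets a shared block whose
 * subtree still needs back ref updates, it records the level in
 * wc->shared_level and flips wc->stage to UPDATE_BACKREF.  walk_up_proc()
 * flips it back to DROP_REFERENCE once the walk climbs back to the shared
 * level.
 */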
5651
5652 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5653                                      struct btrfs_root *root,
5654                                      struct walk_control *wc,
5655                                      struct btrfs_path *path)
5656 {
5657         u64 bytenr;
5658         u64 generation;
5659         u64 refs;
5660         u64 flags;
5661         u64 last = 0;
5662         u32 nritems;
5663         u32 blocksize;
5664         struct btrfs_key key;
5665         struct extent_buffer *eb;
5666         int ret;
5667         int slot;
5668         int nread = 0;
5669
5670         if (path->slots[wc->level] < wc->reada_slot) {
5671                 wc->reada_count = wc->reada_count * 2 / 3;
5672                 wc->reada_count = max(wc->reada_count, 2);
5673         } else {
5674                 wc->reada_count = wc->reada_count * 3 / 2;
5675                 wc->reada_count = min_t(int, wc->reada_count,
5676                                         BTRFS_NODEPTRS_PER_BLOCK(root));
5677         }
5678
5679         eb = path->nodes[wc->level];
5680         nritems = btrfs_header_nritems(eb);
5681         blocksize = btrfs_level_size(root, wc->level - 1);
5682
5683         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5684                 if (nread >= wc->reada_count)
5685                         break;
5686
5687                 cond_resched();
5688                 bytenr = btrfs_node_blockptr(eb, slot);
5689                 generation = btrfs_node_ptr_generation(eb, slot);
5690
5691                 if (slot == path->slots[wc->level])
5692                         goto reada;
5693
5694                 if (wc->stage == UPDATE_BACKREF &&
5695                     generation <= root->root_key.offset)
5696                         continue;
5697
5698                 /* We don't lock the tree block, it's OK to be racy here */
5699                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5700                                                &refs, &flags);
5701                 BUG_ON(ret);
5702                 BUG_ON(refs == 0);
5703
5704                 if (wc->stage == DROP_REFERENCE) {
5705                         if (refs == 1)
5706                                 goto reada;
5707
5708                         if (wc->level == 1 &&
5709                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5710                                 continue;
5711                         if (!wc->update_ref ||
5712                             generation <= root->root_key.offset)
5713                                 continue;
5714                         btrfs_node_key_to_cpu(eb, &key, slot);
5715                         ret = btrfs_comp_cpu_keys(&key,
5716                                                   &wc->update_progress);
5717                         if (ret < 0)
5718                                 continue;
5719                 } else {
5720                         if (wc->level == 1 &&
5721                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5722                                 continue;
5723                 }
5724 reada:
5725                 ret = readahead_tree_block(root, bytenr, blocksize,
5726                                            generation);
5727                 if (ret)
5728                         break;
5729                 last = bytenr + blocksize;
5730                 nread++;
5731         }
5732         wc->reada_slot = slot;
5733 }
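
/*
 * Example (illustrative, not part of the original source): the readahead
 * window adapts to how far the walk advanced since the last call.  If few
 * slots were consumed, the window shrinks by 2/3 (never below 2); otherwise
 * it grows by 3/2, capped at the pointers per block.  Starting from 32,
 * repeated growth gives 32 -> 48 -> 72, repeated shrinking 32 -> 21 -> 14.
 */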
5734
5735 /*
5736  * helper to process tree block while walking down the tree.
5737  *
5738  * when wc->stage == UPDATE_BACKREF, this function updates
5739  * back refs for pointers in the block.
5740  *
5741  * NOTE: return value 1 means we should stop walking down.
5742  */
5743 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5744                                    struct btrfs_root *root,
5745                                    struct btrfs_path *path,
5746                                    struct walk_control *wc, int lookup_info)
5747 {
5748         int level = wc->level;
5749         struct extent_buffer *eb = path->nodes[level];
5750         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5751         int ret;
5752
5753         if (wc->stage == UPDATE_BACKREF &&
5754             btrfs_header_owner(eb) != root->root_key.objectid)
5755                 return 1;
5756
5757         /*
5758          * when the reference count of a tree block is 1, it won't increase
5759          * again. once the full backref flag is set, we never clear it.
5760          */
5761         if (lookup_info &&
5762             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5763              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5764                 BUG_ON(!path->locks[level]);
5765                 ret = btrfs_lookup_extent_info(trans, root,
5766                                                eb->start, eb->len,
5767                                                &wc->refs[level],
5768                                                &wc->flags[level]);
5769                 BUG_ON(ret);
5770                 BUG_ON(wc->refs[level] == 0);
5771         }
5772
5773         if (wc->stage == DROP_REFERENCE) {
5774                 if (wc->refs[level] > 1)
5775                         return 1;
5776
5777                 if (path->locks[level] && !wc->keep_locks) {
5778                         btrfs_tree_unlock(eb);
5779                         path->locks[level] = 0;
5780                 }
5781                 return 0;
5782         }
5783
5784         /* wc->stage == UPDATE_BACKREF */
5785         if (!(wc->flags[level] & flag)) {
5786                 BUG_ON(!path->locks[level]);
5787                 ret = btrfs_inc_ref(trans, root, eb, 1);
5788                 BUG_ON(ret);
5789                 ret = btrfs_dec_ref(trans, root, eb, 0);
5790                 BUG_ON(ret);
5791                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5792                                                   eb->len, flag, 0);
5793                 BUG_ON(ret);
5794                 wc->flags[level] |= flag;
5795         }
5796
5797         /*
5798          * the block is shared by multiple trees, so it's not good to
5799          * keep the tree lock
5800          */
5801         if (path->locks[level] && level > 0) {
5802                 btrfs_tree_unlock(eb);
5803                 path->locks[level] = 0;
5804         }
5805         return 0;
5806 }
5807
5808 /*
5809  * helper to process tree block pointer.
5810  *
5811  * when wc->stage == DROP_REFERENCE, this function checks
5812  * reference count of the block pointed to. if the block
5813  * is shared and we need to update back refs for the subtree
5814  * rooted at the block, this function changes wc->stage to
5815  * UPDATE_BACKREF. if the block is shared and there is no
5816  * need to update back refs, this function drops the reference
5817  * to the block.
5818  *
5819  * NOTE: return value 1 means we should stop walking down.
5820  */
5821 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5822                                  struct btrfs_root *root,
5823                                  struct btrfs_path *path,
5824                                  struct walk_control *wc, int *lookup_info)
5825 {
5826         u64 bytenr;
5827         u64 generation;
5828         u64 parent;
5829         u32 blocksize;
5830         struct btrfs_key key;
5831         struct extent_buffer *next;
5832         int level = wc->level;
5833         int reada = 0;
5834         int ret = 0;
5835
5836         generation = btrfs_node_ptr_generation(path->nodes[level],
5837                                                path->slots[level]);
5838         /*
5839          * if the lower level block was created before the snapshot
5840          * was created, we know there is no need to update back refs
5841          * for the subtree
5842          */
5843         if (wc->stage == UPDATE_BACKREF &&
5844             generation <= root->root_key.offset) {
5845                 *lookup_info = 1;
5846                 return 1;
5847         }
5848
5849         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5850         blocksize = btrfs_level_size(root, level - 1);
5851
5852         next = btrfs_find_tree_block(root, bytenr, blocksize);
5853         if (!next) {
5854                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5855                 if (!next)
5856                         return -ENOMEM;
5857                 reada = 1;
5858         }
5859         btrfs_tree_lock(next);
5860         btrfs_set_lock_blocking(next);
5861
5862         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5863                                        &wc->refs[level - 1],
5864                                        &wc->flags[level - 1]);
5865         BUG_ON(ret);
5866         BUG_ON(wc->refs[level - 1] == 0);
5867         *lookup_info = 0;
5868
5869         if (wc->stage == DROP_REFERENCE) {
5870                 if (wc->refs[level - 1] > 1) {
5871                         if (level == 1 &&
5872                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5873                                 goto skip;
5874
5875                         if (!wc->update_ref ||
5876                             generation <= root->root_key.offset)
5877                                 goto skip;
5878
5879                         btrfs_node_key_to_cpu(path->nodes[level], &key,
5880                                               path->slots[level]);
5881                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5882                         if (ret < 0)
5883                                 goto skip;
5884
5885                         wc->stage = UPDATE_BACKREF;
5886                         wc->shared_level = level - 1;
5887                 }
5888         } else {
5889                 if (level == 1 &&
5890                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5891                         goto skip;
5892         }
5893
5894         if (!btrfs_buffer_uptodate(next, generation)) {
5895                 btrfs_tree_unlock(next);
5896                 free_extent_buffer(next);
5897                 next = NULL;
5898                 *lookup_info = 1;
5899         }
5900
5901         if (!next) {
5902                 if (reada && level == 1)
5903                         reada_walk_down(trans, root, wc, path);
5904                 next = read_tree_block(root, bytenr, blocksize, generation);
5905                 btrfs_tree_lock(next);
5906                 btrfs_set_lock_blocking(next);
5907         }
5908
5909         level--;
5910         BUG_ON(level != btrfs_header_level(next));
5911         path->nodes[level] = next;
5912         path->slots[level] = 0;
5913         path->locks[level] = 1;
5914         wc->level = level;
5915         if (wc->level == 1)
5916                 wc->reada_slot = 0;
5917         return 0;
5918 skip:
5919         wc->refs[level - 1] = 0;
5920         wc->flags[level - 1] = 0;
5921         if (wc->stage == DROP_REFERENCE) {
5922                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5923                         parent = path->nodes[level]->start;
5924                 } else {
5925                         BUG_ON(root->root_key.objectid !=
5926                                btrfs_header_owner(path->nodes[level]));
5927                         parent = 0;
5928                 }
5929
5930                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5931                                         root->root_key.objectid, level - 1, 0);
5932                 BUG_ON(ret);
5933         }
5934         btrfs_tree_unlock(next);
5935         free_extent_buffer(next);
5936         *lookup_info = 1;
5937         return 1;
5938 }
5939
5940 /*
5941  * helper to process tree block while walking up the tree.
5942  *
5943  * when wc->stage == DROP_REFERENCE, this function drops
5944  * reference count on the block.
5945  *
5946  * when wc->stage == UPDATE_BACKREF, this function changes
5947  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5948  * to UPDATE_BACKREF previously while processing the block.
5949  *
5950  * NOTE: return value 1 means we should stop walking up.
5951  */
5952 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5953                                  struct btrfs_root *root,
5954                                  struct btrfs_path *path,
5955                                  struct walk_control *wc)
5956 {
5957         int ret;
5958         int level = wc->level;
5959         struct extent_buffer *eb = path->nodes[level];
5960         u64 parent = 0;
5961
5962         if (wc->stage == UPDATE_BACKREF) {
5963                 BUG_ON(wc->shared_level < level);
5964                 if (level < wc->shared_level)
5965                         goto out;
5966
5967                 ret = find_next_key(path, level + 1, &wc->update_progress);
5968                 if (ret > 0)
5969                         wc->update_ref = 0;
5970
5971                 wc->stage = DROP_REFERENCE;
5972                 wc->shared_level = -1;
5973                 path->slots[level] = 0;
5974
5975                 /*
5976                  * check reference count again if the block isn't locked.
5977                  * we should start walking down the tree again if reference
5978                  * count is one.
5979                  */
5980                 if (!path->locks[level]) {
5981                         BUG_ON(level == 0);
5982                         btrfs_tree_lock(eb);
5983                         btrfs_set_lock_blocking(eb);
5984                         path->locks[level] = 1;
5985
5986                         ret = btrfs_lookup_extent_info(trans, root,
5987                                                        eb->start, eb->len,
5988                                                        &wc->refs[level],
5989                                                        &wc->flags[level]);
5990                         BUG_ON(ret);
5991                         BUG_ON(wc->refs[level] == 0);
5992                         if (wc->refs[level] == 1) {
5993                                 btrfs_tree_unlock(eb);
5994                                 path->locks[level] = 0;
5995                                 return 1;
5996                         }
5997                 }
5998         }
5999
6000         /* wc->stage == DROP_REFERENCE */
6001         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6002
6003         if (wc->refs[level] == 1) {
6004                 if (level == 0) {
6005                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6006                                 ret = btrfs_dec_ref(trans, root, eb, 1);
6007                         else
6008                                 ret = btrfs_dec_ref(trans, root, eb, 0);
6009                         BUG_ON(ret);
6010                 }
6011                 /* make block locked assertion in clean_tree_block happy */
6012                 if (!path->locks[level] &&
6013                     btrfs_header_generation(eb) == trans->transid) {
6014                         btrfs_tree_lock(eb);
6015                         btrfs_set_lock_blocking(eb);
6016                         path->locks[level] = 1;
6017                 }
6018                 clean_tree_block(trans, root, eb);
6019         }
6020
6021         if (eb == root->node) {
6022                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6023                         parent = eb->start;
6024                 else
6025                         BUG_ON(root->root_key.objectid !=
6026                                btrfs_header_owner(eb));
6027         } else {
6028                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6029                         parent = path->nodes[level + 1]->start;
6030                 else
6031                         BUG_ON(root->root_key.objectid !=
6032                                btrfs_header_owner(path->nodes[level + 1]));
6033         }
6034
6035         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6036 out:
6037         wc->refs[level] = 0;
6038         wc->flags[level] = 0;
6039         return 0;
6040 }
6041
6042 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6043                                    struct btrfs_root *root,
6044                                    struct btrfs_path *path,
6045                                    struct walk_control *wc)
6046 {
6047         int level = wc->level;
6048         int lookup_info = 1;
6049         int ret;
6050
6051         while (level >= 0) {
6052                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6053                 if (ret > 0)
6054                         break;
6055
6056                 if (level == 0)
6057                         break;
6058
6059                 if (path->slots[level] >=
6060                     btrfs_header_nritems(path->nodes[level]))
6061                         break;
6062
6063                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6064                 if (ret > 0) {
6065                         path->slots[level]++;
6066                         continue;
6067                 } else if (ret < 0)
6068                         return ret;
6069                 level = wc->level;
6070         }
6071         return 0;
6072 }
6073
6074 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6075                                  struct btrfs_root *root,
6076                                  struct btrfs_path *path,
6077                                  struct walk_control *wc, int max_level)
6078 {
6079         int level = wc->level;
6080         int ret;
6081
6082         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6083         while (level < max_level && path->nodes[level]) {
6084                 wc->level = level;
6085                 if (path->slots[level] + 1 <
6086                     btrfs_header_nritems(path->nodes[level])) {
6087                         path->slots[level]++;
6088                         return 0;
6089                 } else {
6090                         ret = walk_up_proc(trans, root, path, wc);
6091                         if (ret > 0)
6092                                 return 0;
6093
6094                         if (path->locks[level]) {
6095                                 btrfs_tree_unlock(path->nodes[level]);
6096                                 path->locks[level] = 0;
6097                         }
6098                         free_extent_buffer(path->nodes[level]);
6099                         path->nodes[level] = NULL;
6100                         level++;
6101                 }
6102         }
6103         return 1;
6104 }
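
/*
 * Return convention (summary, not part of the original source):
 * walk_up_tree() returns 0 when it found another slot to descend into and
 * 1 once it has climbed past max_level, i.e. the walk is complete.
 * Callers loop walk_down_tree()/walk_up_tree() until the latter returns
 * nonzero, as btrfs_drop_snapshot() below does.
 */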
6105
6106 /*
6107  * drop a subvolume tree.
6108  *
6109  * this function traverses the tree freeing any blocks that are only
6110  * referenced by the tree.
6111  *
6112  * when a shared tree block is found, this function decreases its
6113  * reference count by one. if update_ref is true, this function
6114  * also makes sure backrefs for the shared block and all lower level
6115  * blocks are properly updated.
6116  */
6117 int btrfs_drop_snapshot(struct btrfs_root *root,
6118                         struct btrfs_block_rsv *block_rsv, int update_ref)
6119 {
6120         struct btrfs_path *path;
6121         struct btrfs_trans_handle *trans;
6122         struct btrfs_root *tree_root = root->fs_info->tree_root;
6123         struct btrfs_root_item *root_item = &root->root_item;
6124         struct walk_control *wc;
6125         struct btrfs_key key;
6126         int err = 0;
6127         int ret;
6128         int level;
6129
6130         path = btrfs_alloc_path();
6131         BUG_ON(!path);
6132
6133         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6134         BUG_ON(!wc);
6135
6136         trans = btrfs_start_transaction(tree_root, 0);
6137         if (block_rsv)
6138                 trans->block_rsv = block_rsv;
6139
6140         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6141                 level = btrfs_header_level(root->node);
6142                 path->nodes[level] = btrfs_lock_root_node(root);
6143                 btrfs_set_lock_blocking(path->nodes[level]);
6144                 path->slots[level] = 0;
6145                 path->locks[level] = 1;
6146                 memset(&wc->update_progress, 0,
6147                        sizeof(wc->update_progress));
6148         } else {
6149                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6150                 memcpy(&wc->update_progress, &key,
6151                        sizeof(wc->update_progress));
6152
6153                 level = root_item->drop_level;
6154                 BUG_ON(level == 0);
6155                 path->lowest_level = level;
6156                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6157                 path->lowest_level = 0;
6158                 if (ret < 0) {
6159                         err = ret;
6160                         goto out;
6161                 }
6162                 WARN_ON(ret > 0);
6163
6164                 /*
6165                  * unlock our path, this is safe because only this
6166                  * function is allowed to delete this snapshot
6167                  */
6168                 btrfs_unlock_up_safe(path, 0);
6169
6170                 level = btrfs_header_level(root->node);
6171                 while (1) {
6172                         btrfs_tree_lock(path->nodes[level]);
6173                         btrfs_set_lock_blocking(path->nodes[level]);
6174
6175                         ret = btrfs_lookup_extent_info(trans, root,
6176                                                 path->nodes[level]->start,
6177                                                 path->nodes[level]->len,
6178                                                 &wc->refs[level],
6179                                                 &wc->flags[level]);
6180                         BUG_ON(ret);
6181                         BUG_ON(wc->refs[level] == 0);
6182
6183                         if (level == root_item->drop_level)
6184                                 break;
6185
6186                         btrfs_tree_unlock(path->nodes[level]);
6187                         WARN_ON(wc->refs[level] != 1);
6188                         level--;
6189                 }
6190         }
6191
6192         wc->level = level;
6193         wc->shared_level = -1;
6194         wc->stage = DROP_REFERENCE;
6195         wc->update_ref = update_ref;
6196         wc->keep_locks = 0;
6197         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6198
6199         while (1) {
6200                 ret = walk_down_tree(trans, root, path, wc);
6201                 if (ret < 0) {
6202                         err = ret;
6203                         break;
6204                 }
6205
6206                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6207                 if (ret < 0) {
6208                         err = ret;
6209                         break;
6210                 }
6211
6212                 if (ret > 0) {
6213                         BUG_ON(wc->stage != DROP_REFERENCE);
6214                         break;
6215                 }
6216
6217                 if (wc->stage == DROP_REFERENCE) {
6218                         level = wc->level;
6219                         btrfs_node_key(path->nodes[level],
6220                                        &root_item->drop_progress,
6221                                        path->slots[level]);
6222                         root_item->drop_level = level;
6223                 }
6224
6225                 BUG_ON(wc->level == 0);
6226                 if (btrfs_should_end_transaction(trans, tree_root)) {
6227                         ret = btrfs_update_root(trans, tree_root,
6228                                                 &root->root_key,
6229                                                 root_item);
6230                         BUG_ON(ret);
6231
6232                         btrfs_end_transaction_throttle(trans, tree_root);
6233                         trans = btrfs_start_transaction(tree_root, 0);
6234                         if (block_rsv)
6235                                 trans->block_rsv = block_rsv;
6236                 }
6237         }
6238         btrfs_release_path(root, path);
6239         BUG_ON(err);
6240
6241         ret = btrfs_del_root(trans, tree_root, &root->root_key);
6242         BUG_ON(ret);
6243
6244         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6245                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6246                                            NULL, NULL);
6247                 BUG_ON(ret < 0);
6248                 if (ret > 0) {
6249                         ret = btrfs_del_orphan_item(trans, tree_root,
6250                                                     root->root_key.objectid);
6251                         BUG_ON(ret);
6252                 }
6253         }
6254
6255         if (root->in_radix) {
6256                 btrfs_free_fs_root(tree_root->fs_info, root);
6257         } else {
6258                 free_extent_buffer(root->node);
6259                 free_extent_buffer(root->commit_root);
6260                 kfree(root);
6261         }
6262 out:
6263         btrfs_end_transaction_throttle(trans, tree_root);
6264         kfree(wc);
6265         btrfs_free_path(path);
6266         return err;
6267 }
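
/*
 * Note (summary, not part of the original source): drop_progress and
 * drop_level are written back to the root item whenever the transaction is
 * about to end above, so an interrupted snapshot drop resumes from the
 * recorded key instead of restarting at the tree root.
 */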
6268
6269 /*
6270  * drop subtree rooted at tree block 'node'.
6271  *
6272  * NOTE: this function will unlock and release tree block 'node'
6273  */
6274 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6275                         struct btrfs_root *root,
6276                         struct extent_buffer *node,
6277                         struct extent_buffer *parent)
6278 {
6279         struct btrfs_path *path;
6280         struct walk_control *wc;
6281         int level;
6282         int parent_level;
6283         int ret = 0;
6284         int wret;
6285
6286         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6287
6288         path = btrfs_alloc_path();
6289         BUG_ON(!path);
6290
6291         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6292         BUG_ON(!wc);
6293
6294         btrfs_assert_tree_locked(parent);
6295         parent_level = btrfs_header_level(parent);
6296         extent_buffer_get(parent);
6297         path->nodes[parent_level] = parent;
6298         path->slots[parent_level] = btrfs_header_nritems(parent);
6299
6300         btrfs_assert_tree_locked(node);
6301         level = btrfs_header_level(node);
6302         path->nodes[level] = node;
6303         path->slots[level] = 0;
6304         path->locks[level] = 1;
6305
6306         wc->refs[parent_level] = 1;
6307         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6308         wc->level = level;
6309         wc->shared_level = -1;
6310         wc->stage = DROP_REFERENCE;
6311         wc->update_ref = 0;
6312         wc->keep_locks = 1;
6313         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6314
6315         while (1) {
6316                 wret = walk_down_tree(trans, root, path, wc);
6317                 if (wret < 0) {
6318                         ret = wret;
6319                         break;
6320                 }
6321
6322                 wret = walk_up_tree(trans, root, path, wc, parent_level);
6323                 if (wret < 0)
6324                         ret = wret;
6325                 if (wret != 0)
6326                         break;
6327         }
6328
6329         kfree(wc);
6330         btrfs_free_path(path);
6331         return ret;
6332 }
6333
6334 #if 0
6335 static unsigned long calc_ra(unsigned long start, unsigned long last,
6336                              unsigned long nr)
6337 {
6338         return min(last, start + nr - 1);
6339 }
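
/* e.g. calc_ra(10, 100, 32) == min(100, 10 + 32 - 1) == 41 */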
6340
6341 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6342                                          u64 len)
6343 {
6344         u64 page_start;
6345         u64 page_end;
6346         unsigned long first_index;
6347         unsigned long last_index;
6348         unsigned long i;
6349         struct page *page;
6350         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6351         struct file_ra_state *ra;
6352         struct btrfs_ordered_extent *ordered;
6353         unsigned int total_read = 0;
6354         unsigned int total_dirty = 0;
6355         int ret = 0;
6356
6357         ra = kzalloc(sizeof(*ra), GFP_NOFS);
6358
6359         mutex_lock(&inode->i_mutex);
6360         first_index = start >> PAGE_CACHE_SHIFT;
6361         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6362
6363         /* make sure the dirty trick played by the caller works */
6364         ret = invalidate_inode_pages2_range(inode->i_mapping,
6365                                             first_index, last_index);
6366         if (ret)
6367                 goto out_unlock;
6368
6369         file_ra_state_init(ra, inode->i_mapping);
6370
6371         for (i = first_index ; i <= last_index; i++) {
6372                 if (total_read % ra->ra_pages == 0) {
6373                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6374                                        calc_ra(i, last_index, ra->ra_pages));
6375                 }
6376                 total_read++;
6377 again:
6378                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
6379                         BUG_ON(1);
6380                 page = grab_cache_page(inode->i_mapping, i);
6381                 if (!page) {
6382                         ret = -ENOMEM;
6383                         goto out_unlock;
6384                 }
6385                 if (!PageUptodate(page)) {
6386                         btrfs_readpage(NULL, page);
6387                         lock_page(page);
6388                         if (!PageUptodate(page)) {
6389                                 unlock_page(page);
6390                                 page_cache_release(page);
6391                                 ret = -EIO;
6392                                 goto out_unlock;
6393                         }
6394                 }
6395                 wait_on_page_writeback(page);
6396
6397                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6398                 page_end = page_start + PAGE_CACHE_SIZE - 1;
6399                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6400
6401                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6402                 if (ordered) {
6403                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6404                         unlock_page(page);
6405                         page_cache_release(page);
6406                         btrfs_start_ordered_extent(inode, ordered, 1);
6407                         btrfs_put_ordered_extent(ordered);
6408                         goto again;
6409                 }
6410                 set_page_extent_mapped(page);
6411
6412                 if (i == first_index)
6413                         set_extent_bits(io_tree, page_start, page_end,
6414                                         EXTENT_BOUNDARY, GFP_NOFS);
6415                 btrfs_set_extent_delalloc(inode, page_start, page_end);
6416
6417                 set_page_dirty(page);
6418                 total_dirty++;
6419
6420                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6421                 unlock_page(page);
6422                 page_cache_release(page);
6423         }
6424
6425 out_unlock:
6426         kfree(ra);
6427         mutex_unlock(&inode->i_mutex);
6428         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6429         return ret;
6430 }
6431
6432 static noinline int relocate_data_extent(struct inode *reloc_inode,
6433                                          struct btrfs_key *extent_key,
6434                                          u64 offset)
6435 {
6436         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6437         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6438         struct extent_map *em;
6439         u64 start = extent_key->objectid - offset;
6440         u64 end = start + extent_key->offset - 1;
6441
6442         em = alloc_extent_map(GFP_NOFS);
6443         BUG_ON(!em || IS_ERR(em));
6444
6445         em->start = start;
6446         em->len = extent_key->offset;
6447         em->block_len = extent_key->offset;
6448         em->block_start = extent_key->objectid;
6449         em->bdev = root->fs_info->fs_devices->latest_bdev;
6450         set_bit(EXTENT_FLAG_PINNED, &em->flags);
6451
6452         /* setup extent map to cheat btrfs_readpage */
6453         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6454         while (1) {
6455                 int ret;
6456                 write_lock(&em_tree->lock);
6457                 ret = add_extent_mapping(em_tree, em);
6458                 write_unlock(&em_tree->lock);
6459                 if (ret != -EEXIST) {
6460                         free_extent_map(em);
6461                         break;
6462                 }
6463                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6464         }
6465         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6466
6467         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6468 }
6469
6470 struct btrfs_ref_path {
6471         u64 extent_start;
6472         u64 nodes[BTRFS_MAX_LEVEL];
6473         u64 root_objectid;
6474         u64 root_generation;
6475         u64 owner_objectid;
6476         u32 num_refs;
6477         int lowest_level;
6478         int current_level;
6479         int shared_level;
6480
6481         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6482         u64 new_nodes[BTRFS_MAX_LEVEL];
6483 };
6484
6485 struct disk_extent {
6486         u64 ram_bytes;
6487         u64 disk_bytenr;
6488         u64 disk_num_bytes;
6489         u64 offset;
6490         u64 num_bytes;
6491         u8 compression;
6492         u8 encryption;
6493         u16 other_encoding;
6494 };
6495
6496 static int is_cowonly_root(u64 root_objectid)
6497 {
6498         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6499             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6500             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6501             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6502             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6503             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6504                 return 1;
6505         return 0;
6506 }
6507
6508 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6509                                     struct btrfs_root *extent_root,
6510                                     struct btrfs_ref_path *ref_path,
6511                                     int first_time)
6512 {
6513         struct extent_buffer *leaf;
6514         struct btrfs_path *path;
6515         struct btrfs_extent_ref *ref;
6516         struct btrfs_key key;
6517         struct btrfs_key found_key;
6518         u64 bytenr;
6519         u32 nritems;
6520         int level;
6521         int ret = 1;
6522
6523         path = btrfs_alloc_path();
6524         if (!path)
6525                 return -ENOMEM;
6526
6527         if (first_time) {
6528                 ref_path->lowest_level = -1;
6529                 ref_path->current_level = -1;
6530                 ref_path->shared_level = -1;
6531                 goto walk_up;
6532         }
6533 walk_down:
6534         level = ref_path->current_level - 1;
6535         while (level >= -1) {
6536                 u64 parent;
6537                 if (level < ref_path->lowest_level)
6538                         break;
6539
6540                 if (level >= 0)
6541                         bytenr = ref_path->nodes[level];
6542                 else
6543                         bytenr = ref_path->extent_start;
6544                 BUG_ON(bytenr == 0);
6545
6546                 parent = ref_path->nodes[level + 1];
6547                 ref_path->nodes[level + 1] = 0;
6548                 ref_path->current_level = level;
6549                 BUG_ON(parent == 0);
6550
6551                 key.objectid = bytenr;
6552                 key.offset = parent + 1;
6553                 key.type = BTRFS_EXTENT_REF_KEY;
6554
6555                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6556                 if (ret < 0)
6557                         goto out;
6558                 BUG_ON(ret == 0);
6559
6560                 leaf = path->nodes[0];
6561                 nritems = btrfs_header_nritems(leaf);
6562                 if (path->slots[0] >= nritems) {
6563                         ret = btrfs_next_leaf(extent_root, path);
6564                         if (ret < 0)
6565                                 goto out;
6566                         if (ret > 0)
6567                                 goto next;
6568                         leaf = path->nodes[0];
6569                 }
6570
6571                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6572                 if (found_key.objectid == bytenr &&
6573                     found_key.type == BTRFS_EXTENT_REF_KEY) {
6574                         if (level < ref_path->shared_level)
6575                                 ref_path->shared_level = level;
6576                         goto found;
6577                 }
6578 next:
6579                 level--;
6580                 btrfs_release_path(extent_root, path);
6581                 cond_resched();
6582         }
6583         /* reached lowest level */
6584         ret = 1;
6585         goto out;
6586 walk_up:
6587         level = ref_path->current_level;
6588         while (level < BTRFS_MAX_LEVEL - 1) {
6589                 u64 ref_objectid;
6590
6591                 if (level >= 0)
6592                         bytenr = ref_path->nodes[level];
6593                 else
6594                         bytenr = ref_path->extent_start;
6595
6596                 BUG_ON(bytenr == 0);
6597
6598                 key.objectid = bytenr;
6599                 key.offset = 0;
6600                 key.type = BTRFS_EXTENT_REF_KEY;
6601
6602                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6603                 if (ret < 0)
6604                         goto out;
6605
6606                 leaf = path->nodes[0];
6607                 nritems = btrfs_header_nritems(leaf);
6608                 if (path->slots[0] >= nritems) {
6609                         ret = btrfs_next_leaf(extent_root, path);
6610                         if (ret < 0)
6611                                 goto out;
6612                         if (ret > 0) {
6613                                 /* the extent was freed by someone */
6614                                 if (ref_path->lowest_level == level)
6615                                         goto out;
6616                                 btrfs_release_path(extent_root, path);
6617                                 goto walk_down;
6618                         }
6619                         leaf = path->nodes[0];
6620                 }
6621
6622                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6623                 if (found_key.objectid != bytenr ||
6624                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
6625                         /* the extent was freed by someone */
6626                         if (ref_path->lowest_level == level) {
6627                                 ret = 1;
6628                                 goto out;
6629                         }
6630                         btrfs_release_path(extent_root, path);
6631                         goto walk_down;
6632                 }
6633 found:
6634                 ref = btrfs_item_ptr(leaf, path->slots[0],
6635                                 struct btrfs_extent_ref);
6636                 ref_objectid = btrfs_ref_objectid(leaf, ref);
6637                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6638                         if (first_time) {
6639                                 level = (int)ref_objectid;
6640                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
6641                                 ref_path->lowest_level = level;
6642                                 ref_path->current_level = level;
6643                                 ref_path->nodes[level] = bytenr;
6644                         } else {
6645                                 WARN_ON(ref_objectid != level);
6646                         }
6647                 } else {
6648                         WARN_ON(level != -1);
6649                 }
6650                 first_time = 0;
6651
6652                 if (ref_path->lowest_level == level) {
6653                         ref_path->owner_objectid = ref_objectid;
6654                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6655                 }
6656
6657                 /*
6658                  * the block is a tree root, or the block isn't in a
6659                  * reference counted tree.
6660                  */
6661                 if (found_key.objectid == found_key.offset ||
6662                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6663                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6664                         ref_path->root_generation =
6665                                 btrfs_ref_generation(leaf, ref);
6666                         if (level < 0) {
6667                                 /* special reference from the tree log */
6668                                 ref_path->nodes[0] = found_key.offset;
6669                                 ref_path->current_level = 0;
6670                         }
6671                         ret = 0;
6672                         goto out;
6673                 }
6674
6675                 level++;
6676                 BUG_ON(ref_path->nodes[level] != 0);
6677                 ref_path->nodes[level] = found_key.offset;
6678                 ref_path->current_level = level;
6679
6680                 /*
6681                  * the reference was created in the running transaction,
6682                  * no need to continue walking up.
6683                  */
6684                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6685                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6686                         ref_path->root_generation =
6687                                 btrfs_ref_generation(leaf, ref);
6688                         ret = 0;
6689                         goto out;
6690                 }
6691
6692                 btrfs_release_path(extent_root, path);
6693                 cond_resched();
6694         }
6695         /* reached max tree level, but no tree root found. */
6696         BUG();
6697 out:
6698         btrfs_free_path(path);
6699         return ret;
6700 }
6701
6702 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6703                                 struct btrfs_root *extent_root,
6704                                 struct btrfs_ref_path *ref_path,
6705                                 u64 extent_start)
6706 {
6707         memset(ref_path, 0, sizeof(*ref_path));
6708         ref_path->extent_start = extent_start;
6709
6710         return __next_ref_path(trans, extent_root, ref_path, 1);
6711 }
6712
6713 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6714                                struct btrfs_root *extent_root,
6715                                struct btrfs_ref_path *ref_path)
6716 {
6717         return __next_ref_path(trans, extent_root, ref_path, 0);
6718 }
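
/*
 * Usage sketch (illustrative, not part of the original source): callers
 * enumerate all reference paths of an extent with btrfs_first_ref_path()
 * followed by btrfs_next_ref_path(), stopping when the helpers return
 * nonzero (1 = no more paths, < 0 = error).
 */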
6719
6720 static noinline int get_new_locations(struct inode *reloc_inode,
6721                                       struct btrfs_key *extent_key,
6722                                       u64 offset, int no_fragment,
6723                                       struct disk_extent **extents,
6724                                       int *nr_extents)
6725 {
6726         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6727         struct btrfs_path *path;
6728         struct btrfs_file_extent_item *fi;
6729         struct extent_buffer *leaf;
6730         struct disk_extent *exts = *extents;
6731         struct btrfs_key found_key;
6732         u64 cur_pos;
6733         u64 last_byte;
6734         u32 nritems;
6735         int nr = 0;
6736         int max = *nr_extents;
6737         int ret;
6738
6739         WARN_ON(!no_fragment && *extents);
6740         if (!exts) {
6741                 max = 1;
6742                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6743                 if (!exts)
6744                         return -ENOMEM;
6745         }
6746
6747         path = btrfs_alloc_path();
6748         BUG_ON(!path);
6749
6750         cur_pos = extent_key->objectid - offset;
6751         last_byte = extent_key->objectid + extent_key->offset;
6752         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6753                                        cur_pos, 0);
6754         if (ret < 0)
6755                 goto out;
6756         if (ret > 0) {
6757                 ret = -ENOENT;
6758                 goto out;
6759         }
6760
6761         while (1) {
6762                 leaf = path->nodes[0];
6763                 nritems = btrfs_header_nritems(leaf);
6764                 if (path->slots[0] >= nritems) {
6765                         ret = btrfs_next_leaf(root, path);
6766                         if (ret < 0)
6767                                 goto out;
6768                         if (ret > 0)
6769                                 break;
6770                         leaf = path->nodes[0];
6771                 }
6772
6773                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6774                 if (found_key.offset != cur_pos ||
6775                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
6776                     found_key.objectid != reloc_inode->i_ino)
6777                         break;
6778
6779                 fi = btrfs_item_ptr(leaf, path->slots[0],
6780                                     struct btrfs_file_extent_item);
6781                 if (btrfs_file_extent_type(leaf, fi) !=
6782                     BTRFS_FILE_EXTENT_REG ||
6783                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6784                         break;
6785
6786                 if (nr == max) {
6787                         struct disk_extent *old = exts;
6788                         max *= 2;
6789                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                             if (!exts) {
                                     /* keep the old array so the cleanup
                                      * path at 'out' can free it
                                      */
                                     exts = old;
                                     ret = -ENOMEM;
                                     goto out;
                             }
6790                         memcpy(exts, old, sizeof(*exts) * nr);
6791                         if (old != *extents)
6792                                 kfree(old);
6793                 }
6794
6795                 exts[nr].disk_bytenr =
6796                         btrfs_file_extent_disk_bytenr(leaf, fi);
6797                 exts[nr].disk_num_bytes =
6798                         btrfs_file_extent_disk_num_bytes(leaf, fi);
6799                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6800                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6801                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6802                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6803                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6804                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6805                                                                            fi);
6806                 BUG_ON(exts[nr].offset > 0);
6807                 BUG_ON(exts[nr].compression || exts[nr].encryption);
6808                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6809
6810                 cur_pos += exts[nr].num_bytes;
6811                 nr++;
6812
6813                 if (cur_pos + offset >= last_byte)
6814                         break;
6815
6816                 if (no_fragment) {
6817                         ret = 1;
6818                         goto out;
6819                 }
6820                 path->slots[0]++;
6821         }
6822
6823         BUG_ON(cur_pos + offset > last_byte);
6824         if (cur_pos + offset < last_byte) {
6825                 ret = -ENOENT;
6826                 goto out;
6827         }
6828         ret = 0;
6829 out:
6830         btrfs_free_path(path);
6831         if (ret) {
6832                 if (exts != *extents)
6833                         kfree(exts);
6834         } else {
6835                 *extents = exts;
6836                 *nr_extents = nr;
6837         }
6838         return ret;
6839 }
6840
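     /*
      * walk the file extent items that reference the extent described by
      * 'extent_key' and switch their disk_bytenr to the new location in
      * 'new_extents'.  The file range is locked in the inode's io_tree
      * while a pointer is rewritten, and extent refs are adjusted for the
      * old and new extents.
      */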
6841 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6842                                         struct btrfs_root *root,
6843                                         struct btrfs_path *path,
6844                                         struct btrfs_key *extent_key,
6845                                         struct btrfs_key *leaf_key,
6846                                         struct btrfs_ref_path *ref_path,
6847                                         struct disk_extent *new_extents,
6848                                         int nr_extents)
6849 {
6850         struct extent_buffer *leaf;
6851         struct btrfs_file_extent_item *fi;
6852         struct inode *inode = NULL;
6853         struct btrfs_key key;
6854         u64 lock_start = 0;
6855         u64 lock_end = 0;
6856         u64 num_bytes;
6857         u64 ext_offset;
6858         u64 search_end = (u64)-1;
6859         u32 nritems;
6860         int nr_scanned = 0;
6861         int extent_locked = 0;
6862         int extent_type;
6863         int ret;
6864
6865         memcpy(&key, leaf_key, sizeof(key));
6866         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6867                 if (key.objectid < ref_path->owner_objectid ||
6868                     (key.objectid == ref_path->owner_objectid &&
6869                      key.type < BTRFS_EXTENT_DATA_KEY)) {
6870                         key.objectid = ref_path->owner_objectid;
6871                         key.type = BTRFS_EXTENT_DATA_KEY;
6872                         key.offset = 0;
6873                 }
6874         }
6875
6876         while (1) {
6877                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6878                 if (ret < 0)
6879                         goto out;
6880
6881                 leaf = path->nodes[0];
6882                 nritems = btrfs_header_nritems(leaf);
6883 next:
6884                 if (extent_locked && ret > 0) {
6885                         /*
6886                          * the file extent item was modified by someone
6887                          * before the extent got locked.
6888                          */
6889                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6890                                       lock_end, GFP_NOFS);
6891                         extent_locked = 0;
6892                 }
6893
6894                 if (path->slots[0] >= nritems) {
6895                         if (++nr_scanned > 2)
6896                                 break;
6897
6898                         BUG_ON(extent_locked);
6899                         ret = btrfs_next_leaf(root, path);
6900                         if (ret < 0)
6901                                 goto out;
6902                         if (ret > 0)
6903                                 break;
6904                         leaf = path->nodes[0];
6905                         nritems = btrfs_header_nritems(leaf);
6906                 }
6907
6908                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6909
6910                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6911                         if ((key.objectid > ref_path->owner_objectid) ||
6912                             (key.objectid == ref_path->owner_objectid &&
6913                              key.type > BTRFS_EXTENT_DATA_KEY) ||
6914                             key.offset >= search_end)
6915                                 break;
6916                 }
6917
6918                 if (inode && key.objectid != inode->i_ino) {
6919                         BUG_ON(extent_locked);
6920                         btrfs_release_path(root, path);
6921                         mutex_unlock(&inode->i_mutex);
6922                         iput(inode);
6923                         inode = NULL;
6924                         continue;
6925                 }
6926
6927                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6928                         path->slots[0]++;
6929                         ret = 1;
6930                         goto next;
6931                 }
6932                 fi = btrfs_item_ptr(leaf, path->slots[0],
6933                                     struct btrfs_file_extent_item);
6934                 extent_type = btrfs_file_extent_type(leaf, fi);
6935                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6936                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6937                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6938                      extent_key->objectid)) {
6939                         path->slots[0]++;
6940                         ret = 1;
6941                         goto next;
6942                 }
6943
6944                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6945                 ext_offset = btrfs_file_extent_offset(leaf, fi);
6946
6947                 if (search_end == (u64)-1) {
6948                         search_end = key.offset - ext_offset +
6949                                 btrfs_file_extent_ram_bytes(leaf, fi);
6950                 }
6951
6952                 if (!extent_locked) {
6953                         lock_start = key.offset;
6954                         lock_end = lock_start + num_bytes - 1;
6955                 } else {
6956                         if (lock_start > key.offset ||
6957                             lock_end + 1 < key.offset + num_bytes) {
6958                                 unlock_extent(&BTRFS_I(inode)->io_tree,
6959                                               lock_start, lock_end, GFP_NOFS);
6960                                 extent_locked = 0;
6961                         }
6962                 }
6963
6964                 if (!inode) {
6965                         btrfs_release_path(root, path);
6966
6967                         inode = btrfs_iget_locked(root->fs_info->sb,
6968                                                   key.objectid, root);
                             if (!inode) {
                                     /* inode allocation failed, skip this
                                      * objectid like the bad inode case
                                      */
                                     key.offset = (u64)-1;
                                     goto skip;
                             }
6969                         if (inode->i_state & I_NEW) {
6970                                 BTRFS_I(inode)->root = root;
6971                                 BTRFS_I(inode)->location.objectid =
6972                                         key.objectid;
6973                                 BTRFS_I(inode)->location.type =
6974                                         BTRFS_INODE_ITEM_KEY;
6975                                 BTRFS_I(inode)->location.offset = 0;
6976                                 btrfs_read_locked_inode(inode);
6977                                 unlock_new_inode(inode);
6978                         }
6979                         /*
6980                          * some code calls btrfs_commit_transaction while
6981                          * holding the i_mutex, so we can't use mutex_lock
6982                          * here.
6983                          */
6984                         if (is_bad_inode(inode) ||
6985                             !mutex_trylock(&inode->i_mutex)) {
6986                                 iput(inode);
6987                                 inode = NULL;
6988                                 key.offset = (u64)-1;
6989                                 goto skip;
6990                         }
6991                 }
6992
6993                 if (!extent_locked) {
6994                         struct btrfs_ordered_extent *ordered;
6995
6996                         btrfs_release_path(root, path);
6997
6998                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6999                                     lock_end, GFP_NOFS);
7000                         ordered = btrfs_lookup_first_ordered_extent(inode,
7001                                                                     lock_end);
7002                         if (ordered &&
7003                             ordered->file_offset <= lock_end &&
7004                             ordered->file_offset + ordered->len > lock_start) {
7005                                 unlock_extent(&BTRFS_I(inode)->io_tree,
7006                                               lock_start, lock_end, GFP_NOFS);
7007                                 btrfs_start_ordered_extent(inode, ordered, 1);
7008                                 btrfs_put_ordered_extent(ordered);
7009                                 key.offset += num_bytes;
7010                                 goto skip;
7011                         }
7012                         if (ordered)
7013                                 btrfs_put_ordered_extent(ordered);
7014
7015                         extent_locked = 1;
7016                         continue;
7017                 }
7018
7019                 if (nr_extents == 1) {
7020                         /* update extent pointer in place */
7021                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
7022                                                 new_extents[0].disk_bytenr);
7023                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7024                                                 new_extents[0].disk_num_bytes);
7025                         btrfs_mark_buffer_dirty(leaf);
7026
7027                         btrfs_drop_extent_cache(inode, key.offset,
7028                                                 key.offset + num_bytes - 1, 0);
7029
7030                         ret = btrfs_inc_extent_ref(trans, root,
7031                                                 new_extents[0].disk_bytenr,
7032                                                 new_extents[0].disk_num_bytes,
7033                                                 leaf->start,
7034                                                 root->root_key.objectid,
7035                                                 trans->transid,
7036                                                 key.objectid);
7037                         BUG_ON(ret);
7038
7039                         ret = btrfs_free_extent(trans, root,
7040                                                 extent_key->objectid,
7041                                                 extent_key->offset,
7042                                                 leaf->start,
7043                                                 btrfs_header_owner(leaf),
7044                                                 btrfs_header_generation(leaf),
7045                                                 key.objectid, 0);
7046                         BUG_ON(ret);
7047
7048                         btrfs_release_path(root, path);
7049                         key.offset += num_bytes;
7050                 } else {
7051                         BUG_ON(1);
7052 #if 0
7053                         u64 alloc_hint;
7054                         u64 extent_len;
7055                         int i;
7056                         /*
7057                          * drop the old extent pointer first, then insert the
7058                          * new pointers one by one
7059                          */
7060                         btrfs_release_path(root, path);
7061                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
7062                                                  key.offset + num_bytes,
7063                                                  key.offset, &alloc_hint);
7064                         BUG_ON(ret);
7065
7066                         for (i = 0; i < nr_extents; i++) {
7067                                 if (ext_offset >= new_extents[i].num_bytes) {
7068                                         ext_offset -= new_extents[i].num_bytes;
7069                                         continue;
7070                                 }
7071                                 extent_len = min(new_extents[i].num_bytes -
7072                                                  ext_offset, num_bytes);
7073
7074                                 ret = btrfs_insert_empty_item(trans, root,
7075                                                               path, &key,
7076                                                               sizeof(*fi));
7077                                 BUG_ON(ret);
7078
7079                                 leaf = path->nodes[0];
7080                                 fi = btrfs_item_ptr(leaf, path->slots[0],
7081                                                 struct btrfs_file_extent_item);
7082                                 btrfs_set_file_extent_generation(leaf, fi,
7083                                                         trans->transid);
7084                                 btrfs_set_file_extent_type(leaf, fi,
7085                                                         BTRFS_FILE_EXTENT_REG);
7086                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7087                                                 new_extents[i].disk_bytenr);
7088                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7089                                                 new_extents[i].disk_num_bytes);
7090                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
7091                                                 new_extents[i].ram_bytes);
7092
7093                                 btrfs_set_file_extent_compression(leaf, fi,
7094                                                 new_extents[i].compression);
7095                                 btrfs_set_file_extent_encryption(leaf, fi,
7096                                                 new_extents[i].encryption);
7097                                 btrfs_set_file_extent_other_encoding(leaf, fi,
7098                                                 new_extents[i].other_encoding);
7099
7100                                 btrfs_set_file_extent_num_bytes(leaf, fi,
7101                                                         extent_len);
7102                                 ext_offset += new_extents[i].offset;
7103                                 btrfs_set_file_extent_offset(leaf, fi,
7104                                                         ext_offset);
7105                                 btrfs_mark_buffer_dirty(leaf);
7106
7107                                 btrfs_drop_extent_cache(inode, key.offset,
7108                                                 key.offset + extent_len - 1, 0);
7109
7110                                 ret = btrfs_inc_extent_ref(trans, root,
7111                                                 new_extents[i].disk_bytenr,
7112                                                 new_extents[i].disk_num_bytes,
7113                                                 leaf->start,
7114                                                 root->root_key.objectid,
7115                                                 trans->transid, key.objectid);
7116                                 BUG_ON(ret);
7117                                 btrfs_release_path(root, path);
7118
7119                                 inode_add_bytes(inode, extent_len);
7120
7121                                 ext_offset = 0;
7122                                 num_bytes -= extent_len;
7123                                 key.offset += extent_len;
7124
7125                                 if (num_bytes == 0)
7126                                         break;
7127                         }
7128                         BUG_ON(i >= nr_extents);
7129 #endif
7130                 }
7131
7132                 if (extent_locked) {
7133                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7134                                       lock_end, GFP_NOFS);
7135                         extent_locked = 0;
7136                 }
7137 skip:
7138                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
7139                     key.offset >= search_end)
7140                         break;
7141
7142                 cond_resched();
7143         }
7144         ret = 0;
7145 out:
7146         btrfs_release_path(root, path);
7147         if (inode) {
7148                 mutex_unlock(&inode->i_mutex);
7149                 if (extent_locked) {
7150                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7151                                       lock_end, GFP_NOFS);
7152                 }
7153                 iput(inode);
7154         }
7155         return ret;
7156 }
7157
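     /*
      * when a leaf in a reloc tree is COWed, duplicate the leaf ref
      * cached for the original block so lookups against the new block
      * still find the extent references.
      */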
7158 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7159                                struct btrfs_root *root,
7160                                struct extent_buffer *buf, u64 orig_start)
7161 {
7162         int level;
7163         int ret;
7164
7165         BUG_ON(btrfs_header_generation(buf) != trans->transid);
7166         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7167
7168         level = btrfs_header_level(buf);
7169         if (level == 0) {
7170                 struct btrfs_leaf_ref *ref;
7171                 struct btrfs_leaf_ref *orig_ref;
7172
7173                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7174                 if (!orig_ref)
7175                         return -ENOENT;
7176
7177                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7178                 if (!ref) {
7179                         btrfs_free_leaf_ref(root, orig_ref);
7180                         return -ENOMEM;
7181                 }
7182
7183                 ref->nritems = orig_ref->nritems;
7184                 memcpy(ref->extents, orig_ref->extents,
7185                         sizeof(ref->extents[0]) * ref->nritems);
7186
7187                 btrfs_free_leaf_ref(root, orig_ref);
7188
7189                 ref->root_gen = trans->transid;
7190                 ref->bytenr = buf->start;
7191                 ref->owner = btrfs_header_owner(buf);
7192                 ref->generation = btrfs_header_generation(buf);
7193
7194                 ret = btrfs_add_leaf_ref(root, ref, 0);
7195                 WARN_ON(ret);
7196                 btrfs_free_leaf_ref(root, ref);
7197         }
7198         return 0;
7199 }
7200
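     /*
      * drop the cached extent mappings in 'target_root' for every file
      * extent item in 'leaf', so the mappings get rebuilt from the
      * relocated extents.
      */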
7201 static noinline int invalidate_extent_cache(struct btrfs_root *root,
7202                                         struct extent_buffer *leaf,
7203                                         struct btrfs_block_group_cache *group,
7204                                         struct btrfs_root *target_root)
7205 {
7206         struct btrfs_key key;
7207         struct inode *inode = NULL;
7208         struct btrfs_file_extent_item *fi;
7209         struct extent_state *cached_state = NULL;
7210         u64 num_bytes;
7211         u64 skip_objectid = 0;
7212         u32 nritems;
7213         u32 i;
7214
7215         nritems = btrfs_header_nritems(leaf);
7216         for (i = 0; i < nritems; i++) {
7217                 btrfs_item_key_to_cpu(leaf, &key, i);
7218                 if (key.objectid == skip_objectid ||
7219                     key.type != BTRFS_EXTENT_DATA_KEY)
7220                         continue;
7221                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7222                 if (btrfs_file_extent_type(leaf, fi) ==
7223                     BTRFS_FILE_EXTENT_INLINE)
7224                         continue;
7225                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7226                         continue;
7227                 if (!inode || inode->i_ino != key.objectid) {
7228                         iput(inode);
7229                         inode = btrfs_ilookup(target_root->fs_info->sb,
7230                                               key.objectid, target_root, 1);
7231                 }
7232                 if (!inode) {
7233                         skip_objectid = key.objectid;
7234                         continue;
7235                 }
7236                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7237
7238                 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7239                                  key.offset + num_bytes - 1, 0, &cached_state,
7240                                  GFP_NOFS);
7241                 btrfs_drop_extent_cache(inode, key.offset,
7242                                         key.offset + num_bytes - 1, 1);
7243                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7244                                      key.offset + num_bytes - 1, &cached_state,
7245                                      GFP_NOFS);
7246                 cond_resched();
7247         }
7248         iput(inode);
7249         return 0;
7250 }
7251
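     /*
      * rewrite the file extent items in 'leaf' that fall inside block
      * group 'group' to point at their new copies in 'reloc_inode', and
      * update the cached leaf ref to match.
      */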
7252 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7253                                         struct btrfs_root *root,
7254                                         struct extent_buffer *leaf,
7255                                         struct btrfs_block_group_cache *group,
7256                                         struct inode *reloc_inode)
7257 {
7258         struct btrfs_key key;
7259         struct btrfs_key extent_key;
7260         struct btrfs_file_extent_item *fi;
7261         struct btrfs_leaf_ref *ref;
7262         struct disk_extent *new_extent;
7263         u64 bytenr;
7264         u64 num_bytes;
7265         u32 nritems;
7266         u32 i;
7267         int ext_index;
7268         int nr_extent;
7269         int ret;
7270
7271         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7272         BUG_ON(!new_extent);
7273
7274         ref = btrfs_lookup_leaf_ref(root, leaf->start);
7275         BUG_ON(!ref);
7276
7277         ext_index = -1;
7278         nritems = btrfs_header_nritems(leaf);
7279         for (i = 0; i < nritems; i++) {
7280                 btrfs_item_key_to_cpu(leaf, &key, i);
7281                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7282                         continue;
7283                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7284                 if (btrfs_file_extent_type(leaf, fi) ==
7285                     BTRFS_FILE_EXTENT_INLINE)
7286                         continue;
7287                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7288                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7289                 if (bytenr == 0)
7290                         continue;
7291
7292                 ext_index++;
7293                 if (bytenr >= group->key.objectid + group->key.offset ||
7294                     bytenr + num_bytes <= group->key.objectid)
7295                         continue;
7296
7297                 extent_key.objectid = bytenr;
7298                 extent_key.offset = num_bytes;
7299                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7300                 nr_extent = 1;
7301                 ret = get_new_locations(reloc_inode, &extent_key,
7302                                         group->key.objectid, 1,
7303                                         &new_extent, &nr_extent);
7304                 if (ret > 0)
7305                         continue;
7306                 BUG_ON(ret < 0);
7307
7308                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7309                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7310                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7311                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7312
7313                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7314                                                 new_extent->disk_bytenr);
7315                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7316                                                 new_extent->disk_num_bytes);
7317                 btrfs_mark_buffer_dirty(leaf);
7318
7319                 ret = btrfs_inc_extent_ref(trans, root,
7320                                         new_extent->disk_bytenr,
7321                                         new_extent->disk_num_bytes,
7322                                         leaf->start,
7323                                         root->root_key.objectid,
7324                                         trans->transid, key.objectid);
7325                 BUG_ON(ret);
7326
7327                 ret = btrfs_free_extent(trans, root,
7328                                         bytenr, num_bytes, leaf->start,
7329                                         btrfs_header_owner(leaf),
7330                                         btrfs_header_generation(leaf),
7331                                         key.objectid, 0);
7332                 BUG_ON(ret);
7333                 cond_resched();
7334         }
7335         kfree(new_extent);
7336         BUG_ON(ext_index + 1 != ref->nritems);
7337         btrfs_free_leaf_ref(root, ref);
7338         return 0;
7339 }
7340
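     /*
      * detach the reloc tree from 'root' and queue it on the dead list;
      * the root item is updated so the tree can be dropped later by
      * btrfs_drop_dead_reloc_roots().
      */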
7341 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7342                           struct btrfs_root *root)
7343 {
7344         struct btrfs_root *reloc_root;
7345         int ret;
7346
7347         if (root->reloc_root) {
7348                 reloc_root = root->reloc_root;
7349                 root->reloc_root = NULL;
7350                 list_add(&reloc_root->dead_list,
7351                          &root->fs_info->dead_reloc_roots);
7352
7353                 btrfs_set_root_bytenr(&reloc_root->root_item,
7354                                       reloc_root->node->start);
7355                 btrfs_set_root_level(&reloc_root->root_item,
7356                                      btrfs_header_level(reloc_root->node));
7357                 memset(&reloc_root->root_item.drop_progress, 0,
7358                         sizeof(struct btrfs_disk_key));
7359                 reloc_root->root_item.drop_level = 0;
7360
7361                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7362                                         &reloc_root->root_key,
7363                                         &reloc_root->root_item);
7364                 BUG_ON(ret);
7365         }
7366         return 0;
7367 }
7368
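     /*
      * drop every reloc tree on the dead list, restarting the transaction
      * whenever btrfs_drop_snapshot() returns -EAGAIN.
      */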
7369 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7370 {
7371         struct btrfs_trans_handle *trans;
7372         struct btrfs_root *reloc_root;
7373         struct btrfs_root *prev_root = NULL;
7374         struct list_head dead_roots;
7375         int ret;
7376         unsigned long nr;
7377
7378         INIT_LIST_HEAD(&dead_roots);
7379         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7380
7381         while (!list_empty(&dead_roots)) {
7382                 reloc_root = list_entry(dead_roots.prev,
7383                                         struct btrfs_root, dead_list);
7384                 list_del_init(&reloc_root->dead_list);
7385
7386                 BUG_ON(reloc_root->commit_root != NULL);
7387                 while (1) {
7388                         trans = btrfs_join_transaction(root, 1);
7389                         BUG_ON(!trans);
7390
7391                         mutex_lock(&root->fs_info->drop_mutex);
7392                         ret = btrfs_drop_snapshot(trans, reloc_root);
7393                         if (ret != -EAGAIN)
7394                                 break;
7395                         mutex_unlock(&root->fs_info->drop_mutex);
7396
7397                         nr = trans->blocks_used;
7398                         ret = btrfs_end_transaction(trans, root);
7399                         BUG_ON(ret);
7400                         btrfs_btree_balance_dirty(root, nr);
7401                 }
7402
7403                 free_extent_buffer(reloc_root->node);
7404
7405                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
7406                                      &reloc_root->root_key);
7407                 BUG_ON(ret);
7408                 mutex_unlock(&root->fs_info->drop_mutex);
7409
7410                 nr = trans->blocks_used;
7411                 ret = btrfs_end_transaction(trans, root);
7412                 BUG_ON(ret);
7413                 btrfs_btree_balance_dirty(root, nr);
7414
7415                 kfree(prev_root);
7416                 prev_root = reloc_root;
7417         }
7418         if (prev_root) {
7419                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7420                 kfree(prev_root);
7421         }
7422         return 0;
7423 }
7424
7425 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7426 {
7427         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7428         return 0;
7429 }
7430
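     /*
      * find any dead reloc roots left in the tree root and commit a
      * transaction to drop them, then run orphan cleanup on the data
      * relocation tree.
      */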
7431 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7432 {
7433         struct btrfs_root *reloc_root;
7434         struct btrfs_trans_handle *trans;
7435         struct btrfs_key location;
7436         int found;
7437         int ret;
7438
7439         mutex_lock(&root->fs_info->tree_reloc_mutex);
7440         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7441         BUG_ON(ret);
7442         found = !list_empty(&root->fs_info->dead_reloc_roots);
7443         mutex_unlock(&root->fs_info->tree_reloc_mutex);
7444
7445         if (found) {
7446                 trans = btrfs_start_transaction(root, 1);
7447                 BUG_ON(!trans);
7448                 ret = btrfs_commit_transaction(trans, root);
7449                 BUG_ON(ret);
7450         }
7451
7452         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7453         location.offset = (u64)-1;
7454         location.type = BTRFS_ROOT_ITEM_KEY;
7455
7456         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7457         BUG_ON(!reloc_root);
7458         btrfs_orphan_cleanup(reloc_root);
7459         return 0;
7460 }
7461
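     /*
      * create the reloc tree for 'root' if it doesn't exist yet: snapshot
      * the committed root into a new tree that lives under the shared
      * BTRFS_TREE_RELOC_OBJECTID key.
      */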
7462 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7463                                     struct btrfs_root *root)
7464 {
7465         struct btrfs_root *reloc_root;
7466         struct extent_buffer *eb;
7467         struct btrfs_root_item *root_item;
7468         struct btrfs_key root_key;
7469         int ret;
7470
7471         BUG_ON(!root->ref_cows);
7472         if (root->reloc_root)
7473                 return 0;
7474
7475         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7476         BUG_ON(!root_item);
7477
7478         ret = btrfs_copy_root(trans, root, root->commit_root,
7479                               &eb, BTRFS_TREE_RELOC_OBJECTID);
7480         BUG_ON(ret);
7481
7482         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7483         root_key.offset = root->root_key.objectid;
7484         root_key.type = BTRFS_ROOT_ITEM_KEY;
7485
7486         memcpy(root_item, &root->root_item, sizeof(*root_item));
7487         btrfs_set_root_refs(root_item, 0);
7488         btrfs_set_root_bytenr(root_item, eb->start);
7489         btrfs_set_root_level(root_item, btrfs_header_level(eb));
7490         btrfs_set_root_generation(root_item, trans->transid);
7491
7492         btrfs_tree_unlock(eb);
7493         free_extent_buffer(eb);
7494
7495         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7496                                 &root_key, root_item);
7497         BUG_ON(ret);
7498         kfree(root_item);
7499
7500         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7501                                                  &root_key);
7502         BUG_ON(!reloc_root);
7503         reloc_root->last_trans = trans->transid;
7504         reloc_root->commit_root = NULL;
7505         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7506
7507         root->reloc_root = reloc_root;
7508         return 0;
7509 }
7510
7511 /*
7512  * Core function of space balance.
7513  *
7514  * The idea is to use reloc trees to relocate tree blocks in reference
7515  * counted roots. There is one reloc tree for each subvol, and all
7516  * reloc trees share the same root key objectid. Reloc trees are
7517  * snapshots of the latest committed roots of subvols (root->commit_root).
7518  *
7519  * To relocate a tree block referenced by a subvol, there are two steps:
7520  * COW the block through the subvol's reloc tree, then update the block
7521  * pointer in the subvol to point to the new block. Since all reloc trees
7522  * share the same root key objectid, special handling for tree blocks
7523  * owned by them is easy. Once a tree block has been COWed in one reloc
7524  * tree, the resulting new block can be used directly when the same block
7525  * needs to be COWed again through other reloc trees. This way, relocated
7526  * tree blocks are shared between reloc trees, and therefore also shared
7527  * between subvols.
7528  */
7529 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
7530                                       struct btrfs_root *root,
7531                                       struct btrfs_path *path,
7532                                       struct btrfs_key *first_key,
7533                                       struct btrfs_ref_path *ref_path,
7534                                       struct btrfs_block_group_cache *group,
7535                                       struct inode *reloc_inode)
7536 {
7537         struct btrfs_root *reloc_root;
7538         struct extent_buffer *eb = NULL;
7539         struct btrfs_key *keys;
7540         u64 *nodes;
7541         int level;
7542         int shared_level;
7543         int lowest_level = 0;
7544         int ret;
7545
7546         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7547                 lowest_level = ref_path->owner_objectid;
7548
7549         if (!root->ref_cows) {
7550                 path->lowest_level = lowest_level;
7551                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7552                 BUG_ON(ret < 0);
7553                 path->lowest_level = 0;
7554                 btrfs_release_path(root, path);
7555                 return 0;
7556         }
7557
7558         mutex_lock(&root->fs_info->tree_reloc_mutex);
7559         ret = init_reloc_tree(trans, root);
7560         BUG_ON(ret);
7561         reloc_root = root->reloc_root;
7562
7563         shared_level = ref_path->shared_level;
7564         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
7565
7566         keys = ref_path->node_keys;
7567         nodes = ref_path->new_nodes;
7568         memset(&keys[shared_level + 1], 0,
7569                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7570         memset(&nodes[shared_level + 1], 0,
7571                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
7572
7573         if (nodes[lowest_level] == 0) {
7574                 path->lowest_level = lowest_level;
7575                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7576                                         0, 1);
7577                 BUG_ON(ret);
7578                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7579                         eb = path->nodes[level];
7580                         if (!eb || eb == reloc_root->node)
7581                                 break;
7582                         nodes[level] = eb->start;
7583                         if (level == 0)
7584                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
7585                         else
7586                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
7587                 }
7588                 if (nodes[0] &&
7589                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7590                         eb = path->nodes[0];
7591                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
7592                                                       group, reloc_inode);
7593                         BUG_ON(ret);
7594                 }
7595                 btrfs_release_path(reloc_root, path);
7596         } else {
7597                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
7598                                        lowest_level);
7599                 BUG_ON(ret);
7600         }
7601
7602         /*
7603          * replace tree blocks in the fs tree with tree blocks in
7604          * the reloc tree.
7605          */
7606         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7607         BUG_ON(ret < 0);
7608
7609         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7610                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7611                                         0, 0);
7612                 BUG_ON(ret);
7613                 extent_buffer_get(path->nodes[0]);
7614                 eb = path->nodes[0];
7615                 btrfs_release_path(reloc_root, path);
7616                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
7617                 BUG_ON(ret);
7618                 free_extent_buffer(eb);
7619         }
7620
7621         mutex_unlock(&root->fs_info->tree_reloc_mutex);
7622         path->lowest_level = 0;
7623         return 0;
7624 }
7625
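     /*
      * relocate a single tree block; this is just relocate_one_path()
      * without a block group or reloc inode.
      */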
7626 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
7627                                         struct btrfs_root *root,
7628                                         struct btrfs_path *path,
7629                                         struct btrfs_key *first_key,
7630                                         struct btrfs_ref_path *ref_path)
7631 {
7632         int ret;
7633
7634         ret = relocate_one_path(trans, root, path, first_key,
7635                                 ref_path, NULL, NULL);
7636         BUG_ON(ret);
7637
7638         return 0;
7639 }
7640
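     /*
      * delete the extent item at 'extent_key'; used by the caller to get
      * rid of a bogus extent record starting at objectid zero.
      */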
7641 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7642                                     struct btrfs_root *extent_root,
7643                                     struct btrfs_path *path,
7644                                     struct btrfs_key *extent_key)
7645 {
7646         int ret;
7647
7648         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7649         if (ret)
7650                 goto out;
7651         ret = btrfs_del_item(trans, extent_root, path);
7652 out:
7653         btrfs_release_path(extent_root, path);
7654         return ret;
7655 }
7656
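     /*
      * read the root that the current reference path starts from.
      * COW-only roots use offset 0 in their key, everything else uses
      * (u64)-1.
      */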
7657 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7658                                                 struct btrfs_ref_path *ref_path)
7659 {
7660         struct btrfs_key root_key;
7661
7662         root_key.objectid = ref_path->root_objectid;
7663         root_key.type = BTRFS_ROOT_ITEM_KEY;
7664         if (is_cowonly_root(ref_path->root_objectid))
7665                 root_key.offset = 0;
7666         else
7667                 root_key.offset = (u64)-1;
7668
7669         return btrfs_read_fs_root_no_name(fs_info, &root_key);
7670 }
7671
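     /*
      * relocate all references to the extent described by 'extent_key'.
      * Data extents are either copied to new locations (pass 0) or have
      * their references updated in place (pass 1 and the fallback); tree
      * blocks are relocated through the owning root's reloc tree.
      */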
7672 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7673                                         struct btrfs_path *path,
7674                                         struct btrfs_key *extent_key,
7675                                         struct btrfs_block_group_cache *group,
7676                                         struct inode *reloc_inode, int pass)
7677 {
7678         struct btrfs_trans_handle *trans;
7679         struct btrfs_root *found_root;
7680         struct btrfs_ref_path *ref_path = NULL;
7681         struct disk_extent *new_extents = NULL;
7682         int nr_extents = 0;
7683         int loops;
7684         int ret;
7685         int level;
7686         struct btrfs_key first_key;
7687         u64 prev_block = 0;
7688
7690         trans = btrfs_start_transaction(extent_root, 1);
7691         BUG_ON(!trans);
7692
7693         if (extent_key->objectid == 0) {
7694                 ret = del_extent_zero(trans, extent_root, path, extent_key);
7695                 goto out;
7696         }
7697
7698         ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7699         if (!ref_path) {
7700                 ret = -ENOMEM;
7701                 goto out;
7702         }
7703
7704         for (loops = 0; ; loops++) {
7705                 if (loops == 0) {
7706                         ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7707                                                    extent_key->objectid);
7708                 } else {
7709                         ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7710                 }
7711                 if (ret < 0)
7712                         goto out;
7713                 if (ret > 0)
7714                         break;
7715
7716                 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7717                     ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7718                         continue;
7719
7720                 found_root = read_ref_root(extent_root->fs_info, ref_path);
7721                 BUG_ON(!found_root);
7722                 /*
7723                  * for reference counted trees, only process reference paths
7724                  * rooted at the latest committed root.
7725                  */
7726                 if (found_root->ref_cows &&
7727                     ref_path->root_generation != found_root->root_key.offset)
7728                         continue;
7729
7730                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7731                         if (pass == 0) {
7732                                 /*
7733                                  * copy data extents to new locations
7734                                  */
7735                                 u64 group_start = group->key.objectid;
7736                                 ret = relocate_data_extent(reloc_inode,
7737                                                            extent_key,
7738                                                            group_start);
7739                                 if (ret < 0)
7740                                         goto out;
7741                                 break;
7742                         }
7743                         level = 0;
7744                 } else {
7745                         level = ref_path->owner_objectid;
7746                 }
7747
7748                 if (prev_block != ref_path->nodes[level]) {
7749                         struct extent_buffer *eb;
7750                         u64 block_start = ref_path->nodes[level];
7751                         u64 block_size = btrfs_level_size(found_root, level);
7752
7753                         eb = read_tree_block(found_root, block_start,
7754                                              block_size, 0);
7755                         btrfs_tree_lock(eb);
7756                         BUG_ON(level != btrfs_header_level(eb));
7757
7758                         if (level == 0)
7759                                 btrfs_item_key_to_cpu(eb, &first_key, 0);
7760                         else
7761                                 btrfs_node_key_to_cpu(eb, &first_key, 0);
7762
7763                         btrfs_tree_unlock(eb);
7764                         free_extent_buffer(eb);
7765                         prev_block = block_start;
7766                 }
7767
7768                 mutex_lock(&extent_root->fs_info->trans_mutex);
7769                 btrfs_record_root_in_trans(found_root);
7770                 mutex_unlock(&extent_root->fs_info->trans_mutex);
7771                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7772                         /*
7773                          * try to update data extent references while
7774                          * keeping metadata shared between snapshots.
7775                          */
7776                         if (pass == 1) {
7777                                 ret = relocate_one_path(trans, found_root,
7778                                                 path, &first_key, ref_path,
7779                                                 group, reloc_inode);
7780                                 if (ret < 0)
7781                                         goto out;
7782                                 continue;
7783                         }
7784                         /*
7785                          * use fallback method to process the remaining
7786                          * references.
7787                          */
7788                         if (!new_extents) {
7789                                 u64 group_start = group->key.objectid;
7790                                 new_extents = kmalloc(sizeof(*new_extents),
7791                                                       GFP_NOFS);
7792                                 nr_extents = 1;
7793                                 ret = get_new_locations(reloc_inode,
7794                                                         extent_key,
7795                                                         group_start, 1,
7796                                                         &new_extents,
7797                                                         &nr_extents);
7798                                 if (ret)
7799                                         goto out;
7800                         }
7801                         ret = replace_one_extent(trans, found_root,
7802                                                 path, extent_key,
7803                                                 &first_key, ref_path,
7804                                                 new_extents, nr_extents);
7805                 } else {
7806                         ret = relocate_tree_block(trans, found_root, path,
7807                                                   &first_key, ref_path);
7808                 }
7809                 if (ret < 0)
7810                         goto out;
7811         }
7812         ret = 0;
7813 out:
7814         btrfs_end_transaction(trans, extent_root);
7815         kfree(new_extents);
7816         kfree(ref_path);
7817         return ret;
7818 }
7819 #endif
7820
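     /*
      * pick the RAID profile a block group should be rewritten with,
      * based on how many writeable devices are available.
      */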
7821 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7822 {
7823         u64 num_devices;
7824         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7825                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7826
7827         num_devices = root->fs_info->fs_devices->rw_devices;
7828         if (num_devices == 1) {
7829                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7830                 stripped = flags & ~stripped;
7831
7832                 /* turn raid0 into single device chunks */
7833                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7834                         return stripped;
7835
7836                 /* turn mirroring into duplication */
7837                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7838                              BTRFS_BLOCK_GROUP_RAID10))
7839                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7840                 return flags;
7841         } else {
7842                 /* they already had raid on here, just return */
7843                 if (flags & stripped)
7844                         return flags;
7845
7846                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7847                 stripped = flags & ~stripped;
7848
7849                 /* switch duplicated blocks with raid1 */
7850                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7851                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7852
7853                 /* turn single device chunks into raid0 */
7854                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7855         }
7856         return flags;
7857 }
7858
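     /*
      * mark a block group read-only if the space info has enough room
      * left to absorb the group's unused bytes.
      */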
7859 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
7860 {
7861         struct btrfs_space_info *sinfo = cache->space_info;
7862         u64 num_bytes;
7863         int ret = -ENOSPC;
7864
7865         if (cache->ro)
7866                 return 0;
7867
7868         spin_lock(&sinfo->lock);
7869         spin_lock(&cache->lock);
7870         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7871                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7872
7873         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7874             sinfo->bytes_may_use + sinfo->bytes_readonly +
7875             cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
7876                 sinfo->bytes_readonly += num_bytes;
7877                 sinfo->bytes_reserved += cache->reserved_pinned;
7878                 cache->reserved_pinned = 0;
7879                 cache->ro = 1;
7880                 ret = 0;
7881         }
7882         spin_unlock(&cache->lock);
7883         spin_unlock(&sinfo->lock);
7884         return ret;
7885 }
7886
7887 int btrfs_set_block_group_ro(struct btrfs_root *root,
7888                              struct btrfs_block_group_cache *cache)
7890 {
7891         struct btrfs_trans_handle *trans;
7892         u64 alloc_flags;
7893         int ret;
7894
7895         BUG_ON(cache->ro);
7896
7897         trans = btrfs_join_transaction(root, 1);
7898         BUG_ON(IS_ERR(trans));
7899
7900         alloc_flags = update_block_group_flags(root, cache->flags);
7901         if (alloc_flags != cache->flags)
7902                 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7903
7904         ret = set_block_group_ro(cache);
7905         if (!ret)
7906                 goto out;
7907         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7908         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7909         if (ret < 0)
7910                 goto out;
7911         ret = set_block_group_ro(cache);
7912 out:
7913         btrfs_end_transaction(trans, root);
7914         return ret;
7915 }
7916
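     /*
      * undo set_block_group_ro: give the group's unused bytes back to
      * the space info and clear the ro flag.
      */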
7917 int btrfs_set_block_group_rw(struct btrfs_root *root,
7918                               struct btrfs_block_group_cache *cache)
7919 {
7920         struct btrfs_space_info *sinfo = cache->space_info;
7921         u64 num_bytes;
7922
7923         BUG_ON(!cache->ro);
7924
7925         spin_lock(&sinfo->lock);
7926         spin_lock(&cache->lock);
7927         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7928                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7929         sinfo->bytes_readonly -= num_bytes;
7930         cache->ro = 0;
7931         spin_unlock(&cache->lock);
7932         spin_unlock(&sinfo->lock);
7933         return 0;
7934 }
7935
7936 /*
7937  * checks to see if it's even possible to relocate this block group.
7938  *
7939  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7940  * ok to go ahead and try.
7941  */
7942 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7943 {
7944         struct btrfs_block_group_cache *block_group;
7945         struct btrfs_space_info *space_info;
7946         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7947         struct btrfs_device *device;
7948         int full = 0;
7949         int ret = 0;
7950
7951         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7952
7953         /* odd, couldn't find the block group, leave it alone */
7954         if (!block_group)
7955                 return -1;
7956
7957         /* no bytes used, we're good */
7958         if (!btrfs_block_group_used(&block_group->item))
7959                 goto out;
7960
7961         space_info = block_group->space_info;
7962         spin_lock(&space_info->lock);
7963
7964         full = space_info->full;
7965
7966         /*
7967          * if this is the last block group we have in this space, we can't
7968          * relocate it unless we're able to allocate a new chunk below.
7969          *
7970          * Otherwise, we need to make sure we have room in the space to handle
7971          * all of the extents from this block group.  If we can, we're good
7972          */
7973         if ((space_info->total_bytes != block_group->key.offset) &&
7974            (space_info->bytes_used + space_info->bytes_reserved +
7975             space_info->bytes_pinned + space_info->bytes_readonly +
7976             btrfs_block_group_used(&block_group->item) <
7977             space_info->total_bytes)) {
7978                 spin_unlock(&space_info->lock);
7979                 goto out;
7980         }
7981         spin_unlock(&space_info->lock);
7982
7983         /*
7984          * ok we don't have enough space, but maybe we have free space on our
7985          * devices to allocate new chunks for relocation, so loop through our
7986          * alloc devices and guess if we have enough space.  However, if we
7987          * were marked as full, then we know there aren't enough chunks, and we
7988          * can just return.
7989          */
7990         ret = -1;
7991         if (full)
7992                 goto out;
7993
7994         mutex_lock(&root->fs_info->chunk_mutex);
7995         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7996                 u64 min_free = btrfs_block_group_used(&block_group->item);
7997                 u64 dev_offset, max_avail;
7998
7999                 /*
8000                  * check to make sure we can actually find a chunk with enough
8001                  * space to fit our block group in.
8002                  */
8003                 if (device->total_bytes > device->bytes_used + min_free) {
8004                         ret = find_free_dev_extent(NULL, device, min_free,
8005                                                    &dev_offset, &max_avail);
8006                         if (!ret)
8007                                 break;
8008                         ret = -1;
8009                 }
8010         }
8011         mutex_unlock(&root->fs_info->chunk_mutex);
8012 out:
8013         btrfs_put_block_group(block_group);
8014         return ret;
8015 }
8016
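     /*
      * find the first block group item at or after 'key' in the extent
      * tree and leave the path pointing at it.
      */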
8017 static int find_first_block_group(struct btrfs_root *root,
8018                 struct btrfs_path *path, struct btrfs_key *key)
8019 {
8020         int ret = 0;
8021         struct btrfs_key found_key;
8022         struct extent_buffer *leaf;
8023         int slot;
8024
8025         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8026         if (ret < 0)
8027                 goto out;
8028
8029         while (1) {
8030                 slot = path->slots[0];
8031                 leaf = path->nodes[0];
8032                 if (slot >= btrfs_header_nritems(leaf)) {
8033                         ret = btrfs_next_leaf(root, path);
8034                         if (ret == 0)
8035                                 continue;
8036                         if (ret < 0)
8037                                 goto out;
8038                         break;
8039                 }
8040                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8041
8042                 if (found_key.objectid >= key->objectid &&
8043                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8044                         ret = 0;
8045                         goto out;
8046                 }
8047                 path->slots[0]++;
8048         }
8049 out:
8050         return ret;
8051 }
8052
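/*
 * called while tearing down the filesystem to drop the reference (iref)
 * each block group may still hold on its free space cache inode, so
 * those inodes can actually be freed.
 */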
8053 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8054 {
8055         struct btrfs_block_group_cache *block_group;
8056         u64 last = 0;
8057
8058         while (1) {
8059                 struct inode *inode;
8060
8061                 block_group = btrfs_lookup_first_block_group(info, last);
8062                 while (block_group) {
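			/*
			 * groups still holding a cache inode ref are handled
			 * below; note that we break with ->lock still held,
			 * and drop it after clearing iref.
			 */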
8063                         spin_lock(&block_group->lock);
8064                         if (block_group->iref)
8065                                 break;
8066                         spin_unlock(&block_group->lock);
8067                         block_group = next_block_group(info->tree_root,
8068                                                        block_group);
8069                 }
8070                 if (!block_group) {
8071                         if (last == 0)
8072                                 break;
8073                         last = 0;
8074                         continue;
8075                 }
8076
8077                 inode = block_group->inode;
8078                 block_group->iref = 0;
8079                 block_group->inode = NULL;
8080                 spin_unlock(&block_group->lock);
8081                 iput(inode);
8082                 last = block_group->key.objectid + block_group->key.offset;
8083                 btrfs_put_block_group(block_group);
8084         }
8085 }
8086
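/*
 * unmount-time teardown of the block group caches: drop any leftover
 * caching controls, unlink every block group from the rbtree and its
 * space_info list, wait for in-flight caching threads, then free the
 * space_info structs themselves.
 */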
8087 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8088 {
8089         struct btrfs_block_group_cache *block_group;
8090         struct btrfs_space_info *space_info;
8091         struct btrfs_caching_control *caching_ctl;
8092         struct rb_node *n;
8093
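	/* drop the ref the caching_block_groups list holds on each caching control */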
8094         down_write(&info->extent_commit_sem);
8095         while (!list_empty(&info->caching_block_groups)) {
8096                 caching_ctl = list_entry(info->caching_block_groups.next,
8097                                          struct btrfs_caching_control, list);
8098                 list_del(&caching_ctl->list);
8099                 put_caching_control(caching_ctl);
8100         }
8101         up_write(&info->extent_commit_sem);
8102
8103         spin_lock(&info->block_group_cache_lock);
8104         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8105                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8106                                        cache_node);
8107                 rb_erase(&block_group->cache_node,
8108                          &info->block_group_cache_tree);
8109                 spin_unlock(&info->block_group_cache_lock);
8110
8111                 down_write(&block_group->space_info->groups_sem);
8112                 list_del(&block_group->list);
8113                 up_write(&block_group->space_info->groups_sem);
8114
8115                 if (block_group->cached == BTRFS_CACHE_STARTED)
8116                         wait_block_group_cache_done(block_group);
8117
8118                 btrfs_remove_free_space_cache(block_group);
8119                 btrfs_put_block_group(block_group);
8120
8121                 spin_lock(&info->block_group_cache_lock);
8122         }
8123         spin_unlock(&info->block_group_cache_lock);
8124
8125         /* now that all the block groups are freed, go through and
8126          * free all the space_info structs.  This is only called during
8127          * the final stages of unmount, and so we know nobody is
8128          * using them.  We call synchronize_rcu() once before we start,
8129          * just to be on the safe side.
8130          */
8131         synchronize_rcu();
8132
8133         release_global_block_rsv(info);
8134
8135         while (!list_empty(&info->space_info)) {
8136                 space_info = list_entry(info->space_info.next,
8137                                         struct btrfs_space_info,
8138                                         list);
8139                 if (space_info->bytes_pinned > 0 ||
8140                     space_info->bytes_reserved > 0) {
8141                         WARN_ON(1);
8142                         dump_space_info(space_info, 0, 0);
8143                 }
8144                 list_del(&space_info->list);
8145                 kfree(space_info);
8146         }
8147         return 0;
8148 }
8149
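/*
 * link a block group into the per-profile list on its space_info; the
 * list index is derived from the group's raid profile via
 * get_block_group_index().
 */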
8150 static void __link_block_group(struct btrfs_space_info *space_info,
8151                                struct btrfs_block_group_cache *cache)
8152 {
8153         int index = get_block_group_index(cache);
8154
8155         down_write(&space_info->groups_sem);
8156         list_add_tail(&cache->list, &space_info->block_groups[index]);
8157         up_write(&space_info->groups_sem);
8158 }
8159
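/*
 * called at mount time to build the in-memory block group cache from the
 * block group items in the extent tree.  Completely full and completely
 * empty groups are marked cached up front so they can skip the expensive
 * caching kthread, and un-mirrored groups are set read-only whenever
 * mirrored profiles exist.
 */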
8160 int btrfs_read_block_groups(struct btrfs_root *root)
8161 {
8162         struct btrfs_path *path;
8163         int ret;
8164         struct btrfs_block_group_cache *cache;
8165         struct btrfs_fs_info *info = root->fs_info;
8166         struct btrfs_space_info *space_info;
8167         struct btrfs_key key;
8168         struct btrfs_key found_key;
8169         struct extent_buffer *leaf;
8170         int need_clear = 0;
8171         u64 cache_gen;
8172
8173         root = info->extent_root;
8174         key.objectid = 0;
8175         key.offset = 0;
8176         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8177         path = btrfs_alloc_path();
8178         if (!path)
8179                 return -ENOMEM;
8180
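	/*
	 * if the free space cache generation doesn't match the current
	 * super generation, the on-disk cache is stale and every group's
	 * cache must be cleared and rewritten.
	 */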
8181         cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
8182         if (cache_gen != 0 &&
8183             btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
8184                 need_clear = 1;
8185
8186         while (1) {
8187                 ret = find_first_block_group(root, path, &key);
8188                 if (ret > 0)
8189                         break;
8190                 if (ret != 0)
8191                         goto error;
8192
8193                 leaf = path->nodes[0];
8194                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8195                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8196                 if (!cache) {
8197                         ret = -ENOMEM;
8198                         goto error;
8199                 }
8200
8201                 atomic_set(&cache->count, 1);
8202                 spin_lock_init(&cache->lock);
8203                 spin_lock_init(&cache->tree_lock);
8204                 cache->fs_info = info;
8205                 INIT_LIST_HEAD(&cache->list);
8206                 INIT_LIST_HEAD(&cache->cluster_list);
8207
8208                 if (need_clear)
8209                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8210
8211                 /*
8212                  * we only want to have 32k of RAM per block group for keeping
8213                  * track of free space, and if we pass 1/2 of that we want to
8214                  * start converting things over to using bitmaps
8215                  */
8216                 cache->extents_thresh = ((1024 * 32) / 2) /
8217                         sizeof(struct btrfs_free_space);
8218
8219                 read_extent_buffer(leaf, &cache->item,
8220                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8221                                    sizeof(cache->item));
8222                 memcpy(&cache->key, &found_key, sizeof(found_key));
8223
8224                 key.objectid = found_key.objectid + found_key.offset;
8225                 btrfs_release_path(root, path);
8226                 cache->flags = btrfs_block_group_flags(&cache->item);
8227                 cache->sectorsize = root->sectorsize;
8228
8229                 /*
8230                  * check for two cases, either we are full, and therefore
8231                  * don't need to bother with the caching work since we won't
8232                  * find any space, or we are empty, and we can just add all
8233                  * the space in and be done with it.  This saves us a lot of
8234                  * time, particularly in the full case.
8235                  */
8236                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8237                         exclude_super_stripes(root, cache);
8238                         cache->last_byte_to_unpin = (u64)-1;
8239                         cache->cached = BTRFS_CACHE_FINISHED;
8240                         free_excluded_extents(root, cache);
8241                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8242                         exclude_super_stripes(root, cache);
8243                         cache->last_byte_to_unpin = (u64)-1;
8244                         cache->cached = BTRFS_CACHE_FINISHED;
8245                         add_new_free_space(cache, root->fs_info,
8246                                            found_key.objectid,
8247                                            found_key.objectid +
8248                                            found_key.offset);
8249                         free_excluded_extents(root, cache);
8250                 }
8251
8252                 ret = update_space_info(info, cache->flags, found_key.offset,
8253                                         btrfs_block_group_used(&cache->item),
8254                                         &space_info);
8255                 BUG_ON(ret);
8256                 cache->space_info = space_info;
8257                 spin_lock(&cache->space_info->lock);
8258                 cache->space_info->bytes_readonly += cache->bytes_super;
8259                 spin_unlock(&cache->space_info->lock);
8260
8261                 __link_block_group(space_info, cache);
8262
8263                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8264                 BUG_ON(ret);
8265
8266                 set_avail_alloc_bits(root->fs_info, cache->flags);
8267                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8268                         set_block_group_ro(cache);
8269         }
8270
8271         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8272                 if (!(get_alloc_profile(root, space_info->flags) &
8273                       (BTRFS_BLOCK_GROUP_RAID10 |
8274                        BTRFS_BLOCK_GROUP_RAID1 |
8275                        BTRFS_BLOCK_GROUP_DUP)))
8276                         continue;
8277                 /*
8278                  * avoid allocating from un-mirrored block group if there are
8279                  * mirrored block groups.
8280                  */
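		/* indices 3 and 4 are the raid0 and single lists */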
8281                 list_for_each_entry(cache, &space_info->block_groups[3], list)
8282                         set_block_group_ro(cache);
8283                 list_for_each_entry(cache, &space_info->block_groups[4], list)
8284                         set_block_group_ro(cache);
8285         }
8286
8287         init_global_block_rsv(info);
8288         ret = 0;
8289 error:
8290         btrfs_free_path(path);
8291         return ret;
8292 }
8293
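/*
 * create the block group for a newly allocated chunk: set up the
 * in-memory cache, record all of its space as free, link it into the
 * space_info lists, and insert the block group item into the extent tree.
 */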
8294 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8295                            struct btrfs_root *root, u64 bytes_used,
8296                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8297                            u64 size)
8298 {
8299         int ret;
8300         struct btrfs_root *extent_root;
8301         struct btrfs_block_group_cache *cache;
8302
8303         extent_root = root->fs_info->extent_root;
8304
8305         root->fs_info->last_trans_log_full_commit = trans->transid;
8306
8307         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8308         if (!cache)
8309                 return -ENOMEM;
8310
8311         cache->key.objectid = chunk_offset;
8312         cache->key.offset = size;
8313         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8314         cache->sectorsize = root->sectorsize;
8315         cache->fs_info = root->fs_info;
8316
8317         /*
8318          * we only want to have 32k of RAM per block group for keeping track
8319          * of free space, and if we pass 1/2 of that we want to start
8320          * converting things over to using bitmaps
8321          */
8322         cache->extents_thresh = ((1024 * 32) / 2) /
8323                 sizeof(struct btrfs_free_space);
8324         atomic_set(&cache->count, 1);
8325         spin_lock_init(&cache->lock);
8326         spin_lock_init(&cache->tree_lock);
8327         INIT_LIST_HEAD(&cache->list);
8328         INIT_LIST_HEAD(&cache->cluster_list);
8329
8330         btrfs_set_block_group_used(&cache->item, bytes_used);
8331         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8332         cache->flags = type;
8333         btrfs_set_block_group_flags(&cache->item, type);
8334
8335         cache->last_byte_to_unpin = (u64)-1;
8336         cache->cached = BTRFS_CACHE_FINISHED;
8337         exclude_super_stripes(root, cache);
8338
8339         add_new_free_space(cache, root->fs_info, chunk_offset,
8340                            chunk_offset + size);
8341
8342         free_excluded_extents(root, cache);
8343
8344         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8345                                 &cache->space_info);
8346         BUG_ON(ret);
8347
8348         spin_lock(&cache->space_info->lock);
8349         cache->space_info->bytes_readonly += cache->bytes_super;
8350         spin_unlock(&cache->space_info->lock);
8351
8352         __link_block_group(cache->space_info, cache);
8353
8354         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8355         BUG_ON(ret);
8356
8357         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
8358                                 sizeof(cache->item));
8359         BUG_ON(ret);
8360
8361         set_avail_alloc_bits(extent_root->fs_info, type);
8362
8363         return 0;
8364 }
8365
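/*
 * remove an empty, read-only block group: kick it out of any allocation
 * clusters, orphan its free space cache inode, delete the free space
 * cache item, unlink it from the rbtree and space_info lists, and delete
 * the block group item from the extent tree.
 */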
8366 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8367                              struct btrfs_root *root, u64 group_start)
8368 {
8369         struct btrfs_path *path;
8370         struct btrfs_block_group_cache *block_group;
8371         struct btrfs_free_cluster *cluster;
8372         struct btrfs_root *tree_root = root->fs_info->tree_root;
8373         struct btrfs_key key;
8374         struct inode *inode;
8375         int ret;
8376
8377         root = root->fs_info->extent_root;
8378
8379         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8380         BUG_ON(!block_group);
8381         BUG_ON(!block_group->ro);
8382
8383         /* make sure this block group isn't part of an allocation cluster */
8384         cluster = &root->fs_info->data_alloc_cluster;
8385         spin_lock(&cluster->refill_lock);
8386         btrfs_return_cluster_to_free_space(block_group, cluster);
8387         spin_unlock(&cluster->refill_lock);
8388
8389         /*
8390          * make sure this block group isn't part of a metadata
8391          * allocation cluster
8392          */
8393         cluster = &root->fs_info->meta_alloc_cluster;
8394         spin_lock(&cluster->refill_lock);
8395         btrfs_return_cluster_to_free_space(block_group, cluster);
8396         spin_unlock(&cluster->refill_lock);
8397
8398         path = btrfs_alloc_path();
8399         BUG_ON(!path);
8400
8401         inode = lookup_free_space_inode(root, block_group, path);
8402         if (!IS_ERR(inode)) {
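		/*
		 * orphan the free space cache inode and drop its link count
		 * so it is deleted once the last reference is put.
		 */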
8403                 btrfs_orphan_add(trans, inode);
8404                 clear_nlink(inode);
8405                 /* One for the block group's ref */
8406                 spin_lock(&block_group->lock);
8407                 if (block_group->iref) {
8408                         block_group->iref = 0;
8409                         block_group->inode = NULL;
8410                         spin_unlock(&block_group->lock);
8411                         iput(inode);
8412                 } else {
8413                         spin_unlock(&block_group->lock);
8414                 }
8415                 /* One for our lookup ref */
8416                 iput(inode);
8417         }
8418
8419         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8420         key.offset = block_group->key.objectid;
8421         key.type = 0;
8422
8423         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8424         if (ret < 0)
8425                 goto out;
8426         if (ret > 0)
8427                 btrfs_release_path(tree_root, path);
8428         if (ret == 0) {
8429                 ret = btrfs_del_item(trans, tree_root, path);
8430                 if (ret)
8431                         goto out;
8432                 btrfs_release_path(tree_root, path);
8433         }
8434
8435         spin_lock(&root->fs_info->block_group_cache_lock);
8436         rb_erase(&block_group->cache_node,
8437                  &root->fs_info->block_group_cache_tree);
8438         spin_unlock(&root->fs_info->block_group_cache_lock);
8439
8440         down_write(&block_group->space_info->groups_sem);
8441         /*
8442          * we must use list_del_init so people can check to see if they
8443          * are still on the list after taking the semaphore
8444          */
8445         list_del_init(&block_group->list);
8446         up_write(&block_group->space_info->groups_sem);
8447
8448         if (block_group->cached == BTRFS_CACHE_STARTED)
8449                 wait_block_group_cache_done(block_group);
8450
8451         btrfs_remove_free_space_cache(block_group);
8452
8453         spin_lock(&block_group->space_info->lock);
8454         block_group->space_info->total_bytes -= block_group->key.offset;
8455         block_group->space_info->bytes_readonly -= block_group->key.offset;
8456         spin_unlock(&block_group->space_info->lock);
8457
8458         memcpy(&key, &block_group->key, sizeof(key));
8459
8460         btrfs_clear_space_info_full(root->fs_info);
8461
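	/* once for our lookup ref, once for the ref held by the rbtree */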
8462         btrfs_put_block_group(block_group);
8463         btrfs_put_block_group(block_group);
8464
8465         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8466         if (ret > 0)
8467                 ret = -EIO;
8468         if (ret < 0)
8469                 goto out;
8470
8471         ret = btrfs_del_item(trans, root, path);
8472 out:
8473         btrfs_free_path(path);
8474         return ret;
8475 }