fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_FORCE = 1,
        CHUNK_ALLOC_LIMITED = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

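/*
 * Has this block group finished loading its free space into the cache?
 * The smp_mb() makes sure we see an up-to-date value of cache->cached
 * as set by the caching thread.
 */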
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

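/*
 * Block group caches are reference counted: the helper below takes a
 * reference and btrfs_put_block_group() drops it, freeing the free
 * space ctl and the cache itself once the last reference is gone.
 */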
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

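/*
 * Ranges the allocator must never hand out (e.g. super block stripes)
 * are marked EXTENT_UPTODATE in both freed_extents trees.
 * add_excluded_extent() sets the marks on a range and
 * free_excluded_extents() clears them for a whole block group.
 */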
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

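/*
 * Grab a reference on the caching control of a block group that is
 * currently being cached.  Returns NULL if caching already finished or
 * the group was loaded the fast way and has no caching_ctl;
 * put_caching_control() drops the reference and frees the control on
 * the final put.
 */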
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

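/*
 * Worker thread that walks the extent tree (via the commit root) for
 * one block group and feeds the gaps between allocated extents into
 * the free space cache, waking up waiters every couple of megabytes of
 * free space found.
 */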
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

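/*
 * Start loading a block group's free space: try the on-disk free space
 * cache first when it is safe to do so, and otherwise (unless the
 * caller asked for the fast path only) kick off caching_kthread() to
 * rebuild the cache from the extent tree.
 */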
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        /*
         * We can't do the read from the on-disk cache during a commit since we
         * need to have the normal tree locking.  Also if we are currently
         * trying to allocate blocks for the tree root we can't do the fast
         * caching since we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root)) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
                        return 0;
                }
                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);

                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
                if (ret == 1) {
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        }

        if (load_cache_only)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}


/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

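/*
 * Find the space_info (data, metadata or system) matching the given
 * block group flags, or NULL if none has been created yet.
 */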
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
                 BTRFS_BLOCK_GROUP_METADATA;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

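/*
 * Scale num by factor/10 (div_factor) or factor/100 (div_factor_fine);
 * used below for "is this block group mostly full" style checks.
 */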
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}

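/*
 * Scan the block groups for a metadata group that still has a decent
 * amount of free room, starting at the hint and widening the search
 * (wrapping around, then retrying with read-only groups included and
 * the fullness factor relaxed).  Returns the start of the chosen
 * group, or 0 if none qualified.
 */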
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags will be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for implicit back refs is the
 * objectid of the block's owner tree. The key offset for full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

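/*
 * The key offset of an implicit data back ref is this hash: a crc32c
 * of the root objectid combined with a crc32c of the inode objectid
 * and the file offset.
 */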
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

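/*
 * Find an existing data back ref item for the given extent.  Shared
 * refs are keyed by the parent block; implicit refs are searched at
 * the hashed key offset, walking forward over hash collisions until
 * root/owner/offset match and re-searching if the leaf changed
 * underneath us.
 */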
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

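/*
 * Insert a new data back ref item, or bump the count of an existing
 * one.  For implicit refs an -EEXIST on insert means a hash collision,
 * so we step the key offset forward until we find our item or an
 * unused slot.
 */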
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

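/*
 * Drop refs_to_drop references from the data back ref item the path
 * points at, deleting the item entirely once its count reaches zero.
 */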
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

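/*
 * Return the reference count stored in a data back ref, whether it
 * lives in an inline ref (iref != NULL) or in a standalone item.
 */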
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

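/*
 * Tree block back refs are keyed by the parent block for shared refs
 * and by the owner tree's objectid otherwise; the two helpers below
 * look up and insert those items.
 */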
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}

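/*
 * Pick the back ref key type for an extent: owner objectids below
 * BTRFS_FIRST_FREE_OBJECTID are tree roots, so the extent is a tree
 * block; a non-zero parent selects the shared variants.
 */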
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

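/*
 * Find the key immediately after the current path position by walking
 * up the tree until a node with a further slot is found.  Returns 1 if
 * the path is already at the very end of the tree.
 */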
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

1374 /*
1375  * look for inline back ref. if back ref is found, *ref_ret is set
1376  * to the address of inline back ref, and 0 is returned.
1377  *
1378  * if back ref isn't found, *ref_ret is set to the address where it
1379  * should be inserted, and -ENOENT is returned.
1380  *
1381  * if insert is true and there are too many inline back refs, the path
1382  * points to the extent item, and -EAGAIN is returned.
1383  *
1384  * NOTE: inline back refs are ordered in the same way that back ref
1385  *       items in the tree are ordered.
1386  */
1387 static noinline_for_stack
1388 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1389                                  struct btrfs_root *root,
1390                                  struct btrfs_path *path,
1391                                  struct btrfs_extent_inline_ref **ref_ret,
1392                                  u64 bytenr, u64 num_bytes,
1393                                  u64 parent, u64 root_objectid,
1394                                  u64 owner, u64 offset, int insert)
1395 {
1396         struct btrfs_key key;
1397         struct extent_buffer *leaf;
1398         struct btrfs_extent_item *ei;
1399         struct btrfs_extent_inline_ref *iref;
1400         u64 flags;
1401         u64 item_size;
1402         unsigned long ptr;
1403         unsigned long end;
1404         int extra_size;
1405         int type;
1406         int want;
1407         int ret;
1408         int err = 0;
1409
1410         key.objectid = bytenr;
1411         key.type = BTRFS_EXTENT_ITEM_KEY;
1412         key.offset = num_bytes;
1413
1414         want = extent_ref_type(parent, owner);
1415         if (insert) {
1416                 extra_size = btrfs_extent_inline_ref_size(want);
1417                 path->keep_locks = 1;
        } else {
                extra_size = -1;
        }
1420         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1421         if (ret < 0) {
1422                 err = ret;
1423                 goto out;
1424         }
1425         BUG_ON(ret);
1426
1427         leaf = path->nodes[0];
1428         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1429 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1430         if (item_size < sizeof(*ei)) {
1431                 if (!insert) {
1432                         err = -ENOENT;
1433                         goto out;
1434                 }
1435                 ret = convert_extent_item_v0(trans, root, path, owner,
1436                                              extra_size);
1437                 if (ret < 0) {
1438                         err = ret;
1439                         goto out;
1440                 }
1441                 leaf = path->nodes[0];
1442                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1443         }
1444 #endif
1445         BUG_ON(item_size < sizeof(*ei));
1446
1447         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1448         flags = btrfs_extent_flags(leaf, ei);
1449
1450         ptr = (unsigned long)(ei + 1);
1451         end = (unsigned long)ei + item_size;
1452
1453         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1454                 ptr += sizeof(struct btrfs_tree_block_info);
1455                 BUG_ON(ptr > end);
1456         } else {
1457                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1458         }
1459
1460         err = -ENOENT;
1461         while (1) {
1462                 if (ptr >= end) {
1463                         WARN_ON(ptr > end);
1464                         break;
1465                 }
1466                 iref = (struct btrfs_extent_inline_ref *)ptr;
1467                 type = btrfs_extent_inline_ref_type(leaf, iref);
1468                 if (want < type)
1469                         break;
1470                 if (want > type) {
1471                         ptr += btrfs_extent_inline_ref_size(type);
1472                         continue;
1473                 }
1474
1475                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1476                         struct btrfs_extent_data_ref *dref;
1477                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1478                         if (match_extent_data_ref(leaf, dref, root_objectid,
1479                                                   owner, offset)) {
1480                                 err = 0;
1481                                 break;
1482                         }
1483                         if (hash_extent_data_ref_item(leaf, dref) <
1484                             hash_extent_data_ref(root_objectid, owner, offset))
1485                                 break;
1486                 } else {
1487                         u64 ref_offset;
1488                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1489                         if (parent > 0) {
1490                                 if (parent == ref_offset) {
1491                                         err = 0;
1492                                         break;
1493                                 }
1494                                 if (ref_offset < parent)
1495                                         break;
1496                         } else {
1497                                 if (root_objectid == ref_offset) {
1498                                         err = 0;
1499                                         break;
1500                                 }
1501                                 if (ref_offset < root_objectid)
1502                                         break;
1503                         }
1504                 }
1505                 ptr += btrfs_extent_inline_ref_size(type);
1506         }
1507         if (err == -ENOENT && insert) {
1508                 if (item_size + extra_size >=
1509                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1510                         err = -EAGAIN;
1511                         goto out;
1512                 }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
1519                 if (find_next_key(path, 0, &key) == 0 &&
1520                     key.objectid == bytenr &&
1521                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1522                         err = -EAGAIN;
1523                         goto out;
1524                 }
1525         }
1526         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1527 out:
1528         if (insert) {
1529                 path->keep_locks = 0;
1530                 btrfs_unlock_up_safe(path, 1);
1531         }
1532         return err;
1533 }
1534
1535 /*
1536  * helper to add new inline back ref
1537  */
1538 static noinline_for_stack
1539 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1540                                 struct btrfs_root *root,
1541                                 struct btrfs_path *path,
1542                                 struct btrfs_extent_inline_ref *iref,
1543                                 u64 parent, u64 root_objectid,
1544                                 u64 owner, u64 offset, int refs_to_add,
1545                                 struct btrfs_delayed_extent_op *extent_op)
1546 {
1547         struct extent_buffer *leaf;
1548         struct btrfs_extent_item *ei;
1549         unsigned long ptr;
1550         unsigned long end;
1551         unsigned long item_offset;
1552         u64 refs;
1553         int size;
1554         int type;
1556
1557         leaf = path->nodes[0];
1558         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1559         item_offset = (unsigned long)iref - (unsigned long)ei;
1560
1561         type = extent_ref_type(parent, owner);
1562         size = btrfs_extent_inline_ref_size(type);
1563
        btrfs_extend_item(trans, root, path, size);
1565
1566         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1567         refs = btrfs_extent_refs(leaf, ei);
1568         refs += refs_to_add;
1569         btrfs_set_extent_refs(leaf, ei, refs);
1570         if (extent_op)
1571                 __run_delayed_extent_op(extent_op, leaf, ei);
1572
1573         ptr = (unsigned long)ei + item_offset;
1574         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1575         if (ptr < end - size)
1576                 memmove_extent_buffer(leaf, ptr + size, ptr,
1577                                       end - size - ptr);
1578
1579         iref = (struct btrfs_extent_inline_ref *)ptr;
1580         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1581         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1582                 struct btrfs_extent_data_ref *dref;
1583                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1584                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1585                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1586                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1587                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1588         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1589                 struct btrfs_shared_data_ref *sref;
1590                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1591                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1592                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1593         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1594                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1595         } else {
1596                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1597         }
1598         btrfs_mark_buffer_dirty(leaf);
1599         return 0;
1600 }
1601
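/*
 * look up a backref for the given extent, trying the inline form first
 * and falling back to a keyed backref item.  on fallback *ref_ret is
 * NULLed so the caller can tell the two cases apart.
 */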
1602 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1603                                  struct btrfs_root *root,
1604                                  struct btrfs_path *path,
1605                                  struct btrfs_extent_inline_ref **ref_ret,
1606                                  u64 bytenr, u64 num_bytes, u64 parent,
1607                                  u64 root_objectid, u64 owner, u64 offset)
1608 {
1609         int ret;
1610
1611         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1612                                            bytenr, num_bytes, parent,
1613                                            root_objectid, owner, offset, 0);
1614         if (ret != -ENOENT)
1615                 return ret;
1616
1617         btrfs_release_path(path);
1618         *ref_ret = NULL;
1619
1620         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1621                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1622                                             root_objectid);
1623         } else {
1624                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1625                                              root_objectid, owner, offset);
1626         }
1627         return ret;
1628 }
1629
1630 /*
1631  * helper to update/remove inline back ref
1632  */
1633 static noinline_for_stack
1634 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1635                                  struct btrfs_root *root,
1636                                  struct btrfs_path *path,
1637                                  struct btrfs_extent_inline_ref *iref,
1638                                  int refs_to_mod,
1639                                  struct btrfs_delayed_extent_op *extent_op)
1640 {
1641         struct extent_buffer *leaf;
1642         struct btrfs_extent_item *ei;
1643         struct btrfs_extent_data_ref *dref = NULL;
1644         struct btrfs_shared_data_ref *sref = NULL;
1645         unsigned long ptr;
1646         unsigned long end;
1647         u32 item_size;
1648         int size;
1649         int type;
1651         u64 refs;
1652
1653         leaf = path->nodes[0];
1654         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1655         refs = btrfs_extent_refs(leaf, ei);
1656         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1657         refs += refs_to_mod;
1658         btrfs_set_extent_refs(leaf, ei, refs);
1659         if (extent_op)
1660                 __run_delayed_extent_op(extent_op, leaf, ei);
1661
1662         type = btrfs_extent_inline_ref_type(leaf, iref);
1663
1664         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1665                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1666                 refs = btrfs_extent_data_ref_count(leaf, dref);
1667         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1668                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1669                 refs = btrfs_shared_data_ref_count(leaf, sref);
1670         } else {
1671                 refs = 1;
1672                 BUG_ON(refs_to_mod != -1);
1673         }
1674
1675         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1676         refs += refs_to_mod;
1677
1678         if (refs > 0) {
1679                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1680                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1681                 else
1682                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1683         } else {
                size = btrfs_extent_inline_ref_size(type);
1685                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1686                 ptr = (unsigned long)iref;
1687                 end = (unsigned long)ei + item_size;
1688                 if (ptr + size < end)
1689                         memmove_extent_buffer(leaf, ptr, ptr + size,
1690                                               end - ptr - size);
1691                 item_size -= size;
                btrfs_truncate_item(trans, root, path, item_size, 1);
1693         }
1694         btrfs_mark_buffer_dirty(leaf);
1695         return 0;
1696 }
1697
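/*
 * add refs_to_add references to an extent, preferring the inline form.
 * if a matching inline ref already exists it is updated in place; if
 * there is none, a new one is inserted.  -EAGAIN from the lookup (item
 * too large) is passed back so the caller can fall back to a keyed ref.
 */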
1698 static noinline_for_stack
1699 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1700                                  struct btrfs_root *root,
1701                                  struct btrfs_path *path,
1702                                  u64 bytenr, u64 num_bytes, u64 parent,
1703                                  u64 root_objectid, u64 owner,
1704                                  u64 offset, int refs_to_add,
1705                                  struct btrfs_delayed_extent_op *extent_op)
1706 {
1707         struct btrfs_extent_inline_ref *iref;
1708         int ret;
1709
1710         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1711                                            bytenr, num_bytes, parent,
1712                                            root_objectid, owner, offset, 1);
1713         if (ret == 0) {
1714                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1715                 ret = update_inline_extent_backref(trans, root, path, iref,
1716                                                    refs_to_add, extent_op);
1717         } else if (ret == -ENOENT) {
1718                 ret = setup_inline_extent_backref(trans, root, path, iref,
1719                                                   parent, root_objectid,
1720                                                   owner, offset, refs_to_add,
1721                                                   extent_op);
1722         }
1723         return ret;
1724 }
1725
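/*
 * insert a keyed backref item.  tree blocks carry at most one ref per
 * backref item, so refs_to_add must be 1 in that case.
 */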
1726 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1727                                  struct btrfs_root *root,
1728                                  struct btrfs_path *path,
1729                                  u64 bytenr, u64 parent, u64 root_objectid,
1730                                  u64 owner, u64 offset, int refs_to_add)
1731 {
1732         int ret;
1733         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1734                 BUG_ON(refs_to_add != 1);
1735                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1736                                             parent, root_objectid);
1737         } else {
1738                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1739                                              parent, root_objectid,
1740                                              owner, offset, refs_to_add);
1741         }
1742         return ret;
1743 }
1744
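/*
 * drop refs_to_drop references: inline backrefs are updated (or removed)
 * in place, keyed data refs are decremented, and keyed tree block refs
 * are simply deleted.
 */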
1745 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1746                                  struct btrfs_root *root,
1747                                  struct btrfs_path *path,
1748                                  struct btrfs_extent_inline_ref *iref,
1749                                  int refs_to_drop, int is_data)
1750 {
1751         int ret;
1752
1753         BUG_ON(!is_data && refs_to_drop != 1);
1754         if (iref) {
1755                 ret = update_inline_extent_backref(trans, root, path, iref,
1756                                                    -refs_to_drop, NULL);
1757         } else if (is_data) {
1758                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1759         } else {
1760                 ret = btrfs_del_item(trans, root, path);
1761         }
1762         return ret;
1763 }
1764
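/* issue a discard for the byte range, converting to 512-byte sectors */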
1765 static int btrfs_issue_discard(struct block_device *bdev,
1766                                 u64 start, u64 len)
1767 {
1768         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1769 }
1770
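/*
 * discard the physical stripes backing a logical byte range.  devices
 * that don't support discard (-EOPNOTSUPP) are skipped, and the number
 * of bytes actually discarded is returned in *actual_bytes.
 */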
1771 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1772                                 u64 num_bytes, u64 *actual_bytes)
1773 {
1774         int ret;
1775         u64 discarded_bytes = 0;
        struct btrfs_multi_bio *multi = NULL;

        /* Tell the block device(s) that the sectors can be discarded */
1780         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1781                               bytenr, &num_bytes, &multi, 0);
1782         if (!ret) {
1783                 struct btrfs_bio_stripe *stripe = multi->stripes;
                int i;

                for (i = 0; i < multi->num_stripes; i++, stripe++) {
1788                         ret = btrfs_issue_discard(stripe->dev->bdev,
1789                                                   stripe->physical,
1790                                                   stripe->length);
1791                         if (!ret)
1792                                 discarded_bytes += stripe->length;
1793                         else if (ret != -EOPNOTSUPP)
1794                                 break;
1795                 }
1796                 kfree(multi);
1797         }
1798         if (discarded_bytes && ret == -EOPNOTSUPP)
1799                 ret = 0;
1800
        if (actual_bytes)
                *actual_bytes = discarded_bytes;

        return ret;
1806 }
1807
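/*
 * queue a delayed ref that adds one more reference to an extent.  tree
 * blocks (owner below BTRFS_FIRST_FREE_OBJECTID) get a tree ref, data
 * extents get a data ref; the actual extent tree update happens when
 * the delayed refs are run.
 */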
1808 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1809                          struct btrfs_root *root,
1810                          u64 bytenr, u64 num_bytes, u64 parent,
1811                          u64 root_objectid, u64 owner, u64 offset)
1812 {
1813         int ret;
1814         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1815                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1816
1817         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1818                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1819                                         parent, root_objectid, (int)owner,
1820                                         BTRFS_ADD_DELAYED_REF, NULL);
1821         } else {
1822                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1823                                         parent, root_objectid, owner, offset,
1824                                         BTRFS_ADD_DELAYED_REF, NULL);
1825         }
1826         return ret;
1827 }
1828
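/*
 * do the extent tree work for adding references: bump the refcount on
 * the extent item and insert the matching backref, inline if it fits
 * and keyed otherwise (the -EAGAIN case).
 */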
1829 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1830                                   struct btrfs_root *root,
1831                                   u64 bytenr, u64 num_bytes,
1832                                   u64 parent, u64 root_objectid,
1833                                   u64 owner, u64 offset, int refs_to_add,
1834                                   struct btrfs_delayed_extent_op *extent_op)
1835 {
1836         struct btrfs_path *path;
1837         struct extent_buffer *leaf;
1838         struct btrfs_extent_item *item;
1839         u64 refs;
1840         int ret;
1841         int err = 0;
1842
1843         path = btrfs_alloc_path();
1844         if (!path)
1845                 return -ENOMEM;
1846
1847         path->reada = 1;
1848         path->leave_spinning = 1;
        /* this will set up the path even if it fails to insert the back ref */
1850         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1851                                            path, bytenr, num_bytes, parent,
1852                                            root_objectid, owner, offset,
1853                                            refs_to_add, extent_op);
1854         if (ret == 0)
1855                 goto out;
1856
1857         if (ret != -EAGAIN) {
1858                 err = ret;
1859                 goto out;
1860         }
1861
1862         leaf = path->nodes[0];
1863         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1864         refs = btrfs_extent_refs(leaf, item);
1865         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1866         if (extent_op)
1867                 __run_delayed_extent_op(extent_op, leaf, item);
1868
1869         btrfs_mark_buffer_dirty(leaf);
1870         btrfs_release_path(path);
1871
1872         path->reada = 1;
1873         path->leave_spinning = 1;
1874
1875         /* now insert the actual backref */
1876         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1877                                     path, bytenr, parent, root_objectid,
1878                                     owner, offset, refs_to_add);
1879         BUG_ON(ret);
1880 out:
1881         btrfs_free_path(path);
1882         return err;
1883 }
1884
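/*
 * process one delayed data ref: either allocate the reserved file
 * extent, add references to an existing one, or drop references.
 */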
1885 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1886                                 struct btrfs_root *root,
1887                                 struct btrfs_delayed_ref_node *node,
1888                                 struct btrfs_delayed_extent_op *extent_op,
1889                                 int insert_reserved)
1890 {
1891         int ret = 0;
1892         struct btrfs_delayed_data_ref *ref;
1893         struct btrfs_key ins;
1894         u64 parent = 0;
1895         u64 ref_root = 0;
1896         u64 flags = 0;
1897
1898         ins.objectid = node->bytenr;
1899         ins.offset = node->num_bytes;
1900         ins.type = BTRFS_EXTENT_ITEM_KEY;
1901
1902         ref = btrfs_delayed_node_to_data_ref(node);
1903         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1904                 parent = ref->parent;
1905         else
1906                 ref_root = ref->root;
1907
1908         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1909                 if (extent_op) {
1910                         BUG_ON(extent_op->update_key);
1911                         flags |= extent_op->flags_to_set;
1912                 }
1913                 ret = alloc_reserved_file_extent(trans, root,
1914                                                  parent, ref_root, flags,
1915                                                  ref->objectid, ref->offset,
1916                                                  &ins, node->ref_mod);
1917         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1918                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1919                                              node->num_bytes, parent,
1920                                              ref_root, ref->objectid,
1921                                              ref->offset, node->ref_mod,
1922                                              extent_op);
1923         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1924                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1925                                           node->num_bytes, parent,
1926                                           ref_root, ref->objectid,
1927                                           ref->offset, node->ref_mod,
1928                                           extent_op);
1929         } else {
1930                 BUG();
1931         }
1932         return ret;
1933 }
1934
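/*
 * apply a delayed extent op to an extent item in place: set extra flags
 * and/or update the key stored in the tree block info.
 */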
1935 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1936                                     struct extent_buffer *leaf,
1937                                     struct btrfs_extent_item *ei)
1938 {
1939         u64 flags = btrfs_extent_flags(leaf, ei);
1940         if (extent_op->update_flags) {
1941                 flags |= extent_op->flags_to_set;
1942                 btrfs_set_extent_flags(leaf, ei, flags);
1943         }
1944
1945         if (extent_op->update_key) {
1946                 struct btrfs_tree_block_info *bi;
1947                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1948                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1949                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1950         }
1951 }
1952
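/*
 * find the extent item for a delayed extent op and apply the op to it.
 * a missing extent item is treated as corruption (-EIO).
 */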
1953 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1954                                  struct btrfs_root *root,
1955                                  struct btrfs_delayed_ref_node *node,
1956                                  struct btrfs_delayed_extent_op *extent_op)
1957 {
1958         struct btrfs_key key;
1959         struct btrfs_path *path;
1960         struct btrfs_extent_item *ei;
1961         struct extent_buffer *leaf;
1962         u32 item_size;
1963         int ret;
1964         int err = 0;
1965
1966         path = btrfs_alloc_path();
1967         if (!path)
1968                 return -ENOMEM;
1969
1970         key.objectid = node->bytenr;
1971         key.type = BTRFS_EXTENT_ITEM_KEY;
1972         key.offset = node->num_bytes;
1973
1974         path->reada = 1;
1975         path->leave_spinning = 1;
1976         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1977                                 path, 0, 1);
1978         if (ret < 0) {
1979                 err = ret;
1980                 goto out;
1981         }
1982         if (ret > 0) {
1983                 err = -EIO;
1984                 goto out;
1985         }
1986
1987         leaf = path->nodes[0];
1988         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1989 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1990         if (item_size < sizeof(*ei)) {
1991                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1992                                              path, (u64)-1, 0);
1993                 if (ret < 0) {
1994                         err = ret;
1995                         goto out;
1996                 }
1997                 leaf = path->nodes[0];
1998                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1999         }
2000 #endif
2001         BUG_ON(item_size < sizeof(*ei));
2002         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2003         __run_delayed_extent_op(extent_op, leaf, ei);
2004
2005         btrfs_mark_buffer_dirty(leaf);
2006 out:
2007         btrfs_free_path(path);
2008         return err;
2009 }
2010
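/*
 * process one delayed tree ref: allocate the reserved tree block, add a
 * reference, or drop one.  tree refs always have a ref_mod of 1.
 */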
2011 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2012                                 struct btrfs_root *root,
2013                                 struct btrfs_delayed_ref_node *node,
2014                                 struct btrfs_delayed_extent_op *extent_op,
2015                                 int insert_reserved)
2016 {
2017         int ret = 0;
2018         struct btrfs_delayed_tree_ref *ref;
2019         struct btrfs_key ins;
2020         u64 parent = 0;
2021         u64 ref_root = 0;
2022
2023         ins.objectid = node->bytenr;
2024         ins.offset = node->num_bytes;
2025         ins.type = BTRFS_EXTENT_ITEM_KEY;
2026
2027         ref = btrfs_delayed_node_to_tree_ref(node);
2028         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2029                 parent = ref->parent;
2030         else
2031                 ref_root = ref->root;
2032
2033         BUG_ON(node->ref_mod != 1);
2034         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2035                 BUG_ON(!extent_op || !extent_op->update_flags ||
2036                        !extent_op->update_key);
2037                 ret = alloc_reserved_tree_block(trans, root,
2038                                                 parent, ref_root,
2039                                                 extent_op->flags_to_set,
2040                                                 &extent_op->key,
2041                                                 ref->level, &ins);
2042         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2043                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2044                                              node->num_bytes, parent, ref_root,
2045                                              ref->level, 0, 1, extent_op);
2046         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2047                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2048                                           node->num_bytes, parent, ref_root,
2049                                           ref->level, 0, 1, extent_op);
2050         } else {
2051                 BUG();
2052         }
2053         return ret;
2054 }
2055
2056 /* helper function to actually process a single delayed ref entry */
2057 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2058                                struct btrfs_root *root,
2059                                struct btrfs_delayed_ref_node *node,
2060                                struct btrfs_delayed_extent_op *extent_op,
2061                                int insert_reserved)
2062 {
2063         int ret;
2064         if (btrfs_delayed_ref_is_head(node)) {
2065                 struct btrfs_delayed_ref_head *head;
                /*
                 * we've hit the end of the chain and we were supposed
                 * to insert this extent into the tree.  But it got
                 * deleted before we ever needed to insert it, so all
                 * we have to do is clean up the accounting.
                 */
2072                 BUG_ON(extent_op);
2073                 head = btrfs_delayed_node_to_head(node);
2074                 if (insert_reserved) {
2075                         btrfs_pin_extent(root, node->bytenr,
2076                                          node->num_bytes, 1);
2077                         if (head->is_data) {
2078                                 ret = btrfs_del_csums(trans, root,
2079                                                       node->bytenr,
2080                                                       node->num_bytes);
2081                                 BUG_ON(ret);
2082                         }
2083                 }
2084                 mutex_unlock(&head->mutex);
2085                 return 0;
2086         }
2087
2088         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2089             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2090                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2091                                            insert_reserved);
2092         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2093                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2094                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2095                                            insert_reserved);
2096         else
2097                 BUG();
2098         return ret;
2099 }
2100
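/*
 * pick the next delayed ref to run for a head, preferring
 * BTRFS_ADD_DELAYED_REF entries over drops (see the comment below).
 * returns NULL when no refs are left for this head.
 */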
2101 static noinline struct btrfs_delayed_ref_node *
2102 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2103 {
2104         struct rb_node *node;
2105         struct btrfs_delayed_ref_node *ref;
2106         int action = BTRFS_ADD_DELAYED_REF;
2107 again:
        /*
         * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
         * this prevents the ref count from going down to zero when
         * there are still pending delayed refs.
         */
2113         node = rb_prev(&head->node.rb_node);
2114         while (1) {
2115                 if (!node)
2116                         break;
2117                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2118                                 rb_node);
2119                 if (ref->bytenr != head->node.bytenr)
2120                         break;
2121                 if (ref->action == action)
2122                         return ref;
2123                 node = rb_prev(node);
2124         }
2125         if (action == BTRFS_ADD_DELAYED_REF) {
2126                 action = BTRFS_DROP_DELAYED_REF;
2127                 goto again;
2128         }
2129         return NULL;
2130 }
2131
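/*
 * run the delayed refs for a cluster of ref heads.  called with
 * delayed_refs->lock held; the lock is dropped around the actual ref
 * processing and retaken before returning the number of refs run.
 */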
2132 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2133                                        struct btrfs_root *root,
2134                                        struct list_head *cluster)
2135 {
2136         struct btrfs_delayed_ref_root *delayed_refs;
2137         struct btrfs_delayed_ref_node *ref;
2138         struct btrfs_delayed_ref_head *locked_ref = NULL;
2139         struct btrfs_delayed_extent_op *extent_op;
2140         int ret;
2141         int count = 0;
2142         int must_insert_reserved = 0;
2143
2144         delayed_refs = &trans->transaction->delayed_refs;
2145         while (1) {
2146                 if (!locked_ref) {
2147                         /* pick a new head ref from the cluster list */
2148                         if (list_empty(cluster))
2149                                 break;
2150
2151                         locked_ref = list_entry(cluster->next,
2152                                      struct btrfs_delayed_ref_head, cluster);
2153
2154                         /* grab the lock that says we are going to process
2155                          * all the refs for this head */
2156                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2157
2158                         /*
2159                          * we may have dropped the spin lock to get the head
2160                          * mutex lock, and that might have given someone else
2161                          * time to free the head.  If that's true, it has been
2162                          * removed from our list and we can move on.
2163                          */
2164                         if (ret == -EAGAIN) {
2165                                 locked_ref = NULL;
2166                                 count++;
2167                                 continue;
2168                         }
2169                 }
2170
2171                 /*
2172                  * record the must insert reserved flag before we
2173                  * drop the spin lock.
2174                  */
2175                 must_insert_reserved = locked_ref->must_insert_reserved;
2176                 locked_ref->must_insert_reserved = 0;
2177
2178                 extent_op = locked_ref->extent_op;
2179                 locked_ref->extent_op = NULL;
2180
2181                 /*
2182                  * locked_ref is the head node, so we have to go one
2183                  * node back for any delayed ref updates
2184                  */
2185                 ref = select_delayed_ref(locked_ref);
2186                 if (!ref) {
                        /* All delayed refs have been processed, go ahead
                         * and send the head node to run_one_delayed_ref,
                         * so that any accounting fixes can happen.
                         */
2191                         ref = &locked_ref->node;
2192
2193                         if (extent_op && must_insert_reserved) {
2194                                 kfree(extent_op);
2195                                 extent_op = NULL;
2196                         }
2197
2198                         if (extent_op) {
2199                                 spin_unlock(&delayed_refs->lock);
2200
2201                                 ret = run_delayed_extent_op(trans, root,
2202                                                             ref, extent_op);
2203                                 BUG_ON(ret);
2204                                 kfree(extent_op);
2205
2206                                 cond_resched();
2207                                 spin_lock(&delayed_refs->lock);
2208                                 continue;
2209                         }
2210
2211                         list_del_init(&locked_ref->cluster);
2212                         locked_ref = NULL;
2213                 }
2214
2215                 ref->in_tree = 0;
2216                 rb_erase(&ref->rb_node, &delayed_refs->root);
2217                 delayed_refs->num_entries--;
2218
2219                 spin_unlock(&delayed_refs->lock);
2220
2221                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2222                                           must_insert_reserved);
2223                 BUG_ON(ret);
2224
2225                 btrfs_put_delayed_ref(ref);
2226                 kfree(extent_op);
2227                 count++;
2228
2229                 cond_resched();
2230                 spin_lock(&delayed_refs->lock);
2231         }
2232         return count;
2233 }
2234
2235 /*
2236  * this starts processing the delayed reference count updates and
2237  * extent insertions we have queued up so far.  count can be
2238  * 0, which means to process everything in the tree at the start
2239  * of the run (but not newly added entries), or it can be some target
2240  * number you'd like to process.
2241  */
2242 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2243                            struct btrfs_root *root, unsigned long count)
2244 {
2245         struct rb_node *node;
2246         struct btrfs_delayed_ref_root *delayed_refs;
2247         struct btrfs_delayed_ref_node *ref;
2248         struct list_head cluster;
2249         int ret;
2250         int run_all = count == (unsigned long)-1;
2251         int run_most = 0;
2252
2253         if (root == root->fs_info->extent_root)
2254                 root = root->fs_info->tree_root;
2255
2256         delayed_refs = &trans->transaction->delayed_refs;
2257         INIT_LIST_HEAD(&cluster);
2258 again:
2259         spin_lock(&delayed_refs->lock);
2260         if (count == 0) {
2261                 count = delayed_refs->num_entries * 2;
2262                 run_most = 1;
2263         }
2264         while (1) {
2265                 if (!(run_all || run_most) &&
2266                     delayed_refs->num_heads_ready < 64)
2267                         break;
2268
2269                 /*
2270                  * go find something we can process in the rbtree.  We start at
2271                  * the beginning of the tree, and then build a cluster
2272                  * of refs to process starting at the first one we are able to
2273                  * lock
2274                  */
2275                 ret = btrfs_find_ref_cluster(trans, &cluster,
2276                                              delayed_refs->run_delayed_start);
2277                 if (ret)
2278                         break;
2279
2280                 ret = run_clustered_refs(trans, root, &cluster);
2281                 BUG_ON(ret < 0);
2282
2283                 count -= min_t(unsigned long, ret, count);
2284
2285                 if (count == 0)
2286                         break;
2287         }
2288
2289         if (run_all) {
2290                 node = rb_first(&delayed_refs->root);
2291                 if (!node)
2292                         goto out;
2293                 count = (unsigned long)-1;
2294
2295                 while (node) {
2296                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2297                                        rb_node);
2298                         if (btrfs_delayed_ref_is_head(ref)) {
2299                                 struct btrfs_delayed_ref_head *head;
2300
2301                                 head = btrfs_delayed_node_to_head(ref);
2302                                 atomic_inc(&ref->refs);
2303
2304                                 spin_unlock(&delayed_refs->lock);
2305                                 /*
2306                                  * Mutex was contended, block until it's
2307                                  * released and try again
2308                                  */
2309                                 mutex_lock(&head->mutex);
2310                                 mutex_unlock(&head->mutex);
2311
2312                                 btrfs_put_delayed_ref(ref);
2313                                 cond_resched();
2314                                 goto again;
2315                         }
2316                         node = rb_next(node);
2317                 }
2318                 spin_unlock(&delayed_refs->lock);
2319                 schedule_timeout(1);
2320                 goto again;
2321         }
2322 out:
2323         spin_unlock(&delayed_refs->lock);
2324         return 0;
2325 }
2326
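/*
 * queue a delayed extent op that ORs the given flags into an extent
 * item's flags field the next time the delayed refs are run.
 */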
2327 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2328                                 struct btrfs_root *root,
2329                                 u64 bytenr, u64 num_bytes, u64 flags,
2330                                 int is_data)
2331 {
2332         struct btrfs_delayed_extent_op *extent_op;
2333         int ret;
2334
2335         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2336         if (!extent_op)
2337                 return -ENOMEM;
2338
2339         extent_op->flags_to_set = flags;
2340         extent_op->update_flags = 1;
2341         extent_op->update_key = 0;
2342         extent_op->is_data = is_data ? 1 : 0;
2343
2344         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2345         if (ret)
2346                 kfree(extent_op);
2347         return ret;
2348 }
2349
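/*
 * check whether there are pending delayed refs for this data extent that
 * would make it shared.  returns 0 if the only delayed ref belongs to
 * this root/inode/offset, 1 if some other reference exists, -ENOENT if
 * there is no delayed head, and -EAGAIN if the head mutex was contended.
 */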
2350 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2351                                       struct btrfs_root *root,
2352                                       struct btrfs_path *path,
2353                                       u64 objectid, u64 offset, u64 bytenr)
2354 {
2355         struct btrfs_delayed_ref_head *head;
2356         struct btrfs_delayed_ref_node *ref;
2357         struct btrfs_delayed_data_ref *data_ref;
2358         struct btrfs_delayed_ref_root *delayed_refs;
2359         struct rb_node *node;
        int ret = -ENOENT;

2363         delayed_refs = &trans->transaction->delayed_refs;
2364         spin_lock(&delayed_refs->lock);
2365         head = btrfs_find_delayed_ref_head(trans, bytenr);
2366         if (!head)
2367                 goto out;
2368
2369         if (!mutex_trylock(&head->mutex)) {
2370                 atomic_inc(&head->node.refs);
2371                 spin_unlock(&delayed_refs->lock);
2372
2373                 btrfs_release_path(path);
2374
2375                 /*
2376                  * Mutex was contended, block until it's released and let
2377                  * caller try again
2378                  */
2379                 mutex_lock(&head->mutex);
2380                 mutex_unlock(&head->mutex);
2381                 btrfs_put_delayed_ref(&head->node);
2382                 return -EAGAIN;
2383         }
2384
2385         node = rb_prev(&head->node.rb_node);
2386         if (!node)
2387                 goto out_unlock;
2388
2389         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2390
2391         if (ref->bytenr != bytenr)
2392                 goto out_unlock;
2393
2394         ret = 1;
2395         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2396                 goto out_unlock;
2397
2398         data_ref = btrfs_delayed_node_to_data_ref(ref);
2399
2400         node = rb_prev(node);
2401         if (node) {
2402                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2403                 if (ref->bytenr == bytenr)
2404                         goto out_unlock;
2405         }
2406
2407         if (data_ref->root != root->root_key.objectid ||
2408             data_ref->objectid != objectid || data_ref->offset != offset)
2409                 goto out_unlock;
2410
2411         ret = 0;
2412 out_unlock:
2413         mutex_unlock(&head->mutex);
2414 out:
2415         spin_unlock(&delayed_refs->lock);
2416         return ret;
2417 }
2418
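/*
 * check whether the committed extent tree shows this data extent as
 * shared.  returns 0 only when the extent is newer than the root's last
 * snapshot and has a single inline data ref matching this
 * root/inode/offset; returns 1 when it may be referenced elsewhere.
 */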
2419 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2420                                         struct btrfs_root *root,
2421                                         struct btrfs_path *path,
2422                                         u64 objectid, u64 offset, u64 bytenr)
2423 {
2424         struct btrfs_root *extent_root = root->fs_info->extent_root;
2425         struct extent_buffer *leaf;
2426         struct btrfs_extent_data_ref *ref;
2427         struct btrfs_extent_inline_ref *iref;
2428         struct btrfs_extent_item *ei;
2429         struct btrfs_key key;
2430         u32 item_size;
2431         int ret;
2432
2433         key.objectid = bytenr;
2434         key.offset = (u64)-1;
2435         key.type = BTRFS_EXTENT_ITEM_KEY;
2436
2437         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2438         if (ret < 0)
2439                 goto out;
2440         BUG_ON(ret == 0);
2441
2442         ret = -ENOENT;
2443         if (path->slots[0] == 0)
2444                 goto out;
2445
2446         path->slots[0]--;
2447         leaf = path->nodes[0];
2448         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2449
2450         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2451                 goto out;
2452
2453         ret = 1;
2454         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2455 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2456         if (item_size < sizeof(*ei)) {
2457                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2458                 goto out;
2459         }
2460 #endif
2461         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2462
2463         if (item_size != sizeof(*ei) +
2464             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2465                 goto out;
2466
2467         if (btrfs_extent_generation(leaf, ei) <=
2468             btrfs_root_last_snapshot(&root->root_item))
2469                 goto out;
2470
2471         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2472         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2473             BTRFS_EXTENT_DATA_REF_KEY)
2474                 goto out;
2475
2476         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2477         if (btrfs_extent_refs(leaf, ei) !=
2478             btrfs_extent_data_ref_count(leaf, ref) ||
2479             btrfs_extent_data_ref_root(leaf, ref) !=
2480             root->root_key.objectid ||
2481             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2482             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2483                 goto out;
2484
2485         ret = 0;
2486 out:
2487         return ret;
2488 }
2489
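/*
 * returns 0 when no other root, inode or pending delayed ref references
 * the data extent at bytenr, and nonzero (or an error) when a cross
 * reference may exist and the extent must be treated as shared.
 */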
2490 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2491                           struct btrfs_root *root,
2492                           u64 objectid, u64 offset, u64 bytenr)
2493 {
2494         struct btrfs_path *path;
2495         int ret;
2496         int ret2;
2497
2498         path = btrfs_alloc_path();
2499         if (!path)
                return -ENOMEM;
2501
2502         do {
2503                 ret = check_committed_ref(trans, root, path, objectid,
2504                                           offset, bytenr);
2505                 if (ret && ret != -ENOENT)
2506                         goto out;
2507
2508                 ret2 = check_delayed_ref(trans, root, path, objectid,
2509                                          offset, bytenr);
2510         } while (ret2 == -EAGAIN);
2511
2512         if (ret2 && ret2 != -ENOENT) {
2513                 ret = ret2;
2514                 goto out;
2515         }
2516
2517         if (ret != -ENOENT || ret2 != -ENOENT)
2518                 ret = 0;
2519 out:
2520         btrfs_free_path(path);
2521         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2522                 WARN_ON(ret > 0);
2523         return ret;
2524 }
2525
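/*
 * add or drop (depending on inc) one reference for every extent pointed
 * to by the tree block: file extent items in a leaf, or child block
 * pointers in a node.
 */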
2526 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2527                            struct btrfs_root *root,
2528                            struct extent_buffer *buf,
2529                            int full_backref, int inc)
2530 {
2531         u64 bytenr;
2532         u64 num_bytes;
2533         u64 parent;
2534         u64 ref_root;
2535         u32 nritems;
2536         struct btrfs_key key;
2537         struct btrfs_file_extent_item *fi;
2538         int i;
2539         int level;
2540         int ret = 0;
2541         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2542                             u64, u64, u64, u64, u64, u64);
2543
2544         ref_root = btrfs_header_owner(buf);
2545         nritems = btrfs_header_nritems(buf);
2546         level = btrfs_header_level(buf);
2547
2548         if (!root->ref_cows && level == 0)
2549                 return 0;
2550
2551         if (inc)
2552                 process_func = btrfs_inc_extent_ref;
2553         else
2554                 process_func = btrfs_free_extent;
2555
2556         if (full_backref)
2557                 parent = buf->start;
2558         else
2559                 parent = 0;
2560
2561         for (i = 0; i < nritems; i++) {
2562                 if (level == 0) {
2563                         btrfs_item_key_to_cpu(buf, &key, i);
2564                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2565                                 continue;
2566                         fi = btrfs_item_ptr(buf, i,
2567                                             struct btrfs_file_extent_item);
2568                         if (btrfs_file_extent_type(buf, fi) ==
2569                             BTRFS_FILE_EXTENT_INLINE)
2570                                 continue;
2571                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2572                         if (bytenr == 0)
2573                                 continue;
2574
2575                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2576                         key.offset -= btrfs_file_extent_offset(buf, fi);
2577                         ret = process_func(trans, root, bytenr, num_bytes,
2578                                            parent, ref_root, key.objectid,
2579                                            key.offset);
2580                         if (ret)
2581                                 goto fail;
2582                 } else {
2583                         bytenr = btrfs_node_blockptr(buf, i);
2584                         num_bytes = btrfs_level_size(root, level - 1);
2585                         ret = process_func(trans, root, bytenr, num_bytes,
2586                                            parent, ref_root, level - 1, 0);
2587                         if (ret)
2588                                 goto fail;
2589                 }
2590         }
2591         return 0;
2592 fail:
2593         BUG();
2594         return ret;
2595 }
2596
2597 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2598                   struct extent_buffer *buf, int full_backref)
2599 {
2600         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2601 }
2602
2603 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2604                   struct extent_buffer *buf, int full_backref)
2605 {
2606         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2607 }
2608
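/* write a block group's on-disk item back into the extent tree */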
2609 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2610                                  struct btrfs_root *root,
2611                                  struct btrfs_path *path,
2612                                  struct btrfs_block_group_cache *cache)
2613 {
2614         int ret;
2615         struct btrfs_root *extent_root = root->fs_info->extent_root;
2616         unsigned long bi;
2617         struct extent_buffer *leaf;
2618
2619         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2620         if (ret < 0)
2621                 goto fail;
2622         BUG_ON(ret);
2623
2624         leaf = path->nodes[0];
2625         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2626         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2627         btrfs_mark_buffer_dirty(leaf);
2628         btrfs_release_path(path);
fail:
        return ret;
}
2635
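/*
 * return the block group that follows @cache in the rbtree, taking a
 * reference on it and dropping the one held on @cache; NULL at the end.
 */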
2636 static struct btrfs_block_group_cache *
2637 next_block_group(struct btrfs_root *root,
2638                  struct btrfs_block_group_cache *cache)
2639 {
2640         struct rb_node *node;
2641         spin_lock(&root->fs_info->block_group_cache_lock);
2642         node = rb_next(&cache->cache_node);
2643         btrfs_put_block_group(cache);
2644         if (node) {
2645                 cache = rb_entry(node, struct btrfs_block_group_cache,
2646                                  cache_node);
2647                 btrfs_get_block_group(cache);
        } else {
                cache = NULL;
        }
2650         spin_unlock(&root->fs_info->block_group_cache_lock);
2651         return cache;
2652 }
2653
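/*
 * get the free space cache inode for a block group ready for this
 * transaction: create or truncate it and preallocate room for the cache
 * file, recording the result in disk_cache_state.
 */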
2654 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2655                             struct btrfs_trans_handle *trans,
2656                             struct btrfs_path *path)
2657 {
2658         struct btrfs_root *root = block_group->fs_info->tree_root;
2659         struct inode *inode = NULL;
2660         u64 alloc_hint = 0;
2661         int dcs = BTRFS_DC_ERROR;
2662         int num_pages = 0;
2663         int retries = 0;
2664         int ret = 0;
2665
        /*
         * If this block group is smaller than 100 megs, don't bother
         * caching the block group.
         */
2670         if (block_group->key.offset < (100 * 1024 * 1024)) {
2671                 spin_lock(&block_group->lock);
2672                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2673                 spin_unlock(&block_group->lock);
2674                 return 0;
2675         }
2676
2677 again:
2678         inode = lookup_free_space_inode(root, block_group, path);
2679         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2680                 ret = PTR_ERR(inode);
2681                 btrfs_release_path(path);
2682                 goto out;
2683         }
2684
2685         if (IS_ERR(inode)) {
2686                 BUG_ON(retries);
2687                 retries++;
2688
2689                 if (block_group->ro)
2690                         goto out_free;
2691
2692                 ret = create_free_space_inode(root, trans, block_group, path);
2693                 if (ret)
2694                         goto out_free;
2695                 goto again;
2696         }
2697
        /*
         * We want to set the generation to 0, so that if anything goes wrong
         * from here on out we know not to trust this cache when we load up
         * next time.
         */
2703         BTRFS_I(inode)->generation = 0;
2704         ret = btrfs_update_inode(trans, root, inode);
2705         WARN_ON(ret);
2706
2707         if (i_size_read(inode) > 0) {
2708                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2709                                                       inode);
2710                 if (ret)
2711                         goto out_put;
2712         }
2713
2714         spin_lock(&block_group->lock);
2715         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2716                 /* We're not cached, don't bother trying to write stuff out */
2717                 dcs = BTRFS_DC_WRITTEN;
2718                 spin_unlock(&block_group->lock);
2719                 goto out_put;
2720         }
2721         spin_unlock(&block_group->lock);
2722
2723         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2724         if (!num_pages)
2725                 num_pages = 1;
2726
        /*
         * Just to make absolutely sure we have enough space, we're going to
         * preallocate 16 pages worth of space for each block group.  In
         * practice we ought to use at most 8, but we need extra space so we can
         * add our header and have a terminator between the extents and the
         * bitmaps.
         */
2734         num_pages *= 16;
2735         num_pages *= PAGE_CACHE_SIZE;
2736
2737         ret = btrfs_check_data_free_space(inode, num_pages);
2738         if (ret)
2739                 goto out_put;
2740
2741         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2742                                               num_pages, num_pages,
2743                                               &alloc_hint);
2744         if (!ret)
2745                 dcs = BTRFS_DC_SETUP;
2746         btrfs_free_reserved_data_space(inode, num_pages);
2747 out_put:
2748         iput(inode);
2749 out_free:
2750         btrfs_release_path(path);
2751 out:
2752         spin_lock(&block_group->lock);
2753         block_group->disk_cache_state = dcs;
2754         spin_unlock(&block_group->lock);
2755
2756         return ret;
2757 }
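
/*
 * Worked example for the sizing above, assuming 4KB pages: a 10GB block
 * group gives num_pages = 10, and num_pages *= 16 makes that 160 pages,
 * so about 640KB is checked and preallocated for the free space cache
 * file.
 */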
2758
2759 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2760                                    struct btrfs_root *root)
2761 {
2762         struct btrfs_block_group_cache *cache;
2763         int err = 0;
2764         struct btrfs_path *path;
2765         u64 last = 0;
2766
2767         path = btrfs_alloc_path();
2768         if (!path)
2769                 return -ENOMEM;
2770
2771 again:
2772         while (1) {
2773                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2774                 while (cache) {
2775                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2776                                 break;
2777                         cache = next_block_group(root, cache);
2778                 }
2779                 if (!cache) {
2780                         if (last == 0)
2781                                 break;
2782                         last = 0;
2783                         continue;
2784                 }
2785                 err = cache_save_setup(cache, trans, path);
2786                 last = cache->key.objectid + cache->key.offset;
2787                 btrfs_put_block_group(cache);
2788         }
2789
2790         while (1) {
2791                 if (last == 0) {
2792                         err = btrfs_run_delayed_refs(trans, root,
2793                                                      (unsigned long)-1);
2794                         BUG_ON(err);
2795                 }
2796
2797                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2798                 while (cache) {
2799                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2800                                 btrfs_put_block_group(cache);
2801                                 goto again;
2802                         }
2803
2804                         if (cache->dirty)
2805                                 break;
2806                         cache = next_block_group(root, cache);
2807                 }
2808                 if (!cache) {
2809                         if (last == 0)
2810                                 break;
2811                         last = 0;
2812                         continue;
2813                 }
2814
2815                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2816                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2817                 cache->dirty = 0;
2818                 last = cache->key.objectid + cache->key.offset;
2819
2820                 err = write_one_cache_group(trans, root, path, cache);
2821                 BUG_ON(err);
2822                 btrfs_put_block_group(cache);
2823         }
2824
2825         while (1) {
2826                 /*
2827                  * This probably isn't needed, since we're just marking our
2828                  * preallocated extent as written, but running the delayed
2829                  * refs again can't hurt.
2830                  */
2831                 if (last == 0) {
2832                         err = btrfs_run_delayed_refs(trans, root,
2833                                                      (unsigned long)-1);
2834                         BUG_ON(err);
2835                 }
2836
2837                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2838                 while (cache) {
2839                         /*
2840                          * Really this shouldn't happen, but it could if we
2841                          * couldn't write the entire preallocated extent and
2842                          * splitting the extent resulted in a new block.
2843                          */
2844                         if (cache->dirty) {
2845                                 btrfs_put_block_group(cache);
2846                                 goto again;
2847                         }
2848                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2849                                 break;
2850                         cache = next_block_group(root, cache);
2851                 }
2852                 if (!cache) {
2853                         if (last == 0)
2854                                 break;
2855                         last = 0;
2856                         continue;
2857                 }
2858
2859                 btrfs_write_out_cache(root, trans, cache, path);
2860
2861                 /*
2862                  * If we didn't have an error then the cache state is still
2863                  * NEED_WRITE, so we can set it to WRITTEN.
2864                  */
2865                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2866                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
2867                 last = cache->key.objectid + cache->key.offset;
2868                 btrfs_put_block_group(cache);
2869         }
2870
2871         btrfs_free_path(path);
2872         return 0;
2873 }
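
/*
 * The three passes above walk the disk_cache_state machine: a group in
 * BTRFS_DC_CLEAR gets its cache inode set up and moves to BTRFS_DC_SETUP;
 * once its block group item is written it moves to BTRFS_DC_NEED_WRITE;
 * and after btrfs_write_out_cache() it ends at BTRFS_DC_WRITTEN.  Any
 * group that goes back to DC_CLEAR or dirty mid-walk restarts the scan
 * via the 'again' label.
 */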
2874
2875 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2876 {
2877         struct btrfs_block_group_cache *block_group;
2878         int readonly = 0;
2879
2880         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2881         if (!block_group || block_group->ro)
2882                 readonly = 1;
2883         if (block_group)
2884                 btrfs_put_block_group(block_group);
2885         return readonly;
2886 }
2887
2888 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2889                              u64 total_bytes, u64 bytes_used,
2890                              struct btrfs_space_info **space_info)
2891 {
2892         struct btrfs_space_info *found;
2893         int i;
2894         int factor;
2895
2896         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2897                      BTRFS_BLOCK_GROUP_RAID10))
2898                 factor = 2;
2899         else
2900                 factor = 1;
2901
2902         found = __find_space_info(info, flags);
2903         if (found) {
2904                 spin_lock(&found->lock);
2905                 found->total_bytes += total_bytes;
2906                 found->disk_total += total_bytes * factor;
2907                 found->bytes_used += bytes_used;
2908                 found->disk_used += bytes_used * factor;
2909                 found->full = 0;
2910                 spin_unlock(&found->lock);
2911                 *space_info = found;
2912                 return 0;
2913         }
2914         found = kzalloc(sizeof(*found), GFP_NOFS);
2915         if (!found)
2916                 return -ENOMEM;
2917
2918         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2919                 INIT_LIST_HEAD(&found->block_groups[i]);
2920         init_rwsem(&found->groups_sem);
2921         spin_lock_init(&found->lock);
2922         found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2923                                 BTRFS_BLOCK_GROUP_SYSTEM |
2924                                 BTRFS_BLOCK_GROUP_METADATA);
2925         found->total_bytes = total_bytes;
2926         found->disk_total = total_bytes * factor;
2927         found->bytes_used = bytes_used;
2928         found->disk_used = bytes_used * factor;
2929         found->bytes_pinned = 0;
2930         found->bytes_reserved = 0;
2931         found->bytes_readonly = 0;
2932         found->bytes_may_use = 0;
2933         found->full = 0;
2934         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
2935         found->chunk_alloc = 0;
2936         *space_info = found;
2937         list_add_rcu(&found->list, &info->space_info);
2938         atomic_set(&found->caching_threads, 0);
2939         return 0;
2940 }
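
/*
 * 'factor' accounts for profiles that store two copies of every byte:
 * adding a 1GB RAID1 block group, for example, bumps total_bytes by 1GB
 * but disk_total by 2GB, since the chunk occupies twice its logical
 * size on disk.
 */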
2941
2942 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2943 {
2944         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2945                                    BTRFS_BLOCK_GROUP_RAID1 |
2946                                    BTRFS_BLOCK_GROUP_RAID10 |
2947                                    BTRFS_BLOCK_GROUP_DUP);
2948         if (extra_flags) {
2949                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2950                         fs_info->avail_data_alloc_bits |= extra_flags;
2951                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2952                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2953                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2954                         fs_info->avail_system_alloc_bits |= extra_flags;
2955         }
2956 }
2957
2958 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2959 {
2960         /*
2961          * we add in the count of missing devices because we want
2962          * to make sure that any RAID levels on a degraded FS
2963          * continue to be honored.
2964          */
2965         u64 num_devices = root->fs_info->fs_devices->rw_devices +
2966                 root->fs_info->fs_devices->missing_devices;
2967
2968         if (num_devices == 1)
2969                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2970         if (num_devices < 4)
2971                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2972
2973         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2974             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2975                       BTRFS_BLOCK_GROUP_RAID10))) {
2976                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2977         }
2978
2979         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2980             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2981                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2982         }
2983
2984         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2985             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2986                       BTRFS_BLOCK_GROUP_RAID10 |
2987                       BTRFS_BLOCK_GROUP_DUP)))
2988                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2989         return flags;
2990 }
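
/*
 * For example, on a filesystem with two rw devices and none missing, a
 * requested profile of RAID10 | RAID1 is reduced to plain RAID1:
 * num_devices < 4 clears RAID10, and nothing then clears RAID1.
 */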
2991
2992 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
2993 {
2994         if (flags & BTRFS_BLOCK_GROUP_DATA)
2995                 flags |= root->fs_info->avail_data_alloc_bits &
2996                          root->fs_info->data_alloc_profile;
2997         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2998                 flags |= root->fs_info->avail_system_alloc_bits &
2999                          root->fs_info->system_alloc_profile;
3000         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3001                 flags |= root->fs_info->avail_metadata_alloc_bits &
3002                          root->fs_info->metadata_alloc_profile;
3003         return btrfs_reduce_alloc_profile(root, flags);
3004 }
3005
3006 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3007 {
3008         u64 flags;
3009
3010         if (data)
3011                 flags = BTRFS_BLOCK_GROUP_DATA;
3012         else if (root == root->fs_info->chunk_root)
3013                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3014         else
3015                 flags = BTRFS_BLOCK_GROUP_METADATA;
3016
3017         return get_alloc_profile(root, flags);
3018 }
3019
3020 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3021 {
3022         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3023                                                        BTRFS_BLOCK_GROUP_DATA);
3024 }
3025
3026 /*
3027  * Check the space info that the inode allocates from to make sure we
3028  * have enough room for 'bytes' worth of data.
3029  */
3030 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3031 {
3032         struct btrfs_space_info *data_sinfo;
3033         struct btrfs_root *root = BTRFS_I(inode)->root;
3034         u64 used;
3035         int ret = 0, committed = 0, alloc_chunk = 1;
3036
3037         /* make sure bytes are sectorsize aligned */
3038         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3039
3040         if (root == root->fs_info->tree_root ||
3041             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3042                 alloc_chunk = 0;
3043                 committed = 1;
3044         }
3045
3046         data_sinfo = BTRFS_I(inode)->space_info;
3047         if (!data_sinfo)
3048                 goto alloc;
3049
3050 again:
3051         /* make sure we have enough space to handle the data first */
3052         spin_lock(&data_sinfo->lock);
3053         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3054                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3055                 data_sinfo->bytes_may_use;
3056
3057         if (used + bytes > data_sinfo->total_bytes) {
3058                 struct btrfs_trans_handle *trans;
3059
3060                 /*
3061                  * if we don't have enough free bytes in this space then we need
3062                  * to alloc a new chunk.
3063                  */
3064                 if (!data_sinfo->full && alloc_chunk) {
3065                         u64 alloc_target;
3066
3067                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3068                         spin_unlock(&data_sinfo->lock);
3069 alloc:
3070                         alloc_target = btrfs_get_alloc_profile(root, 1);
3071                         trans = btrfs_join_transaction(root);
3072                         if (IS_ERR(trans))
3073                                 return PTR_ERR(trans);
3074
3075                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3076                                              bytes + 2 * 1024 * 1024,
3077                                              alloc_target,
3078                                              CHUNK_ALLOC_NO_FORCE);
3079                         btrfs_end_transaction(trans, root);
3080                         if (ret < 0) {
3081                                 if (ret != -ENOSPC)
3082                                         return ret;
3083                                 else
3084                                         goto commit_trans;
3085                         }
3086
3087                         if (!data_sinfo) {
3088                                 btrfs_set_inode_space_info(root, inode);
3089                                 data_sinfo = BTRFS_I(inode)->space_info;
3090                         }
3091                         goto again;
3092                 }
3093                 spin_unlock(&data_sinfo->lock);
3094
3095                 /* commit the current transaction and try again */
3096 commit_trans:
3097                 if (!committed &&
3098                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3099                         committed = 1;
3100                         trans = btrfs_join_transaction(root);
3101                         if (IS_ERR(trans))
3102                                 return PTR_ERR(trans);
3103                         ret = btrfs_commit_transaction(trans, root);
3104                         if (ret)
3105                                 return ret;
3106                         goto again;
3107                 }
3108
3109                 return -ENOSPC;
3110         }
3111         data_sinfo->bytes_may_use += bytes;
3112         BTRFS_I(inode)->reserved_bytes += bytes;
3113         spin_unlock(&data_sinfo->lock);
3114
3115         return 0;
3116 }
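
/*
 * A typical caller pairs this with btrfs_free_reserved_data_space() when
 * the write doesn't go through.  Illustrative sketch, where do_write()
 * stands in for the caller's real work and is not a function in this
 * file:
 *
 *	ret = btrfs_check_data_free_space(inode, len);
 *	if (ret)
 *		return ret;
 *	ret = do_write(inode, len);
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, len);
 */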
3117
3118 /*
3119  * Called when we are clearing a delalloc extent from the inode's
3120  * io_tree, or when an error occurred for whatever reason after
3121  * calling btrfs_check_data_free_space().
3122  */
3123 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3124 {
3125         struct btrfs_root *root = BTRFS_I(inode)->root;
3126         struct btrfs_space_info *data_sinfo;
3127
3128         /* make sure bytes are sectorsize aligned */
3129         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3130
3131         data_sinfo = BTRFS_I(inode)->space_info;
3132         spin_lock(&data_sinfo->lock);
3133         data_sinfo->bytes_may_use -= bytes;
3134         BTRFS_I(inode)->reserved_bytes -= bytes;
3135         spin_unlock(&data_sinfo->lock);
3136 }
3137
3138 static void force_metadata_allocation(struct btrfs_fs_info *info)
3139 {
3140         struct list_head *head = &info->space_info;
3141         struct btrfs_space_info *found;
3142
3143         rcu_read_lock();
3144         list_for_each_entry_rcu(found, head, list) {
3145                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3146                         found->force_alloc = CHUNK_ALLOC_FORCE;
3147         }
3148         rcu_read_unlock();
3149 }
3150
3151 static int should_alloc_chunk(struct btrfs_root *root,
3152                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3153                               int force)
3154 {
3155         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3156         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3157         u64 thresh;
3158
3159         if (force == CHUNK_ALLOC_FORCE)
3160                 return 1;
3161
3162         /*
3163          * in limited mode, we want to have some free space up to
3164          * about 1% of the FS size.
3165          */
3166         if (force == CHUNK_ALLOC_LIMITED) {
3167                 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3168                 thresh = max_t(u64, 64 * 1024 * 1024,
3169                                div_factor_fine(thresh, 1));
3170
3171                 if (num_bytes - num_allocated < thresh)
3172                         return 1;
3173         }
3174
3175         /*
3176          * We have two similar checks here, one based on a percentage
3177          * and one based on a hard number of 256MB.  The idea is
3178          * that if we have a good amount of free room, don't
3179          * allocate a chunk: "good" means the chunks we have
3180          * allocated are less than 80% utilized, or more than
3181          * 256MB is free.
3182          */
3183         if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3184                 return 0;
3185
3186         if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
3187                 return 0;
3188
3189         thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3190
3191         /* 256MB or 5% of the FS */
3192         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3193
3194         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3195                 return 0;
3196         return 1;
3197 }
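
/*
 * Worked example: on a 100GB filesystem, CHUNK_ALLOC_LIMITED allows an
 * allocation while the free room in this space_info is below
 * max(64MB, 1% of 100GB) = 1GB.  Beyond the limited check, a chunk is
 * refused while the allocation would still leave more than 256MB free
 * or keep the allocated chunks under 80% utilized.
 */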
3198
3199 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3200                           struct btrfs_root *extent_root, u64 alloc_bytes,
3201                           u64 flags, int force)
3202 {
3203         struct btrfs_space_info *space_info;
3204         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3205         int wait_for_alloc = 0;
3206         int ret = 0;
3207
3208         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3209
3210         space_info = __find_space_info(extent_root->fs_info, flags);
3211         if (!space_info) {
3212                 ret = update_space_info(extent_root->fs_info, flags,
3213                                         0, 0, &space_info);
3214                 BUG_ON(ret);
3215         }
3216         BUG_ON(!space_info);
3217
3218 again:
3219         spin_lock(&space_info->lock);
3220         if (space_info->force_alloc)
3221                 force = space_info->force_alloc;
3222         if (space_info->full) {
3223                 spin_unlock(&space_info->lock);
3224                 return 0;
3225         }
3226
3227         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3228                 spin_unlock(&space_info->lock);
3229                 return 0;
3230         } else if (space_info->chunk_alloc) {
3231                 wait_for_alloc = 1;
3232         } else {
3233                 space_info->chunk_alloc = 1;
3234         }
3235
3236         spin_unlock(&space_info->lock);
3237
3238         mutex_lock(&fs_info->chunk_mutex);
3239
3240         /*
3241          * The chunk_mutex is held throughout the entirety of a chunk
3242          * allocation, so once we've acquired the chunk_mutex we know that the
3243          * other guy is done and we need to recheck and see if we should
3244          * allocate.
3245          */
3246         if (wait_for_alloc) {
3247                 mutex_unlock(&fs_info->chunk_mutex);
3248                 wait_for_alloc = 0;
3249                 goto again;
3250         }
3251
3252         /*
3253          * If we have mixed data/metadata chunks we want to make sure we keep
3254          * allocating mixed chunks instead of individual chunks.
3255          */
3256         if (btrfs_mixed_space_info(space_info))
3257                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3258
3259         /*
3260          * if we're doing a data chunk, go ahead and make sure that
3261          * we keep a reasonable number of metadata chunks allocated in the
3262          * FS as well.
3263          */
3264         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3265                 fs_info->data_chunk_allocations++;
3266                 if (!(fs_info->data_chunk_allocations %
3267                       fs_info->metadata_ratio))
3268                         force_metadata_allocation(fs_info);
3269         }
3270
3271         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3272         spin_lock(&space_info->lock);
3273         if (ret)
3274                 space_info->full = 1;
3275         else
3276                 ret = 1;
3277
3278         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3279         space_info->chunk_alloc = 0;
3280         spin_unlock(&space_info->lock);
3281         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3282         return ret;
3283 }
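
/*
 * Note the return convention above: 0 means no chunk was needed or the
 * space was already full, 1 means a chunk was allocated, and a negative
 * error from btrfs_alloc_chunk() is passed through after the space_info
 * is marked full.
 */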
3284
3285 /*
3286  * shrink metadata reservation for delalloc
3287  */
3288 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3289                            struct btrfs_root *root, u64 to_reclaim, int sync)
3290 {
3291         struct btrfs_block_rsv *block_rsv;
3292         struct btrfs_space_info *space_info;
3293         u64 reserved;
3294         u64 max_reclaim;
3295         u64 reclaimed = 0;
3296         long time_left;
3297         int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3298         int loops = 0;
3299         unsigned long progress;
3300
3301         block_rsv = &root->fs_info->delalloc_block_rsv;
3302         space_info = block_rsv->space_info;
3303
3304         smp_mb();
3305         reserved = space_info->bytes_reserved;
3306         progress = space_info->reservation_progress;
3307
3308         if (reserved == 0)
3309                 return 0;
3310
3311         /* nothing to shrink - nothing to reclaim */
3312         if (root->fs_info->delalloc_bytes == 0)
3313                 return 0;
3314
3315         max_reclaim = min(reserved, to_reclaim);
3316
3317         while (loops < 1024) {
3318                 /* have the flusher threads jump in and do some IO */
3319                 smp_mb();
3320                 nr_pages = min_t(unsigned long, nr_pages,
3321                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3322                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3323
3324                 spin_lock(&space_info->lock);
3325                 if (reserved > space_info->bytes_reserved)
3326                         reclaimed += reserved - space_info->bytes_reserved;
3327                 reserved = space_info->bytes_reserved;
3328                 spin_unlock(&space_info->lock);
3329
3330                 loops++;
3331
3332                 if (reserved == 0 || reclaimed >= max_reclaim)
3333                         break;
3334
3335                 if (trans && trans->transaction->blocked)
3336                         return -EAGAIN;
3337
3338                 time_left = schedule_timeout_interruptible(1);
3339
3340                 /* We were interrupted, exit */
3341                 if (time_left)
3342                         break;
3343
3344                 /* We've kicked the IO a few times; if anything has been freed,
3345                  * exit.  There is no sense in looping here for a long time
3346                  * when we really need to commit the transaction, or there are
3347                  * just too many writers without enough free space.
3348                  */
3349
3350                 if (loops > 3) {
3351                         smp_mb();
3352                         if (progress != space_info->reservation_progress)
3353                                 break;
3354                 }
3355
3356         }
3357         return reclaimed >= to_reclaim;
3358 }
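
/*
 * The loop above is deliberately bounded: it nudges the flusher threads,
 * sleeps a jiffy, and gives up after 1024 iterations, when interrupted,
 * or (after a few rounds) once no reservation progress is being made.
 * It returns 1 only if at least to_reclaim bytes were seen to be
 * released, and -EAGAIN if the transaction becomes blocked.
 */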
3359
3360 /*
3361  * Reserve orig_bytes of metadata from block_rsv's space_info.  On the
3362  * first attempt we add orig_bytes to the reserved count even if we
3363  * can't make the full reservation, in order to hold our place while we
3364  * go and try to free up space.  On later attempts we only check whether
3365  * the amount of unused space is now >= the amount we need, meaning the
3366  * reservation we are holding has become valid.
3367  *
3368  * If 'flush' is set, we try to reclaim space by flushing delalloc and,
3369  * as a last resort, by committing the transaction.
3370  */
3371 static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
3372                                   struct btrfs_root *root,
3373                                   struct btrfs_block_rsv *block_rsv,
3374                                   u64 orig_bytes, int flush)
3375 {
3376         struct btrfs_space_info *space_info = block_rsv->space_info;
3377         u64 unused;
3378         u64 num_bytes = orig_bytes;
3379         int retries = 0;
3380         int ret = 0;
3381         bool reserved = false;
3382         bool committed = false;
3383
3384 again:
3385         ret = -ENOSPC;
3386         if (reserved)
3387                 num_bytes = 0;
3388
3389         spin_lock(&space_info->lock);
3390         unused = space_info->bytes_used + space_info->bytes_reserved +
3391                  space_info->bytes_pinned + space_info->bytes_readonly +
3392                  space_info->bytes_may_use;
3393
3394         /*
3395          * The idea here is that if we've not already over-reserved the space
3396          * then we can go ahead and save our reservation first, and then start
3397          * flushing if we need to.  Otherwise, if we've already overcommitted,
3398          * let's start flushing stuff first and then come back and try to make
3399          * our reservation.
3400          */
3401         if (unused <= space_info->total_bytes) {
3402                 unused = space_info->total_bytes - unused;
3403                 if (unused >= num_bytes) {
3404                         if (!reserved)
3405                                 space_info->bytes_reserved += orig_bytes;
3406                         ret = 0;
3407                 } else {
3408                         /*
3409                          * Ok, set num_bytes to orig_bytes since we aren't
3410                          * overcommitted, this way we only try and reclaim what
3411                          * we need.
3412                          */
3413                         num_bytes = orig_bytes;
3414                 }
3415         } else {
3416                 /*
3417                  * Ok, we're overcommitted; set num_bytes to the overcommitted
3418                  * amount plus the amount of bytes that we need for this
3419                  * reservation.
3420                  */
3421                 num_bytes = unused - space_info->total_bytes +
3422                         (orig_bytes * (retries + 1));
3423         }
3424
3425         /*
3426          * Couldn't make our reservation; save our place so that, while we're
3427          * trying to reclaim space, we can actually use it instead of somebody
3428          * else stealing it from us.
3429          */
3430         if (ret && !reserved) {
3431                 space_info->bytes_reserved += orig_bytes;
3432                 reserved = true;
3433         }
3434
3435         spin_unlock(&space_info->lock);
3436
3437         if (!ret)
3438                 return 0;
3439
3440         if (!flush)
3441                 goto out;
3442
3443         /*
3444          * We do synchronous shrinking since we don't actually unreserve
3445          * metadata until after the IO is completed.
3446          */
3447         ret = shrink_delalloc(trans, root, num_bytes, 1);
3448         if (ret > 0)
3449                 return 0;
3450         else if (ret < 0)
3451                 goto out;
3452
3453         /*
3454          * So if we were overcommitted it's possible that somebody else flushed
3455          * out enough space and we simply didn't have enough space to reclaim,
3456          * so go back around and try again.
3457          */
3458         if (retries < 2) {
3459                 retries++;
3460                 goto again;
3461         }
3462
3463         spin_lock(&space_info->lock);
3464         /*
3465          * Not enough space to be reclaimed, don't bother committing the
3466          * transaction.
3467          */
3468         if (space_info->bytes_pinned < orig_bytes)
3469                 ret = -ENOSPC;
3470         spin_unlock(&space_info->lock);
3471         if (ret)
3472                 goto out;
3473
3474         ret = -EAGAIN;
3475         if (trans || committed)
3476                 goto out;
3477
3478         ret = -ENOSPC;
3479         trans = btrfs_join_transaction(root);
3480         if (IS_ERR(trans))
3481                 goto out;
3482         ret = btrfs_commit_transaction(trans, root);
3483         if (!ret) {
3484                 trans = NULL;
3485                 committed = true;
3486                 goto again;
3487         }
3488
3489 out:
3490         if (reserved) {
3491                 spin_lock(&space_info->lock);
3492                 space_info->bytes_reserved -= orig_bytes;
3493                 spin_unlock(&space_info->lock);
3494         }
3495
3496         return ret;
3497 }
3498
3499 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3500                                              struct btrfs_root *root)
3501 {
3502         struct btrfs_block_rsv *block_rsv;
3503         if (root->ref_cows)
3504                 block_rsv = trans->block_rsv;
3505         else
3506                 block_rsv = root->block_rsv;
3507
3508         if (!block_rsv)
3509                 block_rsv = &root->fs_info->empty_block_rsv;
3510
3511         return block_rsv;
3512 }
3513
3514 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3515                                u64 num_bytes)
3516 {
3517         int ret = -ENOSPC;
3518         spin_lock(&block_rsv->lock);
3519         if (block_rsv->reserved >= num_bytes) {
3520                 block_rsv->reserved -= num_bytes;
3521                 if (block_rsv->reserved < block_rsv->size)
3522                         block_rsv->full = 0;
3523                 ret = 0;
3524         }
3525         spin_unlock(&block_rsv->lock);
3526         return ret;
3527 }
3528
3529 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3530                                 u64 num_bytes, int update_size)
3531 {
3532         spin_lock(&block_rsv->lock);
3533         block_rsv->reserved += num_bytes;
3534         if (update_size)
3535                 block_rsv->size += num_bytes;
3536         else if (block_rsv->reserved >= block_rsv->size)
3537                 block_rsv->full = 1;
3538         spin_unlock(&block_rsv->lock);
3539 }
3540
3541 static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3542                                     struct btrfs_block_rsv *dest, u64 num_bytes)
3543 {
3544         struct btrfs_space_info *space_info = block_rsv->space_info;
3545
3546         spin_lock(&block_rsv->lock);
3547         if (num_bytes == (u64)-1)
3548                 num_bytes = block_rsv->size;
3549         block_rsv->size -= num_bytes;
3550         if (block_rsv->reserved >= block_rsv->size) {
3551                 num_bytes = block_rsv->reserved - block_rsv->size;
3552                 block_rsv->reserved = block_rsv->size;
3553                 block_rsv->full = 1;
3554         } else {
3555                 num_bytes = 0;
3556         }
3557         spin_unlock(&block_rsv->lock);
3558
3559         if (num_bytes > 0) {
3560                 if (dest) {
3561                         spin_lock(&dest->lock);
3562                         if (!dest->full) {
3563                                 u64 bytes_to_add;
3564
3565                                 bytes_to_add = dest->size - dest->reserved;
3566                                 bytes_to_add = min(num_bytes, bytes_to_add);
3567                                 dest->reserved += bytes_to_add;
3568                                 if (dest->reserved >= dest->size)
3569                                         dest->full = 1;
3570                                 num_bytes -= bytes_to_add;
3571                         }
3572                         spin_unlock(&dest->lock);
3573                 }
3574                 if (num_bytes) {
3575                         spin_lock(&space_info->lock);
3576                         space_info->bytes_reserved -= num_bytes;
3577                         space_info->reservation_progress++;
3578                         spin_unlock(&space_info->lock);
3579                 }
3580         }
3581 }
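
/*
 * Worked example: releasing a block_rsv with size = 4MB and
 * reserved = 6MB using num_bytes = (u64)-1 first drops size to 0,
 * leaving 6MB of excess.  The excess tops up 'dest' (the global
 * reservation for most callers) until it is full, and whatever remains
 * goes back to the space_info's bytes_reserved.
 */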
3582
3583 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3584                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3585 {
3586         int ret;
3587
3588         ret = block_rsv_use_bytes(src, num_bytes);
3589         if (ret)
3590                 return ret;
3591
3592         block_rsv_add_bytes(dst, num_bytes, 1);
3593         return 0;
3594 }
3595
3596 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3597 {
3598         memset(rsv, 0, sizeof(*rsv));
3599         spin_lock_init(&rsv->lock);
3600         atomic_set(&rsv->usage, 1);
3601         rsv->priority = 6;
3602         INIT_LIST_HEAD(&rsv->list);
3603 }
3604
3605 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3606 {
3607         struct btrfs_block_rsv *block_rsv;
3608         struct btrfs_fs_info *fs_info = root->fs_info;
3609
3610         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3611         if (!block_rsv)
3612                 return NULL;
3613
3614         btrfs_init_block_rsv(block_rsv);
3615         block_rsv->space_info = __find_space_info(fs_info,
3616                                                   BTRFS_BLOCK_GROUP_METADATA);
3617         return block_rsv;
3618 }
3619
3620 void btrfs_free_block_rsv(struct btrfs_root *root,
3621                           struct btrfs_block_rsv *rsv)
3622 {
3623         if (rsv && atomic_dec_and_test(&rsv->usage)) {
3624                 btrfs_block_rsv_release(root, rsv, (u64)-1);
3625                 if (!rsv->durable)
3626                         kfree(rsv);
3627         }
3628 }
3629
3630 /*
3631  * Make the block_rsv struct able to capture freed space.
3632  * The captured space will be re-added to the block_rsv struct
3633  * after the transaction commits.
3634  */
3635 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3636                                  struct btrfs_block_rsv *block_rsv)
3637 {
3638         block_rsv->durable = 1;
3639         mutex_lock(&fs_info->durable_block_rsv_mutex);
3640         list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3641         mutex_unlock(&fs_info->durable_block_rsv_mutex);
3642 }
3643
3644 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3645                         struct btrfs_root *root,
3646                         struct btrfs_block_rsv *block_rsv,
3647                         u64 num_bytes)
3648 {
3649         int ret;
3650
3651         if (num_bytes == 0)
3652                 return 0;
3653
3654         ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
3655         if (!ret) {
3656                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3657                 return 0;
3658         }
3659
3660         return ret;
3661 }
3662
3663 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3664                           struct btrfs_root *root,
3665                           struct btrfs_block_rsv *block_rsv,
3666                           u64 min_reserved, int min_factor)
3667 {
3668         u64 num_bytes = 0;
3669         int commit_trans = 0;
3670         int ret = -ENOSPC;
3671
3672         if (!block_rsv)
3673                 return 0;
3674
3675         spin_lock(&block_rsv->lock);
3676         if (min_factor > 0)
3677                 num_bytes = div_factor(block_rsv->size, min_factor);
3678         if (min_reserved > num_bytes)
3679                 num_bytes = min_reserved;
3680
3681         if (block_rsv->reserved >= num_bytes) {
3682                 ret = 0;
3683         } else {
3684                 num_bytes -= block_rsv->reserved;
3685                 if (block_rsv->durable &&
3686                     block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3687                         commit_trans = 1;
3688         }
3689         spin_unlock(&block_rsv->lock);
3690         if (!ret)
3691                 return 0;
3692
3693         if (block_rsv->refill_used) {
3694                 ret = reserve_metadata_bytes(trans, root, block_rsv,
3695                                              num_bytes, 0);
3696                 if (!ret) {
3697                         block_rsv_add_bytes(block_rsv, num_bytes, 0);
3698                         return 0;
3699                 }
3700         }
3701
3702         if (commit_trans) {
3703                 if (trans)
3704                         return -EAGAIN;
3705
3706                 trans = btrfs_join_transaction(root);
3707                 BUG_ON(IS_ERR(trans));
3708                 ret = btrfs_commit_transaction(trans, root);
3709                 return 0;
3710         }
3711
3712         return -ENOSPC;
3713 }
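
/*
 * min_factor is in tenths of the reservation size, so a call like
 * btrfs_block_rsv_check(trans, root, rsv, 0, 5) succeeds immediately if
 * at least half of rsv->size is still reserved; otherwise it tries to
 * refill the rsv and, for durable rsvs with enough freed bytes pending,
 * falls back to committing the transaction (or returns -EAGAIN when a
 * handle is already held).
 */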
3714
3715 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3716                             struct btrfs_block_rsv *dst_rsv,
3717                             u64 num_bytes)
3718 {
3719         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3720 }
3721
3722 void btrfs_block_rsv_release(struct btrfs_root *root,
3723                              struct btrfs_block_rsv *block_rsv,
3724                              u64 num_bytes)
3725 {
3726         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3727         if (global_rsv->full || global_rsv == block_rsv ||
3728             block_rsv->space_info != global_rsv->space_info)
3729                 global_rsv = NULL;
3730         block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3731 }
3732
3733 /*
3734  * Helper to calculate the size of the global block reservation.
3735  * The desired value is the sum of the space used by the extent tree,
3736  * the checksum tree and the root tree.
3737  */
3738 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3739 {
3740         struct btrfs_space_info *sinfo;
3741         u64 num_bytes;
3742         u64 meta_used;
3743         u64 data_used;
3744         int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3745
3746         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3747         spin_lock(&sinfo->lock);
3748         data_used = sinfo->bytes_used;
3749         spin_unlock(&sinfo->lock);
3750
3751         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3752         spin_lock(&sinfo->lock);
3753         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3754                 data_used = 0;
3755         meta_used = sinfo->bytes_used;
3756         spin_unlock(&sinfo->lock);
3757
3758         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3759                     csum_size * 2;
3760         num_bytes += div64_u64(data_used + meta_used, 50);
3761
3762         if (num_bytes * 3 > meta_used)
3763                 num_bytes = div64_u64(meta_used, 3);
3764
3765         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3766 }
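
/*
 * Worked example, assuming 4KB blocks and 4-byte crc32c checksums: with
 * 100GB of data used, the csum term is (100GB / 4KB) * 4 * 2 = 200MB,
 * plus 2% of the data and metadata used, and the result is clamped to
 * at most a third of the used metadata before being rounded up to a
 * multiple of leafsize << 10.
 */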
3767
3768 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3769 {
3770         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3771         struct btrfs_space_info *sinfo = block_rsv->space_info;
3772         u64 num_bytes;
3773
3774         num_bytes = calc_global_metadata_size(fs_info);
3775
3776         spin_lock(&block_rsv->lock);
3777         spin_lock(&sinfo->lock);
3778
3779         block_rsv->size = num_bytes;
3780
3781         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3782                     sinfo->bytes_reserved + sinfo->bytes_readonly +
3783                     sinfo->bytes_may_use;
3784
3785         if (sinfo->total_bytes > num_bytes) {
3786                 num_bytes = sinfo->total_bytes - num_bytes;
3787                 block_rsv->reserved += num_bytes;
3788                 sinfo->bytes_reserved += num_bytes;
3789         }
3790
3791         if (block_rsv->reserved >= block_rsv->size) {
3792                 num_bytes = block_rsv->reserved - block_rsv->size;
3793                 sinfo->bytes_reserved -= num_bytes;
3794                 sinfo->reservation_progress++;
3795                 block_rsv->reserved = block_rsv->size;
3796                 block_rsv->full = 1;
3797         }
3798
3799         spin_unlock(&sinfo->lock);
3800         spin_unlock(&block_rsv->lock);
3801 }
3802
3803 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3804 {
3805         struct btrfs_space_info *space_info;
3806
3807         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3808         fs_info->chunk_block_rsv.space_info = space_info;
3809         fs_info->chunk_block_rsv.priority = 10;
3810
3811         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3812         fs_info->global_block_rsv.space_info = space_info;
3813         fs_info->global_block_rsv.priority = 10;
3814         fs_info->global_block_rsv.refill_used = 1;
3815         fs_info->delalloc_block_rsv.space_info = space_info;
3816         fs_info->trans_block_rsv.space_info = space_info;
3817         fs_info->empty_block_rsv.space_info = space_info;
3818         fs_info->empty_block_rsv.priority = 10;
3819
3820         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3821         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3822         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3823         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3824         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3825
3826         btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3827
3828         btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3829
3830         update_global_block_rsv(fs_info);
3831 }
3832
3833 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3834 {
3835         block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3836         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3837         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3838         WARN_ON(fs_info->trans_block_rsv.size > 0);
3839         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3840         WARN_ON(fs_info->chunk_block_rsv.size > 0);
3841         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3842 }
3843
3844 int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
3845                                     struct btrfs_root *root,
3846                                     struct btrfs_block_rsv *rsv)
3847 {
3848         struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
3849         u64 num_bytes;
3850         int ret;
3851
3852         /*
3853          * Truncate should be freeing data, but give us 2 items just in case it
3854          * needs to use some space.  We may want to be smarter about this in the
3855          * future.
3856          */
3857         num_bytes = btrfs_calc_trans_metadata_size(root, 2);
3858
3859         /* We already have enough bytes, just return */
3860         if (rsv->reserved >= num_bytes)
3861                 return 0;
3862
3863         num_bytes -= rsv->reserved;
3864
3865         /*
3866          * You should have reserved enough space beforehand to do this, so this
3867          * should not fail.
3868          */
3869         ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
3870         BUG_ON(ret);
3871
3872         return 0;
3873 }
3874
3875 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3876                                  struct btrfs_root *root,
3877                                  int num_items)
3878 {
3879         u64 num_bytes;
3880         int ret;
3881
3882         if (num_items == 0 || root->fs_info->chunk_root == root)
3883                 return 0;
3884
3885         num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
3886         ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3887                                   num_bytes);
3888         if (!ret) {
3889                 trans->bytes_reserved += num_bytes;
3890                 trans->block_rsv = &root->fs_info->trans_block_rsv;
3891         }
3892         return ret;
3893 }
3894
3895 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3896                                   struct btrfs_root *root)
3897 {
3898         if (!trans->bytes_reserved)
3899                 return;
3900
3901         BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3902         btrfs_block_rsv_release(root, trans->block_rsv,
3903                                 trans->bytes_reserved);
3904         trans->bytes_reserved = 0;
3905 }
3906
3907 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3908                                   struct inode *inode)
3909 {
3910         struct btrfs_root *root = BTRFS_I(inode)->root;
3911         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3912         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3913
3914         /*
3915          * We need to hold space in order to delete our orphan item once we've
3916          * added it, so take the reservation here and release it later, when
3917          * we are truly done with the orphan item.
3918          */
3919         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3920         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3921 }
3922
3923 void btrfs_orphan_release_metadata(struct inode *inode)
3924 {
3925         struct btrfs_root *root = BTRFS_I(inode)->root;
3926         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3927         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3928 }
3929
3930 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3931                                 struct btrfs_pending_snapshot *pending)
3932 {
3933         struct btrfs_root *root = pending->root;
3934         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3935         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3936         /*
3937          * Two for root back/forward refs, two for directory entries
3938          * and one for the root of the snapshot.
3939          */
3940         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3941         dst_rsv->space_info = src_rsv->space_info;
3942         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3943 }
3944
3945 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3946 {
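        /* roughly 1/8th of the data size is set aside for csum metadata */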
3947         return num_bytes >> 3;
3948 }
3949
3950 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3951 {
3952         struct btrfs_root *root = BTRFS_I(inode)->root;
3953         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3954         u64 to_reserve;
3955         int nr_extents;
3956         int reserved_extents;
3957         int ret;
3958
3959         if (btrfs_transaction_in_commit(root->fs_info))
3960                 schedule_timeout(1);
3961
3962         num_bytes = ALIGN(num_bytes, root->sectorsize);
3963
3964         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3965         reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
3966
3967         if (nr_extents > reserved_extents) {
3968                 nr_extents -= reserved_extents;
3969                 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
3970         } else {
3971                 nr_extents = 0;
3972                 to_reserve = 0;
3973         }
3974
3975         to_reserve += calc_csum_metadata_size(inode, num_bytes);
3976         ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
3977         if (ret)
3978                 return ret;
3979
3980         atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
3981         atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3982
3983         block_rsv_add_bytes(block_rsv, to_reserve, 1);
3984
3985         if (block_rsv->size > 512 * 1024 * 1024)
3986                 shrink_delalloc(NULL, root, to_reserve, 0);
3987
3988         return 0;
3989 }
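
/*
 * For example, an inode with 3 outstanding extents and 2 already
 * reserved reserves metadata for (3 + 1) - 2 = 2 more extents here, on
 * top of the per-byte csum reservation from calc_csum_metadata_size().
 */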
3990
3991 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3992 {
3993         struct btrfs_root *root = BTRFS_I(inode)->root;
3994         u64 to_free;
3995         int nr_extents;
3996         int reserved_extents;
3997
3998         num_bytes = ALIGN(num_bytes, root->sectorsize);
3999         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4000         WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
4001
4002         reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4003         do {
4004                 int old, new;
4005
4006                 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4007                 if (nr_extents >= reserved_extents) {
4008                         nr_extents = 0;
4009                         break;
4010                 }
4011                 old = reserved_extents;
4012                 nr_extents = reserved_extents - nr_extents;
4013                 new = reserved_extents - nr_extents;
4014                 old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
4015                                      reserved_extents, new);
4016                 if (likely(old == reserved_extents))
4017                         break;
4018                 reserved_extents = old;
4019         } while (1);
4020
4021         to_free = calc_csum_metadata_size(inode, num_bytes);
4022         if (nr_extents > 0)
4023                 to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
4024
4025         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4026                                 to_free);
4027 }
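
/*
 * The cmpxchg loop above shrinks reserved_extents toward
 * outstanding_extents without taking a lock: it recomputes the delta,
 * attempts the swap, and retries whenever another task changed
 * reserved_extents in the meantime.  nr_extents ends up as the number
 * of extent reservations actually dropped, which is converted to bytes
 * with btrfs_calc_trans_metadata_size().
 */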
4028
4029 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4030 {
4031         int ret;
4032
4033         ret = btrfs_check_data_free_space(inode, num_bytes);
4034         if (ret)
4035                 return ret;
4036
4037         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4038         if (ret) {
4039                 btrfs_free_reserved_data_space(inode, num_bytes);
4040                 return ret;
4041         }
4042
4043         return 0;
4044 }
4045
4046 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4047 {
4048         btrfs_delalloc_release_metadata(inode, num_bytes);
4049         btrfs_free_reserved_data_space(inode, num_bytes);
4050 }
4051
4052 static int update_block_group(struct btrfs_trans_handle *trans,
4053                               struct btrfs_root *root,
4054                               u64 bytenr, u64 num_bytes, int alloc)
4055 {
4056         struct btrfs_block_group_cache *cache = NULL;
4057         struct btrfs_fs_info *info = root->fs_info;
4058         u64 total = num_bytes;
4059         u64 old_val;
4060         u64 byte_in_group;
4061         int factor;
4062
4063         /* block accounting for super block */
4064         spin_lock(&info->delalloc_lock);
4065         old_val = btrfs_super_bytes_used(&info->super_copy);
4066         if (alloc)
4067                 old_val += num_bytes;
4068         else
4069                 old_val -= num_bytes;
4070         btrfs_set_super_bytes_used(&info->super_copy, old_val);
4071         spin_unlock(&info->delalloc_lock);
4072
4073         while (total) {
4074                 cache = btrfs_lookup_block_group(info, bytenr);
4075                 if (!cache)
4076                         return -1;
4077                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4078                                     BTRFS_BLOCK_GROUP_RAID1 |
4079                                     BTRFS_BLOCK_GROUP_RAID10))
4080                         factor = 2;
4081                 else
4082                         factor = 1;
4083                 /*
4084                  * If this block group has free space cache written out, we
4085                  * need to make sure to load it if we are removing space.  This
4086                  * is because we need the unpinning stage to actually add the
4087                  * space back to the block group, otherwise we will leak space.
4088                  */
4089                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4090                         cache_block_group(cache, trans, NULL, 1);
4091
4092                 byte_in_group = bytenr - cache->key.objectid;
4093                 WARN_ON(byte_in_group > cache->key.offset);
4094
4095                 spin_lock(&cache->space_info->lock);
4096                 spin_lock(&cache->lock);
4097
4098                 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4099                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4100                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4101
4102                 cache->dirty = 1;
4103                 old_val = btrfs_block_group_used(&cache->item);
4104                 num_bytes = min(total, cache->key.offset - byte_in_group);
4105                 if (alloc) {
4106                         old_val += num_bytes;
4107                         btrfs_set_block_group_used(&cache->item, old_val);
4108                         cache->reserved -= num_bytes;
4109                         cache->space_info->bytes_reserved -= num_bytes;
4110                         cache->space_info->reservation_progress++;
4111                         cache->space_info->bytes_used += num_bytes;
4112                         cache->space_info->disk_used += num_bytes * factor;
4113                         spin_unlock(&cache->lock);
4114                         spin_unlock(&cache->space_info->lock);
4115                 } else {
4116                         old_val -= num_bytes;
4117                         btrfs_set_block_group_used(&cache->item, old_val);
4118                         cache->pinned += num_bytes;
4119                         cache->space_info->bytes_pinned += num_bytes;
4120                         cache->space_info->bytes_used -= num_bytes;
4121                         cache->space_info->disk_used -= num_bytes * factor;
4122                         spin_unlock(&cache->lock);
4123                         spin_unlock(&cache->space_info->lock);
4124
4125                         set_extent_dirty(info->pinned_extents,
4126                                          bytenr, bytenr + num_bytes - 1,
4127                                          GFP_NOFS | __GFP_NOFAIL);
4128                 }
4129                 btrfs_put_block_group(cache);
4130                 total -= num_bytes;
4131                 bytenr += num_bytes;
4132         }
4133         return 0;
4134 }
4135
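     /*
      * return the logical start of the first block group at or after
      * search_start, or 0 if there is none
      */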
4136 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4137 {
4138         struct btrfs_block_group_cache *cache;
4139         u64 bytenr;
4140
4141         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4142         if (!cache)
4143                 return 0;
4144
4145         bytenr = cache->key.objectid;
4146         btrfs_put_block_group(cache);
4147
4148         return bytenr;
4149 }
4150
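     /*
      * account num_bytes at bytenr as pinned in the block group and its
      * space_info (optionally dropping an outstanding reservation), then
      * mark the range dirty in the pinned_extents tree so it is given
      * back at transaction commit
      */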
4151 static int pin_down_extent(struct btrfs_root *root,
4152                            struct btrfs_block_group_cache *cache,
4153                            u64 bytenr, u64 num_bytes, int reserved)
4154 {
4155         spin_lock(&cache->space_info->lock);
4156         spin_lock(&cache->lock);
4157         cache->pinned += num_bytes;
4158         cache->space_info->bytes_pinned += num_bytes;
4159         if (reserved) {
4160                 cache->reserved -= num_bytes;
4161                 cache->space_info->bytes_reserved -= num_bytes;
4162                 cache->space_info->reservation_progress++;
4163         }
4164         spin_unlock(&cache->lock);
4165         spin_unlock(&cache->space_info->lock);
4166
4167         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4168                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4169         return 0;
4170 }
4171
4172 /*
4173  * this function must be called within a transaction
4174  */
4175 int btrfs_pin_extent(struct btrfs_root *root,
4176                      u64 bytenr, u64 num_bytes, int reserved)
4177 {
4178         struct btrfs_block_group_cache *cache;
4179
4180         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4181         BUG_ON(!cache);
4182
4183         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4184
4185         btrfs_put_block_group(cache);
4186         return 0;
4187 }
4188
4189 /*
4190  * update size of reserved extents.  this may return -EAGAIN if
4191  * 'reserve' is true or 'sinfo' is false and the group has gone read-only.
4192  */
4193 int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4194                                 u64 num_bytes, int reserve, int sinfo)
4195 {
4196         int ret = 0;
4197         if (sinfo) {
4198                 struct btrfs_space_info *space_info = cache->space_info;
4199                 spin_lock(&space_info->lock);
4200                 spin_lock(&cache->lock);
4201                 if (reserve) {
4202                         if (cache->ro) {
4203                                 ret = -EAGAIN;
4204                         } else {
4205                                 cache->reserved += num_bytes;
4206                                 space_info->bytes_reserved += num_bytes;
4207                         }
4208                 } else {
4209                         if (cache->ro)
4210                                 space_info->bytes_readonly += num_bytes;
4211                         cache->reserved -= num_bytes;
4212                         space_info->bytes_reserved -= num_bytes;
4213                         space_info->reservation_progress++;
4214                 }
4215                 spin_unlock(&cache->lock);
4216                 spin_unlock(&space_info->lock);
4217         } else {
4218                 spin_lock(&cache->lock);
4219                 if (cache->ro) {
4220                         ret = -EAGAIN;
4221                 } else {
4222                         if (reserve)
4223                                 cache->reserved += num_bytes;
4224                         else
4225                                 cache->reserved -= num_bytes;
4226                 }
4227                 spin_unlock(&cache->lock);
4228         }
4229         return ret;
4230 }
4231
4232 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4233                                 struct btrfs_root *root)
4234 {
4235         struct btrfs_fs_info *fs_info = root->fs_info;
4236         struct btrfs_caching_control *next;
4237         struct btrfs_caching_control *caching_ctl;
4238         struct btrfs_block_group_cache *cache;
4239
4240         down_write(&fs_info->extent_commit_sem);
4241
4242         list_for_each_entry_safe(caching_ctl, next,
4243                                  &fs_info->caching_block_groups, list) {
4244                 cache = caching_ctl->block_group;
4245                 if (block_group_cache_done(cache)) {
4246                         cache->last_byte_to_unpin = (u64)-1;
4247                         list_del_init(&caching_ctl->list);
4248                         put_caching_control(caching_ctl);
4249                 } else {
4250                         cache->last_byte_to_unpin = caching_ctl->progress;
4251                 }
4252         }
4253
4254         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4255                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4256         else
4257                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4258
4259         up_write(&fs_info->extent_commit_sem);
4260
4261         update_global_block_rsv(fs_info);
4262         return 0;
4263 }
4264
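     /*
      * walk [start, end] one block group at a time, dropping the pinned
      * counters and, for the portion below last_byte_to_unpin, returning
      * the space to the free space cache
      */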
4265 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4266 {
4267         struct btrfs_fs_info *fs_info = root->fs_info;
4268         struct btrfs_block_group_cache *cache = NULL;
4269         u64 len;
4270
4271         while (start <= end) {
4272                 if (!cache ||
4273                     start >= cache->key.objectid + cache->key.offset) {
4274                         if (cache)
4275                                 btrfs_put_block_group(cache);
4276                         cache = btrfs_lookup_block_group(fs_info, start);
4277                         BUG_ON(!cache);
4278                 }
4279
4280                 len = cache->key.objectid + cache->key.offset - start;
4281                 len = min(len, end + 1 - start);
4282
4283                 if (start < cache->last_byte_to_unpin) {
4284                         len = min(len, cache->last_byte_to_unpin - start);
4285                         btrfs_add_free_space(cache, start, len);
4286                 }
4287
4288                 start += len;
4289
4290                 spin_lock(&cache->space_info->lock);
4291                 spin_lock(&cache->lock);
4292                 cache->pinned -= len;
4293                 cache->space_info->bytes_pinned -= len;
4294                 if (cache->ro) {
4295                         cache->space_info->bytes_readonly += len;
4296                 } else if (cache->reserved_pinned > 0) {
4297                         len = min(len, cache->reserved_pinned);
4298                         cache->reserved_pinned -= len;
4299                         cache->space_info->bytes_reserved += len;
4300                 }
4301                 spin_unlock(&cache->lock);
4302                 spin_unlock(&cache->space_info->lock);
4303         }
4304
4305         if (cache)
4306                 btrfs_put_block_group(cache);
4307         return 0;
4308 }
4309
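     /*
      * at transaction commit: discard (if the DISCARD mount option is set)
      * and unpin every extent freed by the committing transaction, then
      * flush the freed bytes back into the durable block reserves
      */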
4310 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4311                                struct btrfs_root *root)
4312 {
4313         struct btrfs_fs_info *fs_info = root->fs_info;
4314         struct extent_io_tree *unpin;
4315         struct btrfs_block_rsv *block_rsv;
4316         struct btrfs_block_rsv *next_rsv;
4317         u64 start;
4318         u64 end;
4319         int idx;
4320         int ret;
4321
4322         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4323                 unpin = &fs_info->freed_extents[1];
4324         else
4325                 unpin = &fs_info->freed_extents[0];
4326
4327         while (1) {
4328                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4329                                             EXTENT_DIRTY);
4330                 if (ret)
4331                         break;
4332
4333                 if (btrfs_test_opt(root, DISCARD))
4334                         ret = btrfs_discard_extent(root, start,
4335                                                    end + 1 - start, NULL);
4336
4337                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4338                 unpin_extent_range(root, start, end);
4339                 cond_resched();
4340         }
4341
4342         mutex_lock(&fs_info->durable_block_rsv_mutex);
4343         list_for_each_entry_safe(block_rsv, next_rsv,
4344                                  &fs_info->durable_block_rsv_list, list) {
4345
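                     /*
                      * freed[] is indexed by transaction parity; give the
                      * bytes freed during this transaction back to the rsv
                      */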
4346                 idx = trans->transid & 0x1;
4347                 if (block_rsv->freed[idx] > 0) {
4348                         block_rsv_add_bytes(block_rsv,
4349                                             block_rsv->freed[idx], 0);
4350                         block_rsv->freed[idx] = 0;
4351                 }
4352                 if (atomic_read(&block_rsv->usage) == 0) {
4353                         btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4354
4355                         if (block_rsv->freed[0] == 0 &&
4356                             block_rsv->freed[1] == 0) {
4357                                 list_del_init(&block_rsv->list);
4358                                 kfree(block_rsv);
4359                         }
4360                 } else {
4361                         btrfs_block_rsv_release(root, block_rsv, 0);
4362                 }
4363         }
4364         mutex_unlock(&fs_info->durable_block_rsv_mutex);
4365
4366         return 0;
4367 }
4368
4369 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4370                                 struct btrfs_root *root,
4371                                 u64 bytenr, u64 num_bytes, u64 parent,
4372                                 u64 root_objectid, u64 owner_objectid,
4373                                 u64 owner_offset, int refs_to_drop,
4374                                 struct btrfs_delayed_extent_op *extent_op)
4375 {
4376         struct btrfs_key key;
4377         struct btrfs_path *path;
4378         struct btrfs_fs_info *info = root->fs_info;
4379         struct btrfs_root *extent_root = info->extent_root;
4380         struct extent_buffer *leaf;
4381         struct btrfs_extent_item *ei;
4382         struct btrfs_extent_inline_ref *iref;
4383         int ret;
4384         int is_data;
4385         int extent_slot = 0;
4386         int found_extent = 0;
4387         int num_to_del = 1;
4388         u32 item_size;
4389         u64 refs;
4390
4391         path = btrfs_alloc_path();
4392         if (!path)
4393                 return -ENOMEM;
4394
4395         path->reada = 1;
4396         path->leave_spinning = 1;
4397
4398         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4399         BUG_ON(!is_data && refs_to_drop != 1);
4400
4401         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4402                                     bytenr, num_bytes, parent,
4403                                     root_objectid, owner_objectid,
4404                                     owner_offset);
4405         if (ret == 0) {
4406                 extent_slot = path->slots[0];
4407                 while (extent_slot >= 0) {
4408                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4409                                               extent_slot);
4410                         if (key.objectid != bytenr)
4411                                 break;
4412                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4413                             key.offset == num_bytes) {
4414                                 found_extent = 1;
4415                                 break;
4416                         }
4417                         if (path->slots[0] - extent_slot > 5)
4418                                 break;
4419                         extent_slot--;
4420                 }
4421 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4422                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4423                 if (found_extent && item_size < sizeof(*ei))
4424                         found_extent = 0;
4425 #endif
4426                 if (!found_extent) {
4427                         BUG_ON(iref);
4428                         ret = remove_extent_backref(trans, extent_root, path,
4429                                                     NULL, refs_to_drop,
4430                                                     is_data);
4431                         BUG_ON(ret);
4432                         btrfs_release_path(path);
4433                         path->leave_spinning = 1;
4434
4435                         key.objectid = bytenr;
4436                         key.type = BTRFS_EXTENT_ITEM_KEY;
4437                         key.offset = num_bytes;
4438
4439                         ret = btrfs_search_slot(trans, extent_root,
4440                                                 &key, path, -1, 1);
4441                         if (ret) {
4442                                 printk(KERN_ERR "umm, got %d back from search"
4443                                        ", was looking for %llu\n", ret,
4444                                        (unsigned long long)bytenr);
4445                                 btrfs_print_leaf(extent_root, path->nodes[0]);
4446                         }
4447                         BUG_ON(ret);
4448                         extent_slot = path->slots[0];
4449                 }
4450         } else {
4451                 btrfs_print_leaf(extent_root, path->nodes[0]);
4452                 WARN_ON(1);
4453                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4454                        "parent %llu root %llu  owner %llu offset %llu\n",
4455                        (unsigned long long)bytenr,
4456                        (unsigned long long)parent,
4457                        (unsigned long long)root_objectid,
4458                        (unsigned long long)owner_objectid,
4459                        (unsigned long long)owner_offset);
4460         }
4461
4462         leaf = path->nodes[0];
4463         item_size = btrfs_item_size_nr(leaf, extent_slot);
4464 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4465         if (item_size < sizeof(*ei)) {
4466                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4467                 ret = convert_extent_item_v0(trans, extent_root, path,
4468                                              owner_objectid, 0);
4469                 BUG_ON(ret < 0);
4470
4471                 btrfs_release_path(path);
4472                 path->leave_spinning = 1;
4473
4474                 key.objectid = bytenr;
4475                 key.type = BTRFS_EXTENT_ITEM_KEY;
4476                 key.offset = num_bytes;
4477
4478                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4479                                         -1, 1);
4480                 if (ret) {
4481                         printk(KERN_ERR "umm, got %d back from search"
4482                                ", was looking for %llu\n", ret,
4483                                (unsigned long long)bytenr);
4484                         btrfs_print_leaf(extent_root, path->nodes[0]);
4485                 }
4486                 BUG_ON(ret);
4487                 extent_slot = path->slots[0];
4488                 leaf = path->nodes[0];
4489                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4490         }
4491 #endif
4492         BUG_ON(item_size < sizeof(*ei));
4493         ei = btrfs_item_ptr(leaf, extent_slot,
4494                             struct btrfs_extent_item);
4495         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4496                 struct btrfs_tree_block_info *bi;
4497                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4498                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4499                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4500         }
4501
4502         refs = btrfs_extent_refs(leaf, ei);
4503         BUG_ON(refs < refs_to_drop);
4504         refs -= refs_to_drop;
4505
4506         if (refs > 0) {
4507                 if (extent_op)
4508                         __run_delayed_extent_op(extent_op, leaf, ei);
4509                 /*
4510                  * In the case of inline back ref, reference count will
4511                  * be updated by remove_extent_backref
4512                  */
4513                 if (iref) {
4514                         BUG_ON(!found_extent);
4515                 } else {
4516                         btrfs_set_extent_refs(leaf, ei, refs);
4517                         btrfs_mark_buffer_dirty(leaf);
4518                 }
4519                 if (found_extent) {
4520                         ret = remove_extent_backref(trans, extent_root, path,
4521                                                     iref, refs_to_drop,
4522                                                     is_data);
4523                         BUG_ON(ret);
4524                 }
4525         } else {
4526                 if (found_extent) {
4527                         BUG_ON(is_data && refs_to_drop !=
4528                                extent_data_ref_count(root, path, iref));
4529                         if (iref) {
4530                                 BUG_ON(path->slots[0] != extent_slot);
4531                         } else {
4532                                 BUG_ON(path->slots[0] != extent_slot + 1);
4533                                 path->slots[0] = extent_slot;
4534                                 num_to_del = 2;
4535                         }
4536                 }
4537
4538                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4539                                       num_to_del);
4540                 BUG_ON(ret);
4541                 btrfs_release_path(path);
4542
4543                 if (is_data) {
4544                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4545                         BUG_ON(ret);
4546                 } else {
4547                         invalidate_mapping_pages(info->btree_inode->i_mapping,
4548                              bytenr >> PAGE_CACHE_SHIFT,
4549                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4550                 }
4551
4552                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4553                 BUG_ON(ret);
4554         }
4555         btrfs_free_path(path);
4556         return ret;
4557 }
4558
4559 /*
4560  * when we free a block, it is possible (and likely) that we free the last
4561  * delayed ref for that extent as well.  This searches the delayed ref tree for
4562  * a given extent, and if there are no other delayed refs to be processed, it
4563  * removes it from the tree.
4564  */
4565 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4566                                       struct btrfs_root *root, u64 bytenr)
4567 {
4568         struct btrfs_delayed_ref_head *head;
4569         struct btrfs_delayed_ref_root *delayed_refs;
4570         struct btrfs_delayed_ref_node *ref;
4571         struct rb_node *node;
4572         int ret = 0;
4573
4574         delayed_refs = &trans->transaction->delayed_refs;
4575         spin_lock(&delayed_refs->lock);
4576         head = btrfs_find_delayed_ref_head(trans, bytenr);
4577         if (!head)
4578                 goto out;
4579
4580         node = rb_prev(&head->node.rb_node);
4581         if (!node)
4582                 goto out;
4583
4584         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4585
4586         /* there are still entries for this ref, we can't drop it */
4587         if (ref->bytenr == bytenr)
4588                 goto out;
4589
4590         if (head->extent_op) {
4591                 if (!head->must_insert_reserved)
4592                         goto out;
4593                 kfree(head->extent_op);
4594                 head->extent_op = NULL;
4595         }
4596
4597         /*
4598          * waiting for the lock here would deadlock.  If someone else has it
4599          * locked, they are already in the process of dropping it anyway
4600          */
4601         if (!mutex_trylock(&head->mutex))
4602                 goto out;
4603
4604         /*
4605          * at this point we have a head with no other entries.  Go
4606          * ahead and process it.
4607          */
4608         head->node.in_tree = 0;
4609         rb_erase(&head->node.rb_node, &delayed_refs->root);
4610
4611         delayed_refs->num_entries--;
4612
4613         /*
4614          * we don't take a ref on the node because we're removing it from the
4615          * tree, so we just steal the ref the tree was holding.
4616          */
4617         delayed_refs->num_heads--;
4618         if (list_empty(&head->cluster))
4619                 delayed_refs->num_heads_ready--;
4620
4621         list_del_init(&head->cluster);
4622         spin_unlock(&delayed_refs->lock);
4623
4624         BUG_ON(head->extent_op);
4625         if (head->must_insert_reserved)
4626                 ret = 1;
4627
4628         mutex_unlock(&head->mutex);
4629         btrfs_put_delayed_ref(&head->node);
4630         return ret;
4631 out:
4632         spin_unlock(&delayed_refs->lock);
4633         return 0;
4634 }
4635
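     /*
      * drop a reference on a tree block.  on the last reference, blocks
      * created in the running transaction that were never written can be
      * reclaimed immediately, everything else is pinned until commit
      */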
4636 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4637                            struct btrfs_root *root,
4638                            struct extent_buffer *buf,
4639                            u64 parent, int last_ref)
4640 {
4641         struct btrfs_block_rsv *block_rsv;
4642         struct btrfs_block_group_cache *cache = NULL;
4643         int ret;
4644
4645         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4646                 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4647                                                 parent, root->root_key.objectid,
4648                                                 btrfs_header_level(buf),
4649                                                 BTRFS_DROP_DELAYED_REF, NULL);
4650                 BUG_ON(ret);
4651         }
4652
4653         if (!last_ref)
4654                 return;
4655
4656         block_rsv = get_block_rsv(trans, root);
4657         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4658         if (block_rsv->space_info != cache->space_info)
4659                 goto out;
4660
4661         if (btrfs_header_generation(buf) == trans->transid) {
4662                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4663                         ret = check_ref_cleanup(trans, root, buf->start);
4664                         if (!ret)
4665                                 goto pin;
4666                 }
4667
4668                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4669                         pin_down_extent(root, cache, buf->start, buf->len, 1);
4670                         goto pin;
4671                 }
4672
4673                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4674
4675                 btrfs_add_free_space(cache, buf->start, buf->len);
4676                 ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
4677                 if (ret == -EAGAIN) {
4678                         /* block group became read-only */
4679                         btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
4680                         goto out;
4681                 }
4682
4683                 ret = 1;
4684                 spin_lock(&block_rsv->lock);
4685                 if (block_rsv->reserved < block_rsv->size) {
4686                         block_rsv->reserved += buf->len;
4687                         ret = 0;
4688                 }
4689                 spin_unlock(&block_rsv->lock);
4690
4691                 if (ret) {
4692                         spin_lock(&cache->space_info->lock);
4693                         cache->space_info->bytes_reserved -= buf->len;
4694                         cache->space_info->reservation_progress++;
4695                         spin_unlock(&cache->space_info->lock);
4696                 }
4697                 goto out;
4698         }
4699 pin:
4700         if (block_rsv->durable && !cache->ro) {
4701                 ret = 0;
4702                 spin_lock(&cache->lock);
4703                 if (!cache->ro) {
4704                         cache->reserved_pinned += buf->len;
4705                         ret = 1;
4706                 }
4707                 spin_unlock(&cache->lock);
4708
4709                 if (ret) {
4710                         spin_lock(&block_rsv->lock);
4711                         block_rsv->freed[trans->transid & 0x1] += buf->len;
4712                         spin_unlock(&block_rsv->lock);
4713                 }
4714         }
4715 out:
4716         /*
4717          * We are deleting the buffer, so clear the corrupt flag since it
4718          * no longer matters.
4719          */
4720         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
4721         btrfs_put_block_group(cache);
4722 }
4723
4724 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4725                       struct btrfs_root *root,
4726                       u64 bytenr, u64 num_bytes, u64 parent,
4727                       u64 root_objectid, u64 owner, u64 offset)
4728 {
4729         int ret;
4730
4731         /*
4732          * tree log blocks never actually go into the extent allocation
4733          * tree, just update pinning info and exit early.
4734          */
4735         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4736                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4737                 /* unlocks the pinned mutex */
4738                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4739                 ret = 0;
4740         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4741                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4742                                         parent, root_objectid, (int)owner,
4743                                         BTRFS_DROP_DELAYED_REF, NULL);
4744                 BUG_ON(ret);
4745         } else {
4746                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4747                                         parent, root_objectid, owner,
4748                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
4749                 BUG_ON(ret);
4750         }
4751         return ret;
4752 }
4753
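     /*
      * round val up to a multiple of the stripe size, e.g. with a 64K
      * stripesize 0x11000 is aligned up to 0x20000
      */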
4754 static u64 stripe_align(struct btrfs_root *root, u64 val)
4755 {
4756         u64 mask = ((u64)root->stripesize - 1);
4757         u64 ret = (val + mask) & ~mask;
4758         return ret;
4759 }
4760
4761 /*
4762  * when we wait for progress in the block group caching, it's because
4763  * our allocation attempt failed at least once.  So, we must sleep
4764  * and let some progress happen before we try again.
4765  *
4766  * This function will sleep at least once waiting for new free space to
4767  * show up, and then it will check the block group free space numbers
4768  * for our min num_bytes.  Another option is to have it go ahead
4769  * and look in the rbtree for a free extent of a given size, but this
4770  * is a good start.
4771  */
4772 static noinline int
4773 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4774                                 u64 num_bytes)
4775 {
4776         struct btrfs_caching_control *caching_ctl;
4777         DEFINE_WAIT(wait);
4778
4779         caching_ctl = get_caching_control(cache);
4780         if (!caching_ctl)
4781                 return 0;
4782
4783         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4784                    (cache->free_space_ctl->free_space >= num_bytes));
4785
4786         put_caching_control(caching_ctl);
4787         return 0;
4788 }
4789
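     /* like wait_block_group_cache_progress, but wait for caching to finish */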
4790 static noinline int
4791 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4792 {
4793         struct btrfs_caching_control *caching_ctl;
4794         DEFINE_WAIT(wait);
4795
4796         caching_ctl = get_caching_control(cache);
4797         if (!caching_ctl)
4798                 return 0;
4799
4800         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4801
4802         put_caching_control(caching_ctl);
4803         return 0;
4804 }
4805
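     /*
      * map a block group's raid bits to an index into
      * space_info->block_groups[]: RAID10, RAID1, DUP, RAID0, then single
      */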
4806 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4807 {
4808         int index;
4809         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4810                 index = 0;
4811         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4812                 index = 1;
4813         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4814                 index = 2;
4815         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4816                 index = 3;
4817         else
4818                 index = 4;
4819         return index;
4820 }
4821
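     /*
      * stages for find_free_extent; see the comment block near the bottom
      * of that function for what each stage permits
      */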
4822 enum btrfs_loop_type {
4823         LOOP_FIND_IDEAL = 0,
4824         LOOP_CACHING_NOWAIT = 1,
4825         LOOP_CACHING_WAIT = 2,
4826         LOOP_ALLOC_CHUNK = 3,
4827         LOOP_NO_EMPTY_SIZE = 4,
4828 };
4829
4830 /*
4831  * walks the btree of allocated extents and finds a hole of a given size.
4832  * The key ins is changed to record the hole:
4833  * ins->objectid == block start
4834  * ins->flags == BTRFS_EXTENT_ITEM_KEY
4835  * ins->offset == number of blocks
4836  * Any available blocks before search_start are skipped.
4837  */
4838 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4839                                      struct btrfs_root *orig_root,
4840                                      u64 num_bytes, u64 empty_size,
4841                                      u64 search_start, u64 search_end,
4842                                      u64 hint_byte, struct btrfs_key *ins,
4843                                      int data)
4844 {
4845         int ret = 0;
4846         struct btrfs_root *root = orig_root->fs_info->extent_root;
4847         struct btrfs_free_cluster *last_ptr = NULL;
4848         struct btrfs_block_group_cache *block_group = NULL;
4849         int empty_cluster = 2 * 1024 * 1024;
4850         int allowed_chunk_alloc = 0;
4851         int done_chunk_alloc = 0;
4852         struct btrfs_space_info *space_info;
4853         int last_ptr_loop = 0;
4854         int loop = 0;
4855         int index = 0;
4856         bool found_uncached_bg = false;
4857         bool failed_cluster_refill = false;
4858         bool failed_alloc = false;
4859         bool use_cluster = true;
4860         u64 ideal_cache_percent = 0;
4861         u64 ideal_cache_offset = 0;
4862
4863         WARN_ON(num_bytes < root->sectorsize);
4864         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4865         ins->objectid = 0;
4866         ins->offset = 0;
4867
4868         space_info = __find_space_info(root->fs_info, data);
4869         if (!space_info) {
4870                 printk(KERN_ERR "No space info for %d\n", data);
4871                 return -ENOSPC;
4872         }
4873
4874         /*
4875          * If the space info is for both data and metadata it means we have a
4876          * small filesystem and we can't use the clustering stuff.
4877          */
4878         if (btrfs_mixed_space_info(space_info))
4879                 use_cluster = false;
4880
4881         if (orig_root->ref_cows || empty_size)
4882                 allowed_chunk_alloc = 1;
4883
4884         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4885                 last_ptr = &root->fs_info->meta_alloc_cluster;
4886                 if (!btrfs_test_opt(root, SSD))
4887                         empty_cluster = 64 * 1024;
4888         }
4889
4890         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4891             btrfs_test_opt(root, SSD)) {
4892                 last_ptr = &root->fs_info->data_alloc_cluster;
4893         }
4894
4895         if (last_ptr) {
4896                 spin_lock(&last_ptr->lock);
4897                 if (last_ptr->block_group)
4898                         hint_byte = last_ptr->window_start;
4899                 spin_unlock(&last_ptr->lock);
4900         }
4901
4902         search_start = max(search_start, first_logical_byte(root, 0));
4903         search_start = max(search_start, hint_byte);
4904
4905         if (!last_ptr)
4906                 empty_cluster = 0;
4907
4908         if (search_start == hint_byte) {
4909 ideal_cache:
4910                 block_group = btrfs_lookup_block_group(root->fs_info,
4911                                                        search_start);
4912                 /*
4913                  * we don't want to use the block group if it doesn't match our
4914                  * allocation bits, or if it's not cached.
4915                  *
4916                  * However if we are re-searching with an ideal block group
4917                  * picked out then we don't care that the block group is cached.
4918                  */
4919                 if (block_group && block_group_bits(block_group, data) &&
4920                     (block_group->cached != BTRFS_CACHE_NO ||
4921                      search_start == ideal_cache_offset)) {
4922                         down_read(&space_info->groups_sem);
4923                         if (list_empty(&block_group->list) ||
4924                             block_group->ro) {
4925                                 /*
4926                                  * someone is removing this block group,
4927                                  * we can't jump into the have_block_group
4928                                  * target because our list pointers are not
4929                                  * valid
4930                                  */
4931                                 btrfs_put_block_group(block_group);
4932                                 up_read(&space_info->groups_sem);
4933                         } else {
4934                                 index = get_block_group_index(block_group);
4935                                 goto have_block_group;
4936                         }
4937                 } else if (block_group) {
4938                         btrfs_put_block_group(block_group);
4939                 }
4940         }
4941 search:
4942         down_read(&space_info->groups_sem);
4943         list_for_each_entry(block_group, &space_info->block_groups[index],
4944                             list) {
4945                 u64 offset;
4946                 int cached;
4947
4948                 btrfs_get_block_group(block_group);
4949                 search_start = block_group->key.objectid;
4950
4951                 /*
4952                  * this can happen if we end up cycling through all the
4953                  * raid types, but we want to make sure we only allocate
4954                  * for the proper type.
4955                  */
4956                 if (!block_group_bits(block_group, data)) {
4957                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
4958                                     BTRFS_BLOCK_GROUP_RAID1 |
4959                                     BTRFS_BLOCK_GROUP_RAID10;
4960
4961                         /*
4962                          * if they asked for extra copies and this block group
4963                          * doesn't provide them, bail.  This does allow us to
4964                          * fill raid0 from raid1.
4965                          */
4966                         if ((data & extra) && !(block_group->flags & extra))
4967                                 goto loop;
4968                 }
4969
4970 have_block_group:
4971                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4972                         u64 free_percent;
4973
4974                         ret = cache_block_group(block_group, trans,
4975                                                 orig_root, 1);
4976                         if (block_group->cached == BTRFS_CACHE_FINISHED)
4977                                 goto have_block_group;
4978
4979                         free_percent = btrfs_block_group_used(&block_group->item);
4980                         free_percent *= 100;
4981                         free_percent = div64_u64(free_percent,
4982                                                  block_group->key.offset);
4983                         free_percent = 100 - free_percent;
4984                         if (free_percent > ideal_cache_percent &&
4985                             likely(!block_group->ro)) {
4986                                 ideal_cache_offset = block_group->key.objectid;
4987                                 ideal_cache_percent = free_percent;
4988                         }
4989
4990                         /*
4991                          * We only want to start kthread caching if we are at
4992                          * the point where we will wait for caching to make
4993                          * progress, or if our ideal search is over and we've
4994                          * found somebody to start caching.
4995                          */
4996                         if (loop > LOOP_CACHING_NOWAIT ||
4997                             (loop > LOOP_FIND_IDEAL &&
4998                              atomic_read(&space_info->caching_threads) < 2)) {
4999                                 ret = cache_block_group(block_group, trans,
5000                                                         orig_root, 0);
5001                                 BUG_ON(ret);
5002                         }
5003                         found_uncached_bg = true;
5004
5005                         /*
5006                          * If loop is set for cached only, try the next block
5007                          * group.
5008                          */
5009                         if (loop == LOOP_FIND_IDEAL)
5010                                 goto loop;
5011                 }
5012
5013                 cached = block_group_cache_done(block_group);
5014                 if (unlikely(!cached))
5015                         found_uncached_bg = true;
5016
5017                 if (unlikely(block_group->ro))
5018                         goto loop;
5019
5020                 spin_lock(&block_group->free_space_ctl->tree_lock);
5021                 if (cached &&
5022                     block_group->free_space_ctl->free_space <
5023                     num_bytes + empty_size) {
5024                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5025                         goto loop;
5026                 }
5027                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5028
5029                 /*
5030                  * Ok, we want to try the cluster allocator, so look
5031                  * there first, unless we are on LOOP_NO_EMPTY_SIZE.  By
5032                  * that point we have already tried the cluster allocator
5033                  * plenty of times without finding anything, so we are
5034                  * likely far too fragmented for clustering to work; just
5035                  * skip it and let the allocator find whatever block it
5036                  * can.
5037                  */
5038                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
5039                         /*
5040                          * the refill lock keeps out other
5041                          * people trying to start a new cluster
5042                          */
5043                         spin_lock(&last_ptr->refill_lock);
5044                         if (last_ptr->block_group &&
5045                             (last_ptr->block_group->ro ||
5046                             !block_group_bits(last_ptr->block_group, data))) {
5047                                 offset = 0;
5048                                 goto refill_cluster;
5049                         }
5050
5051                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5052                                                  num_bytes, search_start);
5053                         if (offset) {
5054                                 /* we have a block, we're done */
5055                                 spin_unlock(&last_ptr->refill_lock);
5056                                 goto checks;
5057                         }
5058
5059                         spin_lock(&last_ptr->lock);
5060                         /*
5061                          * whoops, this cluster doesn't actually point to
5062                          * this block group.  Get a ref on the block
5063                          * group is does point to and try again
5064                          * group it does point to and try again
5065                         if (!last_ptr_loop && last_ptr->block_group &&
5066                             last_ptr->block_group != block_group) {
5067
5068                                 btrfs_put_block_group(block_group);
5069                                 block_group = last_ptr->block_group;
5070                                 btrfs_get_block_group(block_group);
5071                                 spin_unlock(&last_ptr->lock);
5072                                 spin_unlock(&last_ptr->refill_lock);
5073
5074                                 last_ptr_loop = 1;
5075                                 search_start = block_group->key.objectid;
5076                                 /*
5077                                  * we know this block group is properly
5078                                  * in the list because
5079                                  * btrfs_remove_block_group drops the
5080                                  * cluster before it removes the block
5081                                  * group from the list
5082                                  */
5083                                 goto have_block_group;
5084                         }
5085                         spin_unlock(&last_ptr->lock);
5086 refill_cluster:
5087                         /*
5088                          * this cluster didn't work out, free it and
5089                          * start over
5090                          */
5091                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5092
5093                         last_ptr_loop = 0;
5094
5095                         /* allocate a cluster in this block group */
5096                         ret = btrfs_find_space_cluster(trans, root,
5097                                                block_group, last_ptr,
5098                                                offset, num_bytes,
5099                                                empty_cluster + empty_size);
5100                         if (ret == 0) {
5101                                 /*
5102                                  * now pull our allocation out of this
5103                                  * cluster
5104                                  */
5105                                 offset = btrfs_alloc_from_cluster(block_group,
5106                                                   last_ptr, num_bytes,
5107                                                   search_start);
5108                                 if (offset) {
5109                                         /* we found one, proceed */
5110                                         spin_unlock(&last_ptr->refill_lock);
5111                                         goto checks;
5112                                 }
5113                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5114                                    && !failed_cluster_refill) {
5115                                 spin_unlock(&last_ptr->refill_lock);
5116
5117                                 failed_cluster_refill = true;
5118                                 wait_block_group_cache_progress(block_group,
5119                                        num_bytes + empty_cluster + empty_size);
5120                                 goto have_block_group;
5121                         }
5122
5123                         /*
5124                          * at this point we either didn't find a cluster
5125                          * or we weren't able to allocate a block from our
5126                          * cluster.  Free the cluster we've been trying
5127                          * to use, and go to the next block group
5128                          */
5129                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5130                         spin_unlock(&last_ptr->refill_lock);
5131                         goto loop;
5132                 }
5133
5134                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5135                                                     num_bytes, empty_size);
5136                 /*
5137                  * If we didn't find a chunk, and we haven't failed on this
5138                  * block group before, and this block group is in the middle of
5139                  * caching and we are ok with waiting, then go ahead and wait
5140                  * for progress to be made, and set failed_alloc to true.
5141                  *
5142                  * If failed_alloc is true then we've already waited on this
5143                  * block group once and should move on to the next block group.
5144                  */
5145                 if (!offset && !failed_alloc && !cached &&
5146                     loop > LOOP_CACHING_NOWAIT) {
5147                         wait_block_group_cache_progress(block_group,
5148                                                 num_bytes + empty_size);
5149                         failed_alloc = true;
5150                         goto have_block_group;
5151                 } else if (!offset) {
5152                         goto loop;
5153                 }
5154 checks:
5155                 search_start = stripe_align(root, offset);
5156                 /* move on to the next group */
5157                 if (search_start + num_bytes >= search_end) {
5158                         btrfs_add_free_space(block_group, offset, num_bytes);
5159                         goto loop;
5160                 }
5161
5162                 /* move on to the next group */
5163                 if (search_start + num_bytes >
5164                     block_group->key.objectid + block_group->key.offset) {
5165                         btrfs_add_free_space(block_group, offset, num_bytes);
5166                         goto loop;
5167                 }
5168
5177                 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
5178                                             (data & BTRFS_BLOCK_GROUP_DATA));
5179                 if (ret == -EAGAIN) {
5180                         btrfs_add_free_space(block_group, offset, num_bytes);
5181                         goto loop;
5182                 }
5183
5184                 /* we are all good, let's return */
5185                 ins->objectid = search_start;
5186                 ins->offset = num_bytes;
5187
5188                 if (offset < search_start)
5189                         btrfs_add_free_space(block_group, offset,
5190                                              search_start - offset);
5191                 BUG_ON(offset > search_start);
5192                 btrfs_put_block_group(block_group);
5193                 break;
5194 loop:
5195                 failed_cluster_refill = false;
5196                 failed_alloc = false;
5197                 BUG_ON(index != get_block_group_index(block_group));
5198                 btrfs_put_block_group(block_group);
5199         }
5200         up_read(&space_info->groups_sem);
5201
5202         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5203                 goto search;
5204
5205         /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5206          *                      for them to make caching progress.  Also
5207          *                      determine the best possible bg to cache
5208          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5209          *                      caching kthreads as we move along
5210          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5211          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5212          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5213          *                      again
5214          */
5215         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5216             (found_uncached_bg || empty_size || empty_cluster ||
5217              allowed_chunk_alloc)) {
5218                 index = 0;
5219                 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5220                         found_uncached_bg = false;
5221                         loop++;
5222                         if (!ideal_cache_percent &&
5223                             atomic_read(&space_info->caching_threads))
5224                                 goto search;
5225
5226                         /*
5227                          * 1 of the following 2 things has happened so far
5228                          *
5229                          * 1) We found an ideal block group for caching that
5230                          * is mostly full and will cache quickly, so we might
5231                          * as well wait for it.
5232                          *
5233                          * 2) We searched for cached only and we didn't find
5234                          * anything, and we didn't start any caching kthreads
5235                          * either, so chances are we will loop through and
5236                          * start a couple caching kthreads, and then come back
5237                          * around and just wait for them.  This will be slower
5238                          * because we will have 2 caching kthreads reading at
5239                          * the same time when we could have just started one
5240                          * and waited for it to get far enough to give us an
5241                          * allocation, so go ahead and go to the wait caching
5242                          * loop.
5243                          */
5244                         loop = LOOP_CACHING_WAIT;
5245                         search_start = ideal_cache_offset;
5246                         ideal_cache_percent = 0;
5247                         goto ideal_cache;
5248                 } else if (loop == LOOP_FIND_IDEAL) {
5249                         /*
5250                          * Didn't find an uncached bg, wait on anything we find
5251                          * next.
5252                          */
5253                         loop = LOOP_CACHING_WAIT;
5254                         goto search;
5255                 }
5256
5257                 if (loop < LOOP_CACHING_WAIT) {
5258                         loop++;
5259                         goto search;
5260                 }
5261
5262                 if (loop == LOOP_ALLOC_CHUNK) {
5263                         empty_size = 0;
5264                         empty_cluster = 0;
5265                 }
5266
5267                 if (allowed_chunk_alloc) {
5268                         ret = do_chunk_alloc(trans, root, num_bytes +
5269                                              2 * 1024 * 1024, data,
5270                                              CHUNK_ALLOC_LIMITED);
5271                         allowed_chunk_alloc = 0;
5272                         done_chunk_alloc = 1;
5273                 } else if (!done_chunk_alloc &&
5274                            space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
5275                         space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5276                 }
5277
5278                 if (loop < LOOP_NO_EMPTY_SIZE) {
5279                         loop++;
5280                         goto search;
5281                 }
5282                 ret = -ENOSPC;
5283         } else if (!ins->objectid) {
5284                 ret = -ENOSPC;
5285         } else if (ins->objectid) {
5286                 ret = 0;
5287         }
5288
5289         return ret;
5290 }
5291
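     /*
      * print the usage counters for a space_info (and optionally each of
      * its block groups) to help debug ENOSPC problems
      */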
5292 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5293                             int dump_block_groups)
5294 {
5295         struct btrfs_block_group_cache *cache;
5296         int index = 0;
5297
5298         spin_lock(&info->lock);
5299         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5300                (unsigned long long)(info->total_bytes - info->bytes_used -
5301                                     info->bytes_pinned - info->bytes_reserved -
5302                                     info->bytes_readonly),
5303                (info->full) ? "" : "not ");
5304         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5305                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5306                (unsigned long long)info->total_bytes,
5307                (unsigned long long)info->bytes_used,
5308                (unsigned long long)info->bytes_pinned,
5309                (unsigned long long)info->bytes_reserved,
5310                (unsigned long long)info->bytes_may_use,
5311                (unsigned long long)info->bytes_readonly);
5312         spin_unlock(&info->lock);
5313
5314         if (!dump_block_groups)
5315                 return;
5316
5317         down_read(&info->groups_sem);
5318 again:
5319         list_for_each_entry(cache, &info->block_groups[index], list) {
5320                 spin_lock(&cache->lock);
5321                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5322                        "%llu pinned %llu reserved\n",
5323                        (unsigned long long)cache->key.objectid,
5324                        (unsigned long long)cache->key.offset,
5325                        (unsigned long long)btrfs_block_group_used(&cache->item),
5326                        (unsigned long long)cache->pinned,
5327                        (unsigned long long)cache->reserved);
5328                 btrfs_dump_free_space(cache, bytes);
5329                 spin_unlock(&cache->lock);
5330         }
5331         if (++index < BTRFS_NR_RAID_TYPES)
5332                 goto again;
5333         up_read(&info->groups_sem);
5334 }
5335
5336 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5337                          struct btrfs_root *root,
5338                          u64 num_bytes, u64 min_alloc_size,
5339                          u64 empty_size, u64 hint_byte,
5340                          u64 search_end, struct btrfs_key *ins,
5341                          u64 data)
5342 {
5343         int ret;
5344         u64 search_start = 0;
5345
5346         data = btrfs_get_alloc_profile(root, data);
5347 again:
5348         /*
5349          * the only place that sets empty_size is btrfs_realloc_node, which
5350          * is not called recursively on allocations
5351          */
5352         if (empty_size || root->ref_cows)
5353                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5354                                      num_bytes + 2 * 1024 * 1024, data,
5355                                      CHUNK_ALLOC_NO_FORCE);
5356
5357         WARN_ON(num_bytes < root->sectorsize);
5358         ret = find_free_extent(trans, root, num_bytes, empty_size,
5359                                search_start, search_end, hint_byte,
5360                                ins, data);
5361
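             /*
              * on ENOSPC, retry with a smaller request: halve num_bytes (but
              * keep it sector aligned and above min_alloc_size) and force a
              * chunk allocation before searching again
              */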
5362         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5363                 num_bytes = num_bytes >> 1;
5364                 num_bytes = num_bytes & ~(root->sectorsize - 1);
5365                 num_bytes = max(num_bytes, min_alloc_size);
5366                 do_chunk_alloc(trans, root->fs_info->extent_root,
5367                                num_bytes, data, CHUNK_ALLOC_FORCE);
5368                 goto again;
5369         }
5370         if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5371                 struct btrfs_space_info *sinfo;
5372
5373                 sinfo = __find_space_info(root->fs_info, data);
5374                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5375                        "wanted %llu\n", (unsigned long long)data,
5376                        (unsigned long long)num_bytes);
5377                 dump_space_info(sinfo, num_bytes, 1);
5378         }
5379
5380         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5381
5382         return ret;
5383 }
5384
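     /*
      * return an extent that was reserved but never used to the free space
      * cache and drop its reservation
      */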
5385 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5386 {
5387         struct btrfs_block_group_cache *cache;
5388         int ret = 0;
5389
5390         cache = btrfs_lookup_block_group(root->fs_info, start);
5391         if (!cache) {
5392                 printk(KERN_ERR "Unable to find block group for %llu\n",
5393                        (unsigned long long)start);
5394                 return -ENOSPC;
5395         }
5396
5397         if (btrfs_test_opt(root, DISCARD))
5398                 ret = btrfs_discard_extent(root, start, len, NULL);
5399
5400         btrfs_add_free_space(cache, start, len);
5401         btrfs_update_reserved_bytes(cache, len, 0, 1);
5402         btrfs_put_block_group(cache);
5403
5404         trace_btrfs_reserved_extent_free(root, start, len);
5405
5406         return ret;
5407 }
5408
5409 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5410                                       struct btrfs_root *root,
5411                                       u64 parent, u64 root_objectid,
5412                                       u64 flags, u64 owner, u64 offset,
5413                                       struct btrfs_key *ins, int ref_mod)
5414 {
5415         int ret;
5416         struct btrfs_fs_info *fs_info = root->fs_info;
5417         struct btrfs_extent_item *extent_item;
5418         struct btrfs_extent_inline_ref *iref;
5419         struct btrfs_path *path;
5420         struct extent_buffer *leaf;
5421         int type;
5422         u32 size;
5423
5424         if (parent > 0)
5425                 type = BTRFS_SHARED_DATA_REF_KEY;
5426         else
5427                 type = BTRFS_EXTENT_DATA_REF_KEY;
5428
5429         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5430
5431         path = btrfs_alloc_path();
5432         if (!path)
5433                 return -ENOMEM;
5434
5435         path->leave_spinning = 1;
5436         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5437                                       ins, size);
5438         BUG_ON(ret);
5439
5440         leaf = path->nodes[0];
5441         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5442                                      struct btrfs_extent_item);
5443         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5444         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5445         btrfs_set_extent_flags(leaf, extent_item,
5446                                flags | BTRFS_EXTENT_FLAG_DATA);
5447
5448         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5449         btrfs_set_extent_inline_ref_type(leaf, iref, type);
5450         if (parent > 0) {
5451                 struct btrfs_shared_data_ref *ref;
5452                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5453                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5454                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5455         } else {
5456                 struct btrfs_extent_data_ref *ref;
5457                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5458                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5459                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5460                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5461                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5462         }
5463
5464         btrfs_mark_buffer_dirty(path->nodes[0]);
5465         btrfs_free_path(path);
5466
5467         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5468         if (ret) {
5469                 printk(KERN_ERR "btrfs update block group failed for %llu "
5470                        "%llu\n", (unsigned long long)ins->objectid,
5471                        (unsigned long long)ins->offset);
5472                 BUG();
5473         }
5474         return ret;
5475 }
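
/*
 * resulting leaf item layout for the two cases above.  note that in
 * the non-shared case the btrfs_extent_data_ref is overlaid starting
 * at the inline ref's offset field:
 *
 *   parent > 0 (shared):
 *     [extent_item][iref: SHARED_DATA_REF_KEY, offset = parent]
 *                  [shared_data_ref: count]
 *
 *   parent == 0:
 *     [extent_item][iref: EXTENT_DATA_REF_KEY]
 *                  [extent_data_ref: root, objectid, offset, count]
 */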
5476
5477 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5478                                      struct btrfs_root *root,
5479                                      u64 parent, u64 root_objectid,
5480                                      u64 flags, struct btrfs_disk_key *key,
5481                                      int level, struct btrfs_key *ins)
5482 {
5483         int ret;
5484         struct btrfs_fs_info *fs_info = root->fs_info;
5485         struct btrfs_extent_item *extent_item;
5486         struct btrfs_tree_block_info *block_info;
5487         struct btrfs_extent_inline_ref *iref;
5488         struct btrfs_path *path;
5489         struct extent_buffer *leaf;
5490         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5491
5492         path = btrfs_alloc_path();
5493         if (!path)
5494                 return -ENOMEM;
5494
5495         path->leave_spinning = 1;
5496         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5497                                       ins, size);
5498         BUG_ON(ret);
5499
5500         leaf = path->nodes[0];
5501         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5502                                      struct btrfs_extent_item);
5503         btrfs_set_extent_refs(leaf, extent_item, 1);
5504         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5505         btrfs_set_extent_flags(leaf, extent_item,
5506                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5507         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5508
5509         btrfs_set_tree_block_key(leaf, block_info, key);
5510         btrfs_set_tree_block_level(leaf, block_info, level);
5511
5512         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5513         if (parent > 0) {
5514                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5515                 btrfs_set_extent_inline_ref_type(leaf, iref,
5516                                                  BTRFS_SHARED_BLOCK_REF_KEY);
5517                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5518         } else {
5519                 btrfs_set_extent_inline_ref_type(leaf, iref,
5520                                                  BTRFS_TREE_BLOCK_REF_KEY);
5521                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5522         }
5523
5524         btrfs_mark_buffer_dirty(leaf);
5525         btrfs_free_path(path);
5526
5527         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5528         if (ret) {
5529                 printk(KERN_ERR "btrfs update block group failed for %llu "
5530                        "%llu\n", (unsigned long long)ins->objectid,
5531                        (unsigned long long)ins->offset);
5532                 BUG();
5533         }
5534         return ret;
5535 }
5536
5537 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5538                                      struct btrfs_root *root,
5539                                      u64 root_objectid, u64 owner,
5540                                      u64 offset, struct btrfs_key *ins)
5541 {
5542         int ret;
5543
5544         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5545
5546         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5547                                          0, root_objectid, owner, offset,
5548                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
5549         return ret;
5550 }
5551
5552 /*
5553  * this is used by the tree logging recovery code.  It records that
5554  * an extent has been allocated and makes sure to clear the free
5555  * space cache bits as well.
5556  */
5557 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5558                                    struct btrfs_root *root,
5559                                    u64 root_objectid, u64 owner, u64 offset,
5560                                    struct btrfs_key *ins)
5561 {
5562         int ret;
5563         struct btrfs_block_group_cache *block_group;
5564         struct btrfs_caching_control *caching_ctl;
5565         u64 start = ins->objectid;
5566         u64 num_bytes = ins->offset;
5567
5568         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5569         cache_block_group(block_group, trans, NULL, 0);
5570         caching_ctl = get_caching_control(block_group);
5571
5572         if (!caching_ctl) {
5573                 BUG_ON(!block_group_cache_done(block_group));
5574                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5575                 BUG_ON(ret);
5576         } else {
5577                 mutex_lock(&caching_ctl->mutex);
5578
5579                 if (start >= caching_ctl->progress) {
5580                         ret = add_excluded_extent(root, start, num_bytes);
5581                         BUG_ON(ret);
5582                 } else if (start + num_bytes <= caching_ctl->progress) {
5583                         ret = btrfs_remove_free_space(block_group,
5584                                                       start, num_bytes);
5585                         BUG_ON(ret);
5586                 } else {
5587                         num_bytes = caching_ctl->progress - start;
5588                         ret = btrfs_remove_free_space(block_group,
5589                                                       start, num_bytes);
5590                         BUG_ON(ret);
5591
5592                         start = caching_ctl->progress;
5593                         num_bytes = ins->objectid + ins->offset -
5594                                     caching_ctl->progress;
5595                         ret = add_excluded_extent(root, start, num_bytes);
5596                         BUG_ON(ret);
5597                 }
5598
5599                 mutex_unlock(&caching_ctl->mutex);
5600                 put_caching_control(caching_ctl);
5601         }
5602
5603         ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
5604         BUG_ON(ret);
5605         btrfs_put_block_group(block_group);
5606         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5607                                          0, owner, offset, ins, 1);
5608         return ret;
5609 }
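
/*
 * the three cases above, relative to the caching kthread's progress
 * pointer:
 *
 *                  progress
 *   ...cached...      |      ...not cached yet...
 *
 * 1) the extent starts at or past progress: just mark it excluded,
 *    the caching thread will skip it when it gets there.
 * 2) the extent ends at or before progress: it is already in the
 *    free space cache, so remove it directly.
 * 3) the extent straddles progress: remove the front part from the
 *    free space cache and mark the tail excluded.
 */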
5610
5611 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5612                                             struct btrfs_root *root,
5613                                             u64 bytenr, u32 blocksize,
5614                                             int level)
5615 {
5616         struct extent_buffer *buf;
5617
5618         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5619         if (!buf)
5620                 return ERR_PTR(-ENOMEM);
5621         btrfs_set_header_generation(buf, trans->transid);
5622         btrfs_set_buffer_lockdep_class(buf, level);
5623         btrfs_tree_lock(buf);
5624         clean_tree_block(trans, root, buf);
5625
5626         btrfs_set_lock_blocking(buf);
5627         btrfs_set_buffer_uptodate(buf);
5628
5629         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5630                 /*
5631                  * we allow two log transactions at a time, so use different
5632                  * EXTENT bits to differentiate dirty pages.
5633                  */
5634                 if (root->log_transid % 2 == 0)
5635                         set_extent_dirty(&root->dirty_log_pages, buf->start,
5636                                         buf->start + buf->len - 1, GFP_NOFS);
5637                 else
5638                         set_extent_new(&root->dirty_log_pages, buf->start,
5639                                         buf->start + buf->len - 1, GFP_NOFS);
5640         } else {
5641                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5642                          buf->start + buf->len - 1, GFP_NOFS);
5643         }
5644         trans->blocks_used++;
5645         /* this returns a buffer locked for blocking */
5646         return buf;
5647 }
5648
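/*
 * pick and charge a block reserve to back a new tree block.  the rsv
 * attached to this transaction/root is tried first; an empty rsv is
 * refilled on demand, and the global reserve is used as a last resort
 * before an ERR_PTR is returned.
 */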
5649 static struct btrfs_block_rsv *
5650 use_block_rsv(struct btrfs_trans_handle *trans,
5651               struct btrfs_root *root, u32 blocksize)
5652 {
5653         struct btrfs_block_rsv *block_rsv;
5654         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5655         int ret;
5656
5657         block_rsv = get_block_rsv(trans, root);
5658
5659         if (block_rsv->size == 0) {
5660                 ret = reserve_metadata_bytes(trans, root, block_rsv,
5661                                              blocksize, 0);
5662                 /*
5663                  * If we couldn't reserve metadata bytes try and use some from
5664                  * the global reserve.
5665                  */
5666                 if (ret && block_rsv != global_rsv) {
5667                         ret = block_rsv_use_bytes(global_rsv, blocksize);
5668                         if (!ret)
5669                                 return global_rsv;
5670                         return ERR_PTR(ret);
5671                 } else if (ret) {
5672                         return ERR_PTR(ret);
5673                 }
5674                 return block_rsv;
5675         }
5676
5677         ret = block_rsv_use_bytes(block_rsv, blocksize);
5678         if (!ret)
5679                 return block_rsv;
5680
5681         /* this rsv ran dry: refill it or fall back to the global rsv */
5682         WARN_ON(1);
5683         ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
5684                                      0);
5685         if (!ret) {
5686                 spin_lock(&block_rsv->lock);
5687                 block_rsv->size += blocksize;
5688                 spin_unlock(&block_rsv->lock);
5689                 return block_rsv;
5690         } else if (block_rsv != global_rsv) {
5691                 ret = block_rsv_use_bytes(global_rsv, blocksize);
5692                 if (!ret)
5693                         return global_rsv;
5694         }
5695
5696         return ERR_PTR(-ENOSPC);
5697 }
5698
5699 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5700 {
5701         block_rsv_add_bytes(block_rsv, blocksize, 0);
5702         block_rsv_release_bytes(block_rsv, NULL, 0);
5703 }
5704
5705 /*
5706  * finds a free extent and does all the dirty work required for allocation.
5707  * returns the key for the extent through ins, and a tree buffer for
5708  * the first block of the extent.
5709  *
5710  * returns the tree buffer or an ERR_PTR on failure.
5711  */
5712 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5713                                         struct btrfs_root *root, u32 blocksize,
5714                                         u64 parent, u64 root_objectid,
5715                                         struct btrfs_disk_key *key, int level,
5716                                         u64 hint, u64 empty_size)
5717 {
5718         struct btrfs_key ins;
5719         struct btrfs_block_rsv *block_rsv;
5720         struct extent_buffer *buf;
5721         u64 flags = 0;
5722         int ret;
5723
5725         block_rsv = use_block_rsv(trans, root, blocksize);
5726         if (IS_ERR(block_rsv))
5727                 return ERR_CAST(block_rsv);
5728
5729         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5730                                    empty_size, hint, (u64)-1, &ins, 0);
5731         if (ret) {
5732                 unuse_block_rsv(block_rsv, blocksize);
5733                 return ERR_PTR(ret);
5734         }
5735
5736         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5737                                     blocksize, level);
5738         BUG_ON(IS_ERR(buf));
5739
5740         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5741                 if (parent == 0)
5742                         parent = ins.objectid;
5743                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5744         } else
5745                 BUG_ON(parent > 0);
5746
5747         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5748                 struct btrfs_delayed_extent_op *extent_op;
5749                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5750                 BUG_ON(!extent_op);
5751                 if (key)
5752                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
5753                 else
5754                         memset(&extent_op->key, 0, sizeof(extent_op->key));
5755                 extent_op->flags_to_set = flags;
5756                 extent_op->update_key = 1;
5757                 extent_op->update_flags = 1;
5758                 extent_op->is_data = 0;
5759
5760                 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5761                                         ins.offset, parent, root_objectid,
5762                                         level, BTRFS_ADD_DELAYED_EXTENT,
5763                                         extent_op);
5764                 BUG_ON(ret);
5765         }
5766         return buf;
5767 }
5768
5769 struct walk_control {
5770         u64 refs[BTRFS_MAX_LEVEL];
5771         u64 flags[BTRFS_MAX_LEVEL];
5772         struct btrfs_key update_progress;
5773         int stage;
5774         int level;
5775         int shared_level;
5776         int update_ref;
5777         int keep_locks;
5778         int reada_slot;
5779         int reada_count;
5780 };
5781
5782 #define DROP_REFERENCE  1
5783 #define UPDATE_BACKREF  2
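
/*
 * in DROP_REFERENCE the walker frees blocks that are only referenced
 * by the tree being dropped.  when it finds a shared block whose
 * backrefs still need to be rewritten, it switches to UPDATE_BACKREF
 * for that subtree and falls back to DROP_REFERENCE afterwards (see
 * do_walk_down and walk_up_proc below).
 */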
5784
5785 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5786                                      struct btrfs_root *root,
5787                                      struct walk_control *wc,
5788                                      struct btrfs_path *path)
5789 {
5790         u64 bytenr;
5791         u64 generation;
5792         u64 refs;
5793         u64 flags;
5794         u32 nritems;
5795         u32 blocksize;
5796         struct btrfs_key key;
5797         struct extent_buffer *eb;
5798         int ret;
5799         int slot;
5800         int nread = 0;
5801
5802         if (path->slots[wc->level] < wc->reada_slot) {
5803                 wc->reada_count = wc->reada_count * 2 / 3;
5804                 wc->reada_count = max(wc->reada_count, 2);
5805         } else {
5806                 wc->reada_count = wc->reada_count * 3 / 2;
5807                 wc->reada_count = min_t(int, wc->reada_count,
5808                                         BTRFS_NODEPTRS_PER_BLOCK(root));
5809         }
5810
5811         eb = path->nodes[wc->level];
5812         nritems = btrfs_header_nritems(eb);
5813         blocksize = btrfs_level_size(root, wc->level - 1);
5814
5815         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5816                 if (nread >= wc->reada_count)
5817                         break;
5818
5819                 cond_resched();
5820                 bytenr = btrfs_node_blockptr(eb, slot);
5821                 generation = btrfs_node_ptr_generation(eb, slot);
5822
5823                 if (slot == path->slots[wc->level])
5824                         goto reada;
5825
5826                 if (wc->stage == UPDATE_BACKREF &&
5827                     generation <= root->root_key.offset)
5828                         continue;
5829
5830                 /* We don't lock the tree block, it's OK to be racy here */
5831                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5832                                                &refs, &flags);
5833                 BUG_ON(ret);
5834                 BUG_ON(refs == 0);
5835
5836                 if (wc->stage == DROP_REFERENCE) {
5837                         if (refs == 1)
5838                                 goto reada;
5839
5840                         if (wc->level == 1 &&
5841                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5842                                 continue;
5843                         if (!wc->update_ref ||
5844                             generation <= root->root_key.offset)
5845                                 continue;
5846                         btrfs_node_key_to_cpu(eb, &key, slot);
5847                         ret = btrfs_comp_cpu_keys(&key,
5848                                                   &wc->update_progress);
5849                         if (ret < 0)
5850                                 continue;
5851                 } else {
5852                         if (wc->level == 1 &&
5853                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5854                                 continue;
5855                 }
5856 reada:
5857                 ret = readahead_tree_block(root, bytenr, blocksize,
5858                                            generation);
5859                 if (ret)
5860                         break;
5861                 nread++;
5862         }
5863         wc->reada_slot = slot;
5864 }
5865
5866 /*
5867  * helper to process a tree block while walking down the tree.
5868  *
5869  * when wc->stage == UPDATE_BACKREF, this function updates
5870  * back refs for pointers in the block.
5871  *
5872  * NOTE: return value 1 means we should stop walking down.
5873  */
5874 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5875                                    struct btrfs_root *root,
5876                                    struct btrfs_path *path,
5877                                    struct walk_control *wc, int lookup_info)
5878 {
5879         int level = wc->level;
5880         struct extent_buffer *eb = path->nodes[level];
5881         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5882         int ret;
5883
5884         if (wc->stage == UPDATE_BACKREF &&
5885             btrfs_header_owner(eb) != root->root_key.objectid)
5886                 return 1;
5887
5888         /*
5889          * when the reference count of a tree block is 1, it won't increase
5890          * again. once the full backref flag is set, we never clear it.
5891          */
5892         if (lookup_info &&
5893             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5894              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5895                 BUG_ON(!path->locks[level]);
5896                 ret = btrfs_lookup_extent_info(trans, root,
5897                                                eb->start, eb->len,
5898                                                &wc->refs[level],
5899                                                &wc->flags[level]);
5900                 BUG_ON(ret);
5901                 BUG_ON(wc->refs[level] == 0);
5902         }
5903
5904         if (wc->stage == DROP_REFERENCE) {
5905                 if (wc->refs[level] > 1)
5906                         return 1;
5907
5908                 if (path->locks[level] && !wc->keep_locks) {
5909                         btrfs_tree_unlock(eb);
5910                         path->locks[level] = 0;
5911                 }
5912                 return 0;
5913         }
5914
5915         /* wc->stage == UPDATE_BACKREF */
5916         if (!(wc->flags[level] & flag)) {
5917                 BUG_ON(!path->locks[level]);
5918                 ret = btrfs_inc_ref(trans, root, eb, 1);
5919                 BUG_ON(ret);
5920                 ret = btrfs_dec_ref(trans, root, eb, 0);
5921                 BUG_ON(ret);
5922                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5923                                                   eb->len, flag, 0);
5924                 BUG_ON(ret);
5925                 wc->flags[level] |= flag;
5926         }
5927
5928         /*
5929          * the block is shared by multiple trees, so it's not good to
5930          * keep the tree lock
5931          */
5932         if (path->locks[level] && level > 0) {
5933                 btrfs_tree_unlock(eb);
5934                 path->locks[level] = 0;
5935         }
5936         return 0;
5937 }
5938
5939 /*
5940  * helper to process a tree block pointer.
5941  *
5942  * when wc->stage == DROP_REFERENCE, this function checks the
5943  * reference count of the block pointed to. if the block is
5944  * shared and we need to update back refs for the subtree
5945  * rooted at the block, this function changes wc->stage to
5946  * UPDATE_BACKREF. if the block is shared and there is no
5947  * need to update back refs, this function drops the reference
5948  * to the block.
5949  *
5950  * NOTE: return value 1 means we should stop walking down.
5951  */
5952 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5953                                  struct btrfs_root *root,
5954                                  struct btrfs_path *path,
5955                                  struct walk_control *wc, int *lookup_info)
5956 {
5957         u64 bytenr;
5958         u64 generation;
5959         u64 parent;
5960         u32 blocksize;
5961         struct btrfs_key key;
5962         struct extent_buffer *next;
5963         int level = wc->level;
5964         int reada = 0;
5965         int ret = 0;
5966
5967         generation = btrfs_node_ptr_generation(path->nodes[level],
5968                                                path->slots[level]);
5969         /*
5970          * if the lower level block was created before the snapshot
5971          * was created, we know there is no need to update back refs
5972          * for the subtree
5973          */
5974         if (wc->stage == UPDATE_BACKREF &&
5975             generation <= root->root_key.offset) {
5976                 *lookup_info = 1;
5977                 return 1;
5978         }
5979
5980         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5981         blocksize = btrfs_level_size(root, level - 1);
5982
5983         next = btrfs_find_tree_block(root, bytenr, blocksize);
5984         if (!next) {
5985                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5986                 if (!next)
5987                         return -ENOMEM;
5988                 reada = 1;
5989         }
5990         btrfs_tree_lock(next);
5991         btrfs_set_lock_blocking(next);
5992
5993         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5994                                        &wc->refs[level - 1],
5995                                        &wc->flags[level - 1]);
5996         BUG_ON(ret);
5997         BUG_ON(wc->refs[level - 1] == 0);
5998         *lookup_info = 0;
5999
6000         if (wc->stage == DROP_REFERENCE) {
6001                 if (wc->refs[level - 1] > 1) {
6002                         if (level == 1 &&
6003                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6004                                 goto skip;
6005
6006                         if (!wc->update_ref ||
6007                             generation <= root->root_key.offset)
6008                                 goto skip;
6009
6010                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6011                                               path->slots[level]);
6012                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6013                         if (ret < 0)
6014                                 goto skip;
6015
6016                         wc->stage = UPDATE_BACKREF;
6017                         wc->shared_level = level - 1;
6018                 }
6019         } else {
6020                 if (level == 1 &&
6021                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6022                         goto skip;
6023         }
6024
6025         if (!btrfs_buffer_uptodate(next, generation)) {
6026                 btrfs_tree_unlock(next);
6027                 free_extent_buffer(next);
6028                 next = NULL;
6029                 *lookup_info = 1;
6030         }
6031
6032         if (!next) {
6033                 if (reada && level == 1)
6034                         reada_walk_down(trans, root, wc, path);
6035                 next = read_tree_block(root, bytenr, blocksize, generation);
6036                 if (!next)
6037                         return -EIO;
6038                 btrfs_tree_lock(next);
6039                 btrfs_set_lock_blocking(next);
6040         }
6041
6042         level--;
6043         BUG_ON(level != btrfs_header_level(next));
6044         path->nodes[level] = next;
6045         path->slots[level] = 0;
6046         path->locks[level] = 1;
6047         wc->level = level;
6048         if (wc->level == 1)
6049                 wc->reada_slot = 0;
6050         return 0;
6051 skip:
6052         wc->refs[level - 1] = 0;
6053         wc->flags[level - 1] = 0;
6054         if (wc->stage == DROP_REFERENCE) {
6055                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6056                         parent = path->nodes[level]->start;
6057                 } else {
6058                         BUG_ON(root->root_key.objectid !=
6059                                btrfs_header_owner(path->nodes[level]));
6060                         parent = 0;
6061                 }
6062
6063                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6064                                         root->root_key.objectid, level - 1, 0);
6065                 BUG_ON(ret);
6066         }
6067         btrfs_tree_unlock(next);
6068         free_extent_buffer(next);
6069         *lookup_info = 1;
6070         return 1;
6071 }
6072
6073 /*
6074  * helper to process a tree block while walking up the tree.
6075  *
6076  * when wc->stage == DROP_REFERENCE, this function drops the
6077  * reference count on the block.
6078  *
6079  * when wc->stage == UPDATE_BACKREF, this function changes
6080  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6081  * to UPDATE_BACKREF previously while processing the block.
6082  *
6083  * NOTE: return value 1 means we should stop walking up.
6084  */
6085 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6086                                  struct btrfs_root *root,
6087                                  struct btrfs_path *path,
6088                                  struct walk_control *wc)
6089 {
6090         int ret;
6091         int level = wc->level;
6092         struct extent_buffer *eb = path->nodes[level];
6093         u64 parent = 0;
6094
6095         if (wc->stage == UPDATE_BACKREF) {
6096                 BUG_ON(wc->shared_level < level);
6097                 if (level < wc->shared_level)
6098                         goto out;
6099
6100                 ret = find_next_key(path, level + 1, &wc->update_progress);
6101                 if (ret > 0)
6102                         wc->update_ref = 0;
6103
6104                 wc->stage = DROP_REFERENCE;
6105                 wc->shared_level = -1;
6106                 path->slots[level] = 0;
6107
6108                 /*
6109                  * check reference count again if the block isn't locked.
6110                  * we should start walking down the tree again if reference
6111                  * count is one.
6112                  */
6113                 if (!path->locks[level]) {
6114                         BUG_ON(level == 0);
6115                         btrfs_tree_lock(eb);
6116                         btrfs_set_lock_blocking(eb);
6117                         path->locks[level] = 1;
6118
6119                         ret = btrfs_lookup_extent_info(trans, root,
6120                                                        eb->start, eb->len,
6121                                                        &wc->refs[level],
6122                                                        &wc->flags[level]);
6123                         BUG_ON(ret);
6124                         BUG_ON(wc->refs[level] == 0);
6125                         if (wc->refs[level] == 1) {
6126                                 btrfs_tree_unlock(eb);
6127                                 path->locks[level] = 0;
6128                                 return 1;
6129                         }
6130                 }
6131         }
6132
6133         /* wc->stage == DROP_REFERENCE */
6134         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6135
6136         if (wc->refs[level] == 1) {
6137                 if (level == 0) {
6138                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6139                                 ret = btrfs_dec_ref(trans, root, eb, 1);
6140                         else
6141                                 ret = btrfs_dec_ref(trans, root, eb, 0);
6142                         BUG_ON(ret);
6143                 }
6144                 /* make the block-locked assertion in clean_tree_block happy */
6145                 if (!path->locks[level] &&
6146                     btrfs_header_generation(eb) == trans->transid) {
6147                         btrfs_tree_lock(eb);
6148                         btrfs_set_lock_blocking(eb);
6149                         path->locks[level] = 1;
6150                 }
6151                 clean_tree_block(trans, root, eb);
6152         }
6153
6154         if (eb == root->node) {
6155                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6156                         parent = eb->start;
6157                 else
6158                         BUG_ON(root->root_key.objectid !=
6159                                btrfs_header_owner(eb));
6160         } else {
6161                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6162                         parent = path->nodes[level + 1]->start;
6163                 else
6164                         BUG_ON(root->root_key.objectid !=
6165                                btrfs_header_owner(path->nodes[level + 1]));
6166         }
6167
6168         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6169 out:
6170         wc->refs[level] = 0;
6171         wc->flags[level] = 0;
6172         return 0;
6173 }
6174
6175 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6176                                    struct btrfs_root *root,
6177                                    struct btrfs_path *path,
6178                                    struct walk_control *wc)
6179 {
6180         int level = wc->level;
6181         int lookup_info = 1;
6182         int ret;
6183
6184         while (level >= 0) {
6185                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6186                 if (ret > 0)
6187                         break;
6188
6189                 if (level == 0)
6190                         break;
6191
6192                 if (path->slots[level] >=
6193                     btrfs_header_nritems(path->nodes[level]))
6194                         break;
6195
6196                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6197                 if (ret > 0) {
6198                         path->slots[level]++;
6199                         continue;
6200                 } else if (ret < 0)
6201                         return ret;
6202                 level = wc->level;
6203         }
6204         return 0;
6205 }
6206
6207 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6208                                  struct btrfs_root *root,
6209                                  struct btrfs_path *path,
6210                                  struct walk_control *wc, int max_level)
6211 {
6212         int level = wc->level;
6213         int ret;
6214
6215         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6216         while (level < max_level && path->nodes[level]) {
6217                 wc->level = level;
6218                 if (path->slots[level] + 1 <
6219                     btrfs_header_nritems(path->nodes[level])) {
6220                         path->slots[level]++;
6221                         return 0;
6222                 } else {
6223                         ret = walk_up_proc(trans, root, path, wc);
6224                         if (ret > 0)
6225                                 return 0;
6226
6227                         if (path->locks[level]) {
6228                                 btrfs_tree_unlock(path->nodes[level]);
6229                                 path->locks[level] = 0;
6230                         }
6231                         free_extent_buffer(path->nodes[level]);
6232                         path->nodes[level] = NULL;
6233                         level++;
6234                 }
6235         }
6236         return 1;
6237 }
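
/*
 * walk_down_tree and walk_up_tree are driven as a pair: the down walk
 * descends until it reaches a leaf or a block it must not enter, the
 * up walk frees finished blocks and climbs until it finds the next
 * slot to descend into.  a return of 1 from walk_up_tree means the
 * whole tree has been processed (see the loop in btrfs_drop_snapshot
 * below).
 */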
6238
6239 /*
6240  * drop a subvolume tree.
6241  *
6242  * this function traverses the tree freeing any blocks that are only
6243  * referenced by the tree.
6244  *
6245  * when a shared tree block is found, this function decreases its
6246  * reference count by one. if update_ref is true, this function
6247  * also makes sure backrefs for the shared block and all lower level
6248  * blocks are properly updated.
6249  */
6250 int btrfs_drop_snapshot(struct btrfs_root *root,
6251                         struct btrfs_block_rsv *block_rsv, int update_ref)
6252 {
6253         struct btrfs_path *path;
6254         struct btrfs_trans_handle *trans;
6255         struct btrfs_root *tree_root = root->fs_info->tree_root;
6256         struct btrfs_root_item *root_item = &root->root_item;
6257         struct walk_control *wc;
6258         struct btrfs_key key;
6259         int err = 0;
6260         int ret;
6261         int level;
6262
6263         path = btrfs_alloc_path();
6264         if (!path)
6265                 return -ENOMEM;
6266
6267         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6268         if (!wc) {
6269                 btrfs_free_path(path);
6270                 return -ENOMEM;
6271         }
6268
6269         trans = btrfs_start_transaction(tree_root, 0);
6270         BUG_ON(IS_ERR(trans));
6271
6272         if (block_rsv)
6273                 trans->block_rsv = block_rsv;
6274
6275         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6276                 level = btrfs_header_level(root->node);
6277                 path->nodes[level] = btrfs_lock_root_node(root);
6278                 btrfs_set_lock_blocking(path->nodes[level]);
6279                 path->slots[level] = 0;
6280                 path->locks[level] = 1;
6281                 memset(&wc->update_progress, 0,
6282                        sizeof(wc->update_progress));
6283         } else {
6284                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6285                 memcpy(&wc->update_progress, &key,
6286                        sizeof(wc->update_progress));
6287
6288                 level = root_item->drop_level;
6289                 BUG_ON(level == 0);
6290                 path->lowest_level = level;
6291                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6292                 path->lowest_level = 0;
6293                 if (ret < 0) {
6294                         err = ret;
6295                         goto out;
6296                 }
6297                 WARN_ON(ret > 0);
6298
6299                 /*
6300                  * unlock our path, this is safe because only this
6301                  * function is allowed to delete this snapshot
6302                  */
6303                 btrfs_unlock_up_safe(path, 0);
6304
6305                 level = btrfs_header_level(root->node);
6306                 while (1) {
6307                         btrfs_tree_lock(path->nodes[level]);
6308                         btrfs_set_lock_blocking(path->nodes[level]);
6309
6310                         ret = btrfs_lookup_extent_info(trans, root,
6311                                                 path->nodes[level]->start,
6312                                                 path->nodes[level]->len,
6313                                                 &wc->refs[level],
6314                                                 &wc->flags[level]);
6315                         BUG_ON(ret);
6316                         BUG_ON(wc->refs[level] == 0);
6317
6318                         if (level == root_item->drop_level)
6319                                 break;
6320
6321                         btrfs_tree_unlock(path->nodes[level]);
6322                         WARN_ON(wc->refs[level] != 1);
6323                         level--;
6324                 }
6325         }
6326
6327         wc->level = level;
6328         wc->shared_level = -1;
6329         wc->stage = DROP_REFERENCE;
6330         wc->update_ref = update_ref;
6331         wc->keep_locks = 0;
6332         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6333
6334         while (1) {
6335                 ret = walk_down_tree(trans, root, path, wc);
6336                 if (ret < 0) {
6337                         err = ret;
6338                         break;
6339                 }
6340
6341                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6342                 if (ret < 0) {
6343                         err = ret;
6344                         break;
6345                 }
6346
6347                 if (ret > 0) {
6348                         BUG_ON(wc->stage != DROP_REFERENCE);
6349                         break;
6350                 }
6351
6352                 if (wc->stage == DROP_REFERENCE) {
6353                         level = wc->level;
6354                         btrfs_node_key(path->nodes[level],
6355                                        &root_item->drop_progress,
6356                                        path->slots[level]);
6357                         root_item->drop_level = level;
6358                 }
6359
6360                 BUG_ON(wc->level == 0);
6361                 if (btrfs_should_end_transaction(trans, tree_root)) {
6362                         ret = btrfs_update_root(trans, tree_root,
6363                                                 &root->root_key,
6364                                                 root_item);
6365                         BUG_ON(ret);
6366
6367                         btrfs_end_transaction_throttle(trans, tree_root);
6368                         trans = btrfs_start_transaction(tree_root, 0);
6369                         BUG_ON(IS_ERR(trans));
6370                         if (block_rsv)
6371                                 trans->block_rsv = block_rsv;
6372                 }
6373         }
6374         btrfs_release_path(path);
6375         BUG_ON(err);
6376
6377         ret = btrfs_del_root(trans, tree_root, &root->root_key);
6378         BUG_ON(ret);
6379
6380         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6381                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6382                                            NULL, NULL);
6383                 BUG_ON(ret < 0);
6384                 if (ret > 0) {
6385                         /* if we fail to delete the orphan item this time
6386                          * around, it'll get picked up the next time.
6387                          *
6388                          * The most common failure here is just -ENOENT.
6389                          */
6390                         btrfs_del_orphan_item(trans, tree_root,
6391                                               root->root_key.objectid);
6392                 }
6393         }
6394
6395         if (root->in_radix) {
6396                 btrfs_free_fs_root(tree_root->fs_info, root);
6397         } else {
6398                 free_extent_buffer(root->node);
6399                 free_extent_buffer(root->commit_root);
6400                 kfree(root);
6401         }
6402 out:
6403         btrfs_end_transaction_throttle(trans, tree_root);
6404         kfree(wc);
6405         btrfs_free_path(path);
6406         return err;
6407 }
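
/*
 * illustrative sketch (not compiled in): roughly how the cleaner kicks
 * off deletion of a dead snapshot root.  a NULL block_rsv makes the
 * function reserve space itself; update_ref is only set when backrefs
 * of shared blocks must be rewritten.  the helper name is made up for
 * the example.
 */
#if 0
static void example_drop_dead_root(struct btrfs_root *dead_root)
{
        int update_ref = 1;     /* rewrite backrefs on shared blocks */

        btrfs_drop_snapshot(dead_root, NULL, update_ref);
}
#endif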
6408
6409 /*
6410  * drop subtree rooted at tree block 'node'.
6411  *
6412  * NOTE: this function will unlock and release tree block 'node'
6413  */
6414 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6415                         struct btrfs_root *root,
6416                         struct extent_buffer *node,
6417                         struct extent_buffer *parent)
6418 {
6419         struct btrfs_path *path;
6420         struct walk_control *wc;
6421         int level;
6422         int parent_level;
6423         int ret = 0;
6424         int wret;
6425
6426         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6427
6428         path = btrfs_alloc_path();
6429         if (!path)
6430                 return -ENOMEM;
6431
6432         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6433         if (!wc) {
6434                 btrfs_free_path(path);
6435                 return -ENOMEM;
6436         }
6437
6438         btrfs_assert_tree_locked(parent);
6439         parent_level = btrfs_header_level(parent);
6440         extent_buffer_get(parent);
6441         path->nodes[parent_level] = parent;
6442         path->slots[parent_level] = btrfs_header_nritems(parent);
6443
6444         btrfs_assert_tree_locked(node);
6445         level = btrfs_header_level(node);
6446         path->nodes[level] = node;
6447         path->slots[level] = 0;
6448         path->locks[level] = 1;
6449
6450         wc->refs[parent_level] = 1;
6451         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6452         wc->level = level;
6453         wc->shared_level = -1;
6454         wc->stage = DROP_REFERENCE;
6455         wc->update_ref = 0;
6456         wc->keep_locks = 1;
6457         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6458
6459         while (1) {
6460                 wret = walk_down_tree(trans, root, path, wc);
6461                 if (wret < 0) {
6462                         ret = wret;
6463                         break;
6464                 }
6465
6466                 wret = walk_up_tree(trans, root, path, wc, parent_level);
6467                 if (wret < 0)
6468                         ret = wret;
6469                 if (wret != 0)
6470                         break;
6471         }
6472
6473         kfree(wc);
6474         btrfs_free_path(path);
6475         return ret;
6476 }
6477
6478 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6479 {
6480         u64 num_devices;
6481         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6482                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6483
6484         /*
6485          * we add in the count of missing devices because we want
6486          * to make sure that any RAID levels on a degraded FS
6487          * continue to be honored.
6488          */
6489         num_devices = root->fs_info->fs_devices->rw_devices +
6490                 root->fs_info->fs_devices->missing_devices;
6491
6492         if (num_devices == 1) {
6493                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6494                 stripped = flags & ~stripped;
6495
6496                 /* turn raid0 into single device chunks */
6497                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6498                         return stripped;
6499
6500                 /* turn mirroring into duplication */
6501                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6502                              BTRFS_BLOCK_GROUP_RAID10))
6503                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6504                 return flags;
6505         } else {
6506                 /* they already had raid on here, just return */
6507                 if (flags & stripped)
6508                         return flags;
6509
6510                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6511                 stripped = flags & ~stripped;
6512
6513                 /* switch duplicated blocks with raid1 */
6514                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6515                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6516
6517                 /* turn single device chunks into raid0 */
6518                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6519         }
6520         return flags;
6521 }
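
/*
 * examples of the mapping above:
 *
 *   num_devices == 1:  RAID0        -> single
 *                      RAID1/RAID10 -> DUP
 *   num_devices  > 1:  DUP          -> RAID1
 *                      single       -> RAID0
 *                      existing RAID profiles are returned unchanged
 */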
6522
6523 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
6524 {
6525         struct btrfs_space_info *sinfo = cache->space_info;
6526         u64 num_bytes;
6527         int ret = -ENOSPC;
6528
6529         if (cache->ro)
6530                 return 0;
6531
6532         spin_lock(&sinfo->lock);
6533         spin_lock(&cache->lock);
6534         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6535                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6536
6537         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
6538             sinfo->bytes_may_use + sinfo->bytes_readonly +
6539             cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
6540                 sinfo->bytes_readonly += num_bytes;
6541                 sinfo->bytes_reserved += cache->reserved_pinned;
6542                 cache->reserved_pinned = 0;
6543                 cache->ro = 1;
6544                 ret = 0;
6545         }
6546
6547         spin_unlock(&cache->lock);
6548         spin_unlock(&sinfo->lock);
6549         return ret;
6550 }
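
/*
 * in other words, the group may go read-only only if the space info
 * can absorb its unused bytes:
 *
 *   used + reserved + pinned + may_use + readonly +
 *   reserved_pinned + num_bytes <= total_bytes
 *
 * where num_bytes is the part of this group that is not used, pinned
 * or reserved.
 */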
6551
6552 int btrfs_set_block_group_ro(struct btrfs_root *root,
6553                              struct btrfs_block_group_cache *cache)
6554 {
6556         struct btrfs_trans_handle *trans;
6557         u64 alloc_flags;
6558         int ret;
6559
6560         BUG_ON(cache->ro);
6561
6562         trans = btrfs_join_transaction(root);
6563         BUG_ON(IS_ERR(trans));
6564
6565         alloc_flags = update_block_group_flags(root, cache->flags);
6566         if (alloc_flags != cache->flags)
6567                 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6568                                CHUNK_ALLOC_FORCE);
6569
6570         ret = set_block_group_ro(cache);
6571         if (!ret)
6572                 goto out;
6573         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
6574         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6575                              CHUNK_ALLOC_FORCE);
6576         if (ret < 0)
6577                 goto out;
6578         ret = set_block_group_ro(cache);
6579 out:
6580         btrfs_end_transaction(trans, root);
6581         return ret;
6582 }
6583
6584 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
6585                             struct btrfs_root *root, u64 type)
6586 {
6587         u64 alloc_flags = get_alloc_profile(root, type);
6588         return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6589                               CHUNK_ALLOC_FORCE);
6590 }
6591
6592 /*
6593  * helper to account the unused space of all the readonly block groups in the
6594  * list. takes mirrors into account.
6595  */
6596 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
6597 {
6598         struct btrfs_block_group_cache *block_group;
6599         u64 free_bytes = 0;
6600         int factor;
6601
6602         list_for_each_entry(block_group, groups_list, list) {
6603                 spin_lock(&block_group->lock);
6604
6605                 if (!block_group->ro) {
6606                         spin_unlock(&block_group->lock);
6607                         continue;
6608                 }
6609
6610                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
6611                                           BTRFS_BLOCK_GROUP_RAID10 |
6612                                           BTRFS_BLOCK_GROUP_DUP))
6613                         factor = 2;
6614                 else
6615                         factor = 1;
6616
6617                 free_bytes += (block_group->key.offset -
6618                                btrfs_block_group_used(&block_group->item)) *
6619                                factor;
6620
6621                 spin_unlock(&block_group->lock);
6622         }
6623
6624         return free_bytes;
6625 }
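
/*
 * example: a read-only 1G RAID1 block group with 200M used spans 2G
 * of raw disk, so the helper above counts (1G - 200M) * 2 = 1.6G of
 * unused raw space for it.
 */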
6626
6627 /*
6628  * helper to account the unused space of all the readonly block groups in the
6629  * space_info. takes mirrors into account.
6630  */
6631 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
6632 {
6633         int i;
6634         u64 free_bytes = 0;
6635
6636         spin_lock(&sinfo->lock);
6637
6638         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
6639                 if (!list_empty(&sinfo->block_groups[i]))
6640                         free_bytes += __btrfs_get_ro_block_group_free_space(
6641                                                 &sinfo->block_groups[i]);
6642
6643         spin_unlock(&sinfo->lock);
6644
6645         return free_bytes;
6646 }
6647
6648 int btrfs_set_block_group_rw(struct btrfs_root *root,
6649                               struct btrfs_block_group_cache *cache)
6650 {
6651         struct btrfs_space_info *sinfo = cache->space_info;
6652         u64 num_bytes;
6653
6654         BUG_ON(!cache->ro);
6655
6656         spin_lock(&sinfo->lock);
6657         spin_lock(&cache->lock);
6658         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6659                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6660         sinfo->bytes_readonly -= num_bytes;
6661         cache->ro = 0;
6662         spin_unlock(&cache->lock);
6663         spin_unlock(&sinfo->lock);
6664         return 0;
6665 }
6666
6667 /*
6668  * checks to see if it's even possible to relocate this block group.
6669  *
6670  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
6671  * ok to go ahead and try.
6672  */
6673 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6674 {
6675         struct btrfs_block_group_cache *block_group;
6676         struct btrfs_space_info *space_info;
6677         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6678         struct btrfs_device *device;
6679         int full = 0;
6680         int ret = 0;
6681
6682         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
6683
6684         /* odd, couldn't find the block group, leave it alone */
6685         if (!block_group)
6686                 return -1;
6687
6688         /* no bytes used, we're good */
6689         if (!btrfs_block_group_used(&block_group->item))
6690                 goto out;
6691
6692         space_info = block_group->space_info;
6693         spin_lock(&space_info->lock);
6694
6695         full = space_info->full;
6696
6697         /*
6698          * if this is the last block group we have in this space, we can't
6699          * relocate it unless we're able to allocate a new chunk below.
6700          *
6701          * Otherwise, we need to make sure we have room in the space to handle
6702          * all of the extents from this block group.  If we can, we're good.
6703          */
6704         if ((space_info->total_bytes != block_group->key.offset) &&
6705            (space_info->bytes_used + space_info->bytes_reserved +
6706             space_info->bytes_pinned + space_info->bytes_readonly +
6707             btrfs_block_group_used(&block_group->item) <
6708             space_info->total_bytes)) {
6709                 spin_unlock(&space_info->lock);
6710                 goto out;
6711         }
6712         spin_unlock(&space_info->lock);
6713
6714         /*
6715          * ok we don't have enough space, but maybe we have free space on our
6716          * devices to allocate new chunks for relocation, so loop through our
6717          * alloc devices and guess if we have enough space.  However, if we
6718          * were marked as full, then we know there aren't enough chunks, and we
6719          * can just return.
6720          */
6721         ret = -1;
6722         if (full)
6723                 goto out;
6724
6725         mutex_lock(&root->fs_info->chunk_mutex);
6726         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6727                 u64 min_free = btrfs_block_group_used(&block_group->item);
6728                 u64 dev_offset;
6729
6730                 /*
6731                  * check to make sure we can actually find a chunk with enough
6732                  * space to fit our block group in.
6733                  */
6734                 if (device->total_bytes > device->bytes_used + min_free) {
6735                         ret = find_free_dev_extent(NULL, device, min_free,
6736                                                    &dev_offset, NULL);
6737                         if (!ret)
6738                                 break;
6739                         ret = -1;
6740                 }
6741         }
6742         mutex_unlock(&root->fs_info->chunk_mutex);
6743 out:
6744         btrfs_put_block_group(block_group);
6745         return ret;
6746 }
6747
6748 static int find_first_block_group(struct btrfs_root *root,
6749                 struct btrfs_path *path, struct btrfs_key *key)
6750 {
6751         int ret = 0;
6752         struct btrfs_key found_key;
6753         struct extent_buffer *leaf;
6754         int slot;
6755
6756         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6757         if (ret < 0)
6758                 goto out;
6759
6760         while (1) {
6761                 slot = path->slots[0];
6762                 leaf = path->nodes[0];
6763                 if (slot >= btrfs_header_nritems(leaf)) {
6764                         ret = btrfs_next_leaf(root, path);
6765                         if (ret == 0)
6766                                 continue;
6767                         if (ret < 0)
6768                                 goto out;
6769                         break;
6770                 }
6771                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6772
6773                 if (found_key.objectid >= key->objectid &&
6774                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6775                         ret = 0;
6776                         goto out;
6777                 }
6778                 path->slots[0]++;
6779         }
6780 out:
6781         return ret;
6782 }
6783
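/*
 * Release the free space cache inode reference (iref) held by every
 * block group, dropping the corresponding inode with iput().
 */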
6784 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
6785 {
6786         struct btrfs_block_group_cache *block_group;
6787         u64 last = 0;
6788
6789         while (1) {
6790                 struct inode *inode;
6791
6792                 block_group = btrfs_lookup_first_block_group(info, last);
6793                 while (block_group) {
6794                         spin_lock(&block_group->lock);
6795                         if (block_group->iref)
6796                                 break;
6797                         spin_unlock(&block_group->lock);
6798                         block_group = next_block_group(info->tree_root,
6799                                                        block_group);
6800                 }
6801                 if (!block_group) {
6802                         if (last == 0)
6803                                 break;
6804                         last = 0;
6805                         continue;
6806                 }
6807
6808                 inode = block_group->inode;
6809                 block_group->iref = 0;
6810                 block_group->inode = NULL;
6811                 spin_unlock(&block_group->lock);
6812                 iput(inode);
6813                 last = block_group->key.objectid + block_group->key.offset;
6814                 btrfs_put_block_group(block_group);
6815         }
6816 }
6817
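/*
 * Tear down every block group cache and space_info struct.  This only
 * runs in the final stages of unmount, so nothing else can race with
 * us here.
 */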
6818 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6819 {
6820         struct btrfs_block_group_cache *block_group;
6821         struct btrfs_space_info *space_info;
6822         struct btrfs_caching_control *caching_ctl;
6823         struct rb_node *n;
6824
6825         down_write(&info->extent_commit_sem);
6826         while (!list_empty(&info->caching_block_groups)) {
6827                 caching_ctl = list_entry(info->caching_block_groups.next,
6828                                          struct btrfs_caching_control, list);
6829                 list_del(&caching_ctl->list);
6830                 put_caching_control(caching_ctl);
6831         }
6832         up_write(&info->extent_commit_sem);
6833
6834         spin_lock(&info->block_group_cache_lock);
6835         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6836                 block_group = rb_entry(n, struct btrfs_block_group_cache,
6837                                        cache_node);
6838                 rb_erase(&block_group->cache_node,
6839                          &info->block_group_cache_tree);
6840                 spin_unlock(&info->block_group_cache_lock);
6841
6842                 down_write(&block_group->space_info->groups_sem);
6843                 list_del(&block_group->list);
6844                 up_write(&block_group->space_info->groups_sem);
6845
6846                 if (block_group->cached == BTRFS_CACHE_STARTED)
6847                         wait_block_group_cache_done(block_group);
6848
6849                 /*
6850                  * We haven't cached this block group, which means we may
6851                  * still have excluded extents on this block group.
6852                  */
6853                 if (block_group->cached == BTRFS_CACHE_NO)
6854                         free_excluded_extents(info->extent_root, block_group);
6855
6856                 btrfs_remove_free_space_cache(block_group);
6857                 btrfs_put_block_group(block_group);
6858
6859                 spin_lock(&info->block_group_cache_lock);
6860         }
6861         spin_unlock(&info->block_group_cache_lock);
6862
6863         /* now that all the block groups are freed, go through and
6864          * free all the space_info structs.  This is only called during
6865          * the final stages of unmount, and so we know nobody is
6866          * using them.  We call synchronize_rcu() once before we start,
6867          * just to be on the safe side.
6868          */
6869         synchronize_rcu();
6870
6871         release_global_block_rsv(info);
6872
6873         while (!list_empty(&info->space_info)) {
6874                 space_info = list_entry(info->space_info.next,
6875                                         struct btrfs_space_info,
6876                                         list);
6877                 if (space_info->bytes_pinned > 0 ||
6878                     space_info->bytes_reserved > 0) {
6879                         WARN_ON(1);
6880                         dump_space_info(space_info, 0, 0);
6881                 }
6882                 list_del(&space_info->list);
6883                 kfree(space_info);
6884         }
6885         return 0;
6886 }
6887
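/*
 * Add @cache to the list for its allocation profile inside
 * @space_info, under groups_sem.
 */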
6888 static void __link_block_group(struct btrfs_space_info *space_info,
6889                                struct btrfs_block_group_cache *cache)
6890 {
6891         int index = get_block_group_index(cache);
6892
6893         down_write(&space_info->groups_sem);
6894         list_add_tail(&cache->list, &space_info->block_groups[index]);
6895         up_write(&space_info->groups_sem);
6896 }
6897
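/*
 * Read all of the block group items out of the extent tree at mount
 * time, build the in-memory caches and attach them to their
 * space_info.  Completely full and completely empty block groups are
 * marked cached right away so the caching threads can skip them.
 */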
6898 int btrfs_read_block_groups(struct btrfs_root *root)
6899 {
6900         struct btrfs_path *path;
6901         int ret;
6902         struct btrfs_block_group_cache *cache;
6903         struct btrfs_fs_info *info = root->fs_info;
6904         struct btrfs_space_info *space_info;
6905         struct btrfs_key key;
6906         struct btrfs_key found_key;
6907         struct extent_buffer *leaf;
6908         int need_clear = 0;
6909         u64 cache_gen;
6910
6911         root = info->extent_root;
6912         key.objectid = 0;
6913         key.offset = 0;
6914         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
6915         path = btrfs_alloc_path();
6916         if (!path)
6917                 return -ENOMEM;
6918         path->reada = 1;
6919
6920         cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
6921         if (cache_gen != 0 &&
6922             btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
6923                 need_clear = 1;
6924         if (btrfs_test_opt(root, CLEAR_CACHE))
6925                 need_clear = 1;
6926         if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
6927                 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
6928
6929         while (1) {
6930                 ret = find_first_block_group(root, path, &key);
6931                 if (ret > 0)
6932                         break;
6933                 if (ret != 0)
6934                         goto error;
6935                 leaf = path->nodes[0];
6936                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6937                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
6938                 if (!cache) {
6939                         ret = -ENOMEM;
6940                         goto error;
6941                 }
6942                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
6943                                                 GFP_NOFS);
6944                 if (!cache->free_space_ctl) {
6945                         kfree(cache);
6946                         ret = -ENOMEM;
6947                         goto error;
6948                 }
6949
6950                 atomic_set(&cache->count, 1);
6951                 spin_lock_init(&cache->lock);
6952                 cache->fs_info = info;
6953                 INIT_LIST_HEAD(&cache->list);
6954                 INIT_LIST_HEAD(&cache->cluster_list);
6955
6956                 if (need_clear)
6957                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6958
6959                 read_extent_buffer(leaf, &cache->item,
6960                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
6961                                    sizeof(cache->item));
6962                 memcpy(&cache->key, &found_key, sizeof(found_key));
6963
6964                 key.objectid = found_key.objectid + found_key.offset;
6965                 btrfs_release_path(path);
6966                 cache->flags = btrfs_block_group_flags(&cache->item);
6967                 cache->sectorsize = root->sectorsize;
6968
6969                 btrfs_init_free_space_ctl(cache);
6970
6971                 /*
6972                  * We need to exclude the super stripes now so that the space
6973                  * info has super bytes accounted for, otherwise we'll think
6974                  * we have more space than we actually do.
6975                  */
6976                 exclude_super_stripes(root, cache);
6977
6978                 /*
6979                  * Check for two cases: either we are full, and therefore
6980                  * don't need to bother with the caching work since we won't
6981                  * find any space; or we are empty, and we can just add all
6982                  * the space in and be done with it.  This saves us a lot of
6983                  * time, particularly in the full case.
6984                  */
6985                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
6986                         cache->last_byte_to_unpin = (u64)-1;
6987                         cache->cached = BTRFS_CACHE_FINISHED;
6988                         free_excluded_extents(root, cache);
6989                 } else if (btrfs_block_group_used(&cache->item) == 0) {
6990                         cache->last_byte_to_unpin = (u64)-1;
6991                         cache->cached = BTRFS_CACHE_FINISHED;
6992                         add_new_free_space(cache, root->fs_info,
6993                                            found_key.objectid,
6994                                            found_key.objectid +
6995                                            found_key.offset);
6996                         free_excluded_extents(root, cache);
6997                 }
6998
6999                 ret = update_space_info(info, cache->flags, found_key.offset,
7000                                         btrfs_block_group_used(&cache->item),
7001                                         &space_info);
7002                 BUG_ON(ret);
7003                 cache->space_info = space_info;
7004                 spin_lock(&cache->space_info->lock);
7005                 cache->space_info->bytes_readonly += cache->bytes_super;
7006                 spin_unlock(&cache->space_info->lock);
7007
7008                 __link_block_group(space_info, cache);
7009
7010                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7011                 BUG_ON(ret);
7012
7013                 set_avail_alloc_bits(root->fs_info, cache->flags);
7014                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7015                         set_block_group_ro(cache);
7016         }
7017
7018         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7019                 if (!(get_alloc_profile(root, space_info->flags) &
7020                       (BTRFS_BLOCK_GROUP_RAID10 |
7021                        BTRFS_BLOCK_GROUP_RAID1 |
7022                        BTRFS_BLOCK_GROUP_DUP)))
7023                         continue;
7024                 /*
7025                  * avoid allocating from un-mirrored block group if there are
7026                  * mirrored block groups.
7027                  */
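                     /* indexes 3 and 4 hold the un-mirrored profiles */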
7028                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7029                         set_block_group_ro(cache);
7030                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7031                         set_block_group_ro(cache);
7032         }
7033
7034         init_global_block_rsv(info);
7035         ret = 0;
7036 error:
7037         btrfs_free_path(path);
7038         return ret;
7039 }
7040
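/*
 * Create the in-memory cache for the new chunk at @chunk_offset and
 * insert the matching block group item into the extent tree.
 */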
7041 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7042                            struct btrfs_root *root, u64 bytes_used,
7043                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7044                            u64 size)
7045 {
7046         int ret;
7047         struct btrfs_root *extent_root;
7048         struct btrfs_block_group_cache *cache;
7049
7050         extent_root = root->fs_info->extent_root;
7051
7052         root->fs_info->last_trans_log_full_commit = trans->transid;
7053
7054         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7055         if (!cache)
7056                 return -ENOMEM;
7057         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7058                                         GFP_NOFS);
7059         if (!cache->free_space_ctl) {
7060                 kfree(cache);
7061                 return -ENOMEM;
7062         }
7063
7064         cache->key.objectid = chunk_offset;
7065         cache->key.offset = size;
7066         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7067         cache->sectorsize = root->sectorsize;
7068         cache->fs_info = root->fs_info;
7069
7070         atomic_set(&cache->count, 1);
7071         spin_lock_init(&cache->lock);
7072         INIT_LIST_HEAD(&cache->list);
7073         INIT_LIST_HEAD(&cache->cluster_list);
7074
7075         btrfs_init_free_space_ctl(cache);
7076
7077         btrfs_set_block_group_used(&cache->item, bytes_used);
7078         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7079         cache->flags = type;
7080         btrfs_set_block_group_flags(&cache->item, type);
7081
7082         cache->last_byte_to_unpin = (u64)-1;
7083         cache->cached = BTRFS_CACHE_FINISHED;
7084         exclude_super_stripes(root, cache);
7085
7086         add_new_free_space(cache, root->fs_info, chunk_offset,
7087                            chunk_offset + size);
7088
7089         free_excluded_extents(root, cache);
7090
7091         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7092                                 &cache->space_info);
7093         BUG_ON(ret);
7094
7095         spin_lock(&cache->space_info->lock);
7096         cache->space_info->bytes_readonly += cache->bytes_super;
7097         spin_unlock(&cache->space_info->lock);
7098
7099         __link_block_group(cache->space_info, cache);
7100
7101         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7102         BUG_ON(ret);
7103
7104         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7105                                 sizeof(cache->item));
7106         BUG_ON(ret);
7107
7108         set_avail_alloc_bits(extent_root->fs_info, type);
7109
7110         return 0;
7111 }
7112
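/*
 * Remove the read-only block group starting at @group_start: drop its
 * free space cache inode, unlink it from its space_info, and delete
 * both its free space item and its block group item from the trees.
 */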
7113 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7114                              struct btrfs_root *root, u64 group_start)
7115 {
7116         struct btrfs_path *path;
7117         struct btrfs_block_group_cache *block_group;
7118         struct btrfs_free_cluster *cluster;
7119         struct btrfs_root *tree_root = root->fs_info->tree_root;
7120         struct btrfs_key key;
7121         struct inode *inode;
7122         int ret;
7123         int factor;
7124
7125         root = root->fs_info->extent_root;
7126
7127         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7128         BUG_ON(!block_group);
7129         BUG_ON(!block_group->ro);
7130
7131         /*
7132          * Free the reserved super bytes from this block group before
7133          * removing it.
7134          */
7135         free_excluded_extents(root, block_group);
7136
7137         memcpy(&key, &block_group->key, sizeof(key));
7138         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7139                                   BTRFS_BLOCK_GROUP_RAID1 |
7140                                   BTRFS_BLOCK_GROUP_RAID10))
7141                 factor = 2;
7142         else
7143                 factor = 1;
7144
7145         /* make sure this block group isn't part of an allocation cluster */
7146         cluster = &root->fs_info->data_alloc_cluster;
7147         spin_lock(&cluster->refill_lock);
7148         btrfs_return_cluster_to_free_space(block_group, cluster);
7149         spin_unlock(&cluster->refill_lock);
7150
7151         /*
7152          * make sure this block group isn't part of a metadata
7153          * allocation cluster
7154          */
7155         cluster = &root->fs_info->meta_alloc_cluster;
7156         spin_lock(&cluster->refill_lock);
7157         btrfs_return_cluster_to_free_space(block_group, cluster);
7158         spin_unlock(&cluster->refill_lock);
7159
7160         path = btrfs_alloc_path();
7161         if (!path) {
                     btrfs_put_block_group(block_group);
                     return -ENOMEM;
             }
7162
7163         inode = lookup_free_space_inode(root, block_group, path);
7164         if (!IS_ERR(inode)) {
7165                 btrfs_orphan_add(trans, inode);
7166                 clear_nlink(inode);
7167                 /* One for the block group's ref */
7168                 spin_lock(&block_group->lock);
7169                 if (block_group->iref) {
7170                         block_group->iref = 0;
7171                         block_group->inode = NULL;
7172                         spin_unlock(&block_group->lock);
7173                         iput(inode);
7174                 } else {
7175                         spin_unlock(&block_group->lock);
7176                 }
7177                 /* One for our lookup ref */
7178                 iput(inode);
7179         }
7180
7181         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7182         key.offset = block_group->key.objectid;
7183         key.type = 0;
7184
7185         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7186         if (ret < 0)
7187                 goto out;
7188         if (ret > 0)
7189                 btrfs_release_path(path);
7190         if (ret == 0) {
7191                 ret = btrfs_del_item(trans, tree_root, path);
7192                 if (ret)
7193                         goto out;
7194                 btrfs_release_path(path);
7195         }
7196
7197         spin_lock(&root->fs_info->block_group_cache_lock);
7198         rb_erase(&block_group->cache_node,
7199                  &root->fs_info->block_group_cache_tree);
7200         spin_unlock(&root->fs_info->block_group_cache_lock);
7201
7202         down_write(&block_group->space_info->groups_sem);
7203         /*
7204          * we must use list_del_init so people can check to see if they
7205          * are still on the list after taking the semaphore
7206          */
7207         list_del_init(&block_group->list);
7208         up_write(&block_group->space_info->groups_sem);
7209
7210         if (block_group->cached == BTRFS_CACHE_STARTED)
7211                 wait_block_group_cache_done(block_group);
7212
7213         btrfs_remove_free_space_cache(block_group);
7214
7215         spin_lock(&block_group->space_info->lock);
7216         block_group->space_info->total_bytes -= block_group->key.offset;
7217         block_group->space_info->bytes_readonly -= block_group->key.offset;
7218         block_group->space_info->disk_total -= block_group->key.offset * factor;
7219         spin_unlock(&block_group->space_info->lock);
7220
7221         memcpy(&key, &block_group->key, sizeof(key));
7222
7223         btrfs_clear_space_info_full(root->fs_info);
7224
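             /* once for our lookup ref, once for the cache tree's ref */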
7225         btrfs_put_block_group(block_group);
7226         btrfs_put_block_group(block_group);
7227
7228         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7229         if (ret > 0)
7230                 ret = -EIO;
7231         if (ret < 0)
7232                 goto out;
7233
7234         ret = btrfs_del_item(trans, root, path);
7235 out:
7236         btrfs_free_path(path);
7237         return ret;
7238 }
7239
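/*
 * Make sure the base space_info structs exist for the system chunk
 * and for data/metadata (a single mixed one when the MIXED_GROUPS
 * incompat flag is set).
 */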
7240 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7241 {
7242         struct btrfs_space_info *space_info;
7243         struct btrfs_super_block *disk_super;
7244         u64 features;
7245         u64 flags;
7246         int mixed = 0;
7247         int ret;
7248
7249         disk_super = &fs_info->super_copy;
7250         if (!btrfs_super_root(disk_super))
7251                 return 1;
7252
7253         features = btrfs_super_incompat_flags(disk_super);
7254         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7255                 mixed = 1;
7256
7257         flags = BTRFS_BLOCK_GROUP_SYSTEM;
7258         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7259         if (ret)
7260                 goto out;
7261
7262         if (mixed) {
7263                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7264                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7265         } else {
7266                 flags = BTRFS_BLOCK_GROUP_METADATA;
7267                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7268                 if (ret)
7269                         goto out;
7270
7271                 flags = BTRFS_BLOCK_GROUP_DATA;
7272                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7273         }
7274 out:
7275         return ret;
7276 }
7277
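/*
 * The btrfs_error_* helpers below are thin wrappers around
 * unpin_extent_range() and btrfs_discard_extent(), exported for the
 * error-handling paths.
 */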
7278 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7279 {
7280         return unpin_extent_range(root, start, end);
7281 }
7282
7283 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7284                                u64 num_bytes, u64 *actual_bytes)
7285 {
7286         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7287 }
7288
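/*
 * Trim (discard) the free space in every block group overlapping
 * [range->start, range->start + range->len), honouring range->minlen.
 * The total number of bytes actually trimmed is returned in
 * range->len.
 */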
7289 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7290 {
7291         struct btrfs_fs_info *fs_info = root->fs_info;
7292         struct btrfs_block_group_cache *cache = NULL;
7293         u64 group_trimmed;
7294         u64 start;
7295         u64 end;
7296         u64 trimmed = 0;
7297         int ret = 0;
7298
7299         cache = btrfs_lookup_block_group(fs_info, range->start);
7300
7301         while (cache) {
7302                 if (cache->key.objectid >= (range->start + range->len)) {
7303                         btrfs_put_block_group(cache);
7304                         break;
7305                 }
7306
7307                 start = max(range->start, cache->key.objectid);
7308                 end = min(range->start + range->len,
7309                                 cache->key.objectid + cache->key.offset);
7310
7311                 if (end - start >= range->minlen) {
7312                         if (!block_group_cache_done(cache)) {
7313                                 ret = cache_block_group(cache, NULL, root, 0);
7314                                 if (!ret)
7315                                         wait_block_group_cache_done(cache);
7316                         }
7317                         ret = btrfs_trim_block_group(cache,
7318                                                      &group_trimmed,
7319                                                      start,
7320                                                      end,
7321                                                      range->minlen);
7322
7323                         trimmed += group_trimmed;
7324                         if (ret) {
7325                                 btrfs_put_block_group(cache);
7326                                 break;
7327                         }
7328                 }
7329
7330                 cache = next_block_group(fs_info->tree_root, cache);
7331         }
7332
7333         range->len = trimmed;
7334         return ret;
7335 }