Btrfs: fix space leak when we fail to make an allocation
fs/btrfs/extent-tree.c (pandora-kernel.git)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_FORCE = 1,
        CHUNK_ALLOC_LIMITED = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
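
/*
 * Illustrative sketch (not part of the original file): a caller that
 * reserves space for an allocation and gives it back when the allocation
 * fails would pair the modes like so, using btrfs_update_reserved_bytes()
 * as declared below.  do_the_allocation() is a hypothetical stand-in for
 * the real allocation work:
 *
 *     btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *     ret = do_the_allocation();
 *     if (ret)
 *             btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */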

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
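
/*
 * Usage sketch (for illustration only): every reference taken with
 * btrfs_get_block_group(), directly or via a lookup helper that takes one
 * on the caller's behalf, must be balanced with btrfs_put_block_group():
 *
 *     cache = btrfs_lookup_block_group(fs_info, bytenr); // grabs a ref
 *     if (cache) {
 *             ... use cache ...
 *             btrfs_put_block_group(cache);
 *     }
 */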

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}
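
/*
 * Reader's note (not in the original source): btrfs keeps
 * BTRFS_SUPER_MIRROR_MAX copies of the superblock at fixed logical
 * offsets per device; btrfs_sb_offset(i) returns the i-th offset (64KiB
 * for the primary copy, with the mirrors at much larger fixed offsets).
 * exclude_super_stripes() walks those copies so the space they occupy is
 * never handed out by the allocator.
 */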

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by the block group caching code.  Since we could
 * have freed extents, we need to check pinned_extents for any extents
 * that can't be used yet, because their free space will be released only
 * when the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}
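
/*
 * Worked example (illustrative): caching the range [0, 100) while the
 * pinned_extents tree still holds [40, 60) adds [0, 40) and [60, 100) to
 * the free-space cache and returns 80 as total_added; the pinned middle
 * chunk becomes free space only once the transaction commits.
 */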

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

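/*
 * Start loading a block group's free space.  Try the fast path first:
 * read the persisted free-space cache off disk.  When that isn't safe
 * (during a commit, or while allocating for the tree root) or the cache
 * is unusable, queue caching_thread() to rebuild the free space from the
 * extent tree.  With load_cache_only set, only the fast path is
 * attempted.
 */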
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root)) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
                        return 0;
                }
                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);

                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
                if (ret == 1) {
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        }

        if (load_cache_only)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
                 BTRFS_BLOCK_GROUP_METADATA;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}
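
/*
 * Example (illustrative): div_factor(1000, 9) == 900 and
 * div_factor_fine(1000, 95) == 950, i.e. the factor is expressed in
 * tenths or hundredths of the input, respectively.
 */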

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/*
 * simple helper to search for an existing extent at a given offset.
 * Returns 0 if the extent item is found, > 0 if it is not, or < 0 on
 * error (following btrfs_search_slot() semantics).
 */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to lookup the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
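
/*
 * Key layout example (illustrative, mirroring lookup_extent_data_ref()
 * below): a data extent at bytenr B referenced by inode F at offset O in
 * subvolume root R uses the implicit form
 *
 *     key = (B, BTRFS_EXTENT_DATA_REF_KEY, hash(R, F, O))
 *
 * while a shared (full) back ref from parent tree block P uses
 *
 *     key = (B, BTRFS_SHARED_DATA_REF_KEY, P)
 */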

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
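
/*
 * The hash above becomes the key offset for BTRFS_EXTENT_DATA_REF_KEY
 * items (see lookup_extent_data_ref() and insert_extent_data_ref()
 * below): the root's crc32c is folded into the high bits while owner and
 * offset go into the low bits.
 */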

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
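                /*
                 * -EEXIST means an item with this key already exists.  If
                 * it describes our (root, objectid, offset) tuple we just
                 * bump its count below; otherwise the hashed key offset
                 * collided with a different tuple, so probe forward by
                 * incrementing the offset until we find our ref or an
                 * empty slot.
                 */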
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}
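
/*
 * Summary of the mapping above:
 *
 *   tree block (owner < BTRFS_FIRST_FREE_OBJECTID), parent set:
 *           BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block, no parent:   BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, parent set: BTRFS_SHARED_DATA_REF_KEY
 *   data extent, no parent:  BTRFS_EXTENT_DATA_REF_KEY
 *
 * "parent set" means the ref is shared (a full back ref keyed by the
 * parent block); otherwise it is an implicit back ref keyed by the owner.
 */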

static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}
1386
1387 /*
1388  * look for the inline back ref.  if the back ref is found, *ref_ret is
1389  * set to the address of the inline back ref, and 0 is returned.
1390  *
1391  * if the back ref isn't found, *ref_ret is set to the address where it
1392  * should be inserted, and -ENOENT is returned.
1393  *
1394  * if insert is true and there are too many inline back refs, the path
1395  * points to the extent item, and -EAGAIN is returned.
1396  *
1397  * NOTE: inline back refs are ordered the same way that back ref
1398  *       items in the tree are ordered.
1399  */
1400 static noinline_for_stack
1401 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1402                                  struct btrfs_root *root,
1403                                  struct btrfs_path *path,
1404                                  struct btrfs_extent_inline_ref **ref_ret,
1405                                  u64 bytenr, u64 num_bytes,
1406                                  u64 parent, u64 root_objectid,
1407                                  u64 owner, u64 offset, int insert)
1408 {
1409         struct btrfs_key key;
1410         struct extent_buffer *leaf;
1411         struct btrfs_extent_item *ei;
1412         struct btrfs_extent_inline_ref *iref;
1413         u64 flags;
1414         u64 item_size;
1415         unsigned long ptr;
1416         unsigned long end;
1417         int extra_size;
1418         int type;
1419         int want;
1420         int ret;
1421         int err = 0;
1422
1423         key.objectid = bytenr;
1424         key.type = BTRFS_EXTENT_ITEM_KEY;
1425         key.offset = num_bytes;
1426
1427         want = extent_ref_type(parent, owner);
1428         if (insert) {
1429                 extra_size = btrfs_extent_inline_ref_size(want);
1430                 path->keep_locks = 1;
1431         } else
1432                 extra_size = -1;
1433         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1434         if (ret < 0) {
1435                 err = ret;
1436                 goto out;
1437         }
1438         BUG_ON(ret);
1439
1440         leaf = path->nodes[0];
1441         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1442 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1443         if (item_size < sizeof(*ei)) {
1444                 if (!insert) {
1445                         err = -ENOENT;
1446                         goto out;
1447                 }
1448                 ret = convert_extent_item_v0(trans, root, path, owner,
1449                                              extra_size);
1450                 if (ret < 0) {
1451                         err = ret;
1452                         goto out;
1453                 }
1454                 leaf = path->nodes[0];
1455                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1456         }
1457 #endif
1458         BUG_ON(item_size < sizeof(*ei));
1459
1460         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1461         flags = btrfs_extent_flags(leaf, ei);
1462
1463         ptr = (unsigned long)(ei + 1);
1464         end = (unsigned long)ei + item_size;
1465
1466         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1467                 ptr += sizeof(struct btrfs_tree_block_info);
1468                 BUG_ON(ptr > end);
1469         } else {
1470                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1471         }
1472
1473         err = -ENOENT;
1474         while (1) {
1475                 if (ptr >= end) {
1476                         WARN_ON(ptr > end);
1477                         break;
1478                 }
1479                 iref = (struct btrfs_extent_inline_ref *)ptr;
1480                 type = btrfs_extent_inline_ref_type(leaf, iref);
1481                 if (want < type)
1482                         break;
1483                 if (want > type) {
1484                         ptr += btrfs_extent_inline_ref_size(type);
1485                         continue;
1486                 }
1487
1488                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1489                         struct btrfs_extent_data_ref *dref;
1490                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1491                         if (match_extent_data_ref(leaf, dref, root_objectid,
1492                                                   owner, offset)) {
1493                                 err = 0;
1494                                 break;
1495                         }
1496                         if (hash_extent_data_ref_item(leaf, dref) <
1497                             hash_extent_data_ref(root_objectid, owner, offset))
1498                                 break;
1499                 } else {
1500                         u64 ref_offset;
1501                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1502                         if (parent > 0) {
1503                                 if (parent == ref_offset) {
1504                                         err = 0;
1505                                         break;
1506                                 }
1507                                 if (ref_offset < parent)
1508                                         break;
1509                         } else {
1510                                 if (root_objectid == ref_offset) {
1511                                         err = 0;
1512                                         break;
1513                                 }
1514                                 if (ref_offset < root_objectid)
1515                                         break;
1516                         }
1517                 }
1518                 ptr += btrfs_extent_inline_ref_size(type);
1519         }
1520         if (err == -ENOENT && insert) {
1521                 if (item_size + extra_size >=
1522                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1523                         err = -EAGAIN;
1524                         goto out;
1525                 }
1526                 /*
1527                  * To add a new inline back ref, we have to make sure
1528                  * there is no corresponding back ref item.
1529                  * For simplicity, we just do not add a new inline back
1530                  * ref if there is any kind of item for this block.
1531                  */
1532                 if (find_next_key(path, 0, &key) == 0 &&
1533                     key.objectid == bytenr &&
1534                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1535                         err = -EAGAIN;
1536                         goto out;
1537                 }
1538         }
1539         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1540 out:
1541         if (insert) {
1542                 path->keep_locks = 0;
1543                 btrfs_unlock_up_safe(path, 1);
1544         }
1545         return err;
1546 }
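
/*
 * [Editor's sketch, not part of extent-tree.c] The search loop above walks
 * the variable-sized inline refs packed after the extent item header by raw
 * byte offset.  A simplified userspace model of that walk, using a made-up
 * fixed-size record in place of btrfs_extent_inline_ref:
 */
#include <stdio.h>
#include <string.h>

struct demo_iref {		/* hypothetical stand-in, fixed size */
	unsigned char type;
	unsigned long long offset;
};

static void walk_inline_refs(const unsigned char *item, size_t item_size,
			     size_t header_size)
{
	size_t ptr = header_size;	/* refs start right after the header */

	while (ptr + sizeof(struct demo_iref) <= item_size) {
		struct demo_iref ref;

		memcpy(&ref, item + ptr, sizeof(ref));
		printf("type %u offset %llu\n", (unsigned int)ref.type,
		       ref.offset);
		ptr += sizeof(ref);	/* real refs vary in size per type */
	}
}

int main(void)
{
	unsigned char item[64] = { 0 };	/* pretend item: header + two refs */
	struct demo_iref a = { 1, 100 }, b = { 2, 200 };

	memcpy(item + 8, &a, sizeof(a));
	memcpy(item + 8 + sizeof(a), &b, sizeof(b));
	walk_inline_refs(item, 8 + 2 * sizeof(a), 8);
	return 0;
}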
1547
1548 /*
1549  * helper to add a new inline back ref
1550  */
1551 static noinline_for_stack
1552 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1553                                 struct btrfs_root *root,
1554                                 struct btrfs_path *path,
1555                                 struct btrfs_extent_inline_ref *iref,
1556                                 u64 parent, u64 root_objectid,
1557                                 u64 owner, u64 offset, int refs_to_add,
1558                                 struct btrfs_delayed_extent_op *extent_op)
1559 {
1560         struct extent_buffer *leaf;
1561         struct btrfs_extent_item *ei;
1562         unsigned long ptr;
1563         unsigned long end;
1564         unsigned long item_offset;
1565         u64 refs;
1566         int size;
1567         int type;
1568         int ret;
1569
1570         leaf = path->nodes[0];
1571         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1572         item_offset = (unsigned long)iref - (unsigned long)ei;
1573
1574         type = extent_ref_type(parent, owner);
1575         size = btrfs_extent_inline_ref_size(type);
1576
1577         ret = btrfs_extend_item(trans, root, path, size);
1578
1579         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1580         refs = btrfs_extent_refs(leaf, ei);
1581         refs += refs_to_add;
1582         btrfs_set_extent_refs(leaf, ei, refs);
1583         if (extent_op)
1584                 __run_delayed_extent_op(extent_op, leaf, ei);
1585
1586         ptr = (unsigned long)ei + item_offset;
1587         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1588         if (ptr < end - size)
1589                 memmove_extent_buffer(leaf, ptr + size, ptr,
1590                                       end - size - ptr);
1591
1592         iref = (struct btrfs_extent_inline_ref *)ptr;
1593         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1594         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1595                 struct btrfs_extent_data_ref *dref;
1596                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1597                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1598                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1599                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1600                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1601         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1602                 struct btrfs_shared_data_ref *sref;
1603                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1604                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1605                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1606         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1607                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1608         } else {
1609                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1610         }
1611         btrfs_mark_buffer_dirty(leaf);
1612         return 0;
1613 }
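
/*
 * [Editor's sketch] after btrfs_extend_item() grows the item,
 * setup_inline_extent_backref() shifts the tail right with
 * memmove_extent_buffer() to open a gap at item_offset for the new ref.
 * The same pattern on a plain byte buffer:
 */
#include <string.h>

/* open a size-byte gap at pos within buf[0..len); buf must have room */
static void open_gap(unsigned char *buf, size_t len, size_t pos, size_t size)
{
	memmove(buf + pos + size, buf + pos, len - pos);
	memset(buf + pos, 0, size);	/* caller fills in the new ref */
}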
1614
1615 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1616                                  struct btrfs_root *root,
1617                                  struct btrfs_path *path,
1618                                  struct btrfs_extent_inline_ref **ref_ret,
1619                                  u64 bytenr, u64 num_bytes, u64 parent,
1620                                  u64 root_objectid, u64 owner, u64 offset)
1621 {
1622         int ret;
1623
1624         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1625                                            bytenr, num_bytes, parent,
1626                                            root_objectid, owner, offset, 0);
1627         if (ret != -ENOENT)
1628                 return ret;
1629
1630         btrfs_release_path(path);
1631         *ref_ret = NULL;
1632
1633         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1634                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1635                                             root_objectid);
1636         } else {
1637                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1638                                              root_objectid, owner, offset);
1639         }
1640         return ret;
1641 }
1642
1643 /*
1644  * helper to update/remove an inline back ref
1645  */
1646 static noinline_for_stack
1647 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1648                                  struct btrfs_root *root,
1649                                  struct btrfs_path *path,
1650                                  struct btrfs_extent_inline_ref *iref,
1651                                  int refs_to_mod,
1652                                  struct btrfs_delayed_extent_op *extent_op)
1653 {
1654         struct extent_buffer *leaf;
1655         struct btrfs_extent_item *ei;
1656         struct btrfs_extent_data_ref *dref = NULL;
1657         struct btrfs_shared_data_ref *sref = NULL;
1658         unsigned long ptr;
1659         unsigned long end;
1660         u32 item_size;
1661         int size;
1662         int type;
1663         int ret;
1664         u64 refs;
1665
1666         leaf = path->nodes[0];
1667         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1668         refs = btrfs_extent_refs(leaf, ei);
1669         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1670         refs += refs_to_mod;
1671         btrfs_set_extent_refs(leaf, ei, refs);
1672         if (extent_op)
1673                 __run_delayed_extent_op(extent_op, leaf, ei);
1674
1675         type = btrfs_extent_inline_ref_type(leaf, iref);
1676
1677         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1678                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1679                 refs = btrfs_extent_data_ref_count(leaf, dref);
1680         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1681                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1682                 refs = btrfs_shared_data_ref_count(leaf, sref);
1683         } else {
1684                 refs = 1;
1685                 BUG_ON(refs_to_mod != -1);
1686         }
1687
1688         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1689         refs += refs_to_mod;
1690
1691         if (refs > 0) {
1692                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1693                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1694                 else
1695                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1696         } else {
1697                 size = btrfs_extent_inline_ref_size(type);
1698                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1699                 ptr = (unsigned long)iref;
1700                 end = (unsigned long)ei + item_size;
1701                 if (ptr + size < end)
1702                         memmove_extent_buffer(leaf, ptr, ptr + size,
1703                                               end - ptr - size);
1704                 item_size -= size;
1705                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1706         }
1707         btrfs_mark_buffer_dirty(leaf);
1708         return 0;
1709 }
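
/*
 * [Editor's sketch] the removal path above is the inverse: shift the tail
 * left over the dying ref, then shrink the item with btrfs_truncate_item().
 * On a plain byte buffer:
 */
#include <string.h>

static void close_gap(unsigned char *buf, size_t len, size_t pos, size_t size)
{
	memmove(buf + pos, buf + pos + size, len - pos - size);
	/* caller then records the new length as len - size */
}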
1710
1711 static noinline_for_stack
1712 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1713                                  struct btrfs_root *root,
1714                                  struct btrfs_path *path,
1715                                  u64 bytenr, u64 num_bytes, u64 parent,
1716                                  u64 root_objectid, u64 owner,
1717                                  u64 offset, int refs_to_add,
1718                                  struct btrfs_delayed_extent_op *extent_op)
1719 {
1720         struct btrfs_extent_inline_ref *iref;
1721         int ret;
1722
1723         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1724                                            bytenr, num_bytes, parent,
1725                                            root_objectid, owner, offset, 1);
1726         if (ret == 0) {
1727                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1728                 ret = update_inline_extent_backref(trans, root, path, iref,
1729                                                    refs_to_add, extent_op);
1730         } else if (ret == -ENOENT) {
1731                 ret = setup_inline_extent_backref(trans, root, path, iref,
1732                                                   parent, root_objectid,
1733                                                   owner, offset, refs_to_add,
1734                                                   extent_op);
1735         }
1736         return ret;
1737 }
1738
1739 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1740                                  struct btrfs_root *root,
1741                                  struct btrfs_path *path,
1742                                  u64 bytenr, u64 parent, u64 root_objectid,
1743                                  u64 owner, u64 offset, int refs_to_add)
1744 {
1745         int ret;
1746         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1747                 BUG_ON(refs_to_add != 1);
1748                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1749                                             parent, root_objectid);
1750         } else {
1751                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1752                                              parent, root_objectid,
1753                                              owner, offset, refs_to_add);
1754         }
1755         return ret;
1756 }
1757
1758 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1759                                  struct btrfs_root *root,
1760                                  struct btrfs_path *path,
1761                                  struct btrfs_extent_inline_ref *iref,
1762                                  int refs_to_drop, int is_data)
1763 {
1764         int ret;
1765
1766         BUG_ON(!is_data && refs_to_drop != 1);
1767         if (iref) {
1768                 ret = update_inline_extent_backref(trans, root, path, iref,
1769                                                    -refs_to_drop, NULL);
1770         } else if (is_data) {
1771                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1772         } else {
1773                 ret = btrfs_del_item(trans, root, path);
1774         }
1775         return ret;
1776 }
1777
1778 static int btrfs_issue_discard(struct block_device *bdev,
1779                                 u64 start, u64 len)
1780 {
1781         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1782 }
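
/*
 * [Editor's note] blkdev_issue_discard() takes 512-byte sectors, so the
 * helper above converts byte offsets with a shift (x >> 9 == x / 512).
 * A quick standalone check of that arithmetic:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long start = 4096, len = 1ULL << 20;	/* 1 MiB */

	/* same conversion as btrfs_issue_discard(): bytes -> sectors */
	printf("sector %llu, %llu sectors\n", start >> 9, len >> 9);
	return 0;	/* prints: sector 8, 2048 sectors */
}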
1783
1784 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1785                                 u64 num_bytes, u64 *actual_bytes)
1786 {
1787         int ret;
1788         u64 discarded_bytes = 0;
1789         struct btrfs_multi_bio *multi = NULL;
1790
1792         /* Tell the block device(s) that the sectors can be discarded */
1793         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1794                               bytenr, &num_bytes, &multi, 0);
1795         if (!ret) {
1796                 struct btrfs_bio_stripe *stripe = multi->stripes;
1797                 int i;
1798
1799
1800                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1801                         if (!stripe->dev->can_discard)
1802                                 continue;
1803
1804                         ret = btrfs_issue_discard(stripe->dev->bdev,
1805                                                   stripe->physical,
1806                                                   stripe->length);
1807                         if (!ret)
1808                                 discarded_bytes += stripe->length;
1809                         else if (ret != -EOPNOTSUPP)
1810                                 break;
1811
1812                         /*
1813                          * In case we get back EOPNOTSUPP for some reason,
1814                          * just ignore the return value so we don't break
1815                          * callers of discard_extent.
1816                          */
1817                         ret = 0;
1818                 }
1819                 kfree(multi);
1820         }
1821
1822         if (actual_bytes)
1823                 *actual_bytes = discarded_bytes;
1824
1826         return ret;
1827 }
1828
1829 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1830                          struct btrfs_root *root,
1831                          u64 bytenr, u64 num_bytes, u64 parent,
1832                          u64 root_objectid, u64 owner, u64 offset)
1833 {
1834         int ret;
1835         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1836                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1837
1838         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1839                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1840                                         parent, root_objectid, (int)owner,
1841                                         BTRFS_ADD_DELAYED_REF, NULL);
1842         } else {
1843                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1844                                         parent, root_objectid, owner, offset,
1845                                         BTRFS_ADD_DELAYED_REF, NULL);
1846         }
1847         return ret;
1848 }
1849
1850 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1851                                   struct btrfs_root *root,
1852                                   u64 bytenr, u64 num_bytes,
1853                                   u64 parent, u64 root_objectid,
1854                                   u64 owner, u64 offset, int refs_to_add,
1855                                   struct btrfs_delayed_extent_op *extent_op)
1856 {
1857         struct btrfs_path *path;
1858         struct extent_buffer *leaf;
1859         struct btrfs_extent_item *item;
1860         u64 refs;
1861         int ret;
1862         int err = 0;
1863
1864         path = btrfs_alloc_path();
1865         if (!path)
1866                 return -ENOMEM;
1867
1868         path->reada = 1;
1869         path->leave_spinning = 1;
1870         /* this will set up the path even if it fails to insert the back ref */
1871         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1872                                            path, bytenr, num_bytes, parent,
1873                                            root_objectid, owner, offset,
1874                                            refs_to_add, extent_op);
1875         if (ret == 0)
1876                 goto out;
1877
1878         if (ret != -EAGAIN) {
1879                 err = ret;
1880                 goto out;
1881         }
1882
1883         leaf = path->nodes[0];
1884         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1885         refs = btrfs_extent_refs(leaf, item);
1886         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1887         if (extent_op)
1888                 __run_delayed_extent_op(extent_op, leaf, item);
1889
1890         btrfs_mark_buffer_dirty(leaf);
1891         btrfs_release_path(path);
1892
1893         path->reada = 1;
1894         path->leave_spinning = 1;
1895
1896         /* now insert the actual backref */
1897         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1898                                     path, bytenr, parent, root_objectid,
1899                                     owner, offset, refs_to_add);
1900         BUG_ON(ret);
1901 out:
1902         btrfs_free_path(path);
1903         return err;
1904 }
1905
1906 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1907                                 struct btrfs_root *root,
1908                                 struct btrfs_delayed_ref_node *node,
1909                                 struct btrfs_delayed_extent_op *extent_op,
1910                                 int insert_reserved)
1911 {
1912         int ret = 0;
1913         struct btrfs_delayed_data_ref *ref;
1914         struct btrfs_key ins;
1915         u64 parent = 0;
1916         u64 ref_root = 0;
1917         u64 flags = 0;
1918
1919         ins.objectid = node->bytenr;
1920         ins.offset = node->num_bytes;
1921         ins.type = BTRFS_EXTENT_ITEM_KEY;
1922
1923         ref = btrfs_delayed_node_to_data_ref(node);
1924         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1925                 parent = ref->parent;
1926         else
1927                 ref_root = ref->root;
1928
1929         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1930                 if (extent_op) {
1931                         BUG_ON(extent_op->update_key);
1932                         flags |= extent_op->flags_to_set;
1933                 }
1934                 ret = alloc_reserved_file_extent(trans, root,
1935                                                  parent, ref_root, flags,
1936                                                  ref->objectid, ref->offset,
1937                                                  &ins, node->ref_mod);
1938         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1939                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1940                                              node->num_bytes, parent,
1941                                              ref_root, ref->objectid,
1942                                              ref->offset, node->ref_mod,
1943                                              extent_op);
1944         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1945                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1946                                           node->num_bytes, parent,
1947                                           ref_root, ref->objectid,
1948                                           ref->offset, node->ref_mod,
1949                                           extent_op);
1950         } else {
1951                 BUG();
1952         }
1953         return ret;
1954 }
1955
1956 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1957                                     struct extent_buffer *leaf,
1958                                     struct btrfs_extent_item *ei)
1959 {
1960         u64 flags = btrfs_extent_flags(leaf, ei);
1961         if (extent_op->update_flags) {
1962                 flags |= extent_op->flags_to_set;
1963                 btrfs_set_extent_flags(leaf, ei, flags);
1964         }
1965
1966         if (extent_op->update_key) {
1967                 struct btrfs_tree_block_info *bi;
1968                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1969                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1970                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1971         }
1972 }
1973
1974 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1975                                  struct btrfs_root *root,
1976                                  struct btrfs_delayed_ref_node *node,
1977                                  struct btrfs_delayed_extent_op *extent_op)
1978 {
1979         struct btrfs_key key;
1980         struct btrfs_path *path;
1981         struct btrfs_extent_item *ei;
1982         struct extent_buffer *leaf;
1983         u32 item_size;
1984         int ret;
1985         int err = 0;
1986
1987         path = btrfs_alloc_path();
1988         if (!path)
1989                 return -ENOMEM;
1990
1991         key.objectid = node->bytenr;
1992         key.type = BTRFS_EXTENT_ITEM_KEY;
1993         key.offset = node->num_bytes;
1994
1995         path->reada = 1;
1996         path->leave_spinning = 1;
1997         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1998                                 path, 0, 1);
1999         if (ret < 0) {
2000                 err = ret;
2001                 goto out;
2002         }
2003         if (ret > 0) {
2004                 err = -EIO;
2005                 goto out;
2006         }
2007
2008         leaf = path->nodes[0];
2009         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2010 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2011         if (item_size < sizeof(*ei)) {
2012                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2013                                              path, (u64)-1, 0);
2014                 if (ret < 0) {
2015                         err = ret;
2016                         goto out;
2017                 }
2018                 leaf = path->nodes[0];
2019                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2020         }
2021 #endif
2022         BUG_ON(item_size < sizeof(*ei));
2023         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2024         __run_delayed_extent_op(extent_op, leaf, ei);
2025
2026         btrfs_mark_buffer_dirty(leaf);
2027 out:
2028         btrfs_free_path(path);
2029         return err;
2030 }
2031
2032 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2033                                 struct btrfs_root *root,
2034                                 struct btrfs_delayed_ref_node *node,
2035                                 struct btrfs_delayed_extent_op *extent_op,
2036                                 int insert_reserved)
2037 {
2038         int ret = 0;
2039         struct btrfs_delayed_tree_ref *ref;
2040         struct btrfs_key ins;
2041         u64 parent = 0;
2042         u64 ref_root = 0;
2043
2044         ins.objectid = node->bytenr;
2045         ins.offset = node->num_bytes;
2046         ins.type = BTRFS_EXTENT_ITEM_KEY;
2047
2048         ref = btrfs_delayed_node_to_tree_ref(node);
2049         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2050                 parent = ref->parent;
2051         else
2052                 ref_root = ref->root;
2053
2054         BUG_ON(node->ref_mod != 1);
2055         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2056                 BUG_ON(!extent_op || !extent_op->update_flags ||
2057                        !extent_op->update_key);
2058                 ret = alloc_reserved_tree_block(trans, root,
2059                                                 parent, ref_root,
2060                                                 extent_op->flags_to_set,
2061                                                 &extent_op->key,
2062                                                 ref->level, &ins);
2063         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2064                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2065                                              node->num_bytes, parent, ref_root,
2066                                              ref->level, 0, 1, extent_op);
2067         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2068                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2069                                           node->num_bytes, parent, ref_root,
2070                                           ref->level, 0, 1, extent_op);
2071         } else {
2072                 BUG();
2073         }
2074         return ret;
2075 }
2076
2077 /* helper function to actually process a single delayed ref entry */
2078 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2079                                struct btrfs_root *root,
2080                                struct btrfs_delayed_ref_node *node,
2081                                struct btrfs_delayed_extent_op *extent_op,
2082                                int insert_reserved)
2083 {
2084         int ret;
2085         if (btrfs_delayed_ref_is_head(node)) {
2086                 struct btrfs_delayed_ref_head *head;
2087                 /*
2088                  * we've hit the end of the chain and we were supposed
2089                  * to insert this extent into the tree.  But it got
2090                  * deleted before we ever needed to insert it, so all
2091                  * we have to do is clean up the accounting.
2092                  */
2093                 BUG_ON(extent_op);
2094                 head = btrfs_delayed_node_to_head(node);
2095                 if (insert_reserved) {
2096                         btrfs_pin_extent(root, node->bytenr,
2097                                          node->num_bytes, 1);
2098                         if (head->is_data) {
2099                                 ret = btrfs_del_csums(trans, root,
2100                                                       node->bytenr,
2101                                                       node->num_bytes);
2102                                 BUG_ON(ret);
2103                         }
2104                 }
2105                 mutex_unlock(&head->mutex);
2106                 return 0;
2107         }
2108
2109         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2110             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2111                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2112                                            insert_reserved);
2113         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2114                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2115                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2116                                            insert_reserved);
2117         else
2118                 BUG();
2119         return ret;
2120 }
2121
2122 static noinline struct btrfs_delayed_ref_node *
2123 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2124 {
2125         struct rb_node *node;
2126         struct btrfs_delayed_ref_node *ref;
2127         int action = BTRFS_ADD_DELAYED_REF;
2128 again:
2129         /*
2130          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2131          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2132          * this prevents the ref count from going down to zero while
2133          * there are still pending delayed refs.
2134         node = rb_prev(&head->node.rb_node);
2135         while (1) {
2136                 if (!node)
2137                         break;
2138                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2139                                 rb_node);
2140                 if (ref->bytenr != head->node.bytenr)
2141                         break;
2142                 if (ref->action == action)
2143                         return ref;
2144                 node = rb_prev(node);
2145         }
2146         if (action == BTRFS_ADD_DELAYED_REF) {
2147                 action = BTRFS_DROP_DELAYED_REF;
2148                 goto again;
2149         }
2150         return NULL;
2151 }
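
/*
 * [Editor's sketch] select_delayed_ref() makes two passes over the refs
 * hanging off one head: ADD actions first, then DROP, so an extent's ref
 * count never transiently hits zero while adds are still pending.  The
 * same two-pass selection over a plain array:
 */
#include <stddef.h>

enum { DEMO_ADD, DEMO_DROP };

struct demo_dref { int action; };

static struct demo_dref *pick_ref(struct demo_dref *refs, size_t n)
{
	int wanted = DEMO_ADD;
	size_t i;
again:
	for (i = 0; i < n; i++)
		if (refs[i].action == wanted)
			return &refs[i];
	if (wanted == DEMO_ADD) {
		wanted = DEMO_DROP;
		goto again;
	}
	return NULL;
}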
2152
2153 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2154                                        struct btrfs_root *root,
2155                                        struct list_head *cluster)
2156 {
2157         struct btrfs_delayed_ref_root *delayed_refs;
2158         struct btrfs_delayed_ref_node *ref;
2159         struct btrfs_delayed_ref_head *locked_ref = NULL;
2160         struct btrfs_delayed_extent_op *extent_op;
2161         int ret;
2162         int count = 0;
2163         int must_insert_reserved = 0;
2164
2165         delayed_refs = &trans->transaction->delayed_refs;
2166         while (1) {
2167                 if (!locked_ref) {
2168                         /* pick a new head ref from the cluster list */
2169                         if (list_empty(cluster))
2170                                 break;
2171
2172                         locked_ref = list_entry(cluster->next,
2173                                      struct btrfs_delayed_ref_head, cluster);
2174
2175                         /* grab the lock that says we are going to process
2176                          * all the refs for this head */
2177                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2178
2179                         /*
2180                          * we may have dropped the spin lock to get the head
2181                          * mutex lock, and that might have given someone else
2182                          * time to free the head.  If that's true, it has been
2183                          * removed from our list and we can move on.
2184                          */
2185                         if (ret == -EAGAIN) {
2186                                 locked_ref = NULL;
2187                                 count++;
2188                                 continue;
2189                         }
2190                 }
2191
2192                 /*
2193                  * record the must insert reserved flag before we
2194                  * drop the spin lock.
2195                  */
2196                 must_insert_reserved = locked_ref->must_insert_reserved;
2197                 locked_ref->must_insert_reserved = 0;
2198
2199                 extent_op = locked_ref->extent_op;
2200                 locked_ref->extent_op = NULL;
2201
2202                 /*
2203                  * locked_ref is the head node, so we have to go one
2204                  * node back for any delayed ref updates
2205                  */
2206                 ref = select_delayed_ref(locked_ref);
2207                 if (!ref) {
2208                         /* All delayed refs have been processed.  Go ahead
2209                          * and send the head node to run_one_delayed_ref,
2210                          * so that any accounting fixes can happen.
2211                          */
2212                         ref = &locked_ref->node;
2213
2214                         if (extent_op && must_insert_reserved) {
2215                                 kfree(extent_op);
2216                                 extent_op = NULL;
2217                         }
2218
2219                         if (extent_op) {
2220                                 spin_unlock(&delayed_refs->lock);
2221
2222                                 ret = run_delayed_extent_op(trans, root,
2223                                                             ref, extent_op);
2224                                 BUG_ON(ret);
2225                                 kfree(extent_op);
2226
2227                                 cond_resched();
2228                                 spin_lock(&delayed_refs->lock);
2229                                 continue;
2230                         }
2231
2232                         list_del_init(&locked_ref->cluster);
2233                         locked_ref = NULL;
2234                 }
2235
2236                 ref->in_tree = 0;
2237                 rb_erase(&ref->rb_node, &delayed_refs->root);
2238                 delayed_refs->num_entries--;
2239
2240                 spin_unlock(&delayed_refs->lock);
2241
2242                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2243                                           must_insert_reserved);
2244                 BUG_ON(ret);
2245
2246                 btrfs_put_delayed_ref(ref);
2247                 kfree(extent_op);
2248                 count++;
2249
2250                 cond_resched();
2251                 spin_lock(&delayed_refs->lock);
2252         }
2253         return count;
2254 }
2255
2256 /*
2257  * this starts processing the delayed reference count updates and
2258  * extent insertions we have queued up so far.  count can be
2259  * 0, which means to process everything in the tree at the start
2260  * of the run (but not newly added entries), or it can be some target
2261  * number you'd like to process.
2262  */
2263 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2264                            struct btrfs_root *root, unsigned long count)
2265 {
2266         struct rb_node *node;
2267         struct btrfs_delayed_ref_root *delayed_refs;
2268         struct btrfs_delayed_ref_node *ref;
2269         struct list_head cluster;
2270         int ret;
2271         int run_all = count == (unsigned long)-1;
2272         int run_most = 0;
2273
2274         if (root == root->fs_info->extent_root)
2275                 root = root->fs_info->tree_root;
2276
2277         delayed_refs = &trans->transaction->delayed_refs;
2278         INIT_LIST_HEAD(&cluster);
2279 again:
2280         spin_lock(&delayed_refs->lock);
2281         if (count == 0) {
2282                 count = delayed_refs->num_entries * 2;
2283                 run_most = 1;
2284         }
2285         while (1) {
2286                 if (!(run_all || run_most) &&
2287                     delayed_refs->num_heads_ready < 64)
2288                         break;
2289
2290                 /*
2291                  * go find something we can process in the rbtree.  We start at
2292                  * the beginning of the tree, and then build a cluster
2293                  * of refs to process starting at the first one we are able to
2294                  * lock.
2295                  */
2296                 ret = btrfs_find_ref_cluster(trans, &cluster,
2297                                              delayed_refs->run_delayed_start);
2298                 if (ret)
2299                         break;
2300
2301                 ret = run_clustered_refs(trans, root, &cluster);
2302                 BUG_ON(ret < 0);
2303
2304                 count -= min_t(unsigned long, ret, count);
2305
2306                 if (count == 0)
2307                         break;
2308         }
2309
2310         if (run_all) {
2311                 node = rb_first(&delayed_refs->root);
2312                 if (!node)
2313                         goto out;
2314                 count = (unsigned long)-1;
2315
2316                 while (node) {
2317                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2318                                        rb_node);
2319                         if (btrfs_delayed_ref_is_head(ref)) {
2320                                 struct btrfs_delayed_ref_head *head;
2321
2322                                 head = btrfs_delayed_node_to_head(ref);
2323                                 atomic_inc(&ref->refs);
2324
2325                                 spin_unlock(&delayed_refs->lock);
2326                                 /*
2327                                  * Mutex was contended, block until it's
2328                                  * released and try again
2329                                  */
2330                                 mutex_lock(&head->mutex);
2331                                 mutex_unlock(&head->mutex);
2332
2333                                 btrfs_put_delayed_ref(ref);
2334                                 cond_resched();
2335                                 goto again;
2336                         }
2337                         node = rb_next(node);
2338                 }
2339                 spin_unlock(&delayed_refs->lock);
2340                 schedule_timeout(1);
2341                 goto again;
2342         }
2343 out:
2344         spin_unlock(&delayed_refs->lock);
2345         return 0;
2346 }
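
/*
 * [Editor's note] usage sketch for the count argument described above:
 *
 *	// process roughly what was queued when the call started
 *	btrfs_run_delayed_refs(trans, root, 0);
 *
 *	// drain everything, including refs added while running
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 */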
2347
2348 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2349                                 struct btrfs_root *root,
2350                                 u64 bytenr, u64 num_bytes, u64 flags,
2351                                 int is_data)
2352 {
2353         struct btrfs_delayed_extent_op *extent_op;
2354         int ret;
2355
2356         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2357         if (!extent_op)
2358                 return -ENOMEM;
2359
2360         extent_op->flags_to_set = flags;
2361         extent_op->update_flags = 1;
2362         extent_op->update_key = 0;
2363         extent_op->is_data = is_data ? 1 : 0;
2364
2365         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2366         if (ret)
2367                 kfree(extent_op);
2368         return ret;
2369 }
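
/*
 * [Editor's sketch] note the ownership rule above: on success the delayed
 * ref code owns extent_op, on failure the caller must free it.  A minimal
 * userspace model of that alloc/hand-off/free-on-error pattern, with a
 * hypothetical queue_op() consumer:
 */
#include <stdlib.h>

struct demo_op { unsigned long long flags; };

static int queue_op(struct demo_op *op)	/* takes ownership on success */
{
	(void)op;
	return 0;	/* pretend the op was queued */
}

static int submit_flags(unsigned long long flags)
{
	struct demo_op *op = malloc(sizeof(*op));
	int ret;

	if (!op)
		return -1;	/* -ENOMEM in the kernel */
	op->flags = flags;
	ret = queue_op(op);
	if (ret)
		free(op);	/* consumer didn't take it; avoid a leak */
	return ret;
}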
2370
2371 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2372                                       struct btrfs_root *root,
2373                                       struct btrfs_path *path,
2374                                       u64 objectid, u64 offset, u64 bytenr)
2375 {
2376         struct btrfs_delayed_ref_head *head;
2377         struct btrfs_delayed_ref_node *ref;
2378         struct btrfs_delayed_data_ref *data_ref;
2379         struct btrfs_delayed_ref_root *delayed_refs;
2380         struct rb_node *node;
2381         int ret = 0;
2382
2383         ret = -ENOENT;
2384         delayed_refs = &trans->transaction->delayed_refs;
2385         spin_lock(&delayed_refs->lock);
2386         head = btrfs_find_delayed_ref_head(trans, bytenr);
2387         if (!head)
2388                 goto out;
2389
2390         if (!mutex_trylock(&head->mutex)) {
2391                 atomic_inc(&head->node.refs);
2392                 spin_unlock(&delayed_refs->lock);
2393
2394                 btrfs_release_path(path);
2395
2396                 /*
2397                  * Mutex was contended, block until it's released and let
2398                  * caller try again
2399                  */
2400                 mutex_lock(&head->mutex);
2401                 mutex_unlock(&head->mutex);
2402                 btrfs_put_delayed_ref(&head->node);
2403                 return -EAGAIN;
2404         }
2405
2406         node = rb_prev(&head->node.rb_node);
2407         if (!node)
2408                 goto out_unlock;
2409
2410         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2411
2412         if (ref->bytenr != bytenr)
2413                 goto out_unlock;
2414
2415         ret = 1;
2416         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2417                 goto out_unlock;
2418
2419         data_ref = btrfs_delayed_node_to_data_ref(ref);
2420
2421         node = rb_prev(node);
2422         if (node) {
2423                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2424                 if (ref->bytenr == bytenr)
2425                         goto out_unlock;
2426         }
2427
2428         if (data_ref->root != root->root_key.objectid ||
2429             data_ref->objectid != objectid || data_ref->offset != offset)
2430                 goto out_unlock;
2431
2432         ret = 0;
2433 out_unlock:
2434         mutex_unlock(&head->mutex);
2435 out:
2436         spin_unlock(&delayed_refs->lock);
2437         return ret;
2438 }
2439
2440 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2441                                         struct btrfs_root *root,
2442                                         struct btrfs_path *path,
2443                                         u64 objectid, u64 offset, u64 bytenr)
2444 {
2445         struct btrfs_root *extent_root = root->fs_info->extent_root;
2446         struct extent_buffer *leaf;
2447         struct btrfs_extent_data_ref *ref;
2448         struct btrfs_extent_inline_ref *iref;
2449         struct btrfs_extent_item *ei;
2450         struct btrfs_key key;
2451         u32 item_size;
2452         int ret;
2453
2454         key.objectid = bytenr;
2455         key.offset = (u64)-1;
2456         key.type = BTRFS_EXTENT_ITEM_KEY;
2457
2458         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2459         if (ret < 0)
2460                 goto out;
2461         BUG_ON(ret == 0);
2462
2463         ret = -ENOENT;
2464         if (path->slots[0] == 0)
2465                 goto out;
2466
2467         path->slots[0]--;
2468         leaf = path->nodes[0];
2469         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2470
2471         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2472                 goto out;
2473
2474         ret = 1;
2475         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2476 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2477         if (item_size < sizeof(*ei)) {
2478                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2479                 goto out;
2480         }
2481 #endif
2482         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2483
2484         if (item_size != sizeof(*ei) +
2485             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2486                 goto out;
2487
2488         if (btrfs_extent_generation(leaf, ei) <=
2489             btrfs_root_last_snapshot(&root->root_item))
2490                 goto out;
2491
2492         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2493         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2494             BTRFS_EXTENT_DATA_REF_KEY)
2495                 goto out;
2496
2497         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2498         if (btrfs_extent_refs(leaf, ei) !=
2499             btrfs_extent_data_ref_count(leaf, ref) ||
2500             btrfs_extent_data_ref_root(leaf, ref) !=
2501             root->root_key.objectid ||
2502             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2503             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2504                 goto out;
2505
2506         ret = 0;
2507 out:
2508         return ret;
2509 }
2510
2511 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2512                           struct btrfs_root *root,
2513                           u64 objectid, u64 offset, u64 bytenr)
2514 {
2515         struct btrfs_path *path;
2516         int ret;
2517         int ret2;
2518
2519         path = btrfs_alloc_path();
2520         if (!path)
2521                 return -ENOENT;
2522
2523         do {
2524                 ret = check_committed_ref(trans, root, path, objectid,
2525                                           offset, bytenr);
2526                 if (ret && ret != -ENOENT)
2527                         goto out;
2528
2529                 ret2 = check_delayed_ref(trans, root, path, objectid,
2530                                          offset, bytenr);
2531         } while (ret2 == -EAGAIN);
2532
2533         if (ret2 && ret2 != -ENOENT) {
2534                 ret = ret2;
2535                 goto out;
2536         }
2537
2538         if (ret != -ENOENT || ret2 != -ENOENT)
2539                 ret = 0;
2540 out:
2541         btrfs_free_path(path);
2542         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2543                 WARN_ON(ret > 0);
2544         return ret;
2545 }
2546
2547 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2548                            struct btrfs_root *root,
2549                            struct extent_buffer *buf,
2550                            int full_backref, int inc)
2551 {
2552         u64 bytenr;
2553         u64 num_bytes;
2554         u64 parent;
2555         u64 ref_root;
2556         u32 nritems;
2557         struct btrfs_key key;
2558         struct btrfs_file_extent_item *fi;
2559         int i;
2560         int level;
2561         int ret = 0;
2562         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2563                             u64, u64, u64, u64, u64, u64);
2564
2565         ref_root = btrfs_header_owner(buf);
2566         nritems = btrfs_header_nritems(buf);
2567         level = btrfs_header_level(buf);
2568
2569         if (!root->ref_cows && level == 0)
2570                 return 0;
2571
2572         if (inc)
2573                 process_func = btrfs_inc_extent_ref;
2574         else
2575                 process_func = btrfs_free_extent;
2576
2577         if (full_backref)
2578                 parent = buf->start;
2579         else
2580                 parent = 0;
2581
2582         for (i = 0; i < nritems; i++) {
2583                 if (level == 0) {
2584                         btrfs_item_key_to_cpu(buf, &key, i);
2585                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2586                                 continue;
2587                         fi = btrfs_item_ptr(buf, i,
2588                                             struct btrfs_file_extent_item);
2589                         if (btrfs_file_extent_type(buf, fi) ==
2590                             BTRFS_FILE_EXTENT_INLINE)
2591                                 continue;
2592                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2593                         if (bytenr == 0)
2594                                 continue;
2595
2596                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2597                         key.offset -= btrfs_file_extent_offset(buf, fi);
2598                         ret = process_func(trans, root, bytenr, num_bytes,
2599                                            parent, ref_root, key.objectid,
2600                                            key.offset);
2601                         if (ret)
2602                                 goto fail;
2603                 } else {
2604                         bytenr = btrfs_node_blockptr(buf, i);
2605                         num_bytes = btrfs_level_size(root, level - 1);
2606                         ret = process_func(trans, root, bytenr, num_bytes,
2607                                            parent, ref_root, level - 1, 0);
2608                         if (ret)
2609                                 goto fail;
2610                 }
2611         }
2612         return 0;
2613 fail:
2614         BUG();
2615         return ret;
2616 }
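
/*
 * [Editor's sketch] __btrfs_mod_ref() picks btrfs_inc_extent_ref or
 * btrfs_free_extent once, up front, and calls it through process_func for
 * every item rather than branching on inc inside the loop.  The pattern in
 * miniature:
 */
typedef int (*demo_ref_fn)(unsigned long long bytenr, unsigned long long len);

static int demo_inc(unsigned long long b, unsigned long long l)
{ (void)b; (void)l; return 0; }
static int demo_dec(unsigned long long b, unsigned long long l)
{ (void)b; (void)l; return 0; }

static int demo_mod_all(const unsigned long long *bytenr,
			const unsigned long long *len, int n, int inc)
{
	demo_ref_fn fn = inc ? demo_inc : demo_dec;
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fn(bytenr[i], len[i]);
		if (ret)
			return ret;	/* cf. the fail label above */
	}
	return 0;
}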
2617
2618 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2619                   struct extent_buffer *buf, int full_backref)
2620 {
2621         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2622 }
2623
2624 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2625                   struct extent_buffer *buf, int full_backref)
2626 {
2627         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2628 }
2629
2630 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2631                                  struct btrfs_root *root,
2632                                  struct btrfs_path *path,
2633                                  struct btrfs_block_group_cache *cache)
2634 {
2635         int ret;
2636         struct btrfs_root *extent_root = root->fs_info->extent_root;
2637         unsigned long bi;
2638         struct extent_buffer *leaf;
2639
2640         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2641         if (ret < 0)
2642                 goto fail;
2643         BUG_ON(ret);
2644
2645         leaf = path->nodes[0];
2646         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2647         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2648         btrfs_mark_buffer_dirty(leaf);
2649         btrfs_release_path(path);
2650 fail:
2651         if (ret)
2652                 return ret;
2653         return 0;
2655 }
2656
2657 static struct btrfs_block_group_cache *
2658 next_block_group(struct btrfs_root *root,
2659                  struct btrfs_block_group_cache *cache)
2660 {
2661         struct rb_node *node;
2662         spin_lock(&root->fs_info->block_group_cache_lock);
2663         node = rb_next(&cache->cache_node);
2664         btrfs_put_block_group(cache);
2665         if (node) {
2666                 cache = rb_entry(node, struct btrfs_block_group_cache,
2667                                  cache_node);
2668                 btrfs_get_block_group(cache);
2669         } else
2670                 cache = NULL;
2671         spin_unlock(&root->fs_info->block_group_cache_lock);
2672         return cache;
2673 }
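
/*
 * [Editor's note] next_block_group() drops the caller's reference on the
 * current group and returns its successor with a fresh reference, all under
 * block_group_cache_lock.  A typical iteration (see the loops in
 * btrfs_write_dirty_block_groups() below) therefore looks like:
 *
 *	cache = btrfs_lookup_first_block_group(root->fs_info, 0);
 *	while (cache) {
 *		... inspect cache ...
 *		cache = next_block_group(root, cache);
 *	}
 */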
2674
2675 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2676                             struct btrfs_trans_handle *trans,
2677                             struct btrfs_path *path)
2678 {
2679         struct btrfs_root *root = block_group->fs_info->tree_root;
2680         struct inode *inode = NULL;
2681         u64 alloc_hint = 0;
2682         int dcs = BTRFS_DC_ERROR;
2683         int num_pages = 0;
2684         int retries = 0;
2685         int ret = 0;
2686
2687         /*
2688          * If this block group is smaller than 100 megs, don't bother caching the
2689          * block group.
2690          */
2691         if (block_group->key.offset < (100 * 1024 * 1024)) {
2692                 spin_lock(&block_group->lock);
2693                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2694                 spin_unlock(&block_group->lock);
2695                 return 0;
2696         }
2697
2698 again:
2699         inode = lookup_free_space_inode(root, block_group, path);
2700         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2701                 ret = PTR_ERR(inode);
2702                 btrfs_release_path(path);
2703                 goto out;
2704         }
2705
2706         if (IS_ERR(inode)) {
2707                 BUG_ON(retries);
2708                 retries++;
2709
2710                 if (block_group->ro)
2711                         goto out_free;
2712
2713                 ret = create_free_space_inode(root, trans, block_group, path);
2714                 if (ret)
2715                         goto out_free;
2716                 goto again;
2717         }
2718
2719         /*
2720          * We want to set the generation to 0 so that if anything goes wrong
2721          * from here on out we know not to trust this cache when we load up next
2722          * time.
2723          */
2724         BTRFS_I(inode)->generation = 0;
2725         ret = btrfs_update_inode(trans, root, inode);
2726         WARN_ON(ret);
2727
2728         if (i_size_read(inode) > 0) {
2729                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2730                                                       inode);
2731                 if (ret)
2732                         goto out_put;
2733         }
2734
2735         spin_lock(&block_group->lock);
2736         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2737                 /* We're not cached, don't bother trying to write stuff out */
2738                 dcs = BTRFS_DC_WRITTEN;
2739                 spin_unlock(&block_group->lock);
2740                 goto out_put;
2741         }
2742         spin_unlock(&block_group->lock);
2743
2744         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2745         if (!num_pages)
2746                 num_pages = 1;
2747
2748         /*
2749          * Just to make absolutely sure we have enough space, we're going to
2750          * preallocate 16 pages worth of space for each block group.  In
2751          * practice we ought to use at most 8, but we need extra space so we can
2752          * add our header and have a terminator between the extents and the
2753          * bitmaps.
2754          */
2755         num_pages *= 16;
2756         num_pages *= PAGE_CACHE_SIZE;
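        /*
         * Worked example, assuming 4K pages: a 10GB block group yields
         * num_pages = 10, scaled to 160 pages, i.e. 160 * 4096 = 640KB
         * preallocated for its free space cache.
         */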
2757
2758         ret = btrfs_check_data_free_space(inode, num_pages);
2759         if (ret)
2760                 goto out_put;
2761
2762         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2763                                               num_pages, num_pages,
2764                                               &alloc_hint);
2765         if (!ret)
2766                 dcs = BTRFS_DC_SETUP;
2767         btrfs_free_reserved_data_space(inode, num_pages);
2768 out_put:
2769         iput(inode);
2770 out_free:
2771         btrfs_release_path(path);
2772 out:
2773         spin_lock(&block_group->lock);
2774         block_group->disk_cache_state = dcs;
2775         spin_unlock(&block_group->lock);
2776
2777         return ret;
2778 }
2779
2780 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2781                                    struct btrfs_root *root)
2782 {
2783         struct btrfs_block_group_cache *cache;
2784         int err = 0;
2785         struct btrfs_path *path;
2786         u64 last = 0;
2787
2788         path = btrfs_alloc_path();
2789         if (!path)
2790                 return -ENOMEM;
2791
2792 again:
2793         while (1) {
2794                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2795                 while (cache) {
2796                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2797                                 break;
2798                         cache = next_block_group(root, cache);
2799                 }
2800                 if (!cache) {
2801                         if (last == 0)
2802                                 break;
2803                         last = 0;
2804                         continue;
2805                 }
2806                 err = cache_save_setup(cache, trans, path);
2807                 last = cache->key.objectid + cache->key.offset;
2808                 btrfs_put_block_group(cache);
2809         }
2810
2811         while (1) {
2812                 if (last == 0) {
2813                         err = btrfs_run_delayed_refs(trans, root,
2814                                                      (unsigned long)-1);
2815                         BUG_ON(err);
2816                 }
2817
2818                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2819                 while (cache) {
2820                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2821                                 btrfs_put_block_group(cache);
2822                                 goto again;
2823                         }
2824
2825                         if (cache->dirty)
2826                                 break;
2827                         cache = next_block_group(root, cache);
2828                 }
2829                 if (!cache) {
2830                         if (last == 0)
2831                                 break;
2832                         last = 0;
2833                         continue;
2834                 }
2835
2836                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2837                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2838                 cache->dirty = 0;
2839                 last = cache->key.objectid + cache->key.offset;
2840
2841                 err = write_one_cache_group(trans, root, path, cache);
2842                 BUG_ON(err);
2843                 btrfs_put_block_group(cache);
2844         }
2845
2846         while (1) {
2847                 /*
2848                  * This shouldn't be needed since we're just marking our
2849                  * preallocated extent as written, but running the delayed
2850                  * refs here can't hurt.
2851                  */
2852                 if (last == 0) {
2853                         err = btrfs_run_delayed_refs(trans, root,
2854                                                      (unsigned long)-1);
2855                         BUG_ON(err);
2856                 }
2857
2858                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2859                 while (cache) {
2860                         /*
2861                          * Really this shouldn't happen, but it could if we
2862                          * couldn't write the entire preallocated extent and
2863                          * splitting the extent resulted in a new block.
2864                          */
2865                         if (cache->dirty) {
2866                                 btrfs_put_block_group(cache);
2867                                 goto again;
2868                         }
2869                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2870                                 break;
2871                         cache = next_block_group(root, cache);
2872                 }
2873                 if (!cache) {
2874                         if (last == 0)
2875                                 break;
2876                         last = 0;
2877                         continue;
2878                 }
2879
2880                 btrfs_write_out_cache(root, trans, cache, path);
2881
2882                 /*
2883                  * If we didn't have an error then the cache state is still
2884                  * NEED_WRITE, so we can set it to WRITTEN.
2885                  */
2886                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2887                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
2888                 last = cache->key.objectid + cache->key.offset;
2889                 btrfs_put_block_group(cache);
2890         }
2891
2892         btrfs_free_path(path);
2893         return 0;
2894 }
2895
2896 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2897 {
2898         struct btrfs_block_group_cache *block_group;
2899         int readonly = 0;
2900
2901         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2902         if (!block_group || block_group->ro)
2903                 readonly = 1;
2904         if (block_group)
2905                 btrfs_put_block_group(block_group);
2906         return readonly;
2907 }
2908
2909 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2910                              u64 total_bytes, u64 bytes_used,
2911                              struct btrfs_space_info **space_info)
2912 {
2913         struct btrfs_space_info *found;
2914         int i;
2915         int factor;
2916
2917         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2918                      BTRFS_BLOCK_GROUP_RAID10))
2919                 factor = 2;
2920         else
2921                 factor = 1;
2922
2923         found = __find_space_info(info, flags);
2924         if (found) {
2925                 spin_lock(&found->lock);
2926                 found->total_bytes += total_bytes;
2927                 found->disk_total += total_bytes * factor;
2928                 found->bytes_used += bytes_used;
2929                 found->disk_used += bytes_used * factor;
2930                 found->full = 0;
2931                 spin_unlock(&found->lock);
2932                 *space_info = found;
2933                 return 0;
2934         }
2935         found = kzalloc(sizeof(*found), GFP_NOFS);
2936         if (!found)
2937                 return -ENOMEM;
2938
2939         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2940                 INIT_LIST_HEAD(&found->block_groups[i]);
2941         init_rwsem(&found->groups_sem);
2942         spin_lock_init(&found->lock);
2943         found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2944                                 BTRFS_BLOCK_GROUP_SYSTEM |
2945                                 BTRFS_BLOCK_GROUP_METADATA);
2946         found->total_bytes = total_bytes;
2947         found->disk_total = total_bytes * factor;
2948         found->bytes_used = bytes_used;
2949         found->disk_used = bytes_used * factor;
2950         found->bytes_pinned = 0;
2951         found->bytes_reserved = 0;
2952         found->bytes_readonly = 0;
2953         found->bytes_may_use = 0;
2954         found->full = 0;
2955         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
2956         found->chunk_alloc = 0;
2957         found->flush = 0;
2958         init_waitqueue_head(&found->wait);
2959         *space_info = found;
2960         list_add_rcu(&found->list, &info->space_info);
2961         return 0;
2962 }
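
/*
 * Example of the factor accounting above: adding a 1GB RAID1 block group
 * (factor == 2) grows total_bytes by 1GB but disk_total by 2GB, since
 * every byte in the group occupies two bytes of raw disk.
 */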
2963
2964 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2965 {
2966         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2967                                    BTRFS_BLOCK_GROUP_RAID1 |
2968                                    BTRFS_BLOCK_GROUP_RAID10 |
2969                                    BTRFS_BLOCK_GROUP_DUP);
2970         if (extra_flags) {
2971                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2972                         fs_info->avail_data_alloc_bits |= extra_flags;
2973                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2974                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2975                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2976                         fs_info->avail_system_alloc_bits |= extra_flags;
2977         }
2978 }
2979
2980 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2981 {
2982         /*
2983          * we add in the count of missing devices because we want
2984          * to make sure that any RAID levels on a degraded FS
2985          * continue to be honored.
2986          */
2987         u64 num_devices = root->fs_info->fs_devices->rw_devices +
2988                 root->fs_info->fs_devices->missing_devices;
2989
2990         if (num_devices == 1)
2991                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2992         if (num_devices < 4)
2993                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2994
2995         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2996             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2997                       BTRFS_BLOCK_GROUP_RAID10))) {
2998                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2999         }
3000
3001         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3002             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3003                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3004         }
3005
3006         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3007             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3008              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3009              (flags & BTRFS_BLOCK_GROUP_DUP)))
3010                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3011         return flags;
3012 }
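
/*
 * Example of the reduction above: on a filesystem with four rw devices,
 * DATA | DUP | RAID1 | RAID10 loses DUP (RAID1/RAID10 present) and then
 * RAID1 (RAID10 present), leaving DATA | RAID10.  On a single device the
 * same input collapses to DATA | DUP.
 */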
3013
3014 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3015 {
3016         if (flags & BTRFS_BLOCK_GROUP_DATA)
3017                 flags |= root->fs_info->avail_data_alloc_bits &
3018                          root->fs_info->data_alloc_profile;
3019         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3020                 flags |= root->fs_info->avail_system_alloc_bits &
3021                          root->fs_info->system_alloc_profile;
3022         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3023                 flags |= root->fs_info->avail_metadata_alloc_bits &
3024                          root->fs_info->metadata_alloc_profile;
3025         return btrfs_reduce_alloc_profile(root, flags);
3026 }
3027
3028 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3029 {
3030         u64 flags;
3031
3032         if (data)
3033                 flags = BTRFS_BLOCK_GROUP_DATA;
3034         else if (root == root->fs_info->chunk_root)
3035                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3036         else
3037                 flags = BTRFS_BLOCK_GROUP_METADATA;
3038
3039         return get_alloc_profile(root, flags);
3040 }
3041
3042 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3043 {
3044         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3045                                                        BTRFS_BLOCK_GROUP_DATA);
3046 }
3047
3048 /*
3049  * This will check the space that the inode allocates from to make sure we have
3050  * enough space for bytes.
3051  */
3052 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3053 {
3054         struct btrfs_space_info *data_sinfo;
3055         struct btrfs_root *root = BTRFS_I(inode)->root;
3056         u64 used;
3057         int ret = 0, committed = 0, alloc_chunk = 1;
3058
3059         /* make sure bytes are sectorsize aligned */
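        /* e.g. with a 4K sectorsize, a 6000 byte request rounds up to 8192 */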
3060         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3061
3062         if (root == root->fs_info->tree_root ||
3063             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3064                 alloc_chunk = 0;
3065                 committed = 1;
3066         }
3067
3068         data_sinfo = BTRFS_I(inode)->space_info;
3069         if (!data_sinfo)
3070                 goto alloc;
3071
3072 again:
3073         /* make sure we have enough space to handle the data first */
3074         spin_lock(&data_sinfo->lock);
3075         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3076                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3077                 data_sinfo->bytes_may_use;
3078
3079         if (used + bytes > data_sinfo->total_bytes) {
3080                 struct btrfs_trans_handle *trans;
3081
3082                 /*
3083                  * if we don't have enough free bytes in this space then we need
3084                  * to alloc a new chunk.
3085                  */
3086                 if (!data_sinfo->full && alloc_chunk) {
3087                         u64 alloc_target;
3088
3089                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3090                         spin_unlock(&data_sinfo->lock);
3091 alloc:
3092                         alloc_target = btrfs_get_alloc_profile(root, 1);
3093                         trans = btrfs_join_transaction(root);
3094                         if (IS_ERR(trans))
3095                                 return PTR_ERR(trans);
3096
3097                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3098                                              bytes + 2 * 1024 * 1024,
3099                                              alloc_target,
3100                                              CHUNK_ALLOC_NO_FORCE);
3101                         btrfs_end_transaction(trans, root);
3102                         if (ret < 0) {
3103                                 if (ret != -ENOSPC)
3104                                         return ret;
3105                                 else
3106                                         goto commit_trans;
3107                         }
3108
3109                         if (!data_sinfo) {
3110                                 btrfs_set_inode_space_info(root, inode);
3111                                 data_sinfo = BTRFS_I(inode)->space_info;
3112                         }
3113                         goto again;
3114                 }
3115
3116                 /*
3117                  * If we have fewer pinned bytes than we want to allocate then
3118                  * don't bother committing the transaction, it won't help us.
3119                  */
3120                 if (data_sinfo->bytes_pinned < bytes)
3121                         committed = 1;
3122                 spin_unlock(&data_sinfo->lock);
3123
3124                 /* commit the current transaction and try again */
3125 commit_trans:
3126                 if (!committed &&
3127                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3128                         committed = 1;
3129                         trans = btrfs_join_transaction(root);
3130                         if (IS_ERR(trans))
3131                                 return PTR_ERR(trans);
3132                         ret = btrfs_commit_transaction(trans, root);
3133                         if (ret)
3134                                 return ret;
3135                         goto again;
3136                 }
3137
3138                 return -ENOSPC;
3139         }
3140         data_sinfo->bytes_may_use += bytes;
3141         spin_unlock(&data_sinfo->lock);
3142
3143         return 0;
3144 }
3145
3146 /*
3147  * Called if we need to clear a data reservation for this inode.
3148  */
3149 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3150 {
3151         struct btrfs_root *root = BTRFS_I(inode)->root;
3152         struct btrfs_space_info *data_sinfo;
3153
3154         /* make sure bytes are sectorsize aligned */
3155         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3156
3157         data_sinfo = BTRFS_I(inode)->space_info;
3158         spin_lock(&data_sinfo->lock);
3159         data_sinfo->bytes_may_use -= bytes;
3160         spin_unlock(&data_sinfo->lock);
3161 }
3162
3163 static void force_metadata_allocation(struct btrfs_fs_info *info)
3164 {
3165         struct list_head *head = &info->space_info;
3166         struct btrfs_space_info *found;
3167
3168         rcu_read_lock();
3169         list_for_each_entry_rcu(found, head, list) {
3170                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3171                         found->force_alloc = CHUNK_ALLOC_FORCE;
3172         }
3173         rcu_read_unlock();
3174 }
3175
3176 static int should_alloc_chunk(struct btrfs_root *root,
3177                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3178                               int force)
3179 {
3180         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3181         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3182         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3183         u64 thresh;
3184
3185         if (force == CHUNK_ALLOC_FORCE)
3186                 return 1;
3187
3188         /*
3189          * We need to take into account the global rsv because for all intents
3190          * and purposes it's used space.  Don't worry about locking the
3191          * global_rsv, it doesn't change except when the transaction commits.
3192          */
3193         num_allocated += global_rsv->size;
3194
3195         /*
3196          * in limited mode, we want to have some free space up to
3197          * about 1% of the FS size.
3198          */
3199         if (force == CHUNK_ALLOC_LIMITED) {
3200                 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3201                 thresh = max_t(u64, 64 * 1024 * 1024,
3202                                div_factor_fine(thresh, 1));
3203
3204                 if (num_bytes - num_allocated < thresh)
3205                         return 1;
3206         }
3207
3208         /*
3209          * we have two similar checks here, one based on percentage
3210          * and one based on a hard number of 256MB.  The idea
3211          * is that if we have a good amount of free
3212          * room, don't allocate a chunk.  A good amount is
3213          * less than 80% of the chunk space we have allocated in use,
3214          * or more than 256MB free
3215          */
3216         if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3217                 return 0;
3218
3219         if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
3220                 return 0;
3221
3222         thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3223
3224         /* 256MB or 5% of the FS */
3225         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3226
3227         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3228                 return 0;
3229         return 1;
3230 }
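
/*
 * Worked example for the checks above: with 10GB of writable chunk space
 * (num_bytes) of which 9.9GB is allocated, a 1MB request fails both
 * early-out tests (9.9GB + 1MB + 256MB > 10GB, and 9.9GB + 1MB > 80% of
 * 10GB), so unless the final used-space ratio check fires we go ahead
 * and allocate a new chunk.
 */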
3231
3232 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3233                           struct btrfs_root *extent_root, u64 alloc_bytes,
3234                           u64 flags, int force)
3235 {
3236         struct btrfs_space_info *space_info;
3237         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3238         int wait_for_alloc = 0;
3239         int ret = 0;
3240
3241         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3242
3243         space_info = __find_space_info(extent_root->fs_info, flags);
3244         if (!space_info) {
3245                 ret = update_space_info(extent_root->fs_info, flags,
3246                                         0, 0, &space_info);
3247                 BUG_ON(ret);
3248         }
3249         BUG_ON(!space_info);
3250
3251 again:
3252         spin_lock(&space_info->lock);
3253         if (space_info->force_alloc)
3254                 force = space_info->force_alloc;
3255         if (space_info->full) {
3256                 spin_unlock(&space_info->lock);
3257                 return 0;
3258         }
3259
3260         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3261                 spin_unlock(&space_info->lock);
3262                 return 0;
3263         } else if (space_info->chunk_alloc) {
3264                 wait_for_alloc = 1;
3265         } else {
3266                 space_info->chunk_alloc = 1;
3267         }
3268
3269         spin_unlock(&space_info->lock);
3270
3271         mutex_lock(&fs_info->chunk_mutex);
3272
3273         /*
3274          * The chunk_mutex is held throughout the entirety of a chunk
3275          * allocation, so once we've acquired the chunk_mutex we know that the
3276          * other guy is done and we need to recheck and see if we should
3277          * allocate.
3278          */
3279         if (wait_for_alloc) {
3280                 mutex_unlock(&fs_info->chunk_mutex);
3281                 wait_for_alloc = 0;
3282                 goto again;
3283         }
3284
3285         /*
3286          * If we have mixed data/metadata chunks we want to make sure we keep
3287          * allocating mixed chunks instead of individual chunks.
3288          */
3289         if (btrfs_mixed_space_info(space_info))
3290                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3291
3292         /*
3293          * if we're doing a data chunk, go ahead and make sure that
3294          * we keep a reasonable number of metadata chunks allocated in the
3295          * FS as well.
3296          */
3297         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3298                 fs_info->data_chunk_allocations++;
3299                 if (!(fs_info->data_chunk_allocations %
3300                       fs_info->metadata_ratio))
3301                         force_metadata_allocation(fs_info);
3302         }
3303
3304         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3305         if (ret < 0 && ret != -ENOSPC)
3306                 goto out;
3307
3308         spin_lock(&space_info->lock);
3309         if (ret)
3310                 space_info->full = 1;
3311         else
3312                 ret = 1;
3313
3314         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3315         space_info->chunk_alloc = 0;
3316         spin_unlock(&space_info->lock);
3317 out:
3318         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3319         return ret;
3320 }
3321
3322 /*
3323  * shrink metadata reservation for delalloc
3324  */
3325 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3326                            struct btrfs_root *root, u64 to_reclaim, int sync)
3327 {
3328         struct btrfs_block_rsv *block_rsv;
3329         struct btrfs_space_info *space_info;
3330         u64 reserved;
3331         u64 max_reclaim;
3332         u64 reclaimed = 0;
3333         long time_left;
3334         int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3335         int loops = 0;
3336         unsigned long progress;
3337
3338         block_rsv = &root->fs_info->delalloc_block_rsv;
3339         space_info = block_rsv->space_info;
3340
3341         smp_mb();
3342         reserved = space_info->bytes_may_use;
3343         progress = space_info->reservation_progress;
3344
3345         if (reserved == 0)
3346                 return 0;
3347
3348         smp_mb();
3349         if (root->fs_info->delalloc_bytes == 0) {
3350                 if (trans)
3351                         return 0;
3352                 btrfs_wait_ordered_extents(root, 0, 0);
3353                 return 0;
3354         }
3355
3356         max_reclaim = min(reserved, to_reclaim);
3357
3358         while (loops < 1024) {
3359                 /* have the flusher threads jump in and do some IO */
3360                 smp_mb();
3361                 nr_pages = min_t(unsigned long, nr_pages,
3362                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3363                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3364
3365                 spin_lock(&space_info->lock);
3366                 if (reserved > space_info->bytes_may_use)
3367                         reclaimed += reserved - space_info->bytes_may_use;
3368                 reserved = space_info->bytes_may_use;
3369                 spin_unlock(&space_info->lock);
3370
3371                 loops++;
3372
3373                 if (reserved == 0 || reclaimed >= max_reclaim)
3374                         break;
3375
3376                 if (trans && trans->transaction->blocked)
3377                         return -EAGAIN;
3378
3379                 time_left = schedule_timeout_interruptible(1);
3380
3381                 /* We were interrupted, exit */
3382                 if (time_left)
3383                         break;
3384
3385                 /* we've kicked the IO a few times, if anything has been freed,
3386                  * exit.  There is no sense in looping here for a long time
3387                  * when we really need to commit the transaction, or there are
3388                  * just too many writers without enough free space
3389                  */
3390
3391                 if (loops > 3) {
3392                         smp_mb();
3393                         if (progress != space_info->reservation_progress)
3394                                 break;
3395                 }
3396
3397         }
3398         if (reclaimed >= to_reclaim && !trans)
3399                 btrfs_wait_ordered_extents(root, 0, 0);
3400         return reclaimed >= to_reclaim;
3401 }
3402
3403 /*
3404  * Reserve metadata bytes from a block_rsv's space_info.  If there is
3405  * enough unused space we take the reservation immediately.  Otherwise,
3406  * when @flush is set, we shrink delalloc to reclaim space, retrying up
3407  * to two times, and finally commit the transaction so that pinned bytes
3408  * become usable again.
3409  *
3410  * Returns 0 on success, -ENOSPC if the reservation cannot be satisfied,
3411  * and -EAGAIN if the caller holds a transaction handle and must flush or
3412  * commit on its own.
3413  */
3414 static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
3415                                   struct btrfs_root *root,
3416                                   struct btrfs_block_rsv *block_rsv,
3417                                   u64 orig_bytes, int flush)
3418 {
3419         struct btrfs_space_info *space_info = block_rsv->space_info;
3420         u64 unused;
3421         u64 num_bytes = orig_bytes;
3422         int retries = 0;
3423         int ret = 0;
3424         bool committed = false;
3425         bool flushing = false;
3426 again:
3427         ret = 0;
3428         spin_lock(&space_info->lock);
3429         /*
3430          * We only want to wait if somebody other than us is flushing and we are
3431          * actually allowed to flush.
3432          */
3433         while (flush && !flushing && space_info->flush) {
3434                 spin_unlock(&space_info->lock);
3435                 /*
3436                  * If we have a trans handle we can't wait because the flusher
3437                  * may have to commit the transaction, which would mean we would
3438                  * deadlock since we are waiting for the flusher to finish, but
3439                  * hold the current transaction open.
3440                  */
3441                 if (trans)
3442                         return -EAGAIN;
3443                 ret = wait_event_interruptible(space_info->wait,
3444                                                !space_info->flush);
3445                 /* Must have been interrupted, return */
3446                 if (ret)
3447                         return -EINTR;
3448
3449                 spin_lock(&space_info->lock);
3450         }
3451
3452         ret = -ENOSPC;
3453         unused = space_info->bytes_used + space_info->bytes_reserved +
3454                  space_info->bytes_pinned + space_info->bytes_readonly +
3455                  space_info->bytes_may_use;
3456
3457         /*
3458          * The idea here is that if we've not already over-reserved the space
3459          * then we can go ahead and save our reservation first and then start
3460          * flushing if we need to.  Otherwise if we've already overcommitted
3461          * let's start flushing stuff first and then come back and try to make
3462          * our reservation.
3463          */
3464         if (unused <= space_info->total_bytes) {
3465                 unused = space_info->total_bytes - unused;
3466                 if (unused >= num_bytes) {
3467                         space_info->bytes_may_use += orig_bytes;
3468                         ret = 0;
3469                 } else {
3470                         /*
3471                          * Ok set num_bytes to orig_bytes since we aren't
3472                          * overcommitted, this way we only try and reclaim what
3473                          * we need.
3474                          */
3475                         num_bytes = orig_bytes;
3476                 }
3477         } else {
3478                 /*
3479                  * Ok we're over committed, set num_bytes to the overcommitted
3480                  * amount plus the amount of bytes that we need for this
3481                  * reservation.
3482                  */
3483                 num_bytes = unused - space_info->total_bytes +
3484                         (orig_bytes * (retries + 1));
3485         }
3486
3487         /*
3488          * Couldn't make our reservation, save our place so while we're trying
3489          * to reclaim space we can actually use it instead of somebody else
3490          * stealing it from us.
3491          */
3492         if (ret && flush) {
3493                 flushing = true;
3494                 space_info->flush = 1;
3495         }
3496
3497         spin_unlock(&space_info->lock);
3498
3499         if (!ret || !flush)
3500                 goto out;
3501
3502         /*
3503          * We do synchronous shrinking since we don't actually unreserve
3504          * metadata until after the IO is completed.
3505          */
3506         ret = shrink_delalloc(trans, root, num_bytes, 1);
3507         if (ret < 0)
3508                 goto out;
3509
3510         ret = 0;
3511
3512         /*
3513          * So if we were overcommitted it's possible that somebody else flushed
3514          * out enough space and we simply didn't have enough space to reclaim,
3515          * so go back around and try again.
3516          */
3517         if (retries < 2) {
3518                 retries++;
3519                 goto again;
3520         }
3521
3522         /*
3523          * Not enough space to be reclaimed, don't bother committing the
3524          * transaction.
3525          */
3526         spin_lock(&space_info->lock);
3527         if (space_info->bytes_pinned < orig_bytes)
3528                 ret = -ENOSPC;
3529         spin_unlock(&space_info->lock);
3530         if (ret)
3531                 goto out;
3532
3533         ret = -EAGAIN;
3534         if (trans)
3535                 goto out;
3536
3537         ret = -ENOSPC;
3538         if (committed)
3539                 goto out;
3540
3541         trans = btrfs_join_transaction(root);
3542         if (IS_ERR(trans))
3543                 goto out;
3544         ret = btrfs_commit_transaction(trans, root);
3545         if (!ret) {
3546                 trans = NULL;
3547                 committed = true;
3548                 goto again;
3549         }
3550
3551 out:
3552         if (flushing) {
3553                 spin_lock(&space_info->lock);
3554                 space_info->flush = 0;
3555                 wake_up_all(&space_info->wait);
3556                 spin_unlock(&space_info->lock);
3557         }
3558         return ret;
3559 }
3560
3561 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3562                                              struct btrfs_root *root)
3563 {
3564         struct btrfs_block_rsv *block_rsv;
3565         if (root->ref_cows)
3566                 block_rsv = trans->block_rsv;
3567         else
3568                 block_rsv = root->block_rsv;
3569
3570         if (!block_rsv)
3571                 block_rsv = &root->fs_info->empty_block_rsv;
3572
3573         return block_rsv;
3574 }
3575
3576 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3577                                u64 num_bytes)
3578 {
3579         int ret = -ENOSPC;
3580         spin_lock(&block_rsv->lock);
3581         if (block_rsv->reserved >= num_bytes) {
3582                 block_rsv->reserved -= num_bytes;
3583                 if (block_rsv->reserved < block_rsv->size)
3584                         block_rsv->full = 0;
3585                 ret = 0;
3586         }
3587         spin_unlock(&block_rsv->lock);
3588         return ret;
3589 }
3590
3591 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3592                                 u64 num_bytes, int update_size)
3593 {
3594         spin_lock(&block_rsv->lock);
3595         block_rsv->reserved += num_bytes;
3596         if (update_size)
3597                 block_rsv->size += num_bytes;
3598         else if (block_rsv->reserved >= block_rsv->size)
3599                 block_rsv->full = 1;
3600         spin_unlock(&block_rsv->lock);
3601 }
3602
3603 static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3604                                     struct btrfs_block_rsv *dest, u64 num_bytes)
3605 {
3606         struct btrfs_space_info *space_info = block_rsv->space_info;
3607
3608         spin_lock(&block_rsv->lock);
3609         if (num_bytes == (u64)-1)
3610                 num_bytes = block_rsv->size;
3611         block_rsv->size -= num_bytes;
3612         if (block_rsv->reserved >= block_rsv->size) {
3613                 num_bytes = block_rsv->reserved - block_rsv->size;
3614                 block_rsv->reserved = block_rsv->size;
3615                 block_rsv->full = 1;
3616         } else {
3617                 num_bytes = 0;
3618         }
3619         spin_unlock(&block_rsv->lock);
3620
3621         if (num_bytes > 0) {
3622                 if (dest) {
3623                         spin_lock(&dest->lock);
3624                         if (!dest->full) {
3625                                 u64 bytes_to_add;
3626
3627                                 bytes_to_add = dest->size - dest->reserved;
3628                                 bytes_to_add = min(num_bytes, bytes_to_add);
3629                                 dest->reserved += bytes_to_add;
3630                                 if (dest->reserved >= dest->size)
3631                                         dest->full = 1;
3632                                 num_bytes -= bytes_to_add;
3633                         }
3634                         spin_unlock(&dest->lock);
3635                 }
3636                 if (num_bytes) {
3637                         spin_lock(&space_info->lock);
3638                         space_info->bytes_may_use -= num_bytes;
3639                         space_info->reservation_progress++;
3640                         spin_unlock(&space_info->lock);
3641                 }
3642         }
3643 }
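
/*
 * Example of the spill logic above: releasing 2MB from a rsv with
 * size == reserved == 8MB leaves size at 6MB and frees 2MB of excess.
 * If @dest (usually the global rsv) is 1MB short of full it absorbs
 * 1MB, and the remaining 1MB comes off the space_info's bytes_may_use.
 */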
3644
3645 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3646                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3647 {
3648         int ret;
3649
3650         ret = block_rsv_use_bytes(src, num_bytes);
3651         if (ret)
3652                 return ret;
3653
3654         block_rsv_add_bytes(dst, num_bytes, 1);
3655         return 0;
3656 }
3657
3658 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3659 {
3660         memset(rsv, 0, sizeof(*rsv));
3661         spin_lock_init(&rsv->lock);
3662 }
3663
3664 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3665 {
3666         struct btrfs_block_rsv *block_rsv;
3667         struct btrfs_fs_info *fs_info = root->fs_info;
3668
3669         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3670         if (!block_rsv)
3671                 return NULL;
3672
3673         btrfs_init_block_rsv(block_rsv);
3674         block_rsv->space_info = __find_space_info(fs_info,
3675                                                   BTRFS_BLOCK_GROUP_METADATA);
3676         return block_rsv;
3677 }
3678
3679 void btrfs_free_block_rsv(struct btrfs_root *root,
3680                           struct btrfs_block_rsv *rsv)
3681 {
3682         btrfs_block_rsv_release(root, rsv, (u64)-1);
3683         kfree(rsv);
3684 }
3685
3686 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3687                         struct btrfs_root *root,
3688                         struct btrfs_block_rsv *block_rsv,
3689                         u64 num_bytes)
3690 {
3691         int ret;
3692
3693         if (num_bytes == 0)
3694                 return 0;
3695
3696         ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
3697         if (!ret) {
3698                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3699                 return 0;
3700         }
3701
3702         return ret;
3703 }
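
/*
 * Minimal usage sketch for the reservation helpers above, assuming the
 * caller already holds a transaction handle: allocate a private rsv,
 * fill it, and give back whatever is left when done.
 */
static int __maybe_unused block_rsv_usage_sketch(struct btrfs_trans_handle *trans,
                                                 struct btrfs_root *root)
{
        struct btrfs_block_rsv *rsv;
        int ret;

        rsv = btrfs_alloc_block_rsv(root);
        if (!rsv)
                return -ENOMEM;

        /* reserve enough metadata for one tree operation */
        ret = btrfs_block_rsv_add(trans, root, rsv,
                                  btrfs_calc_trans_metadata_size(root, 1));

        /* unused bytes flow back to the global rsv or the space_info */
        btrfs_free_block_rsv(root, rsv);
        return ret;
}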
3704
3705 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3706                           struct btrfs_root *root,
3707                           struct btrfs_block_rsv *block_rsv,
3708                           u64 min_reserved, int min_factor, int flush)
3709 {
3710         u64 num_bytes = 0;
3711         int ret = -ENOSPC;
3712
3713         if (!block_rsv)
3714                 return 0;
3715
3716         spin_lock(&block_rsv->lock);
3717         if (min_factor > 0)
3718                 num_bytes = div_factor(block_rsv->size, min_factor);
3719         if (min_reserved > num_bytes)
3720                 num_bytes = min_reserved;
3721
3722         if (block_rsv->reserved >= num_bytes)
3723                 ret = 0;
3724         else
3725                 num_bytes -= block_rsv->reserved;
3726         spin_unlock(&block_rsv->lock);
3727
3728         if (!ret)
3729                 return 0;
3730
3731         ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, flush);
3732         if (!ret) {
3733                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3734                 return 0;
3735         }
3736
3737         return ret;
3738 }
3739
3740 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3741                             struct btrfs_block_rsv *dst_rsv,
3742                             u64 num_bytes)
3743 {
3744         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3745 }
3746
3747 void btrfs_block_rsv_release(struct btrfs_root *root,
3748                              struct btrfs_block_rsv *block_rsv,
3749                              u64 num_bytes)
3750 {
3751         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3752         if (global_rsv->full || global_rsv == block_rsv ||
3753             block_rsv->space_info != global_rsv->space_info)
3754                 global_rsv = NULL;
3755         block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3756 }
3757
3758 /*
3759  * helper to calculate size of global block reservation.
3760  * the desired value is sum of space used by extent tree,
3761  * checksum tree and root tree
3762  */
3763 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3764 {
3765         struct btrfs_space_info *sinfo;
3766         u64 num_bytes;
3767         u64 meta_used;
3768         u64 data_used;
3769         int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3770
3771         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3772         spin_lock(&sinfo->lock);
3773         data_used = sinfo->bytes_used;
3774         spin_unlock(&sinfo->lock);
3775
3776         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3777         spin_lock(&sinfo->lock);
3778         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3779                 data_used = 0;
3780         meta_used = sinfo->bytes_used;
3781         spin_unlock(&sinfo->lock);
3782
3783         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3784                     csum_size * 2;
3785         num_bytes += div64_u64(data_used + meta_used, 50);
3786
3787         if (num_bytes * 3 > meta_used)
3788                 num_bytes = div64_u64(meta_used, 3);
3789
3790         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3791 }
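
/*
 * Worked example, assuming 4K blocks and 4 byte csums: with 100GB of
 * data and 2GB of metadata in use, the csum term is (100GB / 4096) *
 * 4 * 2 = ~200MB and the 2% term adds another ~2GB, but the sum is
 * capped at meta_used / 3 = ~682MB, rounded up to a leafsize multiple.
 */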
3792
3793 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3794 {
3795         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3796         struct btrfs_space_info *sinfo = block_rsv->space_info;
3797         u64 num_bytes;
3798
3799         num_bytes = calc_global_metadata_size(fs_info);
3800
3801         spin_lock(&block_rsv->lock);
3802         spin_lock(&sinfo->lock);
3803
3804         block_rsv->size = num_bytes;
3805
3806         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3807                     sinfo->bytes_reserved + sinfo->bytes_readonly +
3808                     sinfo->bytes_may_use;
3809
3810         if (sinfo->total_bytes > num_bytes) {
3811                 num_bytes = sinfo->total_bytes - num_bytes;
3812                 block_rsv->reserved += num_bytes;
3813                 sinfo->bytes_may_use += num_bytes;
3814         }
3815
3816         if (block_rsv->reserved >= block_rsv->size) {
3817                 num_bytes = block_rsv->reserved - block_rsv->size;
3818                 sinfo->bytes_may_use -= num_bytes;
3819                 sinfo->reservation_progress++;
3820                 block_rsv->reserved = block_rsv->size;
3821                 block_rsv->full = 1;
3822         }
3823
3824         spin_unlock(&sinfo->lock);
3825         spin_unlock(&block_rsv->lock);
3826 }
3827
3828 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3829 {
3830         struct btrfs_space_info *space_info;
3831
3832         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3833         fs_info->chunk_block_rsv.space_info = space_info;
3834
3835         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3836         fs_info->global_block_rsv.space_info = space_info;
3837         fs_info->delalloc_block_rsv.space_info = space_info;
3838         fs_info->trans_block_rsv.space_info = space_info;
3839         fs_info->empty_block_rsv.space_info = space_info;
3840
3841         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3842         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3843         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3844         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3845         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3846
3847         update_global_block_rsv(fs_info);
3848 }
3849
3850 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3851 {
3852         block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3853         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3854         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3855         WARN_ON(fs_info->trans_block_rsv.size > 0);
3856         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3857         WARN_ON(fs_info->chunk_block_rsv.size > 0);
3858         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3859 }
3860
3861 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3862                                   struct btrfs_root *root)
3863 {
3864         if (!trans->bytes_reserved)
3865                 return;
3866
3867         BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3868         btrfs_block_rsv_release(root, trans->block_rsv,
3869                                 trans->bytes_reserved);
3870         trans->bytes_reserved = 0;
3871 }
3872
3873 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3874                                   struct inode *inode)
3875 {
3876         struct btrfs_root *root = BTRFS_I(inode)->root;
3877         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3878         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3879
3880         /*
3881          * We need to hold space in order to delete our orphan item once we've
3882          * added it, so this takes the reservation so we can release it later
3883          * when we are truly done with the orphan item.
3884          */
3885         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3886         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3887 }
3888
3889 void btrfs_orphan_release_metadata(struct inode *inode)
3890 {
3891         struct btrfs_root *root = BTRFS_I(inode)->root;
3892         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3893         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3894 }
3895
3896 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3897                                 struct btrfs_pending_snapshot *pending)
3898 {
3899         struct btrfs_root *root = pending->root;
3900         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3901         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3902         /*
3903          * two for root back/forward refs, two for directory entries
3904          * and one for root of the snapshot.
3905          */
3906         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3907         dst_rsv->space_info = src_rsv->space_info;
3908         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3909 }
3910
3911 /**
3912  * drop_outstanding_extent - drop an outstanding extent
3913  * @inode: the inode we're dropping the extent for
3914  *
3915  * This is called when we are freeing up an outstanding extent, either called
3916  * after an error or after an extent is written.  This will return the number of
3917  * reserved extents that need to be freed.  This must be called with
3918  * BTRFS_I(inode)->lock held.
3919  */
3920 static unsigned drop_outstanding_extent(struct inode *inode)
3921 {
3922         unsigned dropped_extents = 0;
3923
3924         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
3925         BTRFS_I(inode)->outstanding_extents--;
3926
3927         /*
3928          * If we have at least as many outstanding extents as we have reserved
3929          * extents then we need to leave the reserved extents count alone.
3930          */
3931         if (BTRFS_I(inode)->outstanding_extents >=
3932             BTRFS_I(inode)->reserved_extents)
3933                 return 0;
3934
3935         dropped_extents = BTRFS_I(inode)->reserved_extents -
3936                 BTRFS_I(inode)->outstanding_extents;
3937         BTRFS_I(inode)->reserved_extents -= dropped_extents;
3938         return dropped_extents;
3939 }
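
/*
 * Example: with 5 reserved extents and 3 outstanding, dropping one
 * extent leaves 2 outstanding, so 5 - 2 = 3 reserved extents are
 * handed back to the caller to free and reserved_extents drops to 2.
 */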
3940
3941 /**
3942  * calc_csum_metadata_size - return the amount of metadata space that must be
3943  *      reserved/freed for the given bytes.
3944  * @inode: the inode we're manipulating
3945  * @num_bytes: the number of bytes in question
3946  * @reserve: 1 if we are reserving space, 0 if we are freeing space
3947  *
3948  * This adjusts the number of csum_bytes in the inode and then returns the
3949  * correct amount of metadata that must either be reserved or freed.  We
3950  * calculate how many checksums we can fit into one leaf and then divide the
3951  * number of bytes that will need to be checksummed by this value to figure out
3952  * how many checksums will be required.  If we are adding bytes then the number
3953  * may go up and we will return the number of additional bytes that must be
3954  * reserved.  If it is going down we will return the number of bytes that must
3955  * be freed.
3956  *
3957  * This must be called with BTRFS_I(inode)->lock held.
3958  */
3959 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
3960                                    int reserve)
3961 {
3962         struct btrfs_root *root = BTRFS_I(inode)->root;
3963         u64 csum_size;
3964         int num_csums_per_leaf;
3965         int num_csums;
3966         int old_csums;
3967
3968         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
3969             BTRFS_I(inode)->csum_bytes == 0)
3970                 return 0;
3971
3972         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
3973         if (reserve)
3974                 BTRFS_I(inode)->csum_bytes += num_bytes;
3975         else
3976                 BTRFS_I(inode)->csum_bytes -= num_bytes;
3977         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
3978         num_csums_per_leaf = (int)div64_u64(csum_size,
3979                                             sizeof(struct btrfs_csum_item) +
3980                                             sizeof(struct btrfs_disk_key));
3981         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
3982         num_csums = num_csums + num_csums_per_leaf - 1;
3983         num_csums = num_csums / num_csums_per_leaf;
3984
3985         old_csums = old_csums + num_csums_per_leaf - 1;
3986         old_csums = old_csums / num_csums_per_leaf;
3987
3988         /* No change, no need to reserve more */
3989         if (old_csums == num_csums)
3990                 return 0;
3991
3992         if (reserve)
3993                 return btrfs_calc_trans_metadata_size(root,
3994                                                       num_csums - old_csums);
3995
3996         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
3997 }
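
/*
 * Example, assuming 4K sectors and a leaf that holds ~120 csum items
 * (the exact count depends on the leaf size): growing csum_bytes from
 * 0 to 1MB means 256 checksums, i.e. ceil(256 / 120) = 3 leaves versus
 * 0 before, so we reserve btrfs_calc_trans_metadata_size(root, 3).
 */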
3998
3999 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4000 {
4001         struct btrfs_root *root = BTRFS_I(inode)->root;
4002         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4003         u64 to_reserve = 0;
4004         unsigned nr_extents = 0;
4005         int ret;
4006
4007         if (btrfs_transaction_in_commit(root->fs_info))
4008                 schedule_timeout(1);
4009
4010         num_bytes = ALIGN(num_bytes, root->sectorsize);
4011
4012         spin_lock(&BTRFS_I(inode)->lock);
4013         BTRFS_I(inode)->outstanding_extents++;
4014
4015         if (BTRFS_I(inode)->outstanding_extents >
4016             BTRFS_I(inode)->reserved_extents) {
4017                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4018                         BTRFS_I(inode)->reserved_extents;
4019                 BTRFS_I(inode)->reserved_extents += nr_extents;
4020
4021                 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4022         }
4023         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4024         spin_unlock(&BTRFS_I(inode)->lock);
4025
4026         ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4027         if (ret) {
4028                 u64 to_free = 0;
4029                 unsigned dropped;
4030
4031                 spin_lock(&BTRFS_I(inode)->lock);
4032                 dropped = drop_outstanding_extent(inode);
4033                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4034                 spin_unlock(&BTRFS_I(inode)->lock);
4035                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4036
4037                 /*
4038                  * Somebody could have come in and twiddled with the
4039                  * reservation, so if we have to free more than we would have
4040                  * reserved from this reservation go ahead and release those
4041                  * bytes.
4042                  */
4043                 to_free -= to_reserve;
4044                 if (to_free)
4045                         btrfs_block_rsv_release(root, block_rsv, to_free);
4046                 return ret;
4047         }
4048
4049         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4050
4051         return 0;
4052 }
4053
4054 /**
4055  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4056  * @inode: the inode to release the reservation for
4057  * @num_bytes: the number of bytes we're releasing
4058  *
4059  * This will release the metadata reservation for an inode.  This can be called
4060  * once we complete IO for a given set of bytes to release their metadata
4061  * reservations.
4062  */
4063 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4064 {
4065         struct btrfs_root *root = BTRFS_I(inode)->root;
4066         u64 to_free = 0;
4067         unsigned dropped;
4068
4069         num_bytes = ALIGN(num_bytes, root->sectorsize);
4070         spin_lock(&BTRFS_I(inode)->lock);
4071         dropped = drop_outstanding_extent(inode);
4072
4073         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4074         spin_unlock(&BTRFS_I(inode)->lock);
4075         if (dropped > 0)
4076                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4077
4078         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4079                                 to_free);
4080 }
4081
4082 /**
4083  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4084  * @inode: inode we're writing to
4085  * @num_bytes: the number of bytes we want to allocate
4086  *
4087  * This will do the following things
4088  *
4089  * o reserve space in the data space info for num_bytes
4090  * o reserve space in the metadata space info based on number of outstanding
4091  *   extents and how much csums will be needed
4092  * o add to the inodes ->delalloc_bytes
4093  * o add it to the fs_info's delalloc inodes list.
4094  *
4095  * This will return 0 for success and -ENOSPC if there is no space left.
4096  */
4097 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4098 {
4099         int ret;
4100
4101         ret = btrfs_check_data_free_space(inode, num_bytes);
4102         if (ret)
4103                 return ret;
4104
4105         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4106         if (ret) {
4107                 btrfs_free_reserved_data_space(inode, num_bytes);
4108                 return ret;
4109         }
4110
4111         return 0;
4112 }
4113
4114 /**
4115  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4116  * @inode: inode we're releasing space for
4117  * @num_bytes: the number of bytes we want to free up
4118  *
4119  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4120  * called in the case that we don't need the metadata AND data reservations
4121  * anymore, for example if there is an error or we insert an inline extent.
4122  *
4123  * This function will release the metadata space that was not used and will
4124  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4125  * list if there are no delalloc bytes left.
4126  */
4127 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4128 {
4129         btrfs_delalloc_release_metadata(inode, num_bytes);
4130         btrfs_free_reserved_data_space(inode, num_bytes);
4131 }
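
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * pair btrfs_delalloc_reserve_space() with btrfs_delalloc_release_space()
 * on an error path, per the comments above.  example_buffered_write() and
 * do_copy_in() are hypothetical stand-ins for a real write path.
 */
static int example_buffered_write(struct inode *inode, u64 num_bytes)
{
        int ret;

        ret = btrfs_delalloc_reserve_space(inode, num_bytes);
        if (ret)
                return ret;     /* -ENOSPC, nothing was reserved */

        ret = do_copy_in(inode, num_bytes);     /* hypothetical helper */
        if (ret) {
                /* neither the data nor the metadata reservation is needed */
                btrfs_delalloc_release_space(inode, num_bytes);
                return ret;
        }
        return 0;
}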
4132
4133 static int update_block_group(struct btrfs_trans_handle *trans,
4134                               struct btrfs_root *root,
4135                               u64 bytenr, u64 num_bytes, int alloc)
4136 {
4137         struct btrfs_block_group_cache *cache = NULL;
4138         struct btrfs_fs_info *info = root->fs_info;
4139         u64 total = num_bytes;
4140         u64 old_val;
4141         u64 byte_in_group;
4142         int factor;
4143
4144         /* block accounting for super block */
4145         spin_lock(&info->delalloc_lock);
4146         old_val = btrfs_super_bytes_used(&info->super_copy);
4147         if (alloc)
4148                 old_val += num_bytes;
4149         else
4150                 old_val -= num_bytes;
4151         btrfs_set_super_bytes_used(&info->super_copy, old_val);
4152         spin_unlock(&info->delalloc_lock);
4153
4154         while (total) {
4155                 cache = btrfs_lookup_block_group(info, bytenr);
4156                 if (!cache)
4157                         return -1;
4158                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4159                                     BTRFS_BLOCK_GROUP_RAID1 |
4160                                     BTRFS_BLOCK_GROUP_RAID10))
4161                         factor = 2;
4162                 else
4163                         factor = 1;
4164                 /*
4165                  * If this block group has free space cache written out, we
4166                  * need to make sure to load it if we are removing space.  This
4167                  * is because we need the unpinning stage to actually add the
4168                  * space back to the block group, otherwise we will leak space.
4169                  */
4170                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4171                         cache_block_group(cache, trans, NULL, 1);
4172
4173                 byte_in_group = bytenr - cache->key.objectid;
4174                 WARN_ON(byte_in_group > cache->key.offset);
4175
4176                 spin_lock(&cache->space_info->lock);
4177                 spin_lock(&cache->lock);
4178
4179                 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4180                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4181                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4182
4183                 cache->dirty = 1;
4184                 old_val = btrfs_block_group_used(&cache->item);
4185                 num_bytes = min(total, cache->key.offset - byte_in_group);
4186                 if (alloc) {
4187                         old_val += num_bytes;
4188                         btrfs_set_block_group_used(&cache->item, old_val);
4189                         cache->reserved -= num_bytes;
4190                         cache->space_info->bytes_reserved -= num_bytes;
4191                         cache->space_info->bytes_used += num_bytes;
4192                         cache->space_info->disk_used += num_bytes * factor;
4193                         spin_unlock(&cache->lock);
4194                         spin_unlock(&cache->space_info->lock);
4195                 } else {
4196                         old_val -= num_bytes;
4197                         btrfs_set_block_group_used(&cache->item, old_val);
4198                         cache->pinned += num_bytes;
4199                         cache->space_info->bytes_pinned += num_bytes;
4200                         cache->space_info->bytes_used -= num_bytes;
4201                         cache->space_info->disk_used -= num_bytes * factor;
4202                         spin_unlock(&cache->lock);
4203                         spin_unlock(&cache->space_info->lock);
4204
4205                         set_extent_dirty(info->pinned_extents,
4206                                          bytenr, bytenr + num_bytes - 1,
4207                                          GFP_NOFS | __GFP_NOFAIL);
4208                 }
4209                 btrfs_put_block_group(cache);
4210                 total -= num_bytes;
4211                 bytenr += num_bytes;
4212         }
4213         return 0;
4214 }
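
/*
 * Illustrative sketch, not in the original file: the "factor" picked above
 * doubles the on-disk accounting for profiles that keep two copies of every
 * byte, which is why disk_used moves by num_bytes * factor while bytes_used
 * moves by plain num_bytes.  A standalone version of that selection:
 */
static inline int example_bg_factor(u64 flags)
{
        if (flags & (BTRFS_BLOCK_GROUP_DUP |
                     BTRFS_BLOCK_GROUP_RAID1 |
                     BTRFS_BLOCK_GROUP_RAID10))
                return 2;       /* two copies of every byte on disk */
        return 1;               /* single and raid0: one copy */
}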
4215
4216 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4217 {
4218         struct btrfs_block_group_cache *cache;
4219         u64 bytenr;
4220
4221         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4222         if (!cache)
4223                 return 0;
4224
4225         bytenr = cache->key.objectid;
4226         btrfs_put_block_group(cache);
4227
4228         return bytenr;
4229 }
4230
4231 static int pin_down_extent(struct btrfs_root *root,
4232                            struct btrfs_block_group_cache *cache,
4233                            u64 bytenr, u64 num_bytes, int reserved)
4234 {
4235         spin_lock(&cache->space_info->lock);
4236         spin_lock(&cache->lock);
4237         cache->pinned += num_bytes;
4238         cache->space_info->bytes_pinned += num_bytes;
4239         if (reserved) {
4240                 cache->reserved -= num_bytes;
4241                 cache->space_info->bytes_reserved -= num_bytes;
4242         }
4243         spin_unlock(&cache->lock);
4244         spin_unlock(&cache->space_info->lock);
4245
4246         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4247                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4248         return 0;
4249 }
4250
4251 /*
4252  * this function must be called within a transaction
4253  */
4254 int btrfs_pin_extent(struct btrfs_root *root,
4255                      u64 bytenr, u64 num_bytes, int reserved)
4256 {
4257         struct btrfs_block_group_cache *cache;
4258
4259         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4260         BUG_ON(!cache);
4261
4262         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4263
4264         btrfs_put_block_group(cache);
4265         return 0;
4266 }
4267
4268 /**
4269  * btrfs_update_reserved_bytes - update the block_group and space info counters
4270  * @cache:      The cache we are manipulating
4271  * @num_bytes:  The number of bytes in question
4272  * @reserve:    One of the reservation enums
4273  *
4274  * This is called by the allocator when it reserves space, or by somebody who is
4275  * freeing space that was never actually used on disk.  For example if you
4276  * reserve some space for a new leaf in transaction A and before transaction A
4277  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
4278  * in order to clear the reservation.
4279  *
4280  * Metadata allocations should call this with RESERVE_ALLOC so we do the proper
4281  * ENOSPC accounting.  For data we handle the reservation through clearing the
4282  * delalloc bits in the io_tree.  We have to do this since we could end up
4283  * allocating less disk space for the amount of data we have reserved in the
4284  * case of compression.
4285  *
4286  * If this is a reservation and the block group has become read only, we cannot
4287  * make the reservation and return -EAGAIN, otherwise this function always
4288  * succeeds.
4289  */
4290 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4291                                        u64 num_bytes, int reserve)
4292 {
4293         struct btrfs_space_info *space_info = cache->space_info;
4294         int ret = 0;
4295         spin_lock(&space_info->lock);
4296         spin_lock(&cache->lock);
4297         if (reserve != RESERVE_FREE) {
4298                 if (cache->ro) {
4299                         ret = -EAGAIN;
4300                 } else {
4301                         cache->reserved += num_bytes;
4302                         space_info->bytes_reserved += num_bytes;
4303                         if (reserve == RESERVE_ALLOC) {
4304                                 BUG_ON(space_info->bytes_may_use < num_bytes);
4305                                 space_info->bytes_may_use -= num_bytes;
4306                         }
4307                 }
4308         } else {
4309                 if (cache->ro)
4310                         space_info->bytes_readonly += num_bytes;
4311                 cache->reserved -= num_bytes;
4312                 space_info->bytes_reserved -= num_bytes;
4313                 space_info->reservation_progress++;
4314         }
4315         spin_unlock(&cache->lock);
4316         spin_unlock(&space_info->lock);
4317         return ret;
4318 }
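
/*
 * Illustrative sketch, hypothetical caller: the two halves of a
 * reservation's life as described above.  RESERVE_ALLOC moves bytes from
 * bytes_may_use to bytes_reserved; RESERVE_FREE undoes the reservation if
 * the space is freed before it was ever written.
 */
static void example_reservation_lifetime(struct btrfs_block_group_cache *cache,
                                         u64 len)
{
        if (btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC) == -EAGAIN)
                return;         /* the block group went read only */

        /* ... later we decide the new leaf is not needed after all ... */
        btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
}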
4319
4320 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4321                                 struct btrfs_root *root)
4322 {
4323         struct btrfs_fs_info *fs_info = root->fs_info;
4324         struct btrfs_caching_control *next;
4325         struct btrfs_caching_control *caching_ctl;
4326         struct btrfs_block_group_cache *cache;
4327
4328         down_write(&fs_info->extent_commit_sem);
4329
4330         list_for_each_entry_safe(caching_ctl, next,
4331                                  &fs_info->caching_block_groups, list) {
4332                 cache = caching_ctl->block_group;
4333                 if (block_group_cache_done(cache)) {
4334                         cache->last_byte_to_unpin = (u64)-1;
4335                         list_del_init(&caching_ctl->list);
4336                         put_caching_control(caching_ctl);
4337                 } else {
4338                         cache->last_byte_to_unpin = caching_ctl->progress;
4339                 }
4340         }
4341
4342         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4343                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4344         else
4345                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4346
4347         up_write(&fs_info->extent_commit_sem);
4348
4349         update_global_block_rsv(fs_info);
4350         return 0;
4351 }
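
/*
 * Illustrative sketch, not in the original file: pinned_extents ping-pongs
 * between the two freed_extents trees.  The swap above points new pins at
 * one tree while btrfs_finish_extent_commit() below drains the other, so
 * pinning during a commit never races with the unpin walk.
 */
static struct extent_io_tree *example_tree_to_unpin(struct btrfs_fs_info *fs_info)
{
        if (fs_info->pinned_extents == &fs_info->freed_extents[0])
                return &fs_info->freed_extents[1];
        return &fs_info->freed_extents[0];
}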
4352
4353 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4354 {
4355         struct btrfs_fs_info *fs_info = root->fs_info;
4356         struct btrfs_block_group_cache *cache = NULL;
4357         u64 len;
4358
4359         while (start <= end) {
4360                 if (!cache ||
4361                     start >= cache->key.objectid + cache->key.offset) {
4362                         if (cache)
4363                                 btrfs_put_block_group(cache);
4364                         cache = btrfs_lookup_block_group(fs_info, start);
4365                         BUG_ON(!cache);
4366                 }
4367
4368                 len = cache->key.objectid + cache->key.offset - start;
4369                 len = min(len, end + 1 - start);
4370
4371                 if (start < cache->last_byte_to_unpin) {
4372                         len = min(len, cache->last_byte_to_unpin - start);
4373                         btrfs_add_free_space(cache, start, len);
4374                 }
4375
4376                 start += len;
4377
4378                 spin_lock(&cache->space_info->lock);
4379                 spin_lock(&cache->lock);
4380                 cache->pinned -= len;
4381                 cache->space_info->bytes_pinned -= len;
4382                 if (cache->ro)
4383                         cache->space_info->bytes_readonly += len;
4384                 spin_unlock(&cache->lock);
4385                 spin_unlock(&cache->space_info->lock);
4386         }
4387
4388         if (cache)
4389                 btrfs_put_block_group(cache);
4390         return 0;
4391 }
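
/*
 * Worked example, numbers invented: say a block group covers [1M, 129M) and
 * its caching thread has progressed to last_byte_to_unpin == 65M.  Unpinning
 * [60M, 70M) takes two passes through the loop above: the first returns
 * [60M, 65M) to the free space cache and drops 5M of pinned counters, the
 * second only drops the remaining 5M of pinned counters.  Space past the
 * progress mark is left for the caching thread to discover, otherwise it
 * would be counted twice.
 */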
4392
4393 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4394                                struct btrfs_root *root)
4395 {
4396         struct btrfs_fs_info *fs_info = root->fs_info;
4397         struct extent_io_tree *unpin;
4398         u64 start;
4399         u64 end;
4400         int ret;
4401
4402         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4403                 unpin = &fs_info->freed_extents[1];
4404         else
4405                 unpin = &fs_info->freed_extents[0];
4406
4407         while (1) {
4408                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4409                                             EXTENT_DIRTY);
4410                 if (ret)
4411                         break;
4412
4413                 if (btrfs_test_opt(root, DISCARD))
4414                         ret = btrfs_discard_extent(root, start,
4415                                                    end + 1 - start, NULL);
4416
4417                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4418                 unpin_extent_range(root, start, end);
4419                 cond_resched();
4420         }
4421
4422         return 0;
4423 }
4424
4425 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4426                                 struct btrfs_root *root,
4427                                 u64 bytenr, u64 num_bytes, u64 parent,
4428                                 u64 root_objectid, u64 owner_objectid,
4429                                 u64 owner_offset, int refs_to_drop,
4430                                 struct btrfs_delayed_extent_op *extent_op)
4431 {
4432         struct btrfs_key key;
4433         struct btrfs_path *path;
4434         struct btrfs_fs_info *info = root->fs_info;
4435         struct btrfs_root *extent_root = info->extent_root;
4436         struct extent_buffer *leaf;
4437         struct btrfs_extent_item *ei;
4438         struct btrfs_extent_inline_ref *iref;
4439         int ret;
4440         int is_data;
4441         int extent_slot = 0;
4442         int found_extent = 0;
4443         int num_to_del = 1;
4444         u32 item_size;
4445         u64 refs;
4446
4447         path = btrfs_alloc_path();
4448         if (!path)
4449                 return -ENOMEM;
4450
4451         path->reada = 1;
4452         path->leave_spinning = 1;
4453
4454         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4455         BUG_ON(!is_data && refs_to_drop != 1);
4456
4457         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4458                                     bytenr, num_bytes, parent,
4459                                     root_objectid, owner_objectid,
4460                                     owner_offset);
4461         if (ret == 0) {
4462                 extent_slot = path->slots[0];
4463                 while (extent_slot >= 0) {
4464                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4465                                               extent_slot);
4466                         if (key.objectid != bytenr)
4467                                 break;
4468                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4469                             key.offset == num_bytes) {
4470                                 found_extent = 1;
4471                                 break;
4472                         }
4473                         if (path->slots[0] - extent_slot > 5)
4474                                 break;
4475                         extent_slot--;
4476                 }
4477 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4478                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4479                 if (found_extent && item_size < sizeof(*ei))
4480                         found_extent = 0;
4481 #endif
4482                 if (!found_extent) {
4483                         BUG_ON(iref);
4484                         ret = remove_extent_backref(trans, extent_root, path,
4485                                                     NULL, refs_to_drop,
4486                                                     is_data);
4487                         BUG_ON(ret);
4488                         btrfs_release_path(path);
4489                         path->leave_spinning = 1;
4490
4491                         key.objectid = bytenr;
4492                         key.type = BTRFS_EXTENT_ITEM_KEY;
4493                         key.offset = num_bytes;
4494
4495                         ret = btrfs_search_slot(trans, extent_root,
4496                                                 &key, path, -1, 1);
4497                         if (ret) {
4498                                 printk(KERN_ERR "umm, got %d back from search"
4499                                        ", was looking for %llu\n", ret,
4500                                        (unsigned long long)bytenr);
4501                                 if (ret > 0)
4502                                         btrfs_print_leaf(extent_root,
4503                                                          path->nodes[0]);
4504                         }
4505                         BUG_ON(ret);
4506                         extent_slot = path->slots[0];
4507                 }
4508         } else {
4509                 btrfs_print_leaf(extent_root, path->nodes[0]);
4510                 WARN_ON(1);
4511                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4512                        "parent %llu root %llu  owner %llu offset %llu\n",
4513                        (unsigned long long)bytenr,
4514                        (unsigned long long)parent,
4515                        (unsigned long long)root_objectid,
4516                        (unsigned long long)owner_objectid,
4517                        (unsigned long long)owner_offset);
4518         }
4519
4520         leaf = path->nodes[0];
4521         item_size = btrfs_item_size_nr(leaf, extent_slot);
4522 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4523         if (item_size < sizeof(*ei)) {
4524                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4525                 ret = convert_extent_item_v0(trans, extent_root, path,
4526                                              owner_objectid, 0);
4527                 BUG_ON(ret < 0);
4528
4529                 btrfs_release_path(path);
4530                 path->leave_spinning = 1;
4531
4532                 key.objectid = bytenr;
4533                 key.type = BTRFS_EXTENT_ITEM_KEY;
4534                 key.offset = num_bytes;
4535
4536                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4537                                         -1, 1);
4538                 if (ret) {
4539                         printk(KERN_ERR "umm, got %d back from search"
4540                                ", was looking for %llu\n", ret,
4541                                (unsigned long long)bytenr);
4542                         btrfs_print_leaf(extent_root, path->nodes[0]);
4543                 }
4544                 BUG_ON(ret);
4545                 extent_slot = path->slots[0];
4546                 leaf = path->nodes[0];
4547                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4548         }
4549 #endif
4550         BUG_ON(item_size < sizeof(*ei));
4551         ei = btrfs_item_ptr(leaf, extent_slot,
4552                             struct btrfs_extent_item);
4553         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4554                 struct btrfs_tree_block_info *bi;
4555                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4556                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4557                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4558         }
4559
4560         refs = btrfs_extent_refs(leaf, ei);
4561         BUG_ON(refs < refs_to_drop);
4562         refs -= refs_to_drop;
4563
4564         if (refs > 0) {
4565                 if (extent_op)
4566                         __run_delayed_extent_op(extent_op, leaf, ei);
4567                 /*
4568                  * In the case of inline back ref, reference count will
4569                  * be updated by remove_extent_backref
4570                  */
4571                 if (iref) {
4572                         BUG_ON(!found_extent);
4573                 } else {
4574                         btrfs_set_extent_refs(leaf, ei, refs);
4575                         btrfs_mark_buffer_dirty(leaf);
4576                 }
4577                 if (found_extent) {
4578                         ret = remove_extent_backref(trans, extent_root, path,
4579                                                     iref, refs_to_drop,
4580                                                     is_data);
4581                         BUG_ON(ret);
4582                 }
4583         } else {
4584                 if (found_extent) {
4585                         BUG_ON(is_data && refs_to_drop !=
4586                                extent_data_ref_count(root, path, iref));
4587                         if (iref) {
4588                                 BUG_ON(path->slots[0] != extent_slot);
4589                         } else {
4590                                 BUG_ON(path->slots[0] != extent_slot + 1);
4591                                 path->slots[0] = extent_slot;
4592                                 num_to_del = 2;
4593                         }
4594                 }
4595
4596                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4597                                       num_to_del);
4598                 BUG_ON(ret);
4599                 btrfs_release_path(path);
4600
4601                 if (is_data) {
4602                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4603                         BUG_ON(ret);
4604                 } else {
4605                         invalidate_mapping_pages(info->btree_inode->i_mapping,
4606                              bytenr >> PAGE_CACHE_SHIFT,
4607                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4608                 }
4609
4610                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4611                 BUG_ON(ret);
4612         }
4613         btrfs_free_path(path);
4614         return ret;
4615 }
4616
4617 /*
4618  * when we free a block, it is possible (and likely) that we free the last
4619  * delayed ref for that extent as well.  This searches the delayed ref tree for
4620  * a given extent, and if there are no other delayed refs to be processed, it
4621  * removes it from the tree.
4622  */
4623 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4624                                       struct btrfs_root *root, u64 bytenr)
4625 {
4626         struct btrfs_delayed_ref_head *head;
4627         struct btrfs_delayed_ref_root *delayed_refs;
4628         struct btrfs_delayed_ref_node *ref;
4629         struct rb_node *node;
4630         int ret = 0;
4631
4632         delayed_refs = &trans->transaction->delayed_refs;
4633         spin_lock(&delayed_refs->lock);
4634         head = btrfs_find_delayed_ref_head(trans, bytenr);
4635         if (!head)
4636                 goto out;
4637
4638         node = rb_prev(&head->node.rb_node);
4639         if (!node)
4640                 goto out;
4641
4642         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4643
4644         /* there are still entries for this ref, we can't drop it */
4645         if (ref->bytenr == bytenr)
4646                 goto out;
4647
4648         if (head->extent_op) {
4649                 if (!head->must_insert_reserved)
4650                         goto out;
4651                 kfree(head->extent_op);
4652                 head->extent_op = NULL;
4653         }
4654
4655         /*
4656          * waiting for the lock here would deadlock.  If someone else has it
4657          * locked, they are already in the process of dropping it anyway
4658          */
4659         if (!mutex_trylock(&head->mutex))
4660                 goto out;
4661
4662         /*
4663          * at this point we have a head with no other entries.  Go
4664          * ahead and process it.
4665          */
4666         head->node.in_tree = 0;
4667         rb_erase(&head->node.rb_node, &delayed_refs->root);
4668
4669         delayed_refs->num_entries--;
4670
4671         /*
4672          * we don't take a ref on the node because we're removing it from the
4673          * tree, so we just steal the ref the tree was holding.
4674          */
4675         delayed_refs->num_heads--;
4676         if (list_empty(&head->cluster))
4677                 delayed_refs->num_heads_ready--;
4678
4679         list_del_init(&head->cluster);
4680         spin_unlock(&delayed_refs->lock);
4681
4682         BUG_ON(head->extent_op);
4683         if (head->must_insert_reserved)
4684                 ret = 1;
4685
4686         mutex_unlock(&head->mutex);
4687         btrfs_put_delayed_ref(&head->node);
4688         return ret;
4689 out:
4690         spin_unlock(&delayed_refs->lock);
4691         return 0;
4692 }
4693
4694 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4695                            struct btrfs_root *root,
4696                            struct extent_buffer *buf,
4697                            u64 parent, int last_ref)
4698 {
4699         struct btrfs_block_rsv *block_rsv;
4700         struct btrfs_block_group_cache *cache = NULL;
4701         int ret;
4702
4703         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4704                 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4705                                                 parent, root->root_key.objectid,
4706                                                 btrfs_header_level(buf),
4707                                                 BTRFS_DROP_DELAYED_REF, NULL);
4708                 BUG_ON(ret);
4709         }
4710
4711         if (!last_ref)
4712                 return;
4713
4714         block_rsv = get_block_rsv(trans, root);
4715         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4716         if (block_rsv->space_info != cache->space_info)
4717                 goto out;
4718
4719         if (btrfs_header_generation(buf) == trans->transid) {
4720                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4721                         ret = check_ref_cleanup(trans, root, buf->start);
4722                         if (!ret)
4723                                 goto out;
4724                 }
4725
4726                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4727                         pin_down_extent(root, cache, buf->start, buf->len, 1);
4728                         goto out;
4729                 }
4730
4731                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4732
4733                 btrfs_add_free_space(cache, buf->start, buf->len);
4734                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
4735         }
4736 out:
4737         /*
4738          * Deleting the buffer, clear the corrupt flag since it doesn't matter
4739          * anymore.
4740          */
4741         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
4742         btrfs_put_block_group(cache);
4743 }
4744
4745 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4746                       struct btrfs_root *root,
4747                       u64 bytenr, u64 num_bytes, u64 parent,
4748                       u64 root_objectid, u64 owner, u64 offset)
4749 {
4750         int ret;
4751
4752         /*
4753          * tree log blocks never actually go into the extent allocation
4754          * tree, just update pinning info and exit early.
4755          */
4756         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4757                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4758                 /* unlocks the pinned mutex */
4759                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4760                 ret = 0;
4761         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4762                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4763                                         parent, root_objectid, (int)owner,
4764                                         BTRFS_DROP_DELAYED_REF, NULL);
4765                 BUG_ON(ret);
4766         } else {
4767                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4768                                         parent, root_objectid, owner,
4769                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
4770                 BUG_ON(ret);
4771         }
4772         return ret;
4773 }
4774
4775 static u64 stripe_align(struct btrfs_root *root, u64 val)
4776 {
4777         u64 mask = ((u64)root->stripesize - 1);
4778         u64 ret = (val + mask) & ~mask;
4779         return ret;
4780 }
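
/*
 * Worked example, assuming root->stripesize == 64K: mask is 0xffff, so
 * stripe_align() is the usual power-of-two round-up,
 *
 *      stripe_align(root, 0x12345) == 0x20000
 *      stripe_align(root, 0x20000) == 0x20000
 *
 * i.e. values already on a stripe boundary are left alone.
 */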
4781
4782 /*
4783  * when we wait for progress in the block group caching, it's because
4784  * our allocation attempt failed at least once.  So, we must sleep
4785  * and let some progress happen before we try again.
4786  *
4787  * This function will sleep at least once waiting for new free space to
4788  * show up, and then it will check the block group free space numbers
4789  * for our min num_bytes.  Another option is to have it go ahead
4790  * and look in the rbtree for a free extent of a given size, but this
4791  * is a good start.
4792  */
4793 static noinline int
4794 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4795                                 u64 num_bytes)
4796 {
4797         struct btrfs_caching_control *caching_ctl;
4798         DEFINE_WAIT(wait);
4799
4800         caching_ctl = get_caching_control(cache);
4801         if (!caching_ctl)
4802                 return 0;
4803
4804         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4805                    (cache->free_space_ctl->free_space >= num_bytes));
4806
4807         put_caching_control(caching_ctl);
4808         return 0;
4809 }
4810
4811 static noinline int
4812 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4813 {
4814         struct btrfs_caching_control *caching_ctl;
4815         DEFINE_WAIT(wait);
4816
4817         caching_ctl = get_caching_control(cache);
4818         if (!caching_ctl)
4819                 return 0;
4820
4821         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4822
4823         put_caching_control(caching_ctl);
4824         return 0;
4825 }
4826
4827 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4828 {
4829         int index;
4830         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4831                 index = 0;
4832         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4833                 index = 1;
4834         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4835                 index = 2;
4836         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4837                 index = 3;
4838         else
4839                 index = 4;
4840         return index;
4841 }
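
/*
 * Illustrative sketch, hypothetical helper: the index above selects one of
 * the per-RAID-type lists in space_info->block_groups[], and the search loop
 * below walks them in this order.  Visiting every candidate block group
 * looks roughly like:
 */
static void example_walk_all_raid_lists(struct btrfs_space_info *space_info)
{
        struct btrfs_block_group_cache *cache;
        int index;

        down_read(&space_info->groups_sem);
        for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
                list_for_each_entry(cache, &space_info->block_groups[index],
                                    list) {
                        /* examine cache; order matches get_block_group_index() */
                }
        }
        up_read(&space_info->groups_sem);
}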
4842
4843 enum btrfs_loop_type {
4844         LOOP_FIND_IDEAL = 0,
4845         LOOP_CACHING_NOWAIT = 1,
4846         LOOP_CACHING_WAIT = 2,
4847         LOOP_ALLOC_CHUNK = 3,
4848         LOOP_NO_EMPTY_SIZE = 4,
4849 };
4850
4851 /*
4852  * walks the btree of allocated extents and finds a hole of a given size.
4853  * The key ins is changed to record the hole:
4854  * ins->objectid == block start
4855  * ins->type == BTRFS_EXTENT_ITEM_KEY
4856  * ins->offset == number of bytes
4857  * Any available blocks before search_start are skipped.
4858  */
4859 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4860                                      struct btrfs_root *orig_root,
4861                                      u64 num_bytes, u64 empty_size,
4862                                      u64 search_start, u64 search_end,
4863                                      u64 hint_byte, struct btrfs_key *ins,
4864                                      u64 data)
4865 {
4866         int ret = 0;
4867         struct btrfs_root *root = orig_root->fs_info->extent_root;
4868         struct btrfs_free_cluster *last_ptr = NULL;
4869         struct btrfs_block_group_cache *block_group = NULL;
4870         int empty_cluster = 2 * 1024 * 1024;
4871         int allowed_chunk_alloc = 0;
4872         int done_chunk_alloc = 0;
4873         struct btrfs_space_info *space_info;
4874         int last_ptr_loop = 0;
4875         int loop = 0;
4876         int index = 0;
4877         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
4878                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
4879         bool found_uncached_bg = false;
4880         bool failed_cluster_refill = false;
4881         bool failed_alloc = false;
4882         bool use_cluster = true;
4883         u64 ideal_cache_percent = 0;
4884         u64 ideal_cache_offset = 0;
4885
4886         WARN_ON(num_bytes < root->sectorsize);
4887         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4888         ins->objectid = 0;
4889         ins->offset = 0;
4890
4891         space_info = __find_space_info(root->fs_info, data);
4892         if (!space_info) {
4893                 printk(KERN_ERR "No space info for %llu\n", data);
4894                 return -ENOSPC;
4895         }
4896
4897         /*
4898          * If the space info is for both data and metadata it means we have a
4899          * small filesystem and we can't use the clustering stuff.
4900          */
4901         if (btrfs_mixed_space_info(space_info))
4902                 use_cluster = false;
4903
4904         if (orig_root->ref_cows || empty_size)
4905                 allowed_chunk_alloc = 1;
4906
4907         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4908                 last_ptr = &root->fs_info->meta_alloc_cluster;
4909                 if (!btrfs_test_opt(root, SSD))
4910                         empty_cluster = 64 * 1024;
4911         }
4912
4913         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4914             btrfs_test_opt(root, SSD)) {
4915                 last_ptr = &root->fs_info->data_alloc_cluster;
4916         }
4917
4918         if (last_ptr) {
4919                 spin_lock(&last_ptr->lock);
4920                 if (last_ptr->block_group)
4921                         hint_byte = last_ptr->window_start;
4922                 spin_unlock(&last_ptr->lock);
4923         }
4924
4925         search_start = max(search_start, first_logical_byte(root, 0));
4926         search_start = max(search_start, hint_byte);
4927
4928         if (!last_ptr)
4929                 empty_cluster = 0;
4930
4931         if (search_start == hint_byte) {
4932 ideal_cache:
4933                 block_group = btrfs_lookup_block_group(root->fs_info,
4934                                                        search_start);
4935                 /*
4936                  * we don't want to use the block group if it doesn't match our
4937                  * allocation bits, or if it's not cached.
4938                  *
4939                  * However if we are re-searching with an ideal block group
4940                  * picked out then we don't care that the block group is cached.
4941                  */
4942                 if (block_group && block_group_bits(block_group, data) &&
4943                     (block_group->cached != BTRFS_CACHE_NO ||
4944                      search_start == ideal_cache_offset)) {
4945                         down_read(&space_info->groups_sem);
4946                         if (list_empty(&block_group->list) ||
4947                             block_group->ro) {
4948                                 /*
4949                                  * someone is removing this block group,
4950                                  * we can't jump into the have_block_group
4951                                  * target because our list pointers are not
4952                                  * valid
4953                                  */
4954                                 btrfs_put_block_group(block_group);
4955                                 up_read(&space_info->groups_sem);
4956                         } else {
4957                                 index = get_block_group_index(block_group);
4958                                 goto have_block_group;
4959                         }
4960                 } else if (block_group) {
4961                         btrfs_put_block_group(block_group);
4962                 }
4963         }
4964 search:
4965         down_read(&space_info->groups_sem);
4966         list_for_each_entry(block_group, &space_info->block_groups[index],
4967                             list) {
4968                 u64 offset;
4969                 int cached;
4970
4971                 btrfs_get_block_group(block_group);
4972                 search_start = block_group->key.objectid;
4973
4974                 /*
4975                  * this can happen if we end up cycling through all the
4976                  * raid types, but we want to make sure we only allocate
4977                  * for the proper type.
4978                  */
4979                 if (!block_group_bits(block_group, data)) {
4980                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
4981                                     BTRFS_BLOCK_GROUP_RAID1 |
4982                                     BTRFS_BLOCK_GROUP_RAID10;
4983
4984                         /*
4985                          * if they asked for extra copies and this block group
4986                          * doesn't provide them, bail.  This does allow us to
4987                          * fill raid0 from raid1.
4988                          */
4989                         if ((data & extra) && !(block_group->flags & extra))
4990                                 goto loop;
4991                 }
4992
4993 have_block_group:
4994                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4995                         u64 free_percent;
4996
4997                         ret = cache_block_group(block_group, trans,
4998                                                 orig_root, 1);
4999                         if (block_group->cached == BTRFS_CACHE_FINISHED)
5000                                 goto have_block_group;
5001
5002                         free_percent = btrfs_block_group_used(&block_group->item);
5003                         free_percent *= 100;
5004                         free_percent = div64_u64(free_percent,
5005                                                  block_group->key.offset);
5006                         free_percent = 100 - free_percent;
5007                         if (free_percent > ideal_cache_percent &&
5008                             likely(!block_group->ro)) {
5009                                 ideal_cache_offset = block_group->key.objectid;
5010                                 ideal_cache_percent = free_percent;
5011                         }
5012
5013                         /*
5014                          * The caching workers are limited to 2 threads, so we
5015                          * can queue as much work as we care to.
5016                          */
5017                         if (loop > LOOP_FIND_IDEAL) {
5018                                 ret = cache_block_group(block_group, trans,
5019                                                         orig_root, 0);
5020                                 BUG_ON(ret);
5021                         }
5022                         found_uncached_bg = true;
5023
5024                         /*
5025                          * If loop is set for cached only, try the next block
5026                          * group.
5027                          */
5028                         if (loop == LOOP_FIND_IDEAL)
5029                                 goto loop;
5030                 }
5031
5032                 cached = block_group_cache_done(block_group);
5033                 if (unlikely(!cached))
5034                         found_uncached_bg = true;
5035
5036                 if (unlikely(block_group->ro))
5037                         goto loop;
5038
5039                 spin_lock(&block_group->free_space_ctl->tree_lock);
5040                 if (cached &&
5041                     block_group->free_space_ctl->free_space <
5042                     num_bytes + empty_size) {
5043                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5044                         goto loop;
5045                 }
5046                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5047
5048                 /*
5049                  * Ok, we want to try and use the cluster allocator, so let's look
5050                  * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
5051                  * have tried the cluster allocator plenty of times at this
5052                  * point and not have found anything, so we are likely way too
5053                  * fragmented for the clustering stuff to find anything, so let's
5054                  * just skip it and let the allocator find whatever block it can
5055                  * find.
5056                  */
5057                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
5058                         /*
5059                          * the refill lock keeps out other
5060                          * people trying to start a new cluster
5061                          */
5062                         spin_lock(&last_ptr->refill_lock);
5063                         if (last_ptr->block_group &&
5064                             (last_ptr->block_group->ro ||
5065                             !block_group_bits(last_ptr->block_group, data))) {
5066                                 offset = 0;
5067                                 goto refill_cluster;
5068                         }
5069
5070                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5071                                                  num_bytes, search_start);
5072                         if (offset) {
5073                                 /* we have a block, we're done */
5074                                 spin_unlock(&last_ptr->refill_lock);
5075                                 goto checks;
5076                         }
5077
5078                         spin_lock(&last_ptr->lock);
5079                         /*
5080                          * whoops, this cluster doesn't actually point to
5081                          * this block group.  Get a ref on the block
5082                          * group it does point to and try again
5083                          */
5084                         if (!last_ptr_loop && last_ptr->block_group &&
5085                             last_ptr->block_group != block_group &&
5086                             index <=
5087                                  get_block_group_index(last_ptr->block_group)) {
5088
5089                                 btrfs_put_block_group(block_group);
5090                                 block_group = last_ptr->block_group;
5091                                 btrfs_get_block_group(block_group);
5092                                 spin_unlock(&last_ptr->lock);
5093                                 spin_unlock(&last_ptr->refill_lock);
5094
5095                                 last_ptr_loop = 1;
5096                                 search_start = block_group->key.objectid;
5097                                 /*
5098                                  * we know this block group is properly
5099                                  * in the list because
5100                                  * btrfs_remove_block_group drops the
5101                                  * cluster before it removes the block
5102                                  * group from the list
5103                                  */
5104                                 goto have_block_group;
5105                         }
5106                         spin_unlock(&last_ptr->lock);
5107 refill_cluster:
5108                         /*
5109                          * this cluster didn't work out, free it and
5110                          * start over
5111                          */
5112                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5113
5114                         last_ptr_loop = 0;
5115
5116                         /* allocate a cluster in this block group */
5117                         ret = btrfs_find_space_cluster(trans, root,
5118                                                block_group, last_ptr,
5119                                                offset, num_bytes,
5120                                                empty_cluster + empty_size);
5121                         if (ret == 0) {
5122                                 /*
5123                                  * now pull our allocation out of this
5124                                  * cluster
5125                                  */
5126                                 offset = btrfs_alloc_from_cluster(block_group,
5127                                                   last_ptr, num_bytes,
5128                                                   search_start);
5129                                 if (offset) {
5130                                         /* we found one, proceed */
5131                                         spin_unlock(&last_ptr->refill_lock);
5132                                         goto checks;
5133                                 }
5134                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5135                                    && !failed_cluster_refill) {
5136                                 spin_unlock(&last_ptr->refill_lock);
5137
5138                                 failed_cluster_refill = true;
5139                                 wait_block_group_cache_progress(block_group,
5140                                        num_bytes + empty_cluster + empty_size);
5141                                 goto have_block_group;
5142                         }
5143
5144                         /*
5145                          * at this point we either didn't find a cluster
5146                          * or we weren't able to allocate a block from our
5147                          * cluster.  Free the cluster we've been trying
5148                          * to use, and go to the next block group
5149                          */
5150                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5151                         spin_unlock(&last_ptr->refill_lock);
5152                         goto loop;
5153                 }
5154
5155                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5156                                                     num_bytes, empty_size);
5157                 /*
5158                  * If we didn't find a chunk, and we haven't failed on this
5159                  * block group before, and this block group is in the middle of
5160                  * caching and we are ok with waiting, then go ahead and wait
5161                  * for progress to be made, and set failed_alloc to true.
5162                  *
5163                  * If failed_alloc is true then we've already waited on this
5164                  * block group once and should move on to the next block group.
5165                  */
5166                 if (!offset && !failed_alloc && !cached &&
5167                     loop > LOOP_CACHING_NOWAIT) {
5168                         wait_block_group_cache_progress(block_group,
5169                                                 num_bytes + empty_size);
5170                         failed_alloc = true;
5171                         goto have_block_group;
5172                 } else if (!offset) {
5173                         goto loop;
5174                 }
5175 checks:
5176                 search_start = stripe_align(root, offset);
5177                 /* move on to the next group */
5178                 if (search_start + num_bytes >= search_end) {
5179                         btrfs_add_free_space(block_group, offset, num_bytes);
5180                         goto loop;
5181                 }
5182
5183                 /* move on to the next group */
5184                 if (search_start + num_bytes >
5185                     block_group->key.objectid + block_group->key.offset) {
5186                         btrfs_add_free_space(block_group, offset, num_bytes);
5187                         goto loop;
5188                 }
5189
5198                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
5199                                                   alloc_type);
5200                 if (ret == -EAGAIN) {
5201                         btrfs_add_free_space(block_group, offset, num_bytes);
5202                         goto loop;
5203                 }
5204
5205                 /* we are all good, let's return */
5206                 ins->objectid = search_start;
5207                 ins->offset = num_bytes;
5208
5209                 if (offset < search_start)
5210                         btrfs_add_free_space(block_group, offset,
5211                                              search_start - offset);
5212                 BUG_ON(offset > search_start);
5213                 btrfs_put_block_group(block_group);
5214                 break;
5215 loop:
5216                 failed_cluster_refill = false;
5217                 failed_alloc = false;
5218                 BUG_ON(index != get_block_group_index(block_group));
5219                 btrfs_put_block_group(block_group);
5220         }
5221         up_read(&space_info->groups_sem);
5222
5223         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5224                 goto search;
5225
5226         /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5227          *                      for them to make caching progress.  Also
5228          *                      determine the best possible bg to cache
5229          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5230          *                      caching kthreads as we move along
5231          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5232          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5233          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5234          *                      again
5235          */
5236         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5237                 index = 0;
5238                 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5239                         found_uncached_bg = false;
5240                         loop++;
5241                         if (!ideal_cache_percent)
5242                                 goto search;
5243
5244                         /*
5245                          * One of the following two things has happened so far:
5246                          *
5247                          * 1) We found an ideal block group for caching that
5248                          * is mostly full and will cache quickly, so we might
5249                          * as well wait for it.
5250                          *
5251                          * 2) We searched for cached only and we didn't find
5252                          * anything, and we didn't start any caching kthreads
5253                          * either, so chances are we will loop through and
5254                          * start a couple caching kthreads, and then come back
5255                          * around and just wait for them.  This will be slower
5256                          * because we will have 2 caching kthreads reading at
5257                          * the same time when we could have just started one
5258                          * and waited for it to get far enough to give us an
5259                          * allocation, so go ahead and go to the wait caching
5260                          * loop.
5261                          */
5262                         loop = LOOP_CACHING_WAIT;
5263                         search_start = ideal_cache_offset;
5264                         ideal_cache_percent = 0;
5265                         goto ideal_cache;
5266                 } else if (loop == LOOP_FIND_IDEAL) {
5267                         /*
5268                          * Didn't find an uncached bg, wait on anything we find
5269                          * next.
5270                          */
5271                         loop = LOOP_CACHING_WAIT;
5272                         goto search;
5273                 }
5274
5275                 loop++;
5276
5277                 if (loop == LOOP_ALLOC_CHUNK) {
5278                         if (allowed_chunk_alloc) {
5279                                 ret = do_chunk_alloc(trans, root, num_bytes +
5280                                                      2 * 1024 * 1024, data,
5281                                                      CHUNK_ALLOC_LIMITED);
5282                                 allowed_chunk_alloc = 0;
5283                                 if (ret == 1)
5284                                         done_chunk_alloc = 1;
5285                         } else if (!done_chunk_alloc &&
5286                                    space_info->force_alloc ==
5287                                    CHUNK_ALLOC_NO_FORCE) {
5288                                 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5289                         }
5290
5291                         /*
5292                          * We didn't allocate a chunk, go ahead and drop the
5293                          * empty size and loop again.
5294                          */
5295                         if (!done_chunk_alloc)
5296                                 loop = LOOP_NO_EMPTY_SIZE;
5297                 }
5298
5299                 if (loop == LOOP_NO_EMPTY_SIZE) {
5300                         empty_size = 0;
5301                         empty_cluster = 0;
5302                 }
5303
5304                 goto search;
5305         } else if (!ins->objectid) {
5306                 ret = -ENOSPC;
5307         } else {
5308                 ret = 0;
5309         }
5310
5311         return ret;
5312 }
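
/*
 * Illustrative sketch, hypothetical caller: on success the key describes the
 * hole exactly as the comment above find_free_extent() says, and the bytes
 * have already been moved to "reserved" via btrfs_update_reserved_bytes().
 */
static noinline int example_find(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root, u64 num_bytes,
                                 u64 hint_byte, u64 data,
                                 struct btrfs_key *ins)
{
        int ret;

        ret = find_free_extent(trans, root, num_bytes, 0, 0, (u64)-1,
                               hint_byte, ins, data);
        if (!ret) {
                /* ins->objectid == start bytenr, ins->offset == num_bytes */
                WARN_ON(ins->offset != num_bytes);
        }
        return ret;
}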
5313
5314 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5315                             int dump_block_groups)
5316 {
5317         struct btrfs_block_group_cache *cache;
5318         int index = 0;
5319
5320         spin_lock(&info->lock);
5321         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5322                (unsigned long long)info->flags,
5323                (unsigned long long)(info->total_bytes - info->bytes_used -
5324                                     info->bytes_pinned - info->bytes_reserved -
5325                                     info->bytes_readonly),
5326                (info->full) ? "" : "not ");
5327         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5328                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5329                (unsigned long long)info->total_bytes,
5330                (unsigned long long)info->bytes_used,
5331                (unsigned long long)info->bytes_pinned,
5332                (unsigned long long)info->bytes_reserved,
5333                (unsigned long long)info->bytes_may_use,
5334                (unsigned long long)info->bytes_readonly);
5335         spin_unlock(&info->lock);
5336
5337         if (!dump_block_groups)
5338                 return;
5339
5340         down_read(&info->groups_sem);
5341 again:
5342         list_for_each_entry(cache, &info->block_groups[index], list) {
5343                 spin_lock(&cache->lock);
5344                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5345                        "%llu pinned %llu reserved\n",
5346                        (unsigned long long)cache->key.objectid,
5347                        (unsigned long long)cache->key.offset,
5348                        (unsigned long long)btrfs_block_group_used(&cache->item),
5349                        (unsigned long long)cache->pinned,
5350                        (unsigned long long)cache->reserved);
5351                 btrfs_dump_free_space(cache, bytes);
5352                 spin_unlock(&cache->lock);
5353         }
5354         if (++index < BTRFS_NR_RAID_TYPES)
5355                 goto again;
5356         up_read(&info->groups_sem);
5357 }
5358
5359 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5360                          struct btrfs_root *root,
5361                          u64 num_bytes, u64 min_alloc_size,
5362                          u64 empty_size, u64 hint_byte,
5363                          u64 search_end, struct btrfs_key *ins,
5364                          u64 data)
5365 {
5366         int ret;
5367         u64 search_start = 0;
5368
5369         data = btrfs_get_alloc_profile(root, data);
5370 again:
5371         /*
5372          * the only place that sets empty_size is btrfs_realloc_node, which
5373          * is not called recursively on allocations
5374          */
5375         if (empty_size || root->ref_cows)
5376                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5377                                      num_bytes + 2 * 1024 * 1024, data,
5378                                      CHUNK_ALLOC_NO_FORCE);
5379
5380         WARN_ON(num_bytes < root->sectorsize);
5381         ret = find_free_extent(trans, root, num_bytes, empty_size,
5382                                search_start, search_end, hint_byte,
5383                                ins, data);
5384
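        /*
         * On ENOSPC, retry with progressively smaller requests: halve the
         * size, round down to a sector boundary, clamp to min_alloc_size,
         * and force a chunk allocation before searching again.
         */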
5385         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5386                 num_bytes = num_bytes >> 1;
5387                 num_bytes = num_bytes & ~(root->sectorsize - 1);
5388                 num_bytes = max(num_bytes, min_alloc_size);
5389                 do_chunk_alloc(trans, root->fs_info->extent_root,
5390                                num_bytes, data, CHUNK_ALLOC_FORCE);
5391                 goto again;
5392         }
5393         if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5394                 struct btrfs_space_info *sinfo;
5395
5396                 sinfo = __find_space_info(root->fs_info, data);
5397                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5398                        "wanted %llu\n", (unsigned long long)data,
5399                        (unsigned long long)num_bytes);
5400                 dump_space_info(sinfo, num_bytes, 1);
5401         }
5402
5403         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5404
5405         return ret;
5406 }
5407
5408 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5409 {
5410         struct btrfs_block_group_cache *cache;
5411         int ret = 0;
5412
5413         cache = btrfs_lookup_block_group(root->fs_info, start);
5414         if (!cache) {
5415                 printk(KERN_ERR "Unable to find block group for %llu\n",
5416                        (unsigned long long)start);
5417                 return -ENOSPC;
5418         }
5419
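        /*
         * A failed discard is only reported, not fatal: the space is
         * returned to the free space cache and the reservation dropped
         * either way.
         */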
5420         if (btrfs_test_opt(root, DISCARD))
5421                 ret = btrfs_discard_extent(root, start, len, NULL);
5422
5423         btrfs_add_free_space(cache, start, len);
5424         btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5425         btrfs_put_block_group(cache);
5426
5427         trace_btrfs_reserved_extent_free(root, start, len);
5428
5429         return ret;
5430 }
5431
5432 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5433                                       struct btrfs_root *root,
5434                                       u64 parent, u64 root_objectid,
5435                                       u64 flags, u64 owner, u64 offset,
5436                                       struct btrfs_key *ins, int ref_mod)
5437 {
5438         int ret;
5439         struct btrfs_fs_info *fs_info = root->fs_info;
5440         struct btrfs_extent_item *extent_item;
5441         struct btrfs_extent_inline_ref *iref;
5442         struct btrfs_path *path;
5443         struct extent_buffer *leaf;
5444         int type;
5445         u32 size;
5446
5447         if (parent > 0)
5448                 type = BTRFS_SHARED_DATA_REF_KEY;
5449         else
5450                 type = BTRFS_EXTENT_DATA_REF_KEY;
5451
5452         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5453
5454         path = btrfs_alloc_path();
5455         if (!path)
5456                 return -ENOMEM;
5457
5458         path->leave_spinning = 1;
5459         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5460                                       ins, size);
5461         BUG_ON(ret);
5462
5463         leaf = path->nodes[0];
5464         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5465                                      struct btrfs_extent_item);
5466         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5467         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5468         btrfs_set_extent_flags(leaf, extent_item,
5469                                flags | BTRFS_EXTENT_FLAG_DATA);
5470
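        /*
         * the inline ref follows the extent item directly in the leaf:
         * [extent item][inline ref type][shared or per-root data ref]
         */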
5471         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5472         btrfs_set_extent_inline_ref_type(leaf, iref, type);
5473         if (parent > 0) {
5474                 struct btrfs_shared_data_ref *ref;
5475                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5476                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5477                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5478         } else {
5479                 struct btrfs_extent_data_ref *ref;
5480                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5481                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5482                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5483                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5484                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5485         }
5486
5487         btrfs_mark_buffer_dirty(path->nodes[0]);
5488         btrfs_free_path(path);
5489
5490         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5491         if (ret) {
5492                 printk(KERN_ERR "btrfs update block group failed for %llu "
5493                        "%llu\n", (unsigned long long)ins->objectid,
5494                        (unsigned long long)ins->offset);
5495                 BUG();
5496         }
5497         return ret;
5498 }
5499
5500 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5501                                      struct btrfs_root *root,
5502                                      u64 parent, u64 root_objectid,
5503                                      u64 flags, struct btrfs_disk_key *key,
5504                                      int level, struct btrfs_key *ins)
5505 {
5506         int ret;
5507         struct btrfs_fs_info *fs_info = root->fs_info;
5508         struct btrfs_extent_item *extent_item;
5509         struct btrfs_tree_block_info *block_info;
5510         struct btrfs_extent_inline_ref *iref;
5511         struct btrfs_path *path;
5512         struct extent_buffer *leaf;
5513         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5514
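        /*
         * tree block extent items carry an extra btrfs_tree_block_info
         * (first key + level) between the extent item and the inline ref.
         */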
5515         path = btrfs_alloc_path();
5516         if (!path)
5517                 return -ENOMEM;
5518
5519         path->leave_spinning = 1;
5520         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5521                                       ins, size);
5522         BUG_ON(ret);
5523
5524         leaf = path->nodes[0];
5525         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5526                                      struct btrfs_extent_item);
5527         btrfs_set_extent_refs(leaf, extent_item, 1);
5528         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5529         btrfs_set_extent_flags(leaf, extent_item,
5530                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5531         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5532
5533         btrfs_set_tree_block_key(leaf, block_info, key);
5534         btrfs_set_tree_block_level(leaf, block_info, level);
5535
5536         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5537         if (parent > 0) {
5538                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5539                 btrfs_set_extent_inline_ref_type(leaf, iref,
5540                                                  BTRFS_SHARED_BLOCK_REF_KEY);
5541                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5542         } else {
5543                 btrfs_set_extent_inline_ref_type(leaf, iref,
5544                                                  BTRFS_TREE_BLOCK_REF_KEY);
5545                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5546         }
5547
5548         btrfs_mark_buffer_dirty(leaf);
5549         btrfs_free_path(path);
5550
5551         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5552         if (ret) {
5553                 printk(KERN_ERR "btrfs update block group failed for %llu "
5554                        "%llu\n", (unsigned long long)ins->objectid,
5555                        (unsigned long long)ins->offset);
5556                 BUG();
5557         }
5558         return ret;
5559 }
5560
5561 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5562                                      struct btrfs_root *root,
5563                                      u64 root_objectid, u64 owner,
5564                                      u64 offset, struct btrfs_key *ins)
5565 {
5566         int ret;
5567
5568         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5569
5570         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5571                                          0, root_objectid, owner, offset,
5572                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
5573         return ret;
5574 }
5575
5576 /*
5577  * this is used by the tree logging recovery code.  It records that
5578  * an extent has been allocated and makes sure to clear the free
5579  * space cache bits as well
5580  */
5581 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5582                                    struct btrfs_root *root,
5583                                    u64 root_objectid, u64 owner, u64 offset,
5584                                    struct btrfs_key *ins)
5585 {
5586         int ret;
5587         struct btrfs_block_group_cache *block_group;
5588         struct btrfs_caching_control *caching_ctl;
5589         u64 start = ins->objectid;
5590         u64 num_bytes = ins->offset;
5591
5592         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5593         cache_block_group(block_group, trans, NULL, 0);
5594         caching_ctl = get_caching_control(block_group);
5595
5596         if (!caching_ctl) {
5597                 BUG_ON(!block_group_cache_done(block_group));
5598                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5599                 BUG_ON(ret);
5600         } else {
5601                 mutex_lock(&caching_ctl->mutex);
5602
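                /*
                 * three cases relative to the caching progress: the range
                 * is entirely beyond it (exclude it so caching skips it),
                 * entirely behind it (remove it from the free space
                 * cache), or straddles it (split and do both).
                 */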
5603                 if (start >= caching_ctl->progress) {
5604                         ret = add_excluded_extent(root, start, num_bytes);
5605                         BUG_ON(ret);
5606                 } else if (start + num_bytes <= caching_ctl->progress) {
5607                         ret = btrfs_remove_free_space(block_group,
5608                                                       start, num_bytes);
5609                         BUG_ON(ret);
5610                 } else {
5611                         num_bytes = caching_ctl->progress - start;
5612                         ret = btrfs_remove_free_space(block_group,
5613                                                       start, num_bytes);
5614                         BUG_ON(ret);
5615
5616                         start = caching_ctl->progress;
5617                         num_bytes = ins->objectid + ins->offset -
5618                                     caching_ctl->progress;
5619                         ret = add_excluded_extent(root, start, num_bytes);
5620                         BUG_ON(ret);
5621                 }
5622
5623                 mutex_unlock(&caching_ctl->mutex);
5624                 put_caching_control(caching_ctl);
5625         }
5626
5627         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
5628                                           RESERVE_ALLOC_NO_ACCOUNT);
5629         BUG_ON(ret);
5630         btrfs_put_block_group(block_group);
5631         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5632                                          0, owner, offset, ins, 1);
5633         return ret;
5634 }
5635
5636 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5637                                             struct btrfs_root *root,
5638                                             u64 bytenr, u32 blocksize,
5639                                             int level)
5640 {
5641         struct extent_buffer *buf;
5642
5643         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5644         if (!buf)
5645                 return ERR_PTR(-ENOMEM);
5646         btrfs_set_header_generation(buf, trans->transid);
5647         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
5648         btrfs_tree_lock(buf);
5649         clean_tree_block(trans, root, buf);
5650
5651         btrfs_set_lock_blocking(buf);
5652         btrfs_set_buffer_uptodate(buf);
5653
5654         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5655                 /*
5656                  * we allow two log transactions at a time, use different
5657                  * EXTENT bits to differentiate dirty pages.
5658                  */
5659                 if (root->log_transid % 2 == 0)
5660                         set_extent_dirty(&root->dirty_log_pages, buf->start,
5661                                         buf->start + buf->len - 1, GFP_NOFS);
5662                 else
5663                         set_extent_new(&root->dirty_log_pages, buf->start,
5664                                         buf->start + buf->len - 1, GFP_NOFS);
5665         } else {
5666                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5667                          buf->start + buf->len - 1, GFP_NOFS);
5668         }
5669         trans->blocks_used++;
5670         /* this returns a buffer locked for blocking */
5671         return buf;
5672 }
5673
5674 static struct btrfs_block_rsv *
5675 use_block_rsv(struct btrfs_trans_handle *trans,
5676               struct btrfs_root *root, u32 blocksize)
5677 {
5678         struct btrfs_block_rsv *block_rsv;
5679         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5680         int ret;
5681
5682         block_rsv = get_block_rsv(trans, root);
5683
5684         if (block_rsv->size == 0) {
5685                 ret = reserve_metadata_bytes(trans, root, block_rsv,
5686                                              blocksize, 0);
5687                 /*
5688                  * If we couldn't reserve metadata bytes try and use some from
5689                  * the global reserve.
5690                  */
5691                 if (ret && block_rsv != global_rsv) {
5692                         ret = block_rsv_use_bytes(global_rsv, blocksize);
5693                         if (!ret)
5694                                 return global_rsv;
5695                         return ERR_PTR(ret);
5696                 } else if (ret) {
5697                         return ERR_PTR(ret);
5698                 }
5699                 return block_rsv;
5700         }
5701
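        /*
         * The rsv is sized but may still be short on bytes; fall back to
         * reserving fresh space (growing the rsv) and finally to the
         * global reserve before giving up.
         */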
5702         ret = block_rsv_use_bytes(block_rsv, blocksize);
5703         if (!ret)
5704                 return block_rsv;
5706         WARN_ON(1);
5707         ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
5708                                      0);
5709         if (!ret) {
5710                 spin_lock(&block_rsv->lock);
5711                 block_rsv->size += blocksize;
5712                 spin_unlock(&block_rsv->lock);
5713                 return block_rsv;
5714         } else if (block_rsv != global_rsv) {
5715                 ret = block_rsv_use_bytes(global_rsv, blocksize);
5716                 if (!ret)
5717                         return global_rsv;
5718         }
5720
5721         return ERR_PTR(-ENOSPC);
5722 }
5723
5724 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5725 {
5726         block_rsv_add_bytes(block_rsv, blocksize, 0);
5727         block_rsv_release_bytes(block_rsv, NULL, 0);
5728 }
5729
5730 /*
5731  * finds a free extent and does all the dirty work required for allocation.
5732  * returns the key for the extent through ins, and a tree buffer for
5733  * the first block of the extent through buf.
5734  *
5735  * returns the tree buffer or an ERR_PTR on failure.
5736  */
5737 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5738                                         struct btrfs_root *root, u32 blocksize,
5739                                         u64 parent, u64 root_objectid,
5740                                         struct btrfs_disk_key *key, int level,
5741                                         u64 hint, u64 empty_size)
5742 {
5743         struct btrfs_key ins;
5744         struct btrfs_block_rsv *block_rsv;
5745         struct extent_buffer *buf;
5746         u64 flags = 0;
5747         int ret;
5748
5750         block_rsv = use_block_rsv(trans, root, blocksize);
5751         if (IS_ERR(block_rsv))
5752                 return ERR_CAST(block_rsv);
5753
5754         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5755                                    empty_size, hint, (u64)-1, &ins, 0);
5756         if (ret) {
5757                 unuse_block_rsv(block_rsv, blocksize);
5758                 return ERR_PTR(ret);
5759         }
5760
5761         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5762                                     blocksize, level);
5763         BUG_ON(IS_ERR(buf));
5764
5765         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5766                 if (parent == 0)
5767                         parent = ins.objectid;
5768                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5769         } else
5770                 BUG_ON(parent > 0);
5771
5772         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5773                 struct btrfs_delayed_extent_op *extent_op;
5774                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5775                 BUG_ON(!extent_op);
5776                 if (key)
5777                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
5778                 else
5779                         memset(&extent_op->key, 0, sizeof(extent_op->key));
5780                 extent_op->flags_to_set = flags;
5781                 extent_op->update_key = 1;
5782                 extent_op->update_flags = 1;
5783                 extent_op->is_data = 0;
5784
5785                 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5786                                         ins.offset, parent, root_objectid,
5787                                         level, BTRFS_ADD_DELAYED_EXTENT,
5788                                         extent_op);
5789                 BUG_ON(ret);
5790         }
5791         return buf;
5792 }
5793
5794 struct walk_control {
5795         u64 refs[BTRFS_MAX_LEVEL];
5796         u64 flags[BTRFS_MAX_LEVEL];
5797         struct btrfs_key update_progress;
5798         int stage;
5799         int level;
5800         int shared_level;
5801         int update_ref;
5802         int keep_locks;
5803         int reada_slot;
5804         int reada_count;
5805 };
5806
5807 #define DROP_REFERENCE  1
5808 #define UPDATE_BACKREF  2
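
/*
 * DROP_REFERENCE walks down dropping references on blocks owned solely
 * by this root.  UPDATE_BACKREF is entered for shared subtrees whose
 * back refs must be converted to full backrefs before the walk can
 * continue dropping them.
 */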
5809
5810 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5811                                      struct btrfs_root *root,
5812                                      struct walk_control *wc,
5813                                      struct btrfs_path *path)
5814 {
5815         u64 bytenr;
5816         u64 generation;
5817         u64 refs;
5818         u64 flags;
5819         u32 nritems;
5820         u32 blocksize;
5821         struct btrfs_key key;
5822         struct extent_buffer *eb;
5823         int ret;
5824         int slot;
5825         int nread = 0;
5826
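        /*
         * adapt the readahead window: shrink it while we are still inside
         * the previously read-ahead range, grow it (capped at one node's
         * worth of pointers) once the walk has consumed it.
         */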
5827         if (path->slots[wc->level] < wc->reada_slot) {
5828                 wc->reada_count = wc->reada_count * 2 / 3;
5829                 wc->reada_count = max(wc->reada_count, 2);
5830         } else {
5831                 wc->reada_count = wc->reada_count * 3 / 2;
5832                 wc->reada_count = min_t(int, wc->reada_count,
5833                                         BTRFS_NODEPTRS_PER_BLOCK(root));
5834         }
5835
5836         eb = path->nodes[wc->level];
5837         nritems = btrfs_header_nritems(eb);
5838         blocksize = btrfs_level_size(root, wc->level - 1);
5839
5840         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5841                 if (nread >= wc->reada_count)
5842                         break;
5843
5844                 cond_resched();
5845                 bytenr = btrfs_node_blockptr(eb, slot);
5846                 generation = btrfs_node_ptr_generation(eb, slot);
5847
5848                 if (slot == path->slots[wc->level])
5849                         goto reada;
5850
5851                 if (wc->stage == UPDATE_BACKREF &&
5852                     generation <= root->root_key.offset)
5853                         continue;
5854
5855                 /* We don't lock the tree block, it's OK to be racy here */
5856                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5857                                                &refs, &flags);
5858                 BUG_ON(ret);
5859                 BUG_ON(refs == 0);
5860
5861                 if (wc->stage == DROP_REFERENCE) {
5862                         if (refs == 1)
5863                                 goto reada;
5864
5865                         if (wc->level == 1 &&
5866                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5867                                 continue;
5868                         if (!wc->update_ref ||
5869                             generation <= root->root_key.offset)
5870                                 continue;
5871                         btrfs_node_key_to_cpu(eb, &key, slot);
5872                         ret = btrfs_comp_cpu_keys(&key,
5873                                                   &wc->update_progress);
5874                         if (ret < 0)
5875                                 continue;
5876                 } else {
5877                         if (wc->level == 1 &&
5878                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5879                                 continue;
5880                 }
5881 reada:
5882                 ret = readahead_tree_block(root, bytenr, blocksize,
5883                                            generation);
5884                 if (ret)
5885                         break;
5886                 nread++;
5887         }
5888         wc->reada_slot = slot;
5889 }
5890
5891 /*
5892  * helper to process tree block while walking down the tree.
5893  *
5894  * when wc->stage == UPDATE_BACKREF, this function updates
5895  * back refs for pointers in the block.
5896  *
5897  * NOTE: return value 1 means we should stop walking down.
5898  */
5899 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5900                                    struct btrfs_root *root,
5901                                    struct btrfs_path *path,
5902                                    struct walk_control *wc, int lookup_info)
5903 {
5904         int level = wc->level;
5905         struct extent_buffer *eb = path->nodes[level];
5906         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5907         int ret;
5908
5909         if (wc->stage == UPDATE_BACKREF &&
5910             btrfs_header_owner(eb) != root->root_key.objectid)
5911                 return 1;
5912
5913         /*
5914          * when reference count of tree block is 1, it won't increase
5915          * again. once full backref flag is set, we never clear it.
5916          */
5917         if (lookup_info &&
5918             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5919              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5920                 BUG_ON(!path->locks[level]);
5921                 ret = btrfs_lookup_extent_info(trans, root,
5922                                                eb->start, eb->len,
5923                                                &wc->refs[level],
5924                                                &wc->flags[level]);
5925                 BUG_ON(ret);
5926                 BUG_ON(wc->refs[level] == 0);
5927         }
5928
5929         if (wc->stage == DROP_REFERENCE) {
5930                 if (wc->refs[level] > 1)
5931                         return 1;
5932
5933                 if (path->locks[level] && !wc->keep_locks) {
5934                         btrfs_tree_unlock_rw(eb, path->locks[level]);
5935                         path->locks[level] = 0;
5936                 }
5937                 return 0;
5938         }
5939
5940         /* wc->stage == UPDATE_BACKREF */
5941         if (!(wc->flags[level] & flag)) {
5942                 BUG_ON(!path->locks[level]);
5943                 ret = btrfs_inc_ref(trans, root, eb, 1);
5944                 BUG_ON(ret);
5945                 ret = btrfs_dec_ref(trans, root, eb, 0);
5946                 BUG_ON(ret);
5947                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5948                                                   eb->len, flag, 0);
5949                 BUG_ON(ret);
5950                 wc->flags[level] |= flag;
5951         }
5952
5953         /*
5954          * the block is shared by multiple trees, so it's not good to
5955          * keep the tree lock
5956          */
5957         if (path->locks[level] && level > 0) {
5958                 btrfs_tree_unlock_rw(eb, path->locks[level]);
5959                 path->locks[level] = 0;
5960         }
5961         return 0;
5962 }
5963
5964 /*
5965  * helper to process a tree block pointer.
5966  *
5967  * when wc->stage == DROP_REFERENCE, this function checks the
5968  * reference count of the block pointed to. if the block is
5969  * shared and we need to update back refs for the subtree
5970  * rooted at the block, this function changes wc->stage to
5971  * UPDATE_BACKREF. if the block is shared and there is no
5972  * need to update backrefs, this function drops the reference
5973  * to the block.
5974  *
5975  * NOTE: return value 1 means we should stop walking down.
5976  */
5977 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5978                                  struct btrfs_root *root,
5979                                  struct btrfs_path *path,
5980                                  struct walk_control *wc, int *lookup_info)
5981 {
5982         u64 bytenr;
5983         u64 generation;
5984         u64 parent;
5985         u32 blocksize;
5986         struct btrfs_key key;
5987         struct extent_buffer *next;
5988         int level = wc->level;
5989         int reada = 0;
5990         int ret = 0;
5991
5992         generation = btrfs_node_ptr_generation(path->nodes[level],
5993                                                path->slots[level]);
5994         /*
5995          * if the lower level block was created before the snapshot
5996          * was created, we know there is no need to update back refs
5997          * for the subtree
5998          */
5999         if (wc->stage == UPDATE_BACKREF &&
6000             generation <= root->root_key.offset) {
6001                 *lookup_info = 1;
6002                 return 1;
6003         }
6004
6005         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6006         blocksize = btrfs_level_size(root, level - 1);
6007
6008         next = btrfs_find_tree_block(root, bytenr, blocksize);
6009         if (!next) {
6010                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6011                 if (!next)
6012                         return -ENOMEM;
6013                 reada = 1;
6014         }
6015         btrfs_tree_lock(next);
6016         btrfs_set_lock_blocking(next);
6017
6018         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6019                                        &wc->refs[level - 1],
6020                                        &wc->flags[level - 1]);
6021         BUG_ON(ret);
6022         BUG_ON(wc->refs[level - 1] == 0);
6023         *lookup_info = 0;
6024
6025         if (wc->stage == DROP_REFERENCE) {
6026                 if (wc->refs[level - 1] > 1) {
6027                         if (level == 1 &&
6028                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6029                                 goto skip;
6030
6031                         if (!wc->update_ref ||
6032                             generation <= root->root_key.offset)
6033                                 goto skip;
6034
6035                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6036                                               path->slots[level]);
6037                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6038                         if (ret < 0)
6039                                 goto skip;
6040
6041                         wc->stage = UPDATE_BACKREF;
6042                         wc->shared_level = level - 1;
6043                 }
6044         } else {
6045                 if (level == 1 &&
6046                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6047                         goto skip;
6048         }
6049
6050         if (!btrfs_buffer_uptodate(next, generation)) {
6051                 btrfs_tree_unlock(next);
6052                 free_extent_buffer(next);
6053                 next = NULL;
6054                 *lookup_info = 1;
6055         }
6056
6057         if (!next) {
6058                 if (reada && level == 1)
6059                         reada_walk_down(trans, root, wc, path);
6060                 next = read_tree_block(root, bytenr, blocksize, generation);
6061                 if (!next)
6062                         return -EIO;
6063                 btrfs_tree_lock(next);
6064                 btrfs_set_lock_blocking(next);
6065         }
6066
6067         level--;
6068         BUG_ON(level != btrfs_header_level(next));
6069         path->nodes[level] = next;
6070         path->slots[level] = 0;
6071         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6072         wc->level = level;
6073         if (wc->level == 1)
6074                 wc->reada_slot = 0;
6075         return 0;
6076 skip:
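        /*
         * not descending into this subtree; if we are in the drop stage,
         * release this root's reference on the shared block here.
         */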
6077         wc->refs[level - 1] = 0;
6078         wc->flags[level - 1] = 0;
6079         if (wc->stage == DROP_REFERENCE) {
6080                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6081                         parent = path->nodes[level]->start;
6082                 } else {
6083                         BUG_ON(root->root_key.objectid !=
6084                                btrfs_header_owner(path->nodes[level]));
6085                         parent = 0;
6086                 }
6087
6088                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6089                                         root->root_key.objectid, level - 1, 0);
6090                 BUG_ON(ret);
6091         }
6092         btrfs_tree_unlock(next);
6093         free_extent_buffer(next);
6094         *lookup_info = 1;
6095         return 1;
6096 }
6097
6098 /*
6099  * helper to process tree block while walking up the tree.
6100  *
6101  * when wc->stage == DROP_REFERENCE, this function drops
6102  * reference count on the block.
6103  *
6104  * when wc->stage == UPDATE_BACKREF, this function changes
6105  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6106  * to UPDATE_BACKREF previously while processing the block.
6107  *
6108  * NOTE: return value 1 means we should stop walking up.
6109  */
6110 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6111                                  struct btrfs_root *root,
6112                                  struct btrfs_path *path,
6113                                  struct walk_control *wc)
6114 {
6115         int ret;
6116         int level = wc->level;
6117         struct extent_buffer *eb = path->nodes[level];
6118         u64 parent = 0;
6119
6120         if (wc->stage == UPDATE_BACKREF) {
6121                 BUG_ON(wc->shared_level < level);
6122                 if (level < wc->shared_level)
6123                         goto out;
6124
6125                 ret = find_next_key(path, level + 1, &wc->update_progress);
6126                 if (ret > 0)
6127                         wc->update_ref = 0;
6128
6129                 wc->stage = DROP_REFERENCE;
6130                 wc->shared_level = -1;
6131                 path->slots[level] = 0;
6132
6133                 /*
6134                  * check reference count again if the block isn't locked.
6135                  * we should start walking down the tree again if reference
6136                  * count is one.
6137                  */
6138                 if (!path->locks[level]) {
6139                         BUG_ON(level == 0);
6140                         btrfs_tree_lock(eb);
6141                         btrfs_set_lock_blocking(eb);
6142                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6143
6144                         ret = btrfs_lookup_extent_info(trans, root,
6145                                                        eb->start, eb->len,
6146                                                        &wc->refs[level],
6147                                                        &wc->flags[level]);
6148                         BUG_ON(ret);
6149                         BUG_ON(wc->refs[level] == 0);
6150                         if (wc->refs[level] == 1) {
6151                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6152                                 return 1;
6153                         }
6154                 }
6155         }
6156
6157         /* wc->stage == DROP_REFERENCE */
6158         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6159
6160         if (wc->refs[level] == 1) {
6161                 if (level == 0) {
6162                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6163                                 ret = btrfs_dec_ref(trans, root, eb, 1);
6164                         else
6165                                 ret = btrfs_dec_ref(trans, root, eb, 0);
6166                         BUG_ON(ret);
6167                 }
6168                 /* make block locked assertion in clean_tree_block happy */
6169                 if (!path->locks[level] &&
6170                     btrfs_header_generation(eb) == trans->transid) {
6171                         btrfs_tree_lock(eb);
6172                         btrfs_set_lock_blocking(eb);
6173                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6174                 }
6175                 clean_tree_block(trans, root, eb);
6176         }
6177
6178         if (eb == root->node) {
6179                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6180                         parent = eb->start;
6181                 else
6182                         BUG_ON(root->root_key.objectid !=
6183                                btrfs_header_owner(eb));
6184         } else {
6185                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6186                         parent = path->nodes[level + 1]->start;
6187                 else
6188                         BUG_ON(root->root_key.objectid !=
6189                                btrfs_header_owner(path->nodes[level + 1]));
6190         }
6191
6192         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6193 out:
6194         wc->refs[level] = 0;
6195         wc->flags[level] = 0;
6196         return 0;
6197 }
6198
6199 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6200                                    struct btrfs_root *root,
6201                                    struct btrfs_path *path,
6202                                    struct walk_control *wc)
6203 {
6204         int level = wc->level;
6205         int lookup_info = 1;
6206         int ret;
6207
6208         while (level >= 0) {
6209                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6210                 if (ret > 0)
6211                         break;
6212
6213                 if (level == 0)
6214                         break;
6215
6216                 if (path->slots[level] >=
6217                     btrfs_header_nritems(path->nodes[level]))
6218                         break;
6219
6220                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6221                 if (ret > 0) {
6222                         path->slots[level]++;
6223                         continue;
6224                 } else if (ret < 0)
6225                         return ret;
6226                 level = wc->level;
6227         }
6228         return 0;
6229 }
6230
6231 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6232                                  struct btrfs_root *root,
6233                                  struct btrfs_path *path,
6234                                  struct walk_control *wc, int max_level)
6235 {
6236         int level = wc->level;
6237         int ret;
6238
6239         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6240         while (level < max_level && path->nodes[level]) {
6241                 wc->level = level;
6242                 if (path->slots[level] + 1 <
6243                     btrfs_header_nritems(path->nodes[level])) {
6244                         path->slots[level]++;
6245                         return 0;
6246                 } else {
6247                         ret = walk_up_proc(trans, root, path, wc);
6248                         if (ret > 0)
6249                                 return 0;
6250
6251                         if (path->locks[level]) {
6252                                 btrfs_tree_unlock_rw(path->nodes[level],
6253                                                      path->locks[level]);
6254                                 path->locks[level] = 0;
6255                         }
6256                         free_extent_buffer(path->nodes[level]);
6257                         path->nodes[level] = NULL;
6258                         level++;
6259                 }
6260         }
6261         return 1;
6262 }
6263
6264 /*
6265  * drop a subvolume tree.
6266  *
6267  * this function traverses the tree freeing any blocks that are only
6268  * referenced by the tree.
6269  *
6270  * when a shared tree block is found, this function decreases its
6271  * reference count by one. if update_ref is true, this function
6272  * also makes sure backrefs for the shared block and all lower level
6273  * blocks are properly updated.
6274  */
6275 void btrfs_drop_snapshot(struct btrfs_root *root,
6276                          struct btrfs_block_rsv *block_rsv, int update_ref)
6277 {
6278         struct btrfs_path *path;
6279         struct btrfs_trans_handle *trans;
6280         struct btrfs_root *tree_root = root->fs_info->tree_root;
6281         struct btrfs_root_item *root_item = &root->root_item;
6282         struct walk_control *wc;
6283         struct btrfs_key key;
6284         int err = 0;
6285         int ret;
6286         int level;
6287
6288         path = btrfs_alloc_path();
6289         if (!path) {
6290                 err = -ENOMEM;
6291                 goto out;
6292         }
6293
6294         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6295         if (!wc) {
6296                 btrfs_free_path(path);
6297                 err = -ENOMEM;
6298                 goto out;
6299         }
6300
6301         trans = btrfs_start_transaction(tree_root, 0);
6302         BUG_ON(IS_ERR(trans));
6303
6304         if (block_rsv)
6305                 trans->block_rsv = block_rsv;
6306
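        /*
         * drop_progress records how far a previously interrupted drop
         * got.  an objectid of 0 means start from the root, otherwise
         * re-search to the saved key and resume from there.
         */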
6307         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6308                 level = btrfs_header_level(root->node);
6309                 path->nodes[level] = btrfs_lock_root_node(root);
6310                 btrfs_set_lock_blocking(path->nodes[level]);
6311                 path->slots[level] = 0;
6312                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6313                 memset(&wc->update_progress, 0,
6314                        sizeof(wc->update_progress));
6315         } else {
6316                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6317                 memcpy(&wc->update_progress, &key,
6318                        sizeof(wc->update_progress));
6319
6320                 level = root_item->drop_level;
6321                 BUG_ON(level == 0);
6322                 path->lowest_level = level;
6323                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6324                 path->lowest_level = 0;
6325                 if (ret < 0) {
6326                         err = ret;
6327                         goto out_free;
6328                 }
6329                 WARN_ON(ret > 0);
6330
6331                 /*
6332                  * unlock our path, this is safe because only this
6333                  * function is allowed to delete this snapshot
6334                  */
6335                 btrfs_unlock_up_safe(path, 0);
6336
6337                 level = btrfs_header_level(root->node);
6338                 while (1) {
6339                         btrfs_tree_lock(path->nodes[level]);
6340                         btrfs_set_lock_blocking(path->nodes[level]);
6341
6342                         ret = btrfs_lookup_extent_info(trans, root,
6343                                                 path->nodes[level]->start,
6344                                                 path->nodes[level]->len,
6345                                                 &wc->refs[level],
6346                                                 &wc->flags[level]);
6347                         BUG_ON(ret);
6348                         BUG_ON(wc->refs[level] == 0);
6349
6350                         if (level == root_item->drop_level)
6351                                 break;
6352
6353                         btrfs_tree_unlock(path->nodes[level]);
6354                         WARN_ON(wc->refs[level] != 1);
6355                         level--;
6356                 }
6357         }
6358
6359         wc->level = level;
6360         wc->shared_level = -1;
6361         wc->stage = DROP_REFERENCE;
6362         wc->update_ref = update_ref;
6363         wc->keep_locks = 0;
6364         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6365
6366         while (1) {
6367                 ret = walk_down_tree(trans, root, path, wc);
6368                 if (ret < 0) {
6369                         err = ret;
6370                         break;
6371                 }
6372
6373                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6374                 if (ret < 0) {
6375                         err = ret;
6376                         break;
6377                 }
6378
6379                 if (ret > 0) {
6380                         BUG_ON(wc->stage != DROP_REFERENCE);
6381                         break;
6382                 }
6383
6384                 if (wc->stage == DROP_REFERENCE) {
6385                         level = wc->level;
6386                         btrfs_node_key(path->nodes[level],
6387                                        &root_item->drop_progress,
6388                                        path->slots[level]);
6389                         root_item->drop_level = level;
6390                 }
6391
6392                 BUG_ON(wc->level == 0);
6393                 if (btrfs_should_end_transaction(trans, tree_root)) {
6394                         ret = btrfs_update_root(trans, tree_root,
6395                                                 &root->root_key,
6396                                                 root_item);
6397                         BUG_ON(ret);
6398
6399                         btrfs_end_transaction_throttle(trans, tree_root);
6400                         trans = btrfs_start_transaction(tree_root, 0);
6401                         BUG_ON(IS_ERR(trans));
6402                         if (block_rsv)
6403                                 trans->block_rsv = block_rsv;
6404                 }
6405         }
6406         btrfs_release_path(path);
6407         BUG_ON(err);
6408
6409         ret = btrfs_del_root(trans, tree_root, &root->root_key);
6410         BUG_ON(ret);
6411
6412         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6413                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6414                                            NULL, NULL);
6415                 BUG_ON(ret < 0);
6416                 if (ret > 0) {
6417                         /* if we fail to delete the orphan item this time
6418                          * around, it'll get picked up the next time.
6419                          *
6420                          * The most common failure here is just -ENOENT.
6421                          */
6422                         btrfs_del_orphan_item(trans, tree_root,
6423                                               root->root_key.objectid);
6424                 }
6425         }
6426
6427         if (root->in_radix) {
6428                 btrfs_free_fs_root(tree_root->fs_info, root);
6429         } else {
6430                 free_extent_buffer(root->node);
6431                 free_extent_buffer(root->commit_root);
6432                 kfree(root);
6433         }
6434 out_free:
6435         btrfs_end_transaction_throttle(trans, tree_root);
6436         kfree(wc);
6437         btrfs_free_path(path);
6438 out:
6439         if (err)
6440                 btrfs_std_error(root->fs_info, err);
6441         return;
6442 }
6443
6444 /*
6445  * drop subtree rooted at tree block 'node'.
6446  *
6447  * NOTE: this function will unlock and release tree block 'node'
6448  */
6449 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6450                         struct btrfs_root *root,
6451                         struct extent_buffer *node,
6452                         struct extent_buffer *parent)
6453 {
6454         struct btrfs_path *path;
6455         struct walk_control *wc;
6456         int level;
6457         int parent_level;
6458         int ret = 0;
6459         int wret;
6460
6461         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6462
6463         path = btrfs_alloc_path();
6464         if (!path)
6465                 return -ENOMEM;
6466
6467         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6468         if (!wc) {
6469                 btrfs_free_path(path);
6470                 return -ENOMEM;
6471         }
6472
6473         btrfs_assert_tree_locked(parent);
6474         parent_level = btrfs_header_level(parent);
6475         extent_buffer_get(parent);
6476         path->nodes[parent_level] = parent;
6477         path->slots[parent_level] = btrfs_header_nritems(parent);
6478
6479         btrfs_assert_tree_locked(node);
6480         level = btrfs_header_level(node);
6481         path->nodes[level] = node;
6482         path->slots[level] = 0;
6483         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6484
6485         wc->refs[parent_level] = 1;
6486         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6487         wc->level = level;
6488         wc->shared_level = -1;
6489         wc->stage = DROP_REFERENCE;
6490         wc->update_ref = 0;
6491         wc->keep_locks = 1;
6492         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6493
6494         while (1) {
6495                 wret = walk_down_tree(trans, root, path, wc);
6496                 if (wret < 0) {
6497                         ret = wret;
6498                         break;
6499                 }
6500
6501                 wret = walk_up_tree(trans, root, path, wc, parent_level);
6502                 if (wret < 0)
6503                         ret = wret;
6504                 if (wret != 0)
6505                         break;
6506         }
6507
6508         kfree(wc);
6509         btrfs_free_path(path);
6510         return ret;
6511 }
6512
6513 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6514 {
6515         u64 num_devices;
6516         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6517                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6518
6519         /*
6520          * we add in the count of missing devices because we want
6521          * to make sure that any RAID levels on a degraded FS
6522          * continue to be honored.
6523          */
6524         num_devices = root->fs_info->fs_devices->rw_devices +
6525                 root->fs_info->fs_devices->missing_devices;
6526
6527         if (num_devices == 1) {
6528                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6529                 stripped = flags & ~stripped;
6530
6531                 /* turn raid0 into single device chunks */
6532                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6533                         return stripped;
6534
6535                 /* turn mirroring into duplication */
6536                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6537                              BTRFS_BLOCK_GROUP_RAID10))
6538                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6539                 return flags;
6540         } else {
6541                 /* they already had raid on here, just return */
6542                 if (flags & stripped)
6543                         return flags;
6544
6545                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6546                 stripped = flags & ~stripped;
6547
6548                 /* switch duplicated blocks with raid1 */
6549                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6550                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6551
6552                 /* turn single device chunks into raid0 */
6553                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6554         }
6555         return flags;
6556 }
6557
6558 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6559 {
6560         struct btrfs_space_info *sinfo = cache->space_info;
6561         u64 num_bytes;
6562         u64 min_allocable_bytes;
6563         int ret = -ENOSPC;
6564
6566         /*
6567          * We need some metadata space and system metadata space for
6568          * allocating chunks in some corner cases, so keep a minimum
6569          * reserved unless we are forced to mark the group readonly.
6570          */
6571         if ((sinfo->flags &
6572              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
6573             !force)
6574                 min_allocable_bytes = 1 * 1024 * 1024;
6575         else
6576                 min_allocable_bytes = 0;
6577
6578         spin_lock(&sinfo->lock);
6579         spin_lock(&cache->lock);
6580
6581         if (cache->ro) {
6582                 ret = 0;
6583                 goto out;
6584         }
6585
6586         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6587                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6588
6589         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
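        /*
         * marking the group readonly moves its unused bytes into
         * bytes_readonly; only allow that if the space_info can still
         * satisfy min_allocable_bytes afterwards.
         */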
6590             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
6591             min_allocable_bytes <= sinfo->total_bytes) {
6592                 sinfo->bytes_readonly += num_bytes;
6593                 cache->ro = 1;
6594                 ret = 0;
6595         }
6596 out:
6597         spin_unlock(&cache->lock);
6598         spin_unlock(&sinfo->lock);
6599         return ret;
6600 }
6601
6602 int btrfs_set_block_group_ro(struct btrfs_root *root,
6603                              struct btrfs_block_group_cache *cache)
6605 {
6606         struct btrfs_trans_handle *trans;
6607         u64 alloc_flags;
6608         int ret;
6609
6610         BUG_ON(cache->ro);
6611
6612         trans = btrfs_join_transaction(root);
6613         BUG_ON(IS_ERR(trans));
6614
6615         alloc_flags = update_block_group_flags(root, cache->flags);
6616         if (alloc_flags != cache->flags)
6617                 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6618                                CHUNK_ALLOC_FORCE);
6619
6620         ret = set_block_group_ro(cache, 0);
6621         if (!ret)
6622                 goto out;
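        /*
         * not enough free space to migrate this group's used bytes; force
         * allocation of a new chunk for this profile and try once more.
         */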
6623         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
6624         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6625                              CHUNK_ALLOC_FORCE);
6626         if (ret < 0)
6627                 goto out;
6628         ret = set_block_group_ro(cache, 0);
6629 out:
6630         btrfs_end_transaction(trans, root);
6631         return ret;
6632 }
6633
6634 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
6635                             struct btrfs_root *root, u64 type)
6636 {
6637         u64 alloc_flags = get_alloc_profile(root, type);
6638         return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6639                               CHUNK_ALLOC_FORCE);
6640 }
6641
6642 /*
6643  * helper to account the unused space of all the readonly block groups in
6644  * the list. takes mirrors into account.
6645  */
6646 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
6647 {
6648         struct btrfs_block_group_cache *block_group;
6649         u64 free_bytes = 0;
6650         int factor;
6651
6652         list_for_each_entry(block_group, groups_list, list) {
6653                 spin_lock(&block_group->lock);
6654
6655                 if (!block_group->ro) {
6656                         spin_unlock(&block_group->lock);
6657                         continue;
6658                 }
6659
6660                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
6661                                           BTRFS_BLOCK_GROUP_RAID10 |
6662                                           BTRFS_BLOCK_GROUP_DUP))
6663                         factor = 2;
6664                 else
6665                         factor = 1;
6666
6667                 free_bytes += (block_group->key.offset -
6668                                btrfs_block_group_used(&block_group->item)) *
6669                                factor;
6670
6671                 spin_unlock(&block_group->lock);
6672         }
6673
6674         return free_bytes;
6675 }
6676
6677 /*
6678  * helper to account the unused space of all the readonly block groups in
6679  * the space_info. takes mirrors into account.
6680  */
6681 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
6682 {
6683         int i;
6684         u64 free_bytes = 0;
6685
6686         spin_lock(&sinfo->lock);
6687
6688         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
6689                 if (!list_empty(&sinfo->block_groups[i]))
6690                         free_bytes += __btrfs_get_ro_block_group_free_space(
6691                                                 &sinfo->block_groups[i]);
6692
6693         spin_unlock(&sinfo->lock);
6694
6695         return free_bytes;
6696 }
6697
6698 int btrfs_set_block_group_rw(struct btrfs_root *root,
6699                               struct btrfs_block_group_cache *cache)
6700 {
6701         struct btrfs_space_info *sinfo = cache->space_info;
6702         u64 num_bytes;
6703
6704         BUG_ON(!cache->ro);
6705
6706         spin_lock(&sinfo->lock);
6707         spin_lock(&cache->lock);
6708         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6709                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6710         sinfo->bytes_readonly -= num_bytes;
6711         cache->ro = 0;
6712         spin_unlock(&cache->lock);
6713         spin_unlock(&sinfo->lock);
6714         return 0;
6715 }
6716
6717 /*
6718  * checks to see if it's even possible to relocate this block group.
6719  *
6720  * @return - -1 if it's not a good idea to relocate this block group, 0 if
6721  * it's ok to go ahead and try.
6722  */
6723 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6724 {
6725         struct btrfs_block_group_cache *block_group;
6726         struct btrfs_space_info *space_info;
6727         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6728         struct btrfs_device *device;
6729         u64 min_free;
6730         u64 dev_min = 1;
6731         u64 dev_nr = 0;
6732         int index;
6733         int full = 0;
6734         int ret = 0;
6735
6736         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
6737
6738         /* odd, couldn't find the block group, leave it alone */
6739         if (!block_group)
6740                 return -1;
6741
6742         min_free = btrfs_block_group_used(&block_group->item);
6743
6744         /* no bytes used, we're good */
6745         if (!min_free)
6746                 goto out;
6747
6748         space_info = block_group->space_info;
6749         spin_lock(&space_info->lock);
6750
6751         full = space_info->full;
6752
6753         /*
6754          * if this is the last block group we have in this space, we can't
6755          * relocate it unless we're able to allocate a new chunk below.
6756          *
6757          * Otherwise, we need to make sure we have room in the space to handle
6758          * all of the extents from this block group.  If we can, we're good.
6759          */
6760         if ((space_info->total_bytes != block_group->key.offset) &&
6761             (space_info->bytes_used + space_info->bytes_reserved +
6762              space_info->bytes_pinned + space_info->bytes_readonly +
6763              min_free < space_info->total_bytes)) {
6764                 spin_unlock(&space_info->lock);
6765                 goto out;
6766         }
6767         spin_unlock(&space_info->lock);
6768
6769         /*
6770          * ok we don't have enough space, but maybe we have free space on our
6771          * devices to allocate new chunks for relocation, so loop through our
6772          * alloc devices and guess if we have enough space.  However, if we
6773          * were marked as full, then we know there aren't enough chunks, and we
6774          * can just return.
6775          */
6776         ret = -1;
6777         if (full)
6778                 goto out;
6779
6780         /*
6781          * index:
6782          *      0: raid10
6783          *      1: raid1
6784          *      2: dup
6785          *      3: raid0
6786          *      4: single
6787          */
6788         index = get_block_group_index(block_group);
6789         if (index == 0) {
6790                 dev_min = 4;
6791                 /* Divide by 2 */
6792                 min_free >>= 1;
6793         } else if (index == 1) {
6794                 dev_min = 2;
6795         } else if (index == 2) {
6796                 /* Multiply by 2 */
6797                 min_free <<= 1;
6798         } else if (index == 3) {
6799                 dev_min = fs_devices->rw_devices;
6800                 do_div(min_free, dev_min);
6801         }
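             /*
              * Worked example (illustrative): relocating a 1GB raid10 group
              * needs only ~512MB of new chunk space (min_free is halved),
              * but spread over at least 4 rw devices; a dup group needs the
              * full 2GB of raw space, which can live on a single device.
              */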
6802
6803         mutex_lock(&root->fs_info->chunk_mutex);
6804         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6805                 u64 dev_offset;
6806
6807                 /*
6808                  * check to make sure we can actually find a chunk with enough
6809                  * space to fit our block group in.
6810                  */
6811                 if (device->total_bytes > device->bytes_used + min_free) {
6812                         ret = find_free_dev_extent(NULL, device, min_free,
6813                                                    &dev_offset, NULL);
6814                         if (!ret)
6815                                 dev_nr++;
6816
6817                         if (dev_nr >= dev_min)
6818                                 break;
6819
6820                         ret = -1;
6821                 }
6822         }
6823         mutex_unlock(&root->fs_info->chunk_mutex);
6824 out:
6825         btrfs_put_block_group(block_group);
6826         return ret;
6827 }
6828
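     /*
      * find the first block group item with objectid >= key->objectid.
      * Returns 0 with the path pointing at the item, > 0 if there is no
      * such item, or < 0 on error.
      */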
6829 static int find_first_block_group(struct btrfs_root *root,
6830                 struct btrfs_path *path, struct btrfs_key *key)
6831 {
6832         int ret = 0;
6833         struct btrfs_key found_key;
6834         struct extent_buffer *leaf;
6835         int slot;
6836
6837         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6838         if (ret < 0)
6839                 goto out;
6840
6841         while (1) {
6842                 slot = path->slots[0];
6843                 leaf = path->nodes[0];
6844                 if (slot >= btrfs_header_nritems(leaf)) {
6845                         ret = btrfs_next_leaf(root, path);
6846                         if (ret == 0)
6847                                 continue;
6848                         if (ret < 0)
6849                                 goto out;
6850                         break;
6851                 }
6852                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6853
6854                 if (found_key.objectid >= key->objectid &&
6855                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6856                         ret = 0;
6857                         goto out;
6858                 }
6859                 path->slots[0]++;
6860         }
6861 out:
6862         return ret;
6863 }
6864
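     /*
      * drop the inode references (iref) the block groups hold on their
      * free space cache inodes; keeps rescanning from the start until a
      * full pass finds no group still holding a reference.
      */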
6865 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
6866 {
6867         struct btrfs_block_group_cache *block_group;
6868         u64 last = 0;
6869
6870         while (1) {
6871                 struct inode *inode;
6872
6873                 block_group = btrfs_lookup_first_block_group(info, last);
6874                 while (block_group) {
6875                         spin_lock(&block_group->lock);
6876                         if (block_group->iref)
6877                                 break;
6878                         spin_unlock(&block_group->lock);
6879                         block_group = next_block_group(info->tree_root,
6880                                                        block_group);
6881                 }
6882                 if (!block_group) {
6883                         if (last == 0)
6884                                 break;
6885                         last = 0;
6886                         continue;
6887                 }
6888
6889                 inode = block_group->inode;
6890                 block_group->iref = 0;
6891                 block_group->inode = NULL;
6892                 spin_unlock(&block_group->lock);
6893                 iput(inode);
6894                 last = block_group->key.objectid + block_group->key.offset;
6895                 btrfs_put_block_group(block_group);
6896         }
6897 }
6898
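     /*
      * tear down all in-memory block groups and space_infos at unmount,
      * waiting for any in-flight caching work to finish first.
      */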
6899 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6900 {
6901         struct btrfs_block_group_cache *block_group;
6902         struct btrfs_space_info *space_info;
6903         struct btrfs_caching_control *caching_ctl;
6904         struct rb_node *n;
6905
6906         down_write(&info->extent_commit_sem);
6907         while (!list_empty(&info->caching_block_groups)) {
6908                 caching_ctl = list_entry(info->caching_block_groups.next,
6909                                          struct btrfs_caching_control, list);
6910                 list_del(&caching_ctl->list);
6911                 put_caching_control(caching_ctl);
6912         }
6913         up_write(&info->extent_commit_sem);
6914
6915         spin_lock(&info->block_group_cache_lock);
6916         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6917                 block_group = rb_entry(n, struct btrfs_block_group_cache,
6918                                        cache_node);
6919                 rb_erase(&block_group->cache_node,
6920                          &info->block_group_cache_tree);
6921                 spin_unlock(&info->block_group_cache_lock);
6922
6923                 down_write(&block_group->space_info->groups_sem);
6924                 list_del(&block_group->list);
6925                 up_write(&block_group->space_info->groups_sem);
6926
6927                 if (block_group->cached == BTRFS_CACHE_STARTED)
6928                         wait_block_group_cache_done(block_group);
6929
6930                 /*
6931                  * We haven't cached this block group, which means we could
6932                  * possibly have excluded extents on this block group.
6933                  */
6934                 if (block_group->cached == BTRFS_CACHE_NO)
6935                         free_excluded_extents(info->extent_root, block_group);
6936
6937                 btrfs_remove_free_space_cache(block_group);
6938                 btrfs_put_block_group(block_group);
6939
6940                 spin_lock(&info->block_group_cache_lock);
6941         }
6942         spin_unlock(&info->block_group_cache_lock);
6943
6944         /* now that all the block groups are freed, go through and
6945          * free all the space_info structs.  This is only called during
6946          * the final stages of unmount, and so we know nobody is
6947          * using them.  We call synchronize_rcu() once before we start,
6948          * just to be on the safe side.
6949          */
6950         synchronize_rcu();
6951
6952         release_global_block_rsv(info);
6953
6954         while (!list_empty(&info->space_info)) {
6955                 space_info = list_entry(info->space_info.next,
6956                                         struct btrfs_space_info,
6957                                         list);
6958                 if (space_info->bytes_pinned > 0 ||
6959                     space_info->bytes_reserved > 0 ||
6960                     space_info->bytes_may_use > 0) {
6961                         WARN_ON(1);
6962                         dump_space_info(space_info, 0, 0);
6963                 }
6964                 list_del(&space_info->list);
6965                 kfree(space_info);
6966         }
6967         return 0;
6968 }
6969
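     /* add the block group to its space_info's per-raid-profile list */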
6970 static void __link_block_group(struct btrfs_space_info *space_info,
6971                                struct btrfs_block_group_cache *cache)
6972 {
6973         int index = get_block_group_index(cache);
6974
6975         down_write(&space_info->groups_sem);
6976         list_add_tail(&cache->list, &space_info->block_groups[index]);
6977         up_write(&space_info->groups_sem);
6978 }
6979
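     /*
      * read every block group item from the extent tree at mount time and
      * build the in-memory caches, space_infos and free space accounting.
      */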
6980 int btrfs_read_block_groups(struct btrfs_root *root)
6981 {
6982         struct btrfs_path *path;
6983         int ret;
6984         struct btrfs_block_group_cache *cache;
6985         struct btrfs_fs_info *info = root->fs_info;
6986         struct btrfs_space_info *space_info;
6987         struct btrfs_key key;
6988         struct btrfs_key found_key;
6989         struct extent_buffer *leaf;
6990         int need_clear = 0;
6991         u64 cache_gen;
6992
6993         root = info->extent_root;
6994         key.objectid = 0;
6995         key.offset = 0;
6996         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
6997         path = btrfs_alloc_path();
6998         if (!path)
6999                 return -ENOMEM;
7000         path->reada = 1;
7001
7002         cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
7003         if (cache_gen != 0 &&
7004             btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
7005                 need_clear = 1;
7006         if (btrfs_test_opt(root, CLEAR_CACHE))
7007                 need_clear = 1;
7008         if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
7009                 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
7010
7011         while (1) {
7012                 ret = find_first_block_group(root, path, &key);
7013                 if (ret > 0)
7014                         break;
7015                 if (ret != 0)
7016                         goto error;
7017                 leaf = path->nodes[0];
7018                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7019                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7020                 if (!cache) {
7021                         ret = -ENOMEM;
7022                         goto error;
7023                 }
7024                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7025                                                 GFP_NOFS);
7026                 if (!cache->free_space_ctl) {
7027                         kfree(cache);
7028                         ret = -ENOMEM;
7029                         goto error;
7030                 }
7031
7032                 atomic_set(&cache->count, 1);
7033                 spin_lock_init(&cache->lock);
7034                 cache->fs_info = info;
7035                 INIT_LIST_HEAD(&cache->list);
7036                 INIT_LIST_HEAD(&cache->cluster_list);
7037
7038                 if (need_clear)
7039                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7040
7041                 read_extent_buffer(leaf, &cache->item,
7042                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7043                                    sizeof(cache->item));
7044                 memcpy(&cache->key, &found_key, sizeof(found_key));
7045
7046                 key.objectid = found_key.objectid + found_key.offset;
7047                 btrfs_release_path(path);
7048                 cache->flags = btrfs_block_group_flags(&cache->item);
7049                 cache->sectorsize = root->sectorsize;
7050
7051                 btrfs_init_free_space_ctl(cache);
7052
7053                 /*
7054                  * We need to exclude the super stripes now so that the space
7055                  * info has super bytes accounted for, otherwise we'll think
7056                  * we have more space than we actually do.
7057                  */
7058                 exclude_super_stripes(root, cache);
7059
7060                 /*
7061                  * check for two cases: either we are full, and therefore
7062                  * don't need to bother with the caching work since we won't
7063                  * find any space, or we are empty, and we can just add all
7064                  * the space in and be done with it.  This saves us a lot of
7065                  * time, particularly in the full case.
7066                  */
7067                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7068                         cache->last_byte_to_unpin = (u64)-1;
7069                         cache->cached = BTRFS_CACHE_FINISHED;
7070                         free_excluded_extents(root, cache);
7071                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7072                         cache->last_byte_to_unpin = (u64)-1;
7073                         cache->cached = BTRFS_CACHE_FINISHED;
7074                         add_new_free_space(cache, root->fs_info,
7075                                            found_key.objectid,
7076                                            found_key.objectid +
7077                                            found_key.offset);
7078                         free_excluded_extents(root, cache);
7079                 }
7080
7081                 ret = update_space_info(info, cache->flags, found_key.offset,
7082                                         btrfs_block_group_used(&cache->item),
7083                                         &space_info);
7084                 BUG_ON(ret);
7085                 cache->space_info = space_info;
7086                 spin_lock(&cache->space_info->lock);
7087                 cache->space_info->bytes_readonly += cache->bytes_super;
7088                 spin_unlock(&cache->space_info->lock);
7089
7090                 __link_block_group(space_info, cache);
7091
7092                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7093                 BUG_ON(ret);
7094
7095                 set_avail_alloc_bits(root->fs_info, cache->flags);
7096                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7097                         set_block_group_ro(cache, 1);
7098         }
7099
7100         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7101                 if (!(get_alloc_profile(root, space_info->flags) &
7102                       (BTRFS_BLOCK_GROUP_RAID10 |
7103                        BTRFS_BLOCK_GROUP_RAID1 |
7104                        BTRFS_BLOCK_GROUP_DUP)))
7105                         continue;
7106                 /*
7107                  * avoid allocating from un-mirrored block groups if there are
7108                  * mirrored block groups.
7109                  */
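                     /* block_groups[3] is raid0, block_groups[4] is single */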
7110                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7111                         set_block_group_ro(cache, 1);
7112                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7113                         set_block_group_ro(cache, 1);
7114         }
7115
7116         init_global_block_rsv(info);
7117         ret = 0;
7118 error:
7119         btrfs_free_path(path);
7120         return ret;
7121 }
7122
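     /*
      * create the in-memory cache and the on-disk block group item for a
      * newly allocated chunk; the new group starts out fully cached since
      * it contains no extents yet.
      */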
7123 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7124                            struct btrfs_root *root, u64 bytes_used,
7125                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7126                            u64 size)
7127 {
7128         int ret;
7129         struct btrfs_root *extent_root;
7130         struct btrfs_block_group_cache *cache;
7131
7132         extent_root = root->fs_info->extent_root;
7133
7134         root->fs_info->last_trans_log_full_commit = trans->transid;
7135
7136         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7137         if (!cache)
7138                 return -ENOMEM;
7139         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7140                                         GFP_NOFS);
7141         if (!cache->free_space_ctl) {
7142                 kfree(cache);
7143                 return -ENOMEM;
7144         }
7145
7146         cache->key.objectid = chunk_offset;
7147         cache->key.offset = size;
7148         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7149         cache->sectorsize = root->sectorsize;
7150         cache->fs_info = root->fs_info;
7151
7152         atomic_set(&cache->count, 1);
7153         spin_lock_init(&cache->lock);
7154         INIT_LIST_HEAD(&cache->list);
7155         INIT_LIST_HEAD(&cache->cluster_list);
7156
7157         btrfs_init_free_space_ctl(cache);
7158
7159         btrfs_set_block_group_used(&cache->item, bytes_used);
7160         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7161         cache->flags = type;
7162         btrfs_set_block_group_flags(&cache->item, type);
7163
7164         cache->last_byte_to_unpin = (u64)-1;
7165         cache->cached = BTRFS_CACHE_FINISHED;
7166         exclude_super_stripes(root, cache);
7167
7168         add_new_free_space(cache, root->fs_info, chunk_offset,
7169                            chunk_offset + size);
7170
7171         free_excluded_extents(root, cache);
7172
7173         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7174                                 &cache->space_info);
7175         BUG_ON(ret);
7176
7177         spin_lock(&cache->space_info->lock);
7178         cache->space_info->bytes_readonly += cache->bytes_super;
7179         spin_unlock(&cache->space_info->lock);
7180
7181         __link_block_group(cache->space_info, cache);
7182
7183         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7184         BUG_ON(ret);
7185
7186         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7187                                 sizeof(cache->item));
7188         BUG_ON(ret);
7189
7190         set_avail_alloc_bits(extent_root->fs_info, type);
7191
7192         return 0;
7193 }
7194
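     /*
      * remove a fully relocated, read-only block group: drop its free
      * space cache inode and item, unlink it from its space_info, fix up
      * the accounting and delete the block group item itself.
      */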
7195 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7196                              struct btrfs_root *root, u64 group_start)
7197 {
7198         struct btrfs_path *path;
7199         struct btrfs_block_group_cache *block_group;
7200         struct btrfs_free_cluster *cluster;
7201         struct btrfs_root *tree_root = root->fs_info->tree_root;
7202         struct btrfs_key key;
7203         struct inode *inode;
7204         int ret;
7205         int factor;
7206
7207         root = root->fs_info->extent_root;
7208
7209         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7210         BUG_ON(!block_group);
7211         BUG_ON(!block_group->ro);
7212
7213         /*
7214          * Free the reserved super bytes from this block group before
7215          * removing it.
7216          */
7217         free_excluded_extents(root, block_group);
7218
7219         memcpy(&key, &block_group->key, sizeof(key));
7220         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7221                                   BTRFS_BLOCK_GROUP_RAID1 |
7222                                   BTRFS_BLOCK_GROUP_RAID10))
7223                 factor = 2;
7224         else
7225                 factor = 1;
7226
7227         /* make sure this block group isn't part of an allocation cluster */
7228         cluster = &root->fs_info->data_alloc_cluster;
7229         spin_lock(&cluster->refill_lock);
7230         btrfs_return_cluster_to_free_space(block_group, cluster);
7231         spin_unlock(&cluster->refill_lock);
7232
7233         /*
7234          * make sure this block group isn't part of a metadata
7235          * allocation cluster
7236          */
7237         cluster = &root->fs_info->meta_alloc_cluster;
7238         spin_lock(&cluster->refill_lock);
7239         btrfs_return_cluster_to_free_space(block_group, cluster);
7240         spin_unlock(&cluster->refill_lock);
7241
7242         path = btrfs_alloc_path();
7243         if (!path) {
7244                 ret = -ENOMEM;
7245                 goto out;
7246         }
7247
7248         inode = lookup_free_space_inode(root, block_group, path);
7249         if (!IS_ERR(inode)) {
7250                 ret = btrfs_orphan_add(trans, inode);
7251                 BUG_ON(ret);
7252                 clear_nlink(inode);
7253                 /* One for the block group's ref */
7254                 spin_lock(&block_group->lock);
7255                 if (block_group->iref) {
7256                         block_group->iref = 0;
7257                         block_group->inode = NULL;
7258                         spin_unlock(&block_group->lock);
7259                         iput(inode);
7260                 } else {
7261                         spin_unlock(&block_group->lock);
7262                 }
7263                 /* One for our lookup ref */
7264                 iput(inode);
7265         }
7266
7267         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7268         key.offset = block_group->key.objectid;
7269         key.type = 0;
7270
7271         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7272         if (ret < 0)
7273                 goto out;
7274         if (ret > 0)
7275                 btrfs_release_path(path);
7276         if (ret == 0) {
7277                 ret = btrfs_del_item(trans, tree_root, path);
7278                 if (ret)
7279                         goto out;
7280                 btrfs_release_path(path);
7281         }
7282
7283         spin_lock(&root->fs_info->block_group_cache_lock);
7284         rb_erase(&block_group->cache_node,
7285                  &root->fs_info->block_group_cache_tree);
7286         spin_unlock(&root->fs_info->block_group_cache_lock);
7287
7288         down_write(&block_group->space_info->groups_sem);
7289         /*
7290          * we must use list_del_init so people can check to see if they
7291          * are still on the list after taking the semaphore
7292          */
7293         list_del_init(&block_group->list);
7294         up_write(&block_group->space_info->groups_sem);
7295
7296         if (block_group->cached == BTRFS_CACHE_STARTED)
7297                 wait_block_group_cache_done(block_group);
7298
7299         btrfs_remove_free_space_cache(block_group);
7300
7301         spin_lock(&block_group->space_info->lock);
7302         block_group->space_info->total_bytes -= block_group->key.offset;
7303         block_group->space_info->bytes_readonly -= block_group->key.offset;
7304         block_group->space_info->disk_total -= block_group->key.offset * factor;
7305         spin_unlock(&block_group->space_info->lock);
7306
7307         memcpy(&key, &block_group->key, sizeof(key));
7308
7309         btrfs_clear_space_info_full(root->fs_info);
7310
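             /* once for the cache tree's original ref, once for our lookup ref */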
7311         btrfs_put_block_group(block_group);
7312         btrfs_put_block_group(block_group);
7313
7314         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7315         if (ret > 0)
7316                 ret = -EIO;
7317         if (ret < 0)
7318                 goto out;
7319
7320         ret = btrfs_del_item(trans, root, path);
7321 out:
7322         btrfs_free_path(path);
7323         return ret;
7324 }
7325
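     /*
      * set up the basic space_infos: SYSTEM, plus DATA and METADATA, or a
      * single mixed DATA|METADATA info when the MIXED_GROUPS incompat bit
      * is set.
      */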
7326 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7327 {
7328         struct btrfs_space_info *space_info;
7329         struct btrfs_super_block *disk_super;
7330         u64 features;
7331         u64 flags;
7332         int mixed = 0;
7333         int ret;
7334
7335         disk_super = &fs_info->super_copy;
7336         if (!btrfs_super_root(disk_super))
7337                 return 1;
7338
7339         features = btrfs_super_incompat_flags(disk_super);
7340         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7341                 mixed = 1;
7342
7343         flags = BTRFS_BLOCK_GROUP_SYSTEM;
7344         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7345         if (ret)
7346                 goto out;
7347
7348         if (mixed) {
7349                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7350                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7351         } else {
7352                 flags = BTRFS_BLOCK_GROUP_METADATA;
7353                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7354                 if (ret)
7355                         goto out;
7356
7357                 flags = BTRFS_BLOCK_GROUP_DATA;
7358                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7359         }
7360 out:
7361         return ret;
7362 }
7363
7364 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7365 {
7366         return unpin_extent_range(root, start, end);
7367 }
7368
7369 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7370                                u64 num_bytes, u64 *actual_bytes)
7371 {
7372         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7373 }
7374
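     /*
      * discard free space in every block group that overlaps
      * [range->start, range->start + range->len); free extents shorter
      * than range->minlen are skipped.  On return, range->len holds the
      * total number of bytes trimmed.
      */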
7375 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7376 {
7377         struct btrfs_fs_info *fs_info = root->fs_info;
7378         struct btrfs_block_group_cache *cache = NULL;
7379         u64 group_trimmed;
7380         u64 start;
7381         u64 end;
7382         u64 trimmed = 0;
7383         int ret = 0;
7384
7385         cache = btrfs_lookup_block_group(fs_info, range->start);
7386
7387         while (cache) {
7388                 if (cache->key.objectid >= (range->start + range->len)) {
7389                         btrfs_put_block_group(cache);
7390                         break;
7391                 }
7392
7393                 start = max(range->start, cache->key.objectid);
7394                 end = min(range->start + range->len,
7395                                 cache->key.objectid + cache->key.offset);
7396
7397                 if (end - start >= range->minlen) {
7398                         if (!block_group_cache_done(cache)) {
7399                                 ret = cache_block_group(cache, NULL, root, 0);
7400                                 if (!ret)
7401                                         wait_block_group_cache_done(cache);
7402                         }
7403                         ret = btrfs_trim_block_group(cache,
7404                                                      &group_trimmed,
7405                                                      start,
7406                                                      end,
7407                                                      range->minlen);
7408
7409                         trimmed += group_trimmed;
7410                         if (ret) {
7411                                 btrfs_put_block_group(cache);
7412                                 break;
7413                         }
7414                 }
7415
7416                 cache = next_block_group(fs_info->tree_root, cache);
7417         }
7418
7419         range->len = trimmed;
7420         return ret;
7421 }
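
     /*
      * Usage sketch (hypothetical FITRIM-style caller, not part of this
      * file):
      *
      *	struct fstrim_range range = {
      *		.start	= 0,
      *		.len	= (u64)-1,
      *		.minlen	= 4096,
      *	};
      *	ret = btrfs_trim_fs(fs_info->tree_root, &range);
      *	if (!ret)
      *		printk(KERN_INFO "btrfs: trimmed %llu bytes\n",
      *		       (unsigned long long)range.len);
      */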