Btrfs: move alloc_profile_is_valid() to volumes.c
fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting.
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere.
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);

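/*
 * Has caching of this block group's free space finished?  The caching
 * thread sets ->cached to BTRFS_CACHE_FINISHED once all free space has
 * been added; the barrier keeps this read from being reordered with the
 * caller's surrounding accesses.
 */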
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

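/*
 * Mark a range as unusable for free space caching by setting it in both
 * freed_extents trees; used to keep superblock mirrors and the area below
 * BTRFS_SUPER_INFO_OFFSET out of the free space cache.
 */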
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

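/*
 * Account for and exclude the superblock mirrors that live inside this
 * block group so they are never handed out as free space.
 */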
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

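/*
 * Return the block group's caching control with an extra reference held,
 * or NULL if caching is not currently in progress.
 */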
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, since their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

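/*
 * Worker that populates a block group's free space cache the slow way:
 * walk the extent tree in the commit root and add the gaps between
 * allocated extents as free space.
 */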
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

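/*
 * Start caching a block group's free space.  Try the fast path first
 * (loading the on-disk free space cache); if that isn't possible, kick
 * off caching_thread() to build the cache from the extent tree.  With
 * load_cache_only set, only the fast path is attempted.
 */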
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen, I think, in
         * the case where one thread starts to load the space cache info and
         * then some other thread starts a transaction commit which tries to
         * do an allocation while the first thread is still loading the space
         * cache info.  The previous loop should have kept us from choosing
         * this block group, but if we've moved to the state where we will
         * wait on caching block groups we need to first check if we're doing
         * a fast load here, so we can wait for it to finish, otherwise we
         * could end up allocating from a block group whose cache gets
         * evicted for one reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root) &&
            btrfs_test_opt(root, SPACE_CACHE)) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all the cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block, and increase the lower level extents' reference
 * counts.
 *
 * Back Reference Key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist of the key only.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
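
/*
 * A concrete illustration of the rules above (all numbers invented for
 * the example): a data extent at bytenr 12582912 referenced from inode
 * 257 at file offset 0 in subvolume 5 gets the implicit back ref key
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * while the same extent referenced through a shared leaf at bytenr
 * 30408704 would instead use the full back ref key
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */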

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
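/*
 * Convert an old-style v0 extent item to the current format, enlarging
 * the item to hold the new fields (plus a tree_block_info for metadata
 * extents).
 */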
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

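/*
 * Key offset for implicit data back refs: a 63-bit hash of the three
 * fields (root objectid, inode objectid, file offset) built from two
 * crc32c values.
 */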
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

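/*
 * Find an existing data back ref item for the given extent.  Shared refs
 * are found directly by (bytenr, parent); implicit refs hash to a key
 * offset that can collide, so walk forward and compare the ref contents.
 * Returns 0 if found, -ENOENT otherwise.
 */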
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

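/*
 * Insert a new data back ref item, or bump the count of an existing one.
 * For implicit refs, hash collisions are resolved by probing the next
 * key offsets until a matching or free slot is found.
 */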
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

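/*
 * Drop refs_to_drop references from the data back ref item at the current
 * path position, deleting the item once its count reaches zero.
 */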
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

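/*
 * Return the reference count stored in a data ref, whether it lives in an
 * inline ref, a standalone item, or an old v0 ref.
 */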
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

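/*
 * Find an existing tree block back ref item (shared or implicit).
 * Returns 0 if found, -ENOENT otherwise.  Tree block refs carry no count,
 * so the key alone is enough.
 */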
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

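/*
 * Insert a zero-size tree block back ref item keyed by either the parent
 * block (shared) or the owning root (implicit).
 */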
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}

1392 static inline int extent_ref_type(u64 parent, u64 owner)
1393 {
1394         int type;
1395         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1396                 if (parent > 0)
1397                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1398                 else
1399                         type = BTRFS_TREE_BLOCK_REF_KEY;
1400         } else {
1401                 if (parent > 0)
1402                         type = BTRFS_SHARED_DATA_REF_KEY;
1403                 else
1404                         type = BTRFS_EXTENT_DATA_REF_KEY;
1405         }
1406         return type;
1407 }
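
/*
 * Illustrative sketch (not part of the original file): the four backref
 * flavours extent_ref_type() can pick, shown as sample calls.  The owner
 * and parent values are hypothetical; tree block levels are always below
 * BTRFS_FIRST_FREE_OBJECTID, inode numbers start at it.
 */
static void __maybe_unused extent_ref_type_examples(void)
{
        /* tree block with a known parent -> shared block ref */
        WARN_ON(extent_ref_type(4096, 2) != BTRFS_SHARED_BLOCK_REF_KEY);
        /* tree block keyed by its owning root -> tree block ref */
        WARN_ON(extent_ref_type(0, 2) != BTRFS_TREE_BLOCK_REF_KEY);
        /* data extent with a known parent -> shared data ref */
        WARN_ON(extent_ref_type(4096, 257) != BTRFS_SHARED_DATA_REF_KEY);
        /* data extent keyed by (root, inode, offset) -> extent data ref */
        WARN_ON(extent_ref_type(0, 257) != BTRFS_EXTENT_DATA_REF_KEY);
}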
1408
1409 static int find_next_key(struct btrfs_path *path, int level,
1410                          struct btrfs_key *key)
1412 {
1413         for (; level < BTRFS_MAX_LEVEL; level++) {
1414                 if (!path->nodes[level])
1415                         break;
1416                 if (path->slots[level] + 1 >=
1417                     btrfs_header_nritems(path->nodes[level]))
1418                         continue;
1419                 if (level == 0)
1420                         btrfs_item_key_to_cpu(path->nodes[level], key,
1421                                               path->slots[level] + 1);
1422                 else
1423                         btrfs_node_key_to_cpu(path->nodes[level], key,
1424                                               path->slots[level] + 1);
1425                 return 0;
1426         }
1427         return 1;
1428 }
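
/*
 * Illustrative note (not part of the original file): find_next_key()
 * peeks at the key that would follow the current path position without
 * moving the path.  With the last slot of a leaf selected, it walks up
 * until some node still has a slot to the right and reports that node's
 * next key; only at the very end of the tree does it return 1.
 */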
1429
1430 /*
1431  * look for inline back ref. if back ref is found, *ref_ret is set
1432  * to the address of inline back ref, and 0 is returned.
1433  *
1434  * if back ref isn't found, *ref_ret is set to the address where it
1435  * should be inserted, and -ENOENT is returned.
1436  *
1437  * if insert is true and there are too many inline back refs, the path
1438  * points to the extent item, and -EAGAIN is returned.
1439  *
1440  * NOTE: inline back refs are ordered in the same way that back ref
1441  *       items in the tree are ordered.
1442  */
1443 static noinline_for_stack
1444 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1445                                  struct btrfs_root *root,
1446                                  struct btrfs_path *path,
1447                                  struct btrfs_extent_inline_ref **ref_ret,
1448                                  u64 bytenr, u64 num_bytes,
1449                                  u64 parent, u64 root_objectid,
1450                                  u64 owner, u64 offset, int insert)
1451 {
1452         struct btrfs_key key;
1453         struct extent_buffer *leaf;
1454         struct btrfs_extent_item *ei;
1455         struct btrfs_extent_inline_ref *iref;
1456         u64 flags;
1457         u64 item_size;
1458         unsigned long ptr;
1459         unsigned long end;
1460         int extra_size;
1461         int type;
1462         int want;
1463         int ret;
1464         int err = 0;
1465
1466         key.objectid = bytenr;
1467         key.type = BTRFS_EXTENT_ITEM_KEY;
1468         key.offset = num_bytes;
1469
1470         want = extent_ref_type(parent, owner);
1471         if (insert) {
1472                 extra_size = btrfs_extent_inline_ref_size(want);
1473                 path->keep_locks = 1;
1474         } else
1475                 extra_size = -1;
1476         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1477         if (ret < 0) {
1478                 err = ret;
1479                 goto out;
1480         }
1481         BUG_ON(ret);
1482
1483         leaf = path->nodes[0];
1484         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1485 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1486         if (item_size < sizeof(*ei)) {
1487                 if (!insert) {
1488                         err = -ENOENT;
1489                         goto out;
1490                 }
1491                 ret = convert_extent_item_v0(trans, root, path, owner,
1492                                              extra_size);
1493                 if (ret < 0) {
1494                         err = ret;
1495                         goto out;
1496                 }
1497                 leaf = path->nodes[0];
1498                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1499         }
1500 #endif
1501         BUG_ON(item_size < sizeof(*ei));
1502
1503         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1504         flags = btrfs_extent_flags(leaf, ei);
1505
1506         ptr = (unsigned long)(ei + 1);
1507         end = (unsigned long)ei + item_size;
1508
1509         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1510                 ptr += sizeof(struct btrfs_tree_block_info);
1511                 BUG_ON(ptr > end);
1512         } else {
1513                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1514         }
1515
1516         err = -ENOENT;
1517         while (1) {
1518                 if (ptr >= end) {
1519                         WARN_ON(ptr > end);
1520                         break;
1521                 }
1522                 iref = (struct btrfs_extent_inline_ref *)ptr;
1523                 type = btrfs_extent_inline_ref_type(leaf, iref);
1524                 if (want < type)
1525                         break;
1526                 if (want > type) {
1527                         ptr += btrfs_extent_inline_ref_size(type);
1528                         continue;
1529                 }
1530
1531                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1532                         struct btrfs_extent_data_ref *dref;
1533                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1534                         if (match_extent_data_ref(leaf, dref, root_objectid,
1535                                                   owner, offset)) {
1536                                 err = 0;
1537                                 break;
1538                         }
1539                         if (hash_extent_data_ref_item(leaf, dref) <
1540                             hash_extent_data_ref(root_objectid, owner, offset))
1541                                 break;
1542                 } else {
1543                         u64 ref_offset;
1544                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1545                         if (parent > 0) {
1546                                 if (parent == ref_offset) {
1547                                         err = 0;
1548                                         break;
1549                                 }
1550                                 if (ref_offset < parent)
1551                                         break;
1552                         } else {
1553                                 if (root_objectid == ref_offset) {
1554                                         err = 0;
1555                                         break;
1556                                 }
1557                                 if (ref_offset < root_objectid)
1558                                         break;
1559                         }
1560                 }
1561                 ptr += btrfs_extent_inline_ref_size(type);
1562         }
1563         if (err == -ENOENT && insert) {
1564                 if (item_size + extra_size >=
1565                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1566                         err = -EAGAIN;
1567                         goto out;
1568                 }
1569                 /*
1570                  * To add a new inline back ref, we have to make sure
1571                  * there is no corresponding back ref item.
1572                  * For simplicity, we just do not add a new inline back
1573                  * ref if there is any kind of item for this block.
1574                  */
1575                 if (find_next_key(path, 0, &key) == 0 &&
1576                     key.objectid == bytenr &&
1577                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1578                         err = -EAGAIN;
1579                         goto out;
1580                 }
1581         }
1582         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1583 out:
1584         if (insert) {
1585                 path->keep_locks = 0;
1586                 btrfs_unlock_up_safe(path, 1);
1587         }
1588         return err;
1589 }
1590
1591 /*
1592  * helper to add a new inline back ref
1593  */
1594 static noinline_for_stack
1595 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1596                                 struct btrfs_root *root,
1597                                 struct btrfs_path *path,
1598                                 struct btrfs_extent_inline_ref *iref,
1599                                 u64 parent, u64 root_objectid,
1600                                 u64 owner, u64 offset, int refs_to_add,
1601                                 struct btrfs_delayed_extent_op *extent_op)
1602 {
1603         struct extent_buffer *leaf;
1604         struct btrfs_extent_item *ei;
1605         unsigned long ptr;
1606         unsigned long end;
1607         unsigned long item_offset;
1608         u64 refs;
1609         int size;
1610         int type;
1611         int ret;
1612
1613         leaf = path->nodes[0];
1614         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1615         item_offset = (unsigned long)iref - (unsigned long)ei;
1616
1617         type = extent_ref_type(parent, owner);
1618         size = btrfs_extent_inline_ref_size(type);
1619
1620         ret = btrfs_extend_item(trans, root, path, size);
1621
1622         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1623         refs = btrfs_extent_refs(leaf, ei);
1624         refs += refs_to_add;
1625         btrfs_set_extent_refs(leaf, ei, refs);
1626         if (extent_op)
1627                 __run_delayed_extent_op(extent_op, leaf, ei);
1628
1629         ptr = (unsigned long)ei + item_offset;
1630         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1631         if (ptr < end - size)
1632                 memmove_extent_buffer(leaf, ptr + size, ptr,
1633                                       end - size - ptr);
1634
1635         iref = (struct btrfs_extent_inline_ref *)ptr;
1636         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1637         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1638                 struct btrfs_extent_data_ref *dref;
1639                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1640                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1641                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1642                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1643                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1644         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1645                 struct btrfs_shared_data_ref *sref;
1646                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1647                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1648                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1649         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1650                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1651         } else {
1652                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1653         }
1654         btrfs_mark_buffer_dirty(leaf);
1655         return 0;
1656 }
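
/*
 * Illustrative layout sketch (not part of the original file): what the
 * extend + memmove above does when the new inline ref lands in the
 * middle of the inline ref area:
 *
 *	before:	[extent item hdr][ref A][ref B][ref C]
 *	                               ^item_offset
 *	after:	[extent item hdr][ref A][ gap ][ref B][ref C]
 *	                               ^iref is written into the gap
 */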
1657
1658 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1659                                  struct btrfs_root *root,
1660                                  struct btrfs_path *path,
1661                                  struct btrfs_extent_inline_ref **ref_ret,
1662                                  u64 bytenr, u64 num_bytes, u64 parent,
1663                                  u64 root_objectid, u64 owner, u64 offset)
1664 {
1665         int ret;
1666
1667         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1668                                            bytenr, num_bytes, parent,
1669                                            root_objectid, owner, offset, 0);
1670         if (ret != -ENOENT)
1671                 return ret;
1672
1673         btrfs_release_path(path);
1674         *ref_ret = NULL;
1675
1676         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1677                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1678                                             root_objectid);
1679         } else {
1680                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1681                                              root_objectid, owner, offset);
1682         }
1683         return ret;
1684 }
1685
1686 /*
1687  * helper to update/remove inline back ref
1688  */
1689 static noinline_for_stack
1690 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1691                                  struct btrfs_root *root,
1692                                  struct btrfs_path *path,
1693                                  struct btrfs_extent_inline_ref *iref,
1694                                  int refs_to_mod,
1695                                  struct btrfs_delayed_extent_op *extent_op)
1696 {
1697         struct extent_buffer *leaf;
1698         struct btrfs_extent_item *ei;
1699         struct btrfs_extent_data_ref *dref = NULL;
1700         struct btrfs_shared_data_ref *sref = NULL;
1701         unsigned long ptr;
1702         unsigned long end;
1703         u32 item_size;
1704         int size;
1705         int type;
1706         int ret;
1707         u64 refs;
1708
1709         leaf = path->nodes[0];
1710         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1711         refs = btrfs_extent_refs(leaf, ei);
1712         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1713         refs += refs_to_mod;
1714         btrfs_set_extent_refs(leaf, ei, refs);
1715         if (extent_op)
1716                 __run_delayed_extent_op(extent_op, leaf, ei);
1717
1718         type = btrfs_extent_inline_ref_type(leaf, iref);
1719
1720         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1721                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1722                 refs = btrfs_extent_data_ref_count(leaf, dref);
1723         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1724                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1725                 refs = btrfs_shared_data_ref_count(leaf, sref);
1726         } else {
1727                 refs = 1;
1728                 BUG_ON(refs_to_mod != -1);
1729         }
1730
1731         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1732         refs += refs_to_mod;
1733
1734         if (refs > 0) {
1735                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1736                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1737                 else
1738                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1739         } else {
1740                 size = btrfs_extent_inline_ref_size(type);
1741                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1742                 ptr = (unsigned long)iref;
1743                 end = (unsigned long)ei + item_size;
1744                 if (ptr + size < end)
1745                         memmove_extent_buffer(leaf, ptr, ptr + size,
1746                                               end - ptr - size);
1747                 item_size -= size;
1748                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1749         }
1750         btrfs_mark_buffer_dirty(leaf);
1751         return 0;
1752 }
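
/*
 * Illustrative layout sketch (not part of the original file): when the
 * per-ref count above drops to zero, the inline ref is cut out by
 * sliding the tail left and truncating the item:
 *
 *	before:	[extent item hdr][ref A][ref B][ref C]
 *	                               ^iref, refs == 0
 *	after:	[extent item hdr][ref A][ref C]
 */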
1753
1754 static noinline_for_stack
1755 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1756                                  struct btrfs_root *root,
1757                                  struct btrfs_path *path,
1758                                  u64 bytenr, u64 num_bytes, u64 parent,
1759                                  u64 root_objectid, u64 owner,
1760                                  u64 offset, int refs_to_add,
1761                                  struct btrfs_delayed_extent_op *extent_op)
1762 {
1763         struct btrfs_extent_inline_ref *iref;
1764         int ret;
1765
1766         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1767                                            bytenr, num_bytes, parent,
1768                                            root_objectid, owner, offset, 1);
1769         if (ret == 0) {
1770                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1771                 ret = update_inline_extent_backref(trans, root, path, iref,
1772                                                    refs_to_add, extent_op);
1773         } else if (ret == -ENOENT) {
1774                 ret = setup_inline_extent_backref(trans, root, path, iref,
1775                                                   parent, root_objectid,
1776                                                   owner, offset, refs_to_add,
1777                                                   extent_op);
1778         }
1779         return ret;
1780 }
1781
1782 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1783                                  struct btrfs_root *root,
1784                                  struct btrfs_path *path,
1785                                  u64 bytenr, u64 parent, u64 root_objectid,
1786                                  u64 owner, u64 offset, int refs_to_add)
1787 {
1788         int ret;
1789         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1790                 BUG_ON(refs_to_add != 1);
1791                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1792                                             parent, root_objectid);
1793         } else {
1794                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1795                                              parent, root_objectid,
1796                                              owner, offset, refs_to_add);
1797         }
1798         return ret;
1799 }
1800
1801 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1802                                  struct btrfs_root *root,
1803                                  struct btrfs_path *path,
1804                                  struct btrfs_extent_inline_ref *iref,
1805                                  int refs_to_drop, int is_data)
1806 {
1807         int ret;
1808
1809         BUG_ON(!is_data && refs_to_drop != 1);
1810         if (iref) {
1811                 ret = update_inline_extent_backref(trans, root, path, iref,
1812                                                    -refs_to_drop, NULL);
1813         } else if (is_data) {
1814                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1815         } else {
1816                 ret = btrfs_del_item(trans, root, path);
1817         }
1818         return ret;
1819 }
1820
1821 static int btrfs_issue_discard(struct block_device *bdev,
1822                                 u64 start, u64 len)
1823 {
1824         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1825 }
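
/*
 * Worked example (not part of the original file): the >> 9 above turns
 * byte offsets into 512-byte sector numbers, which is what
 * blkdev_issue_discard() expects.  Discarding 1MiB at byte offset 4096
 * becomes sector 8, 2048 sectors long:
 *
 *	4096 >> 9 = 8,	1048576 >> 9 = 2048
 */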
1826
1827 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1828                                 u64 num_bytes, u64 *actual_bytes)
1829 {
1830         int ret;
1831         u64 discarded_bytes = 0;
1832         struct btrfs_bio *bbio = NULL;
1833
1835         /* Tell the block device(s) that the sectors can be discarded */
1836         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1837                               bytenr, &num_bytes, &bbio, 0);
1838         if (!ret) {
1839                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1840                 int i;
1841
1843                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1844                         if (!stripe->dev->can_discard)
1845                                 continue;
1846
1847                         ret = btrfs_issue_discard(stripe->dev->bdev,
1848                                                   stripe->physical,
1849                                                   stripe->length);
1850                         if (!ret)
1851                                 discarded_bytes += stripe->length;
1852                         else if (ret != -EOPNOTSUPP)
1853                                 break;
1854
1855                         /*
1856                          * Just in case we get back EOPNOTSUPP for some reason,
1857                          * ignore the return value so we don't screw up
1858                          * people calling discard_extent.
1859                          */
1860                         ret = 0;
1861                 }
1862                 kfree(bbio);
1863         }
1864
1865         if (actual_bytes)
1866                 *actual_bytes = discarded_bytes;
1867
1869         return ret;
1870 }
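
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that only cares how many bytes were really discarded, e.g. for trim
 * accounting.  The function name is hypothetical.
 */
static int __maybe_unused discard_and_count(struct btrfs_root *root,
                                            u64 start, u64 len, u64 *trimmed)
{
        u64 actual = 0;
        int ret;

        ret = btrfs_discard_extent(root, start, len, &actual);
        *trimmed += actual;
        return ret;
}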
1871
1872 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1873                          struct btrfs_root *root,
1874                          u64 bytenr, u64 num_bytes, u64 parent,
1875                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1876 {
1877         int ret;
1878         struct btrfs_fs_info *fs_info = root->fs_info;
1879
1880         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1881                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1882
1883         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1884                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1885                                         num_bytes,
1886                                         parent, root_objectid, (int)owner,
1887                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1888         } else {
1889                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1890                                         num_bytes,
1891                                         parent, root_objectid, owner, offset,
1892                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1893         }
1894         return ret;
1895 }
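
/*
 * Illustrative sketch (not part of the original file): two typical
 * calls with hypothetical values.  For a tree block, 'owner' is the
 * block's level; for a file extent it is the inode objectid and
 * 'offset' is the file offset the extent is mapped at:
 *
 *	btrfs_inc_extent_ref(trans, root, bytenr, blocksize, 0,
 *			     root->root_key.objectid, level, 0, 0);
 *	btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *			     root->root_key.objectid, ino, file_offset, 1);
 */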
1896
1897 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1898                                   struct btrfs_root *root,
1899                                   u64 bytenr, u64 num_bytes,
1900                                   u64 parent, u64 root_objectid,
1901                                   u64 owner, u64 offset, int refs_to_add,
1902                                   struct btrfs_delayed_extent_op *extent_op)
1903 {
1904         struct btrfs_path *path;
1905         struct extent_buffer *leaf;
1906         struct btrfs_extent_item *item;
1907         u64 refs;
1908         int ret;
1909         int err = 0;
1910
1911         path = btrfs_alloc_path();
1912         if (!path)
1913                 return -ENOMEM;
1914
1915         path->reada = 1;
1916         path->leave_spinning = 1;
1917         /* this will set up the path even if it fails to insert the back ref */
1918         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1919                                            path, bytenr, num_bytes, parent,
1920                                            root_objectid, owner, offset,
1921                                            refs_to_add, extent_op);
1922         if (ret == 0)
1923                 goto out;
1924
1925         if (ret != -EAGAIN) {
1926                 err = ret;
1927                 goto out;
1928         }
1929
1930         leaf = path->nodes[0];
1931         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1932         refs = btrfs_extent_refs(leaf, item);
1933         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1934         if (extent_op)
1935                 __run_delayed_extent_op(extent_op, leaf, item);
1936
1937         btrfs_mark_buffer_dirty(leaf);
1938         btrfs_release_path(path);
1939
1940         path->reada = 1;
1941         path->leave_spinning = 1;
1942
1943         /* now insert the actual backref */
1944         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1945                                     path, bytenr, parent, root_objectid,
1946                                     owner, offset, refs_to_add);
1947         BUG_ON(ret);
1948 out:
1949         btrfs_free_path(path);
1950         return err;
1951 }
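
/*
 * Illustrative flow summary (not part of the original file): on -EAGAIN
 * the new ref did not fit inline, so the code above bumps the ref count
 * on the extent item itself and then adds a separate keyed backref item
 * via insert_extent_backref().
 */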
1952
1953 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1954                                 struct btrfs_root *root,
1955                                 struct btrfs_delayed_ref_node *node,
1956                                 struct btrfs_delayed_extent_op *extent_op,
1957                                 int insert_reserved)
1958 {
1959         int ret = 0;
1960         struct btrfs_delayed_data_ref *ref;
1961         struct btrfs_key ins;
1962         u64 parent = 0;
1963         u64 ref_root = 0;
1964         u64 flags = 0;
1965
1966         ins.objectid = node->bytenr;
1967         ins.offset = node->num_bytes;
1968         ins.type = BTRFS_EXTENT_ITEM_KEY;
1969
1970         ref = btrfs_delayed_node_to_data_ref(node);
1971         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1972                 parent = ref->parent;
1973         else
1974                 ref_root = ref->root;
1975
1976         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1977                 if (extent_op) {
1978                         BUG_ON(extent_op->update_key);
1979                         flags |= extent_op->flags_to_set;
1980                 }
1981                 ret = alloc_reserved_file_extent(trans, root,
1982                                                  parent, ref_root, flags,
1983                                                  ref->objectid, ref->offset,
1984                                                  &ins, node->ref_mod);
1985         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1986                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1987                                              node->num_bytes, parent,
1988                                              ref_root, ref->objectid,
1989                                              ref->offset, node->ref_mod,
1990                                              extent_op);
1991         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1992                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1993                                           node->num_bytes, parent,
1994                                           ref_root, ref->objectid,
1995                                           ref->offset, node->ref_mod,
1996                                           extent_op);
1997         } else {
1998                 BUG();
1999         }
2000         return ret;
2001 }
2002
2003 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2004                                     struct extent_buffer *leaf,
2005                                     struct btrfs_extent_item *ei)
2006 {
2007         u64 flags = btrfs_extent_flags(leaf, ei);
2008         if (extent_op->update_flags) {
2009                 flags |= extent_op->flags_to_set;
2010                 btrfs_set_extent_flags(leaf, ei, flags);
2011         }
2012
2013         if (extent_op->update_key) {
2014                 struct btrfs_tree_block_info *bi;
2015                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2016                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2017                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2018         }
2019 }
2020
2021 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2022                                  struct btrfs_root *root,
2023                                  struct btrfs_delayed_ref_node *node,
2024                                  struct btrfs_delayed_extent_op *extent_op)
2025 {
2026         struct btrfs_key key;
2027         struct btrfs_path *path;
2028         struct btrfs_extent_item *ei;
2029         struct extent_buffer *leaf;
2030         u32 item_size;
2031         int ret;
2032         int err = 0;
2033
2034         path = btrfs_alloc_path();
2035         if (!path)
2036                 return -ENOMEM;
2037
2038         key.objectid = node->bytenr;
2039         key.type = BTRFS_EXTENT_ITEM_KEY;
2040         key.offset = node->num_bytes;
2041
2042         path->reada = 1;
2043         path->leave_spinning = 1;
2044         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2045                                 path, 0, 1);
2046         if (ret < 0) {
2047                 err = ret;
2048                 goto out;
2049         }
2050         if (ret > 0) {
2051                 err = -EIO;
2052                 goto out;
2053         }
2054
2055         leaf = path->nodes[0];
2056         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2057 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2058         if (item_size < sizeof(*ei)) {
2059                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2060                                              path, (u64)-1, 0);
2061                 if (ret < 0) {
2062                         err = ret;
2063                         goto out;
2064                 }
2065                 leaf = path->nodes[0];
2066                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2067         }
2068 #endif
2069         BUG_ON(item_size < sizeof(*ei));
2070         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2071         __run_delayed_extent_op(extent_op, leaf, ei);
2072
2073         btrfs_mark_buffer_dirty(leaf);
2074 out:
2075         btrfs_free_path(path);
2076         return err;
2077 }
2078
2079 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2080                                 struct btrfs_root *root,
2081                                 struct btrfs_delayed_ref_node *node,
2082                                 struct btrfs_delayed_extent_op *extent_op,
2083                                 int insert_reserved)
2084 {
2085         int ret = 0;
2086         struct btrfs_delayed_tree_ref *ref;
2087         struct btrfs_key ins;
2088         u64 parent = 0;
2089         u64 ref_root = 0;
2090
2091         ins.objectid = node->bytenr;
2092         ins.offset = node->num_bytes;
2093         ins.type = BTRFS_EXTENT_ITEM_KEY;
2094
2095         ref = btrfs_delayed_node_to_tree_ref(node);
2096         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2097                 parent = ref->parent;
2098         else
2099                 ref_root = ref->root;
2100
2101         BUG_ON(node->ref_mod != 1);
2102         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2103                 BUG_ON(!extent_op || !extent_op->update_flags ||
2104                        !extent_op->update_key);
2105                 ret = alloc_reserved_tree_block(trans, root,
2106                                                 parent, ref_root,
2107                                                 extent_op->flags_to_set,
2108                                                 &extent_op->key,
2109                                                 ref->level, &ins);
2110         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2111                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2112                                              node->num_bytes, parent, ref_root,
2113                                              ref->level, 0, 1, extent_op);
2114         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2115                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2116                                           node->num_bytes, parent, ref_root,
2117                                           ref->level, 0, 1, extent_op);
2118         } else {
2119                 BUG();
2120         }
2121         return ret;
2122 }
2123
2124 /* helper function to actually process a single delayed ref entry */
2125 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2126                                struct btrfs_root *root,
2127                                struct btrfs_delayed_ref_node *node,
2128                                struct btrfs_delayed_extent_op *extent_op,
2129                                int insert_reserved)
2130 {
2131         int ret;
2132         if (btrfs_delayed_ref_is_head(node)) {
2133                 struct btrfs_delayed_ref_head *head;
2134                 /*
2135                  * we've hit the end of the chain and we were supposed
2136                  * to insert this extent into the tree.  But, it got
2137                  * deleted before we ever needed to insert it, so all
2138                  * we have to do is clean up the accounting
2139                  */
2140                 BUG_ON(extent_op);
2141                 head = btrfs_delayed_node_to_head(node);
2142                 if (insert_reserved) {
2143                         btrfs_pin_extent(root, node->bytenr,
2144                                          node->num_bytes, 1);
2145                         if (head->is_data) {
2146                                 ret = btrfs_del_csums(trans, root,
2147                                                       node->bytenr,
2148                                                       node->num_bytes);
2149                                 BUG_ON(ret);
2150                         }
2151                 }
2152                 mutex_unlock(&head->mutex);
2153                 return 0;
2154         }
2155
2156         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2157             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2158                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2159                                            insert_reserved);
2160         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2161                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2162                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2163                                            insert_reserved);
2164         else
2165                 BUG();
2166         return ret;
2167 }
2168
2169 static noinline struct btrfs_delayed_ref_node *
2170 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2171 {
2172         struct rb_node *node;
2173         struct btrfs_delayed_ref_node *ref;
2174         int action = BTRFS_ADD_DELAYED_REF;
2175 again:
2176         /*
2177          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2178          * this prevents ref count from going down to zero when
2179          * there are still pending delayed refs.
2180          */
2181         node = rb_prev(&head->node.rb_node);
2182         while (1) {
2183                 if (!node)
2184                         break;
2185                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2186                                 rb_node);
2187                 if (ref->bytenr != head->node.bytenr)
2188                         break;
2189                 if (ref->action == action)
2190                         return ref;
2191                 node = rb_prev(node);
2192         }
2193         if (action == BTRFS_ADD_DELAYED_REF) {
2194                 action = BTRFS_DROP_DELAYED_REF;
2195                 goto again;
2196         }
2197         return NULL;
2198 }
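
/*
 * Worked example (not part of the original file) of why ADDs are picked
 * before DROPs above: suppose an extent has refs == 1 on disk and the
 * head has one ADD and one DROP queued.  Running the DROP first would
 * take refs to 0 and free the extent, even though the ADD would bring
 * it straight back; running the ADD first keeps refs from hitting 0.
 */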
2199
2200 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2201                                        struct btrfs_root *root,
2202                                        struct list_head *cluster)
2203 {
2204         struct btrfs_delayed_ref_root *delayed_refs;
2205         struct btrfs_delayed_ref_node *ref;
2206         struct btrfs_delayed_ref_head *locked_ref = NULL;
2207         struct btrfs_delayed_extent_op *extent_op;
2208         int ret;
2209         int count = 0;
2210         int must_insert_reserved = 0;
2211
2212         delayed_refs = &trans->transaction->delayed_refs;
2213         while (1) {
2214                 if (!locked_ref) {
2215                         /* pick a new head ref from the cluster list */
2216                         if (list_empty(cluster))
2217                                 break;
2218
2219                         locked_ref = list_entry(cluster->next,
2220                                      struct btrfs_delayed_ref_head, cluster);
2221
2222                         /* grab the lock that says we are going to process
2223                          * all the refs for this head */
2224                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2225
2226                         /*
2227                          * we may have dropped the spin lock to get the head
2228                          * mutex lock, and that might have given someone else
2229                          * time to free the head.  If that's true, it has been
2230                          * removed from our list and we can move on.
2231                          */
2232                         if (ret == -EAGAIN) {
2233                                 locked_ref = NULL;
2234                                 count++;
2235                                 continue;
2236                         }
2237                 }
2238
2239                 /*
2240                  * locked_ref is the head node, so we have to go one
2241                  * node back for any delayed ref updates
2242                  */
2243                 ref = select_delayed_ref(locked_ref);
2244
2245                 if (ref && ref->seq &&
2246                     btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
2247                         /*
2248                          * there are still refs with lower seq numbers in the
2249                          * process of being added. Don't run this ref yet.
2250                          */
2251                         list_del_init(&locked_ref->cluster);
2252                         mutex_unlock(&locked_ref->mutex);
2253                         locked_ref = NULL;
2254                         delayed_refs->num_heads_ready++;
2255                         spin_unlock(&delayed_refs->lock);
2256                         cond_resched();
2257                         spin_lock(&delayed_refs->lock);
2258                         continue;
2259                 }
2260
2261                 /*
2262                  * record the must insert reserved flag before we
2263                  * drop the spin lock.
2264                  */
2265                 must_insert_reserved = locked_ref->must_insert_reserved;
2266                 locked_ref->must_insert_reserved = 0;
2267
2268                 extent_op = locked_ref->extent_op;
2269                 locked_ref->extent_op = NULL;
2270
2271                 if (!ref) {
2272                         /* All delayed refs have been processed. Go ahead
2273                          * and send the head node to run_one_delayed_ref,
2274                          * so that any accounting fixes can happen
2275                          */
2276                         ref = &locked_ref->node;
2277
2278                         if (extent_op && must_insert_reserved) {
2279                                 kfree(extent_op);
2280                                 extent_op = NULL;
2281                         }
2282
2283                         if (extent_op) {
2284                                 spin_unlock(&delayed_refs->lock);
2285
2286                                 ret = run_delayed_extent_op(trans, root,
2287                                                             ref, extent_op);
2288                                 BUG_ON(ret);
2289                                 kfree(extent_op);
2290
2291                                 goto next;
2292                         }
2293
2294                         list_del_init(&locked_ref->cluster);
2295                         locked_ref = NULL;
2296                 }
2297
2298                 ref->in_tree = 0;
2299                 rb_erase(&ref->rb_node, &delayed_refs->root);
2300                 delayed_refs->num_entries--;
2301                 /*
2302                  * we modified num_entries, but as we're currently running
2303                  * delayed refs, skip
2304                  *     wake_up(&delayed_refs->seq_wait);
2305                  * here.
2306                  */
2307                 spin_unlock(&delayed_refs->lock);
2308
2309                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2310                                           must_insert_reserved);
2311                 BUG_ON(ret);
2312
2313                 btrfs_put_delayed_ref(ref);
2314                 kfree(extent_op);
2315                 count++;
2316 next:
2317                 do_chunk_alloc(trans, root->fs_info->extent_root,
2318                                2 * 1024 * 1024,
2319                                btrfs_get_alloc_profile(root, 0),
2320                                CHUNK_ALLOC_NO_FORCE);
2321                 cond_resched();
2322                 spin_lock(&delayed_refs->lock);
2323         }
2324         return count;
2325 }
2326
2328 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2329                         unsigned long num_refs)
2330 {
2331         struct list_head *first_seq = delayed_refs->seq_head.next;
2332
2333         spin_unlock(&delayed_refs->lock);
2334         pr_debug("waiting for more refs (num %lu, first %p)\n",
2335                  num_refs, first_seq);
2336         wait_event(delayed_refs->seq_wait,
2337                    num_refs != delayed_refs->num_entries ||
2338                    delayed_refs->seq_head.next != first_seq);
2339         pr_debug("done waiting for more refs (num %lu, first %p)\n",
2340                  delayed_refs->num_entries, delayed_refs->seq_head.next);
2341         spin_lock(&delayed_refs->lock);
2342 }
2343
2344 /*
2345  * this starts processing the delayed reference count updates and
2346  * extent insertions we have queued up so far.  count can be
2347  * 0, which means to process everything in the tree at the start
2348  * of the run (but not newly added entries), or it can be some target
2349  * number you'd like to process.
2350  */
2351 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2352                            struct btrfs_root *root, unsigned long count)
2353 {
2354         struct rb_node *node;
2355         struct btrfs_delayed_ref_root *delayed_refs;
2356         struct btrfs_delayed_ref_node *ref;
2357         struct list_head cluster;
2358         int ret;
2359         u64 delayed_start;
2360         int run_all = count == (unsigned long)-1;
2361         int run_most = 0;
2362         unsigned long num_refs = 0;
2363         int consider_waiting;
2364
2365         if (root == root->fs_info->extent_root)
2366                 root = root->fs_info->tree_root;
2367
2368         do_chunk_alloc(trans, root->fs_info->extent_root,
2369                        2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2370                        CHUNK_ALLOC_NO_FORCE);
2371
2372         delayed_refs = &trans->transaction->delayed_refs;
2373         INIT_LIST_HEAD(&cluster);
2374 again:
2375         consider_waiting = 0;
2376         spin_lock(&delayed_refs->lock);
2377         if (count == 0) {
2378                 count = delayed_refs->num_entries * 2;
2379                 run_most = 1;
2380         }
2381         while (1) {
2382                 if (!(run_all || run_most) &&
2383                     delayed_refs->num_heads_ready < 64)
2384                         break;
2385
2386                 /*
2387                  * go find something we can process in the rbtree.  We start at
2388                  * the beginning of the tree, and then build a cluster
2389                  * of refs to process starting at the first one we are able to
2390                  * lock
2391                  */
2392                 delayed_start = delayed_refs->run_delayed_start;
2393                 ret = btrfs_find_ref_cluster(trans, &cluster,
2394                                              delayed_refs->run_delayed_start);
2395                 if (ret)
2396                         break;
2397
2398                 if (delayed_start >= delayed_refs->run_delayed_start) {
2399                         if (consider_waiting == 0) {
2400                                 /*
2401                                  * btrfs_find_ref_cluster looped. let's do one
2402                                  * more cycle. if we don't run any delayed ref
2403                                  * during that cycle (because all of them are
2404                                  * blocked) and if the number of refs doesn't
2405                                  * change, we avoid busy waiting.
2406                                  */
2407                                 consider_waiting = 1;
2408                                 num_refs = delayed_refs->num_entries;
2409                         } else {
2410                                 wait_for_more_refs(delayed_refs, num_refs);
2411                                 /*
2412                                  * after waiting, things have changed. we
2413                                  * dropped the lock and someone else might have
2414                                  * run some refs, built new clusters and so on.
2415                                  * therefore, we restart staleness detection.
2416                                  */
2417                                 consider_waiting = 0;
2418                         }
2419                 }
2420
2421                 ret = run_clustered_refs(trans, root, &cluster);
2422                 BUG_ON(ret < 0);
2423
2424                 count -= min_t(unsigned long, ret, count);
2425
2426                 if (count == 0)
2427                         break;
2428
2429                 if (ret || delayed_refs->run_delayed_start == 0) {
2430                         /* refs were run, let's reset staleness detection */
2431                         consider_waiting = 0;
2432                 }
2433         }
2434
2435         if (run_all) {
2436                 node = rb_first(&delayed_refs->root);
2437                 if (!node)
2438                         goto out;
2439                 count = (unsigned long)-1;
2440
2441                 while (node) {
2442                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2443                                        rb_node);
2444                         if (btrfs_delayed_ref_is_head(ref)) {
2445                                 struct btrfs_delayed_ref_head *head;
2446
2447                                 head = btrfs_delayed_node_to_head(ref);
2448                                 atomic_inc(&ref->refs);
2449
2450                                 spin_unlock(&delayed_refs->lock);
2451                                 /*
2452                                  * Mutex was contended, block until it's
2453                                  * released and try again
2454                                  */
2455                                 mutex_lock(&head->mutex);
2456                                 mutex_unlock(&head->mutex);
2457
2458                                 btrfs_put_delayed_ref(ref);
2459                                 cond_resched();
2460                                 goto again;
2461                         }
2462                         node = rb_next(node);
2463                 }
2464                 spin_unlock(&delayed_refs->lock);
2465                 schedule_timeout(1);
2466                 goto again;
2467         }
2468 out:
2469         spin_unlock(&delayed_refs->lock);
2470         return 0;
2471 }
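
/*
 * Illustrative usage sketch (not part of the original file): the two
 * 'count' conventions described in the comment above:
 *
 *	process what was queued at the start of the run:
 *		btrfs_run_delayed_refs(trans, root, 0);
 *	drain everything, including newly added entries:
 *		btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 */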
2472
2473 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2474                                 struct btrfs_root *root,
2475                                 u64 bytenr, u64 num_bytes, u64 flags,
2476                                 int is_data)
2477 {
2478         struct btrfs_delayed_extent_op *extent_op;
2479         int ret;
2480
2481         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2482         if (!extent_op)
2483                 return -ENOMEM;
2484
2485         extent_op->flags_to_set = flags;
2486         extent_op->update_flags = 1;
2487         extent_op->update_key = 0;
2488         extent_op->is_data = is_data ? 1 : 0;
2489
2490         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2491                                           num_bytes, extent_op);
2492         if (ret)
2493                 kfree(extent_op);
2494         return ret;
2495 }
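
/*
 * Illustrative sketch (not part of the original file): a typical call
 * might flip a tree block to a full backref, e.g. while COWing a shared
 * block (values hypothetical):
 *
 *	btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *				    BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 */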
2496
2497 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2498                                       struct btrfs_root *root,
2499                                       struct btrfs_path *path,
2500                                       u64 objectid, u64 offset, u64 bytenr)
2501 {
2502         struct btrfs_delayed_ref_head *head;
2503         struct btrfs_delayed_ref_node *ref;
2504         struct btrfs_delayed_data_ref *data_ref;
2505         struct btrfs_delayed_ref_root *delayed_refs;
2506         struct rb_node *node;
2507         int ret = 0;
2508
2509         ret = -ENOENT;
2510         delayed_refs = &trans->transaction->delayed_refs;
2511         spin_lock(&delayed_refs->lock);
2512         head = btrfs_find_delayed_ref_head(trans, bytenr);
2513         if (!head)
2514                 goto out;
2515
2516         if (!mutex_trylock(&head->mutex)) {
2517                 atomic_inc(&head->node.refs);
2518                 spin_unlock(&delayed_refs->lock);
2519
2520                 btrfs_release_path(path);
2521
2522                 /*
2523                  * Mutex was contended, block until it's released and let
2524                  * caller try again
2525                  */
2526                 mutex_lock(&head->mutex);
2527                 mutex_unlock(&head->mutex);
2528                 btrfs_put_delayed_ref(&head->node);
2529                 return -EAGAIN;
2530         }
2531
2532         node = rb_prev(&head->node.rb_node);
2533         if (!node)
2534                 goto out_unlock;
2535
2536         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2537
2538         if (ref->bytenr != bytenr)
2539                 goto out_unlock;
2540
2541         ret = 1;
2542         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2543                 goto out_unlock;
2544
2545         data_ref = btrfs_delayed_node_to_data_ref(ref);
2546
2547         node = rb_prev(node);
2548         if (node) {
2549                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2550                 if (ref->bytenr == bytenr)
2551                         goto out_unlock;
2552         }
2553
2554         if (data_ref->root != root->root_key.objectid ||
2555             data_ref->objectid != objectid || data_ref->offset != offset)
2556                 goto out_unlock;
2557
2558         ret = 0;
2559 out_unlock:
2560         mutex_unlock(&head->mutex);
2561 out:
2562         spin_unlock(&delayed_refs->lock);
2563         return ret;
2564 }
2565
2566 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2567                                         struct btrfs_root *root,
2568                                         struct btrfs_path *path,
2569                                         u64 objectid, u64 offset, u64 bytenr)
2570 {
2571         struct btrfs_root *extent_root = root->fs_info->extent_root;
2572         struct extent_buffer *leaf;
2573         struct btrfs_extent_data_ref *ref;
2574         struct btrfs_extent_inline_ref *iref;
2575         struct btrfs_extent_item *ei;
2576         struct btrfs_key key;
2577         u32 item_size;
2578         int ret;
2579
2580         key.objectid = bytenr;
2581         key.offset = (u64)-1;
2582         key.type = BTRFS_EXTENT_ITEM_KEY;
2583
2584         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2585         if (ret < 0)
2586                 goto out;
2587         BUG_ON(ret == 0);
2588
2589         ret = -ENOENT;
2590         if (path->slots[0] == 0)
2591                 goto out;
2592
2593         path->slots[0]--;
2594         leaf = path->nodes[0];
2595         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2596
2597         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2598                 goto out;
2599
2600         ret = 1;
2601         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2602 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2603         if (item_size < sizeof(*ei)) {
2604                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2605                 goto out;
2606         }
2607 #endif
2608         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2609
2610         if (item_size != sizeof(*ei) +
2611             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2612                 goto out;
2613
2614         if (btrfs_extent_generation(leaf, ei) <=
2615             btrfs_root_last_snapshot(&root->root_item))
2616                 goto out;
2617
2618         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2619         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2620             BTRFS_EXTENT_DATA_REF_KEY)
2621                 goto out;
2622
2623         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2624         if (btrfs_extent_refs(leaf, ei) !=
2625             btrfs_extent_data_ref_count(leaf, ref) ||
2626             btrfs_extent_data_ref_root(leaf, ref) !=
2627             root->root_key.objectid ||
2628             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2629             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2630                 goto out;
2631
2632         ret = 0;
2633 out:
2634         return ret;
2635 }
2636
2637 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2638                           struct btrfs_root *root,
2639                           u64 objectid, u64 offset, u64 bytenr)
2640 {
2641         struct btrfs_path *path;
2642         int ret;
2643         int ret2;
2644
2645         path = btrfs_alloc_path();
2646         if (!path)
2647                 return -ENOMEM;
2648
2649         do {
2650                 ret = check_committed_ref(trans, root, path, objectid,
2651                                           offset, bytenr);
2652                 if (ret && ret != -ENOENT)
2653                         goto out;
2654
2655                 ret2 = check_delayed_ref(trans, root, path, objectid,
2656                                          offset, bytenr);
2657         } while (ret2 == -EAGAIN);
2658
2659         if (ret2 && ret2 != -ENOENT) {
2660                 ret = ret2;
2661                 goto out;
2662         }
2663
2664         if (ret != -ENOENT || ret2 != -ENOENT)
2665                 ret = 0;
2666 out:
2667         btrfs_free_path(path);
2668         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2669                 WARN_ON(ret > 0);
2670         return ret;
2671 }
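
/*
 * Illustrative summary (not part of the original file) of the return
 * convention above: 0 means neither the committed extent tree nor the
 * delayed ref queue shows another reference, so the extent is safe to
 * overwrite in place (the nocow case); > 0 means a cross reference may
 * exist; < 0 is an error.
 */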
2672
2673 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2674                            struct btrfs_root *root,
2675                            struct extent_buffer *buf,
2676                            int full_backref, int inc, int for_cow)
2677 {
2678         u64 bytenr;
2679         u64 num_bytes;
2680         u64 parent;
2681         u64 ref_root;
2682         u32 nritems;
2683         struct btrfs_key key;
2684         struct btrfs_file_extent_item *fi;
2685         int i;
2686         int level;
2687         int ret = 0;
2688         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2689                             u64, u64, u64, u64, u64, u64, int);
2690
2691         ref_root = btrfs_header_owner(buf);
2692         nritems = btrfs_header_nritems(buf);
2693         level = btrfs_header_level(buf);
2694
2695         if (!root->ref_cows && level == 0)
2696                 return 0;
2697
2698         if (inc)
2699                 process_func = btrfs_inc_extent_ref;
2700         else
2701                 process_func = btrfs_free_extent;
2702
2703         if (full_backref)
2704                 parent = buf->start;
2705         else
2706                 parent = 0;
2707
2708         for (i = 0; i < nritems; i++) {
2709                 if (level == 0) {
2710                         btrfs_item_key_to_cpu(buf, &key, i);
2711                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2712                                 continue;
2713                         fi = btrfs_item_ptr(buf, i,
2714                                             struct btrfs_file_extent_item);
2715                         if (btrfs_file_extent_type(buf, fi) ==
2716                             BTRFS_FILE_EXTENT_INLINE)
2717                                 continue;
2718                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2719                         if (bytenr == 0)
2720                                 continue;
2721
2722                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2723                         key.offset -= btrfs_file_extent_offset(buf, fi);
2724                         ret = process_func(trans, root, bytenr, num_bytes,
2725                                            parent, ref_root, key.objectid,
2726                                            key.offset, for_cow);
2727                         if (ret)
2728                                 goto fail;
2729                 } else {
2730                         bytenr = btrfs_node_blockptr(buf, i);
2731                         num_bytes = btrfs_level_size(root, level - 1);
2732                         ret = process_func(trans, root, bytenr, num_bytes,
2733                                            parent, ref_root, level - 1, 0,
2734                                            for_cow);
2735                         if (ret)
2736                                 goto fail;
2737                 }
2738         }
2739         return 0;
2740 fail:
2741         BUG();
2742         return ret;
2743 }
2744
2745 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2746                   struct extent_buffer *buf, int full_backref, int for_cow)
2747 {
2748         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2749 }
2750
2751 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2752                   struct extent_buffer *buf, int full_backref, int for_cow)
2753 {
2754         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2755 }
2756
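/*
 * Editor's note: the two wrappers above are how tree-block COW keeps
 * reference counts coherent: the new copy of a block takes references on
 * everything it points to via btrfs_inc_ref(), and the references held by
 * the block being replaced are dropped via btrfs_dec_ref().  A rough,
 * hypothetical sketch of the pairing (cow_buf/old_buf are illustrative
 * names, error handling elided):
 *
 *      ret = btrfs_inc_ref(trans, root, cow_buf, 0, 1);
 *      if (!ret)
 *              ret = btrfs_dec_ref(trans, root, old_buf, 0, 1);
 *
 * full_backref picks whether the refs are keyed on the physical block
 * (parent == buf->start) or on the owning tree root (parent == 0).
 */
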
2757 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2758                                  struct btrfs_root *root,
2759                                  struct btrfs_path *path,
2760                                  struct btrfs_block_group_cache *cache)
2761 {
2762         int ret;
2763         struct btrfs_root *extent_root = root->fs_info->extent_root;
2764         unsigned long bi;
2765         struct extent_buffer *leaf;
2766
2767         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2768         if (ret < 0)
2769                 goto fail;
2770         BUG_ON(ret);
2771
2772         leaf = path->nodes[0];
2773         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2774         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2775         btrfs_mark_buffer_dirty(leaf);
2776         btrfs_release_path(path);
2777 fail:
2778         return ret;
2779 }
2783
2784 static struct btrfs_block_group_cache *
2785 next_block_group(struct btrfs_root *root,
2786                  struct btrfs_block_group_cache *cache)
2787 {
2788         struct rb_node *node;
2789         spin_lock(&root->fs_info->block_group_cache_lock);
2790         node = rb_next(&cache->cache_node);
2791         btrfs_put_block_group(cache);
2792         if (node) {
2793                 cache = rb_entry(node, struct btrfs_block_group_cache,
2794                                  cache_node);
2795                 btrfs_get_block_group(cache);
2796         } else
2797                 cache = NULL;
2798         spin_unlock(&root->fs_info->block_group_cache_lock);
2799         return cache;
2800 }
2801
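/*
 * Editor's note: next_block_group() above drops the reference on the group
 * passed in and takes one on its successor, so the canonical iteration
 * pattern (used repeatedly by btrfs_write_dirty_block_groups() below) needs
 * no explicit put inside the loop:
 *
 *      cache = btrfs_lookup_first_block_group(root->fs_info, 0);
 *      while (cache) {
 *              ... inspect cache ...
 *              cache = next_block_group(root, cache);
 *      }
 *
 * Only a break out of the loop leaves a reference that the caller must
 * release with btrfs_put_block_group().
 */
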
2802 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2803                             struct btrfs_trans_handle *trans,
2804                             struct btrfs_path *path)
2805 {
2806         struct btrfs_root *root = block_group->fs_info->tree_root;
2807         struct inode *inode = NULL;
2808         u64 alloc_hint = 0;
2809         int dcs = BTRFS_DC_ERROR;
2810         int num_pages = 0;
2811         int retries = 0;
2812         int ret = 0;
2813
2814         /*
2815          * If this block group is smaller than 100 megs don't bother caching the
2816          * block group.
2817          */
2818         if (block_group->key.offset < (100 * 1024 * 1024)) {
2819                 spin_lock(&block_group->lock);
2820                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2821                 spin_unlock(&block_group->lock);
2822                 return 0;
2823         }
2824
2825 again:
2826         inode = lookup_free_space_inode(root, block_group, path);
2827         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2828                 ret = PTR_ERR(inode);
2829                 btrfs_release_path(path);
2830                 goto out;
2831         }
2832
2833         if (IS_ERR(inode)) {
2834                 BUG_ON(retries);
2835                 retries++;
2836
2837                 if (block_group->ro)
2838                         goto out_free;
2839
2840                 ret = create_free_space_inode(root, trans, block_group, path);
2841                 if (ret)
2842                         goto out_free;
2843                 goto again;
2844         }
2845
2846         /* We've already set up this transaction, go ahead and exit */
2847         if (block_group->cache_generation == trans->transid &&
2848             i_size_read(inode)) {
2849                 dcs = BTRFS_DC_SETUP;
2850                 goto out_put;
2851         }
2852
2853         /*
2854          * We want to set the generation to 0, that way if anything goes wrong
2855          * from here on out we know not to trust this cache when we load up next
2856          * time.
2857          */
2858         BTRFS_I(inode)->generation = 0;
2859         ret = btrfs_update_inode(trans, root, inode);
2860         WARN_ON(ret);
2861
2862         if (i_size_read(inode) > 0) {
2863                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2864                                                       inode);
2865                 if (ret)
2866                         goto out_put;
2867         }
2868
2869         spin_lock(&block_group->lock);
2870         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2871                 /* We're not cached, don't bother trying to write stuff out */
2872                 dcs = BTRFS_DC_WRITTEN;
2873                 spin_unlock(&block_group->lock);
2874                 goto out_put;
2875         }
2876         spin_unlock(&block_group->lock);
2877
2878         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2879         if (!num_pages)
2880                 num_pages = 1;
2881
2882         /*
2883          * Just to make absolutely sure we have enough space, we're going to
2884          * preallocate 16 pages worth of space for each block group.  In
2885          * practice we ought to use at most 8, but we need extra space so we can
2886          * add our header and have a terminator between the extents and the
2887          * bitmaps.
2888          */
2889         num_pages *= 16;
2890         num_pages *= PAGE_CACHE_SIZE;
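        /*
         * Editor's note: worked example of the sizing above, assuming 4K
         * pages.  A 1GB block group gives num_pages = 1, scaled to 16
         * pages, i.e. 64KB of cache space; a 100GB block group gets
         * 100 * 16 pages = 6400KB.
         */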
2891
2892         ret = btrfs_check_data_free_space(inode, num_pages);
2893         if (ret)
2894                 goto out_put;
2895
2896         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2897                                               num_pages, num_pages,
2898                                               &alloc_hint);
2899         if (!ret)
2900                 dcs = BTRFS_DC_SETUP;
2901         btrfs_free_reserved_data_space(inode, num_pages);
2902
2903 out_put:
2904         iput(inode);
2905 out_free:
2906         btrfs_release_path(path);
2907 out:
2908         spin_lock(&block_group->lock);
2909         if (!ret && dcs == BTRFS_DC_SETUP)
2910                 block_group->cache_generation = trans->transid;
2911         block_group->disk_cache_state = dcs;
2912         spin_unlock(&block_group->lock);
2913
2914         return ret;
2915 }
2916
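/*
 * Editor's note on the structure of the function below: dirty block groups
 * are written out in three passes over the block group rbtree.  Pass one
 * sets up a free-space-cache inode for every group still in the
 * BTRFS_DC_CLEAR state; pass two writes each dirty group's item into the
 * extent tree, promoting BTRFS_DC_SETUP groups to BTRFS_DC_NEED_WRITE;
 * pass three writes out the cache files themselves and marks the groups
 * BTRFS_DC_WRITTEN.  A group that is dirtied again mid-walk restarts the
 * sequence via the "again" label.
 */
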
2917 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2918                                    struct btrfs_root *root)
2919 {
2920         struct btrfs_block_group_cache *cache;
2921         int err = 0;
2922         struct btrfs_path *path;
2923         u64 last = 0;
2924
2925         path = btrfs_alloc_path();
2926         if (!path)
2927                 return -ENOMEM;
2928
2929 again:
2930         while (1) {
2931                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2932                 while (cache) {
2933                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2934                                 break;
2935                         cache = next_block_group(root, cache);
2936                 }
2937                 if (!cache) {
2938                         if (last == 0)
2939                                 break;
2940                         last = 0;
2941                         continue;
2942                 }
2943                 err = cache_save_setup(cache, trans, path);
2944                 last = cache->key.objectid + cache->key.offset;
2945                 btrfs_put_block_group(cache);
2946         }
2947
2948         while (1) {
2949                 if (last == 0) {
2950                         err = btrfs_run_delayed_refs(trans, root,
2951                                                      (unsigned long)-1);
2952                         BUG_ON(err);
2953                 }
2954
2955                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2956                 while (cache) {
2957                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2958                                 btrfs_put_block_group(cache);
2959                                 goto again;
2960                         }
2961
2962                         if (cache->dirty)
2963                                 break;
2964                         cache = next_block_group(root, cache);
2965                 }
2966                 if (!cache) {
2967                         if (last == 0)
2968                                 break;
2969                         last = 0;
2970                         continue;
2971                 }
2972
2973                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2974                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2975                 cache->dirty = 0;
2976                 last = cache->key.objectid + cache->key.offset;
2977
2978                 err = write_one_cache_group(trans, root, path, cache);
2979                 BUG_ON(err);
2980                 btrfs_put_block_group(cache);
2981         }
2982
2983         while (1) {
2984                 /*
2985                  * I don't think this is needed since we're just marking our
2986                  * preallocated extent as written, but just in case, it
2987                  * can't hurt.
2988                  */
2989                 if (last == 0) {
2990                         err = btrfs_run_delayed_refs(trans, root,
2991                                                      (unsigned long)-1);
2992                         BUG_ON(err);
2993                 }
2994
2995                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2996                 while (cache) {
2997                         /*
2998                          * Really this shouldn't happen, but it could if we
2999                          * couldn't write the entire preallocated extent and
3000                          * splitting the extent resulted in a new block.
3001                          */
3002                         if (cache->dirty) {
3003                                 btrfs_put_block_group(cache);
3004                                 goto again;
3005                         }
3006                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3007                                 break;
3008                         cache = next_block_group(root, cache);
3009                 }
3010                 if (!cache) {
3011                         if (last == 0)
3012                                 break;
3013                         last = 0;
3014                         continue;
3015                 }
3016
3017                 btrfs_write_out_cache(root, trans, cache, path);
3018
3019                 /*
3020                  * If we didn't have an error then the cache state is still
3021                  * NEED_WRITE, so we can set it to WRITTEN.
3022                  */
3023                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3024                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3025                 last = cache->key.objectid + cache->key.offset;
3026                 btrfs_put_block_group(cache);
3027         }
3028
3029         btrfs_free_path(path);
3030         return 0;
3031 }
3032
3033 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3034 {
3035         struct btrfs_block_group_cache *block_group;
3036         int readonly = 0;
3037
3038         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3039         if (!block_group || block_group->ro)
3040                 readonly = 1;
3041         if (block_group)
3042                 btrfs_put_block_group(block_group);
3043         return readonly;
3044 }
3045
3046 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3047                              u64 total_bytes, u64 bytes_used,
3048                              struct btrfs_space_info **space_info)
3049 {
3050         struct btrfs_space_info *found;
3051         int i;
3052         int factor;
3053
3054         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3055                      BTRFS_BLOCK_GROUP_RAID10))
3056                 factor = 2;
3057         else
3058                 factor = 1;
3059
3060         found = __find_space_info(info, flags);
3061         if (found) {
3062                 spin_lock(&found->lock);
3063                 found->total_bytes += total_bytes;
3064                 found->disk_total += total_bytes * factor;
3065                 found->bytes_used += bytes_used;
3066                 found->disk_used += bytes_used * factor;
3067                 found->full = 0;
3068                 spin_unlock(&found->lock);
3069                 *space_info = found;
3070                 return 0;
3071         }
3072         found = kzalloc(sizeof(*found), GFP_NOFS);
3073         if (!found)
3074                 return -ENOMEM;
3075
3076         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3077                 INIT_LIST_HEAD(&found->block_groups[i]);
3078         init_rwsem(&found->groups_sem);
3079         spin_lock_init(&found->lock);
3080         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3081         found->total_bytes = total_bytes;
3082         found->disk_total = total_bytes * factor;
3083         found->bytes_used = bytes_used;
3084         found->disk_used = bytes_used * factor;
3085         found->bytes_pinned = 0;
3086         found->bytes_reserved = 0;
3087         found->bytes_readonly = 0;
3088         found->bytes_may_use = 0;
3089         found->full = 0;
3090         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3091         found->chunk_alloc = 0;
3092         found->flush = 0;
3093         init_waitqueue_head(&found->wait);
3094         *space_info = found;
3095         list_add_rcu(&found->list, &info->space_info);
3096         return 0;
3097 }
3098
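/*
 * Editor's note: worked example of the factor accounting above.  Adding a
 * 1GB RAID1 block group (factor == 2) grows total_bytes by 1GB but
 * disk_total by 2GB, since every logical byte occupies two bytes on disk;
 * a RAID0 or single group (factor == 1) grows both by the same amount.
 */
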
3099 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3100 {
3101         u64 extra_flags = chunk_to_extended(flags) &
3102                                 BTRFS_EXTENDED_PROFILE_MASK;
3103
3104         if (flags & BTRFS_BLOCK_GROUP_DATA)
3105                 fs_info->avail_data_alloc_bits |= extra_flags;
3106         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3107                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3108         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3109                 fs_info->avail_system_alloc_bits |= extra_flags;
3110 }
3111
3112 /*
3113  * @flags: available profiles in extended format (see ctree.h)
3114  *
3115  * Returns the reduced profile in chunk format.  If a profile change is
3116  * in progress (either running or paused), picks the target profile if
3117  * it's already available; otherwise falls back to plain reducing.
3118  */
3119 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3120 {
3121         /*
3122          * we add in the count of missing devices because we want
3123          * to make sure that any RAID levels on a degraded FS
3124          * continue to be honored.
3125          */
3126         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3127                 root->fs_info->fs_devices->missing_devices;
3128
3129         /* pick restriper's target profile if it's available */
3130         spin_lock(&root->fs_info->balance_lock);
3131         if (root->fs_info->balance_ctl) {
3132                 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3133                 u64 tgt = 0;
3134
3135                 if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
3136                     (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3137                     (flags & bctl->data.target)) {
3138                         tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3139                 } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
3140                            (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3141                            (flags & bctl->sys.target)) {
3142                         tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3143                 } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
3144                            (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3145                            (flags & bctl->meta.target)) {
3146                         tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3147                 }
3148
3149                 if (tgt) {
3150                         spin_unlock(&root->fs_info->balance_lock);
3151                         flags = tgt;
3152                         goto out;
3153                 }
3154         }
3155         spin_unlock(&root->fs_info->balance_lock);
3156
3157         if (num_devices == 1)
3158                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3159         if (num_devices < 4)
3160                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3161
3162         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3163             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3164                       BTRFS_BLOCK_GROUP_RAID10))) {
3165                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3166         }
3167
3168         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3169             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3170                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3171         }
3172
3173         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3174             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3175              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3176              (flags & BTRFS_BLOCK_GROUP_DUP))) {
3177                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3178         }
3179
3180 out:
3181         return extended_to_chunk(flags);
3182 }
3183
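/*
 * Editor's note: worked example of the reduction above.  Suppose the
 * available bits are RAID0 | RAID1 on a two-device filesystem with no
 * balance running: num_devices == 2 keeps RAID1 and RAID0, the DUP test
 * does nothing, and the final test clears RAID0 because RAID1 is still
 * set, leaving plain RAID1.  On a single device the same input would
 * reduce all the way to 0, i.e. a SINGLE chunk profile.
 */
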
3184 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3185 {
3186         if (flags & BTRFS_BLOCK_GROUP_DATA)
3187                 flags |= root->fs_info->avail_data_alloc_bits;
3188         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3189                 flags |= root->fs_info->avail_system_alloc_bits;
3190         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3191                 flags |= root->fs_info->avail_metadata_alloc_bits;
3192
3193         return btrfs_reduce_alloc_profile(root, flags);
3194 }
3195
3196 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3197 {
3198         u64 flags;
3199
3200         if (data)
3201                 flags = BTRFS_BLOCK_GROUP_DATA;
3202         else if (root == root->fs_info->chunk_root)
3203                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3204         else
3205                 flags = BTRFS_BLOCK_GROUP_METADATA;
3206
3207         return get_alloc_profile(root, flags);
3208 }
3209
3210 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3211 {
3212         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3213                                                        BTRFS_BLOCK_GROUP_DATA);
3214 }
3215
3216 /*
3217  * This will check the space that the inode allocates from to make sure we have
3218  * enough space for bytes.
3219  */
3220 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3221 {
3222         struct btrfs_space_info *data_sinfo;
3223         struct btrfs_root *root = BTRFS_I(inode)->root;
3224         u64 used;
3225         int ret = 0, committed = 0, alloc_chunk = 1;
3226
3227         /* make sure bytes are sectorsize aligned */
3228         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
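        /*
         * Editor's note: the mask above rounds up to the next sectorsize
         * boundary.  With sectorsize == 4096, bytes == 5000 becomes
         * (5000 + 4095) & ~4095 == 8192, while an already-aligned 8192
         * stays 8192.
         */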
3229
3230         if (root == root->fs_info->tree_root ||
3231             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3232                 alloc_chunk = 0;
3233                 committed = 1;
3234         }
3235
3236         data_sinfo = BTRFS_I(inode)->space_info;
3237         if (!data_sinfo)
3238                 goto alloc;
3239
3240 again:
3241         /* make sure we have enough space to handle the data first */
3242         spin_lock(&data_sinfo->lock);
3243         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3244                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3245                 data_sinfo->bytes_may_use;
3246
3247         if (used + bytes > data_sinfo->total_bytes) {
3248                 struct btrfs_trans_handle *trans;
3249
3250                 /*
3251                  * if we don't have enough free bytes in this space then we need
3252                  * to alloc a new chunk.
3253                  */
3254                 if (!data_sinfo->full && alloc_chunk) {
3255                         u64 alloc_target;
3256
3257                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3258                         spin_unlock(&data_sinfo->lock);
3259 alloc:
3260                         alloc_target = btrfs_get_alloc_profile(root, 1);
3261                         trans = btrfs_join_transaction(root);
3262                         if (IS_ERR(trans))
3263                                 return PTR_ERR(trans);
3264
3265                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3266                                              bytes + 2 * 1024 * 1024,
3267                                              alloc_target,
3268                                              CHUNK_ALLOC_NO_FORCE);
3269                         btrfs_end_transaction(trans, root);
3270                         if (ret < 0) {
3271                                 if (ret != -ENOSPC)
3272                                         return ret;
3273                                 else
3274                                         goto commit_trans;
3275                         }
3276
3277                         if (!data_sinfo) {
3278                                 btrfs_set_inode_space_info(root, inode);
3279                                 data_sinfo = BTRFS_I(inode)->space_info;
3280                         }
3281                         goto again;
3282                 }
3283
3284                 /*
3285                  * If we have less pinned bytes than we want to allocate then
3286                  * don't bother committing the transaction, it won't help us.
3287                  */
3288                 if (data_sinfo->bytes_pinned < bytes)
3289                         committed = 1;
3290                 spin_unlock(&data_sinfo->lock);
3291
3292                 /* commit the current transaction and try again */
3293 commit_trans:
3294                 if (!committed &&
3295                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3296                         committed = 1;
3297                         trans = btrfs_join_transaction(root);
3298                         if (IS_ERR(trans))
3299                                 return PTR_ERR(trans);
3300                         ret = btrfs_commit_transaction(trans, root);
3301                         if (ret)
3302                                 return ret;
3303                         goto again;
3304                 }
3305
3306                 return -ENOSPC;
3307         }
3308         data_sinfo->bytes_may_use += bytes;
3309         trace_btrfs_space_reservation(root->fs_info, "space_info",
3310                                       (u64)(unsigned long)data_sinfo,
3311                                       bytes, 1);
3312         spin_unlock(&data_sinfo->lock);
3313
3314         return 0;
3315 }
3316
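/*
 * Editor's note: btrfs_check_data_free_space() above and
 * btrfs_free_reserved_data_space() below must stay paired.
 * cache_save_setup() earlier in this file shows the shape (error handling
 * elided):
 *
 *      ret = btrfs_check_data_free_space(inode, num_pages);
 *      if (ret)
 *              goto out;
 *      ret = btrfs_prealloc_file_range_trans(...);
 *      btrfs_free_reserved_data_space(inode, num_pages);
 *
 * i.e. the bytes_may_use reservation is dropped once the preallocation has
 * moved the accounting into real usage.
 */
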
3317 /*
3318  * Called if we need to clear a data reservation for this inode.
3319  */
3320 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3321 {
3322         struct btrfs_root *root = BTRFS_I(inode)->root;
3323         struct btrfs_space_info *data_sinfo;
3324
3325         /* make sure bytes are sectorsize aligned */
3326         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3327
3328         data_sinfo = BTRFS_I(inode)->space_info;
3329         spin_lock(&data_sinfo->lock);
3330         data_sinfo->bytes_may_use -= bytes;
3331         trace_btrfs_space_reservation(root->fs_info, "space_info",
3332                                       (u64)(unsigned long)data_sinfo,
3333                                       bytes, 0);
3334         spin_unlock(&data_sinfo->lock);
3335 }
3336
3337 static void force_metadata_allocation(struct btrfs_fs_info *info)
3338 {
3339         struct list_head *head = &info->space_info;
3340         struct btrfs_space_info *found;
3341
3342         rcu_read_lock();
3343         list_for_each_entry_rcu(found, head, list) {
3344                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3345                         found->force_alloc = CHUNK_ALLOC_FORCE;
3346         }
3347         rcu_read_unlock();
3348 }
3349
3350 static int should_alloc_chunk(struct btrfs_root *root,
3351                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3352                               int force)
3353 {
3354         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3355         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3356         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3357         u64 thresh;
3358
3359         if (force == CHUNK_ALLOC_FORCE)
3360                 return 1;
3361
3362         /*
3363          * We need to take into account the global rsv because for all intents
3364          * and purposes it's used space.  Don't worry about locking the
3365          * global_rsv, it doesn't change except when the transaction commits.
3366          */
3367         num_allocated += global_rsv->size;
3368
3369         /*
3370          * in limited mode, we want to have some free space up to
3371          * about 1% of the FS size.
3372          */
3373         if (force == CHUNK_ALLOC_LIMITED) {
3374                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3375                 thresh = max_t(u64, 64 * 1024 * 1024,
3376                                div_factor_fine(thresh, 1));
3377
3378                 if (num_bytes - num_allocated < thresh)
3379                         return 1;
3380         }
3381         thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3382
3383         /* 256MB or 2% of the FS */
3384         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
3385         /* system chunks need a much smaller threshold */
3386         if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
3387                 thresh = 32 * 1024 * 1024;
3388
3389         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
3390                 return 0;
3391         return 1;
3392 }
3393
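/*
 * Editor's note: worked example of the thresholds above on a 1TB
 * filesystem.  CHUNK_ALLOC_LIMITED allocates once unallocated space in
 * this space_info (counting the global reserve as used) drops below
 * max(64MB, 1% of 1TB) = ~10GB.  Otherwise a chunk is refused only when
 * the space_info already spans more than max(256MB, 2% of 1TB) = ~20GB
 * (32MB for system chunks) and less than 80% of it is used.
 */
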
3394 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3395                           struct btrfs_root *extent_root, u64 alloc_bytes,
3396                           u64 flags, int force)
3397 {
3398         struct btrfs_space_info *space_info;
3399         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3400         int wait_for_alloc = 0;
3401         int ret = 0;
3402
3403         space_info = __find_space_info(extent_root->fs_info, flags);
3404         if (!space_info) {
3405                 ret = update_space_info(extent_root->fs_info, flags,
3406                                         0, 0, &space_info);
3407                 BUG_ON(ret);
3408         }
3409         BUG_ON(!space_info);
3410
3411 again:
3412         spin_lock(&space_info->lock);
3413         if (force < space_info->force_alloc)
3414                 force = space_info->force_alloc;
3415         if (space_info->full) {
3416                 spin_unlock(&space_info->lock);
3417                 return 0;
3418         }
3419
3420         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3421                 spin_unlock(&space_info->lock);
3422                 return 0;
3423         } else if (space_info->chunk_alloc) {
3424                 wait_for_alloc = 1;
3425         } else {
3426                 space_info->chunk_alloc = 1;
3427         }
3428
3429         spin_unlock(&space_info->lock);
3430
3431         mutex_lock(&fs_info->chunk_mutex);
3432
3433         /*
3434          * The chunk_mutex is held throughout the entirety of a chunk
3435          * allocation, so once we've acquired the chunk_mutex we know that the
3436          * other guy is done and we need to recheck and see if we should
3437          * allocate.
3438          */
3439         if (wait_for_alloc) {
3440                 mutex_unlock(&fs_info->chunk_mutex);
3441                 wait_for_alloc = 0;
3442                 goto again;
3443         }
3444
3445         /*
3446          * If we have mixed data/metadata chunks we want to make sure we keep
3447          * allocating mixed chunks instead of individual chunks.
3448          */
3449         if (btrfs_mixed_space_info(space_info))
3450                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3451
3452         /*
3453          * if we're doing a data chunk, go ahead and make sure that
3454          * we keep a reasonable number of metadata chunks allocated in the
3455          * FS as well.
3456          */
3457         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3458                 fs_info->data_chunk_allocations++;
3459                 if (!(fs_info->data_chunk_allocations %
3460                       fs_info->metadata_ratio))
3461                         force_metadata_allocation(fs_info);
3462         }
3463
3464         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3465         if (ret < 0 && ret != -ENOSPC)
3466                 goto out;
3467
3468         spin_lock(&space_info->lock);
3469         if (ret)
3470                 space_info->full = 1;
3471         else
3472                 ret = 1;
3473
3474         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3475         space_info->chunk_alloc = 0;
3476         spin_unlock(&space_info->lock);
3477 out:
3478         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3479         return ret;
3480 }
3481
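/*
 * Editor's note: a typical invocation, mirroring the data-allocation path
 * in btrfs_check_data_free_space() above -- ask for the current data
 * profile and let the force level decide whether a chunk really gets
 * created:
 *
 *      ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 *                           bytes + 2 * 1024 * 1024,
 *                           btrfs_get_alloc_profile(root, 1),
 *                           CHUNK_ALLOC_NO_FORCE);
 *
 * It returns 1 if a chunk was allocated, 0 if none was judged necessary,
 * and a negative errno (including -ENOSPC once the devices are full)
 * otherwise.
 */
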
3482 /*
3483  * shrink metadata reservation for delalloc
3484  */
3485 static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
3486                            bool wait_ordered)
3487 {
3488         struct btrfs_block_rsv *block_rsv;
3489         struct btrfs_space_info *space_info;
3490         struct btrfs_trans_handle *trans;
3491         u64 reserved;
3492         u64 max_reclaim;
3493         u64 reclaimed = 0;
3494         long time_left;
3495         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3496         int loops = 0;
3497         unsigned long progress;
3498
3499         trans = (struct btrfs_trans_handle *)current->journal_info;
3500         block_rsv = &root->fs_info->delalloc_block_rsv;
3501         space_info = block_rsv->space_info;
3502
3503         smp_mb();
3504         reserved = space_info->bytes_may_use;
3505         progress = space_info->reservation_progress;
3506
3507         if (reserved == 0)
3508                 return 0;
3509
3510         smp_mb();
3511         if (root->fs_info->delalloc_bytes == 0) {
3512                 if (trans)
3513                         return 0;
3514                 btrfs_wait_ordered_extents(root, 0, 0);
3515                 return 0;
3516         }
3517
3518         max_reclaim = min(reserved, to_reclaim);
3519         nr_pages = max_t(unsigned long, nr_pages,
3520                          max_reclaim >> PAGE_CACHE_SHIFT);
3521         while (loops < 1024) {
3522                 /* have the flusher threads jump in and do some IO */
3523                 smp_mb();
3524                 nr_pages = min_t(unsigned long, nr_pages,
3525                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3526                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3527                                                 WB_REASON_FS_FREE_SPACE);
3528
3529                 spin_lock(&space_info->lock);
3530                 if (reserved > space_info->bytes_may_use)
3531                         reclaimed += reserved - space_info->bytes_may_use;
3532                 reserved = space_info->bytes_may_use;
3533                 spin_unlock(&space_info->lock);
3534
3535                 loops++;
3536
3537                 if (reserved == 0 || reclaimed >= max_reclaim)
3538                         break;
3539
3540                 if (trans && trans->transaction->blocked)
3541                         return -EAGAIN;
3542
3543                 if (wait_ordered && !trans) {
3544                         btrfs_wait_ordered_extents(root, 0, 0);
3545                 } else {
3546                         time_left = schedule_timeout_interruptible(1);
3547
3548                         /* We were interrupted, exit */
3549                         if (time_left)
3550                                 break;
3551                 }
3552
3553                 /* we've kicked the IO a few times; if anything has been freed,
3554                  * exit.  There is no sense in looping here for a long time
3555                  * when we really need to commit the transaction, or there are
3556                  * just too many writers without enough free space.
3557                  */
3558
3559                 if (loops > 3) {
3560                         smp_mb();
3561                         if (progress != space_info->reservation_progress)
3562                                 break;
3563                 }
3564
3565         }
3566
3567         return reclaimed >= to_reclaim;
3568 }
3569
3570 /**
3571  * may_commit_transaction - possibly commit the transaction if it's ok to
3572  * @root - the root we're allocating for
3573  * @bytes - the number of bytes we want to reserve
3574  * @force - force the commit
3575  *
3576  * This will check to make sure that committing the transaction will actually
3577  * get us somewhere and then commit the transaction if it does.  Otherwise it
3578  * will return -ENOSPC.
3579  */
3580 static int may_commit_transaction(struct btrfs_root *root,
3581                                   struct btrfs_space_info *space_info,
3582                                   u64 bytes, int force)
3583 {
3584         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3585         struct btrfs_trans_handle *trans;
3586
3587         trans = (struct btrfs_trans_handle *)current->journal_info;
3588         if (trans)
3589                 return -EAGAIN;
3590
3591         if (force)
3592                 goto commit;
3593
3594         /* See if there is enough pinned space to make this reservation */
3595         spin_lock(&space_info->lock);
3596         if (space_info->bytes_pinned >= bytes) {
3597                 spin_unlock(&space_info->lock);
3598                 goto commit;
3599         }
3600         spin_unlock(&space_info->lock);
3601
3602         /*
3603          * See if there is some space in the delayed insertion reservation for
3604          * this reservation.
3605          */
3606         if (space_info != delayed_rsv->space_info)
3607                 return -ENOSPC;
3608
3609         spin_lock(&space_info->lock);
3610         spin_lock(&delayed_rsv->lock);
3611         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3612                 spin_unlock(&delayed_rsv->lock);
3613                 spin_unlock(&space_info->lock);
3614                 return -ENOSPC;
3615         }
3616         spin_unlock(&delayed_rsv->lock);
3617         spin_unlock(&space_info->lock);
3618
3619 commit:
3620         trans = btrfs_join_transaction(root);
3621         if (IS_ERR(trans))
3622                 return -ENOSPC;
3623
3624         return btrfs_commit_transaction(trans, root);
3625 }
3626
3627 /**
3628  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3629  * @root - the root we're allocating for
3630  * @block_rsv - the block_rsv we're allocating for
3631  * @orig_bytes - the number of bytes we want
3632  * @flush - whether or not we can flush to make our reservation
3633  *
3634  * This will reserve orig_bytes from the space info associated
3635  * with the block_rsv.  If there is not enough space it will make an attempt to
3636  * flush out space to make room.  It will do this by flushing delalloc if
3637  * possible or committing the transaction.  If flush is 0 then no attempts to
3638  * regain reservations will be made and this will fail if there is not enough
3639  * space already.
3640  */
3641 static int reserve_metadata_bytes(struct btrfs_root *root,
3642                                   struct btrfs_block_rsv *block_rsv,
3643                                   u64 orig_bytes, int flush)
3644 {
3645         struct btrfs_space_info *space_info = block_rsv->space_info;
3646         u64 used;
3647         u64 num_bytes = orig_bytes;
3648         int retries = 0;
3649         int ret = 0;
3650         bool committed = false;
3651         bool flushing = false;
3652         bool wait_ordered = false;
3653
3654 again:
3655         ret = 0;
3656         spin_lock(&space_info->lock);
3657         /*
3658          * We only want to wait if somebody other than us is flushing and we are
3659          * actually allowed to flush.
3660          */
3661         while (flush && !flushing && space_info->flush) {
3662                 spin_unlock(&space_info->lock);
3663                 /*
3664                  * If we have a trans handle we can't wait because the flusher
3665                  * may have to commit the transaction, which would mean we would
3666                  * deadlock since we are waiting for the flusher to finish, but
3667                  * hold the current transaction open.
3668                  */
3669                 if (current->journal_info)
3670                         return -EAGAIN;
3671                 ret = wait_event_interruptible(space_info->wait,
3672                                                !space_info->flush);
3673                 /* Must have been interrupted, return */
3674                 if (ret)
3675                         return -EINTR;
3676
3677                 spin_lock(&space_info->lock);
3678         }
3679
3680         ret = -ENOSPC;
3681         used = space_info->bytes_used + space_info->bytes_reserved +
3682                 space_info->bytes_pinned + space_info->bytes_readonly +
3683                 space_info->bytes_may_use;
3684
3685         /*
3686          * The idea here is that if we've not already over-reserved the block
3687          * group then we can go ahead and save our reservation first and then
3688          * start flushing if we need to.  Otherwise, if we've already
3689          * overcommitted, let's start flushing stuff first and then come back
3690          * and try to make our reservation.
3691          */
3692         if (used <= space_info->total_bytes) {
3693                 if (used + orig_bytes <= space_info->total_bytes) {
3694                         space_info->bytes_may_use += orig_bytes;
3695                         trace_btrfs_space_reservation(root->fs_info,
3696                                               "space_info",
3697                                               (u64)(unsigned long)space_info,
3698                                               orig_bytes, 1);
3699                         ret = 0;
3700                 } else {
3701                         /*
3702                          * Ok set num_bytes to orig_bytes since we aren't
3703                          * overocmmitted, this way we only try and reclaim what
3704                          * we need.
3705                          */
3706                         num_bytes = orig_bytes;
3707                 }
3708         } else {
3709                 /*
3710                  * Ok we're over committed, set num_bytes to the overcommitted
3711                  * amount plus the amount of bytes that we need for this
3712                  * reservation.
3713                  */
3714                 wait_ordered = true;
3715                 num_bytes = used - space_info->total_bytes +
3716                         (orig_bytes * (retries + 1));
3717         }
3718
3719         if (ret) {
3720                 u64 profile = btrfs_get_alloc_profile(root, 0);
3721                 u64 avail;
3722
3723                 /*
3724                  * If we have a lot of space that's pinned, don't bother doing
3725                  * the overcommit dance yet and just commit the transaction.
3726                  */
3727                 avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3728                 do_div(avail, 10);
3729                 if (space_info->bytes_pinned >= avail && flush && !committed) {
3730                         space_info->flush = 1;
3731                         flushing = true;
3732                         spin_unlock(&space_info->lock);
3733                         ret = may_commit_transaction(root, space_info,
3734                                                      orig_bytes, 1);
3735                         if (ret)
3736                                 goto out;
3737                         committed = true;
3738                         goto again;
3739                 }
3740
3741                 spin_lock(&root->fs_info->free_chunk_lock);
3742                 avail = root->fs_info->free_chunk_space;
3743
3744                 /*
3745                  * If we have dup, raid1 or raid10 then only half of the free
3746          * space is actually usable.
3747                  */
3748                 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3749                                BTRFS_BLOCK_GROUP_RAID1 |
3750                                BTRFS_BLOCK_GROUP_RAID10))
3751                         avail >>= 1;
3752
3753                 /*
3754                  * If we aren't flushing don't let us overcommit too much, say
3755                  * 1/8th of the space.  If we can flush, let it overcommit up to
3756                  * 1/2 of the space.
3757                  */
3758                 if (flush)
3759                         avail >>= 3;
3760                 else
3761                         avail >>= 1;
3762                 spin_unlock(&root->fs_info->free_chunk_lock);
3763
3764                 if (used + num_bytes < space_info->total_bytes + avail) {
3765                         space_info->bytes_may_use += orig_bytes;
3766                         trace_btrfs_space_reservation(root->fs_info,
3767                                               "space_info",
3768                                               (u64)(unsigned long)space_info,
3769                                               orig_bytes, 1);
3770                         ret = 0;
3771                 } else {
3772                         wait_ordered = true;
3773                 }
3774         }
3775
3776         /*
3777          * Couldn't make our reservation, save our place so while we're trying
3778          * to reclaim space we can actually use it instead of somebody else
3779          * stealing it from us.
3780          */
3781         if (ret && flush) {
3782                 flushing = true;
3783                 space_info->flush = 1;
3784         }
3785
3786         spin_unlock(&space_info->lock);
3787
3788         if (!ret || !flush)
3789                 goto out;
3790
3791         /*
3792          * We do synchronous shrinking since we don't actually unreserve
3793          * metadata until after the IO is completed.
3794          */
3795         ret = shrink_delalloc(root, num_bytes, wait_ordered);
3796         if (ret < 0)
3797                 goto out;
3798
3799         ret = 0;
3800
3801         /*
3802          * So if we were overcommitted it's possible that somebody else flushed
3803          * out enough space and we simply didn't have enough space to reclaim,
3804          * so go back around and try again.
3805          */
3806         if (retries < 2) {
3807                 wait_ordered = true;
3808                 retries++;
3809                 goto again;
3810         }
3811
3812         ret = -ENOSPC;
3813         if (committed)
3814                 goto out;
3815
3816         ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3817         if (!ret) {
3818                 committed = true;
3819                 goto again;
3820         }
3821
3822 out:
3823         if (flushing) {
3824                 spin_lock(&space_info->lock);
3825                 space_info->flush = 0;
3826                 wake_up_all(&space_info->wait);
3827                 spin_unlock(&space_info->lock);
3828         }
3829         return ret;
3830 }
3831
3832 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3833                                              struct btrfs_root *root)
3834 {
3835         struct btrfs_block_rsv *block_rsv = NULL;
3836
3837         if (root->ref_cows || root == root->fs_info->csum_root)
3838                 block_rsv = trans->block_rsv;
3839
3840         if (!block_rsv)
3841                 block_rsv = root->block_rsv;
3842
3843         if (!block_rsv)
3844                 block_rsv = &root->fs_info->empty_block_rsv;
3845
3846         return block_rsv;
3847 }
3848
3849 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3850                                u64 num_bytes)
3851 {
3852         int ret = -ENOSPC;
3853         spin_lock(&block_rsv->lock);
3854         if (block_rsv->reserved >= num_bytes) {
3855                 block_rsv->reserved -= num_bytes;
3856                 if (block_rsv->reserved < block_rsv->size)
3857                         block_rsv->full = 0;
3858                 ret = 0;
3859         }
3860         spin_unlock(&block_rsv->lock);
3861         return ret;
3862 }
3863
3864 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3865                                 u64 num_bytes, int update_size)
3866 {
3867         spin_lock(&block_rsv->lock);
3868         block_rsv->reserved += num_bytes;
3869         if (update_size)
3870                 block_rsv->size += num_bytes;
3871         else if (block_rsv->reserved >= block_rsv->size)
3872                 block_rsv->full = 1;
3873         spin_unlock(&block_rsv->lock);
3874 }
3875
3876 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
3877                                     struct btrfs_block_rsv *block_rsv,
3878                                     struct btrfs_block_rsv *dest, u64 num_bytes)
3879 {
3880         struct btrfs_space_info *space_info = block_rsv->space_info;
3881
3882         spin_lock(&block_rsv->lock);
3883         if (num_bytes == (u64)-1)
3884                 num_bytes = block_rsv->size;
3885         block_rsv->size -= num_bytes;
3886         if (block_rsv->reserved >= block_rsv->size) {
3887                 num_bytes = block_rsv->reserved - block_rsv->size;
3888                 block_rsv->reserved = block_rsv->size;
3889                 block_rsv->full = 1;
3890         } else {
3891                 num_bytes = 0;
3892         }
3893         spin_unlock(&block_rsv->lock);
3894
3895         if (num_bytes > 0) {
3896                 if (dest) {
3897                         spin_lock(&dest->lock);
3898                         if (!dest->full) {
3899                                 u64 bytes_to_add;
3900
3901                                 bytes_to_add = dest->size - dest->reserved;
3902                                 bytes_to_add = min(num_bytes, bytes_to_add);
3903                                 dest->reserved += bytes_to_add;
3904                                 if (dest->reserved >= dest->size)
3905                                         dest->full = 1;
3906                                 num_bytes -= bytes_to_add;
3907                         }
3908                         spin_unlock(&dest->lock);
3909                 }
3910                 if (num_bytes) {
3911                         spin_lock(&space_info->lock);
3912                         space_info->bytes_may_use -= num_bytes;
3913                         trace_btrfs_space_reservation(fs_info, "space_info",
3914                                               (u64)(unsigned long)space_info,
3915                                               num_bytes, 0);
3916                         space_info->reservation_progress++;
3917                         spin_unlock(&space_info->lock);
3918                 }
3919         }
3920 }
3921
3922 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3923                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3924 {
3925         int ret;
3926
3927         ret = block_rsv_use_bytes(src, num_bytes);
3928         if (ret)
3929                 return ret;
3930
3931         block_rsv_add_bytes(dst, num_bytes, 1);
3932         return 0;
3933 }
3934
3935 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3936 {
3937         memset(rsv, 0, sizeof(*rsv));
3938         spin_lock_init(&rsv->lock);
3939 }
3940
3941 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3942 {
3943         struct btrfs_block_rsv *block_rsv;
3944         struct btrfs_fs_info *fs_info = root->fs_info;
3945
3946         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3947         if (!block_rsv)
3948                 return NULL;
3949
3950         btrfs_init_block_rsv(block_rsv);
3951         block_rsv->space_info = __find_space_info(fs_info,
3952                                                   BTRFS_BLOCK_GROUP_METADATA);
3953         return block_rsv;
3954 }
3955
3956 void btrfs_free_block_rsv(struct btrfs_root *root,
3957                           struct btrfs_block_rsv *rsv)
3958 {
3959         btrfs_block_rsv_release(root, rsv, (u64)-1);
3960         kfree(rsv);
3961 }
3962
3963 static inline int __block_rsv_add(struct btrfs_root *root,
3964                                   struct btrfs_block_rsv *block_rsv,
3965                                   u64 num_bytes, int flush)
3966 {
3967         int ret;
3968
3969         if (num_bytes == 0)
3970                 return 0;
3971
3972         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3973         if (!ret) {
3974                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3975                 return 0;
3976         }
3977
3978         return ret;
3979 }
3980
3981 int btrfs_block_rsv_add(struct btrfs_root *root,
3982                         struct btrfs_block_rsv *block_rsv,
3983                         u64 num_bytes)
3984 {
3985         return __block_rsv_add(root, block_rsv, num_bytes, 1);
3986 }
3987
3988 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3989                                 struct btrfs_block_rsv *block_rsv,
3990                                 u64 num_bytes)
3991 {
3992         return __block_rsv_add(root, block_rsv, num_bytes, 0);
3993 }
3994
3995 int btrfs_block_rsv_check(struct btrfs_root *root,
3996                           struct btrfs_block_rsv *block_rsv, int min_factor)
3997 {
3998         u64 num_bytes = 0;
3999         int ret = -ENOSPC;
4000
4001         if (!block_rsv)
4002                 return 0;
4003
4004         spin_lock(&block_rsv->lock);
4005         num_bytes = div_factor(block_rsv->size, min_factor);
4006         if (block_rsv->reserved >= num_bytes)
4007                 ret = 0;
4008         spin_unlock(&block_rsv->lock);
4009
4010         return ret;
4011 }
4012
4013 static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
4014                                            struct btrfs_block_rsv *block_rsv,
4015                                            u64 min_reserved, int flush)
4016 {
4017         u64 num_bytes = 0;
4018         int ret = -ENOSPC;
4019
4020         if (!block_rsv)
4021                 return 0;
4022
4023         spin_lock(&block_rsv->lock);
4024         num_bytes = min_reserved;
4025         if (block_rsv->reserved >= num_bytes)
4026                 ret = 0;
4027         else
4028                 num_bytes -= block_rsv->reserved;
4029         spin_unlock(&block_rsv->lock);
4030
4031         if (!ret)
4032                 return 0;
4033
4034         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4035         if (!ret) {
4036                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4037                 return 0;
4038         }
4039
4040         return ret;
4041 }
4042
4043 int btrfs_block_rsv_refill(struct btrfs_root *root,
4044                            struct btrfs_block_rsv *block_rsv,
4045                            u64 min_reserved)
4046 {
4047         return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4048 }
4049
4050 int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4051                                    struct btrfs_block_rsv *block_rsv,
4052                                    u64 min_reserved)
4053 {
4054         return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4055 }
4056
4057 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4058                             struct btrfs_block_rsv *dst_rsv,
4059                             u64 num_bytes)
4060 {
4061         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4062 }
4063
4064 void btrfs_block_rsv_release(struct btrfs_root *root,
4065                              struct btrfs_block_rsv *block_rsv,
4066                              u64 num_bytes)
4067 {
4068         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4069         if (global_rsv->full || global_rsv == block_rsv ||
4070             block_rsv->space_info != global_rsv->space_info)
4071                 global_rsv = NULL;
4072         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4073                                 num_bytes);
4074 }
4075
4076 /*
4077  * helper to calculate size of global block reservation.
4078  * the desired value is sum of space used by extent tree,
4079  * checksum tree and root tree
4080  */
4081 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4082 {
4083         struct btrfs_space_info *sinfo;
4084         u64 num_bytes;
4085         u64 meta_used;
4086         u64 data_used;
4087         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4088
4089         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4090         spin_lock(&sinfo->lock);
4091         data_used = sinfo->bytes_used;
4092         spin_unlock(&sinfo->lock);
4093
4094         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4095         spin_lock(&sinfo->lock);
4096         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4097                 data_used = 0;
4098         meta_used = sinfo->bytes_used;
4099         spin_unlock(&sinfo->lock);
4100
4101         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4102                     csum_size * 2;
4103         num_bytes += div64_u64(data_used + meta_used, 50);
4104
4105         if (num_bytes * 3 > meta_used)
4106                 num_bytes = div64_u64(meta_used, 3) * 2;
4107
4108         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4109 }
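
/*
 * A worked example of the estimate above (numbers purely
 * illustrative): with 1GiB of data used, 256MiB of metadata used,
 * 4KiB blocks and 4-byte crc32c checksums,
 *
 *	csum bytes:  (1GiB >> 12) * 4 * 2	=  2MiB
 *	2% slice:    (1GiB + 256MiB) / 50	= ~25.6MiB
 *	total					= ~27.6MiB
 *
 * Three times the total is still below meta_used, so the cap that
 * limits the reservation to 2/3 of the metadata space does not apply,
 * and with a 4KiB leafsize the result is aligned up to a 4MiB
 * boundary (leafsize << 10), giving 28MiB.
 */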
4110
4111 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4112 {
4113         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4114         struct btrfs_space_info *sinfo = block_rsv->space_info;
4115         u64 num_bytes;
4116
4117         num_bytes = calc_global_metadata_size(fs_info);
4118
4119         spin_lock(&block_rsv->lock);
4120         spin_lock(&sinfo->lock);
4121
4122         block_rsv->size = num_bytes;
4123
4124         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4125                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4126                     sinfo->bytes_may_use;
4127
4128         if (sinfo->total_bytes > num_bytes) {
4129                 num_bytes = sinfo->total_bytes - num_bytes;
4130                 block_rsv->reserved += num_bytes;
4131                 sinfo->bytes_may_use += num_bytes;
4132                 trace_btrfs_space_reservation(fs_info, "space_info",
4133                                       (u64)(unsigned long)sinfo, num_bytes, 1);
4134         }
4135
4136         if (block_rsv->reserved >= block_rsv->size) {
4137                 num_bytes = block_rsv->reserved - block_rsv->size;
4138                 sinfo->bytes_may_use -= num_bytes;
4139                 trace_btrfs_space_reservation(fs_info, "space_info",
4140                                       (u64)(unsigned long)sinfo, num_bytes, 0);
4141                 sinfo->reservation_progress++;
4142                 block_rsv->reserved = block_rsv->size;
4143                 block_rsv->full = 1;
4144         }
4145
4146         spin_unlock(&sinfo->lock);
4147         spin_unlock(&block_rsv->lock);
4148 }
4149
4150 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4151 {
4152         struct btrfs_space_info *space_info;
4153
4154         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4155         fs_info->chunk_block_rsv.space_info = space_info;
4156
4157         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4158         fs_info->global_block_rsv.space_info = space_info;
4159         fs_info->delalloc_block_rsv.space_info = space_info;
4160         fs_info->trans_block_rsv.space_info = space_info;
4161         fs_info->empty_block_rsv.space_info = space_info;
4162         fs_info->delayed_block_rsv.space_info = space_info;
4163
4164         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4165         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4166         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4167         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4168         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4169
4170         update_global_block_rsv(fs_info);
4171 }
4172
4173 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4174 {
4175         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4176                                 (u64)-1);
4177         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4178         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4179         WARN_ON(fs_info->trans_block_rsv.size > 0);
4180         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4181         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4182         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4183         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4184         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4185 }
4186
4187 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4188                                   struct btrfs_root *root)
4189 {
4190         if (!trans->bytes_reserved)
4191                 return;
4192
4193         trace_btrfs_space_reservation(root->fs_info, "transaction",
4194                                       (u64)(unsigned long)trans,
4195                                       trans->bytes_reserved, 0);
4196         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4197         trans->bytes_reserved = 0;
4198 }
4199
4200 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4201                                   struct inode *inode)
4202 {
4203         struct btrfs_root *root = BTRFS_I(inode)->root;
4204         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4205         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4206
4207         /*
4208          * We need to hold space in order to delete our orphan item once we've
4209          * added it, so this takes the reservation so we can release it later
4210          * when we are truly done with the orphan item.
4211          */
4212         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4213         trace_btrfs_space_reservation(root->fs_info, "orphan",
4214                                       btrfs_ino(inode), num_bytes, 1);
4215         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4216 }
4217
4218 void btrfs_orphan_release_metadata(struct inode *inode)
4219 {
4220         struct btrfs_root *root = BTRFS_I(inode)->root;
4221         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4222         trace_btrfs_space_reservation(root->fs_info, "orphan",
4223                                       btrfs_ino(inode), num_bytes, 0);
4224         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4225 }
4226
4227 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4228                                 struct btrfs_pending_snapshot *pending)
4229 {
4230         struct btrfs_root *root = pending->root;
4231         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4232         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4233         /*
4234          * two for root back/forward refs, two for directory entries
4235          * and one for root of the snapshot.
4236          */
4237         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4238         dst_rsv->space_info = src_rsv->space_info;
4239         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4240 }
4241
4242 /**
4243  * drop_outstanding_extent - drop an outstanding extent
4244  * @inode: the inode we're dropping the extent for
4245  *
4246  * This is called when we are freeing up an outstanding extent, either after
4247  * an error or after an extent is written.  This will return the number of
4248  * reserved extents that need to be freed.  This must be called with
4249  * BTRFS_I(inode)->lock held.
4250  */
4251 static unsigned drop_outstanding_extent(struct inode *inode)
4252 {
4253         unsigned drop_inode_space = 0;
4254         unsigned dropped_extents = 0;
4255
4256         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4257         BTRFS_I(inode)->outstanding_extents--;
4258
4259         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4260             BTRFS_I(inode)->delalloc_meta_reserved) {
4261                 drop_inode_space = 1;
4262                 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4263         }
4264
4265         /*
4266          * If we have as many or more outstanding extents than we have
4267          * reserved, then we need to leave the reserved extents count alone.
4268          */
4269         if (BTRFS_I(inode)->outstanding_extents >=
4270             BTRFS_I(inode)->reserved_extents)
4271                 return drop_inode_space;
4272
4273         dropped_extents = BTRFS_I(inode)->reserved_extents -
4274                 BTRFS_I(inode)->outstanding_extents;
4275         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4276         return dropped_extents + drop_inode_space;
4277 }
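
/*
 * Example of the accounting above: an inode with outstanding_extents
 * == 3 and reserved_extents == 5 drops to 2 outstanding when one
 * extent completes.  2 < 5, so 5 - 2 = 3 reserved extents are
 * returned to be freed and reserved_extents becomes 2.  Had this been
 * the last outstanding extent with delalloc_meta_reserved still set,
 * one more would have been returned for the inode update reservation.
 */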
4278
4279 /**
4280  * calc_csum_metadata_size - return the amount of metadata space that must be
4281  *      reserved/freed for the given bytes.
4282  * @inode: the inode we're manipulating
4283  * @num_bytes: the number of bytes in question
4284  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4285  *
4286  * This adjusts the number of csum_bytes in the inode and then returns the
4287  * correct amount of metadata that must either be reserved or freed.  We
4288  * calculate how many checksums we can fit into one leaf and then divide the
4289  * number of bytes that will need to be checksummed by this value to figure out
4290  * how many checksums will be required.  If we are adding bytes then the number
4291  * may go up and we will return the number of additional bytes that must be
4292  * reserved.  If it is going down we will return the number of bytes that must
4293  * be freed.
4294  *
4295  * This must be called with BTRFS_I(inode)->lock held.
4296  */
4297 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4298                                    int reserve)
4299 {
4300         struct btrfs_root *root = BTRFS_I(inode)->root;
4301         u64 csum_size;
4302         int num_csums_per_leaf;
4303         int num_csums;
4304         int old_csums;
4305
4306         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4307             BTRFS_I(inode)->csum_bytes == 0)
4308                 return 0;
4309
4310         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4311         if (reserve)
4312                 BTRFS_I(inode)->csum_bytes += num_bytes;
4313         else
4314                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4315         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4316         num_csums_per_leaf = (int)div64_u64(csum_size,
4317                                             sizeof(struct btrfs_csum_item) +
4318                                             sizeof(struct btrfs_disk_key));
4319         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4320         num_csums = num_csums + num_csums_per_leaf - 1;
4321         num_csums = num_csums / num_csums_per_leaf;
4322
4323         old_csums = old_csums + num_csums_per_leaf - 1;
4324         old_csums = old_csums / num_csums_per_leaf;
4325
4326         /* No change, no need to reserve more */
4327         if (old_csums == num_csums)
4328                 return 0;
4329
4330         if (reserve)
4331                 return btrfs_calc_trans_metadata_size(root,
4332                                                       num_csums - old_csums);
4333
4334         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4335 }
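
/*
 * Rough walk-through of the math above: growing csum_bytes from 0 to
 * 1MiB with a 4KiB sectorsize makes num_csums 256.  If N checksums
 * fit in a leaf (computed conservatively above, as though each one
 * cost a full btrfs_csum_item plus btrfs_disk_key), the new leaf
 * count is DIV_ROUND_UP(256, N) while the old count was 0, and we
 * return btrfs_calc_trans_metadata_size() of the difference.  Run
 * with reserve == 0 the same math yields the bytes to hand back as
 * csum_bytes shrinks.
 */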
4336
4337 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4338 {
4339         struct btrfs_root *root = BTRFS_I(inode)->root;
4340         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4341         u64 to_reserve = 0;
4342         u64 csum_bytes;
4343         unsigned nr_extents = 0;
4344         int extra_reserve = 0;
4345         int flush = 1;
4346         int ret;
4347
4348         /* Need to be holding the i_mutex here unless this is the free space cache inode */
4349         if (btrfs_is_free_space_inode(root, inode))
4350                 flush = 0;
4351
4352         if (flush && btrfs_transaction_in_commit(root->fs_info))
4353                 schedule_timeout(1);
4354
4355         mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4356         num_bytes = ALIGN(num_bytes, root->sectorsize);
4357
4358         spin_lock(&BTRFS_I(inode)->lock);
4359         BTRFS_I(inode)->outstanding_extents++;
4360
4361         if (BTRFS_I(inode)->outstanding_extents >
4362             BTRFS_I(inode)->reserved_extents)
4363                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4364                         BTRFS_I(inode)->reserved_extents;
4365
4366         /*
4367          * Add an item to reserve for updating the inode when we complete the
4368          * delalloc io.
4369          */
4370         if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4371                 nr_extents++;
4372                 extra_reserve = 1;
4373         }
4374
4375         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4376         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4377         csum_bytes = BTRFS_I(inode)->csum_bytes;
4378         spin_unlock(&BTRFS_I(inode)->lock);
4379
4380         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4381         if (ret) {
4382                 u64 to_free = 0;
4383                 unsigned dropped;
4384
4385                 spin_lock(&BTRFS_I(inode)->lock);
4386                 dropped = drop_outstanding_extent(inode);
4387                 /*
4388                  * If the inode's csum_bytes is the same as the original
4389                  * csum_bytes then we know we haven't raced with any free()ers,
4390                  * so we can just reduce our inode's csum_bytes and carry on.
4391                  * Otherwise we have to do the normal free thing to account for
4392                  * the case that the free side didn't free up its reserve
4393                  * because of this outstanding reservation.
4394                  */
4395                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4396                         calc_csum_metadata_size(inode, num_bytes, 0);
4397                 else
4398                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4399                 spin_unlock(&BTRFS_I(inode)->lock);
4400                 if (dropped)
4401                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4402
4403                 if (to_free) {
4404                         btrfs_block_rsv_release(root, block_rsv, to_free);
4405                         trace_btrfs_space_reservation(root->fs_info,
4406                                                       "delalloc",
4407                                                       btrfs_ino(inode),
4408                                                       to_free, 0);
4409                 }
4410                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4411                 return ret;
4412         }
4413
4414         spin_lock(&BTRFS_I(inode)->lock);
4415         if (extra_reserve) {
4416                 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4417                 nr_extents--;
4418         }
4419         BTRFS_I(inode)->reserved_extents += nr_extents;
4420         spin_unlock(&BTRFS_I(inode)->lock);
4421         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4422
4423         if (to_reserve)
4424                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4425                                               btrfs_ino(inode), to_reserve, 1);
4426         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4427
4428         return 0;
4429 }
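
/*
 * Why the csum_bytes comparison in the failure path above matters
 * (sketch): BTRFS_I(inode)->lock is dropped around
 * reserve_metadata_bytes(), so a concurrent releaser may run
 * calc_csum_metadata_size() against our inflated csum_bytes and free
 * fewer bytes than it otherwise would have.  If csum_bytes changed
 * while we slept we therefore free the computed shortfall ourselves;
 * if it did not change, simply undoing our own increment is enough.
 */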
4430
4431 /**
4432  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4433  * @inode: the inode to release the reservation for
4434  * @num_bytes: the number of bytes we're releasing
4435  *
4436  * This will release the metadata reservation for an inode.  This can be called
4437  * once we complete IO for a given set of bytes to release their metadata
4438  * reservations.
4439  */
4440 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4441 {
4442         struct btrfs_root *root = BTRFS_I(inode)->root;
4443         u64 to_free = 0;
4444         unsigned dropped;
4445
4446         num_bytes = ALIGN(num_bytes, root->sectorsize);
4447         spin_lock(&BTRFS_I(inode)->lock);
4448         dropped = drop_outstanding_extent(inode);
4449
4450         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4451         spin_unlock(&BTRFS_I(inode)->lock);
4452         if (dropped > 0)
4453                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4454
4455         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4456                                       btrfs_ino(inode), to_free, 0);
4457         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4458                                 to_free);
4459 }
4460
4461 /**
4462  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4463  * @inode: inode we're writing to
4464  * @num_bytes: the number of bytes we want to allocate
4465  *
4466  * This will do the following things
4467  *
4468  * o reserve space in the data space info for num_bytes
4469  * o reserve space in the metadata space info based on number of outstanding
4470  *   extents and how much csums will be needed
4471  * o add to the inode's ->delalloc_bytes
4472  * o add it to the fs_info's delalloc inodes list.
4473  *
4474  * This will return 0 for success and -ENOSPC if there is no space left.
4475  */
4476 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4477 {
4478         int ret;
4479
4480         ret = btrfs_check_data_free_space(inode, num_bytes);
4481         if (ret)
4482                 return ret;
4483
4484         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4485         if (ret) {
4486                 btrfs_free_reserved_data_space(inode, num_bytes);
4487                 return ret;
4488         }
4489
4490         return 0;
4491 }
4492
4493 /**
4494  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4495  * @inode: inode we're releasing space for
4496  * @num_bytes: the number of bytes we want to free up
4497  *
4498  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4499  * called in the case that we don't need the metadata AND data reservations
4500  * anymore, for example after an error or after we insert an inline extent.
4501  *
4502  * This function will release the metadata space that was not used and will
4503  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4504  * list if there are no delalloc bytes left.
4505  */
4506 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4507 {
4508         btrfs_delalloc_release_metadata(inode, num_bytes);
4509         btrfs_free_reserved_data_space(inode, num_bytes);
4510 }
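
/*
 * Typical pairing, sketched with error handling elided: a buffered
 * write path reserves both halves up front and unwinds on failure,
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	...dirty the pages...
 *	if (failed)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *
 * On success the metadata half is released later, via
 * btrfs_delalloc_release_metadata() as the delalloc IO completes,
 * while the data reservation is consumed by the allocated extent.
 */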
4511
4512 static int update_block_group(struct btrfs_trans_handle *trans,
4513                               struct btrfs_root *root,
4514                               u64 bytenr, u64 num_bytes, int alloc)
4515 {
4516         struct btrfs_block_group_cache *cache = NULL;
4517         struct btrfs_fs_info *info = root->fs_info;
4518         u64 total = num_bytes;
4519         u64 old_val;
4520         u64 byte_in_group;
4521         int factor;
4522
4523         /* block accounting for super block */
4524         spin_lock(&info->delalloc_lock);
4525         old_val = btrfs_super_bytes_used(info->super_copy);
4526         if (alloc)
4527                 old_val += num_bytes;
4528         else
4529                 old_val -= num_bytes;
4530         btrfs_set_super_bytes_used(info->super_copy, old_val);
4531         spin_unlock(&info->delalloc_lock);
4532
4533         while (total) {
4534                 cache = btrfs_lookup_block_group(info, bytenr);
4535                 if (!cache)
4536                         return -1;
4537                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4538                                     BTRFS_BLOCK_GROUP_RAID1 |
4539                                     BTRFS_BLOCK_GROUP_RAID10))
4540                         factor = 2;
4541                 else
4542                         factor = 1;
4543                 /*
4544                  * If this block group has free space cache written out, we
4545                  * need to make sure to load it if we are removing space.  This
4546                  * is because we need the unpinning stage to actually add the
4547                  * space back to the block group, otherwise we will leak space.
4548                  */
4549                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4550                         cache_block_group(cache, trans, NULL, 1);
4551
4552                 byte_in_group = bytenr - cache->key.objectid;
4553                 WARN_ON(byte_in_group > cache->key.offset);
4554
4555                 spin_lock(&cache->space_info->lock);
4556                 spin_lock(&cache->lock);
4557
4558                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4559                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4560                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4561
4562                 cache->dirty = 1;
4563                 old_val = btrfs_block_group_used(&cache->item);
4564                 num_bytes = min(total, cache->key.offset - byte_in_group);
4565                 if (alloc) {
4566                         old_val += num_bytes;
4567                         btrfs_set_block_group_used(&cache->item, old_val);
4568                         cache->reserved -= num_bytes;
4569                         cache->space_info->bytes_reserved -= num_bytes;
4570                         cache->space_info->bytes_used += num_bytes;
4571                         cache->space_info->disk_used += num_bytes * factor;
4572                         spin_unlock(&cache->lock);
4573                         spin_unlock(&cache->space_info->lock);
4574                 } else {
4575                         old_val -= num_bytes;
4576                         btrfs_set_block_group_used(&cache->item, old_val);
4577                         cache->pinned += num_bytes;
4578                         cache->space_info->bytes_pinned += num_bytes;
4579                         cache->space_info->bytes_used -= num_bytes;
4580                         cache->space_info->disk_used -= num_bytes * factor;
4581                         spin_unlock(&cache->lock);
4582                         spin_unlock(&cache->space_info->lock);
4583
4584                         set_extent_dirty(info->pinned_extents,
4585                                          bytenr, bytenr + num_bytes - 1,
4586                                          GFP_NOFS | __GFP_NOFAIL);
4587                 }
4588                 btrfs_put_block_group(cache);
4589                 total -= num_bytes;
4590                 bytenr += num_bytes;
4591         }
4592         return 0;
4593 }
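
/*
 * The factor above keeps disk_used honest for profiles that write two
 * copies: freeing a 1MiB extent from a RAID1 or DUP block group, for
 * example, drops bytes_used by 1MiB but disk_used by 2MiB because
 * both copies give their space back.  Single and RAID0 groups use a
 * factor of 1.
 */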
4594
4595 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4596 {
4597         struct btrfs_block_group_cache *cache;
4598         u64 bytenr;
4599
4600         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4601         if (!cache)
4602                 return 0;
4603
4604         bytenr = cache->key.objectid;
4605         btrfs_put_block_group(cache);
4606
4607         return bytenr;
4608 }
4609
4610 static int pin_down_extent(struct btrfs_root *root,
4611                            struct btrfs_block_group_cache *cache,
4612                            u64 bytenr, u64 num_bytes, int reserved)
4613 {
4614         spin_lock(&cache->space_info->lock);
4615         spin_lock(&cache->lock);
4616         cache->pinned += num_bytes;
4617         cache->space_info->bytes_pinned += num_bytes;
4618         if (reserved) {
4619                 cache->reserved -= num_bytes;
4620                 cache->space_info->bytes_reserved -= num_bytes;
4621         }
4622         spin_unlock(&cache->lock);
4623         spin_unlock(&cache->space_info->lock);
4624
4625         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4626                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4627         return 0;
4628 }
4629
4630 /*
4631  * this function must be called within transaction
4632  */
4633 int btrfs_pin_extent(struct btrfs_root *root,
4634                      u64 bytenr, u64 num_bytes, int reserved)
4635 {
4636         struct btrfs_block_group_cache *cache;
4637
4638         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4639         BUG_ON(!cache);
4640
4641         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4642
4643         btrfs_put_block_group(cache);
4644         return 0;
4645 }
4646
4647 /*
4648  * this function must be called within transaction
4649  */
4650 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4651                                     struct btrfs_root *root,
4652                                     u64 bytenr, u64 num_bytes)
4653 {
4654         struct btrfs_block_group_cache *cache;
4655
4656         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4657         BUG_ON(!cache);
4658
4659         /*
4660          * pull in the free space cache (if any) so that our pin
4661          * removes the free space from the cache.  We have load_only set
4662          * to one because the slow code to read in the free extents does check
4663          * the pinned extents.
4664          */
4665         cache_block_group(cache, trans, root, 1);
4666
4667         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4668
4669         /* remove us from the free space cache (if we're there at all) */
4670         btrfs_remove_free_space(cache, bytenr, num_bytes);
4671         btrfs_put_block_group(cache);
4672         return 0;
4673 }
4674
4675 /**
4676  * btrfs_update_reserved_bytes - update the block_group and space info counters
4677  * @cache:      The cache we are manipulating
4678  * @num_bytes:  The number of bytes in question
4679  * @reserve:    One of the reservation enums
4680  *
4681  * This is called by the allocator when it reserves space, or by somebody who is
4682  * freeing space that was never actually used on disk.  For example if you
4683  * reserve some space for a new leaf in transaction A and before transaction A
4684  * commits you free that leaf, you call this with RESERVE_FREE in order to
4685  * clear the reservation.
4686  *
4687  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4688  * ENOSPC accounting.  For data we handle the reservation through clearing the
4689  * delalloc bits in the io_tree.  We have to do this since we could end up
4690  * allocating less disk space for the amount of data we have reserved in the
4691  * case of compression.
4692  *
4693  * If this is a reservation and the block group has become read only we cannot
4694  * make the reservation and return -EAGAIN, otherwise this function always
4695  * succeeds.
4696  */
4697 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4698                                        u64 num_bytes, int reserve)
4699 {
4700         struct btrfs_space_info *space_info = cache->space_info;
4701         int ret = 0;
4702         spin_lock(&space_info->lock);
4703         spin_lock(&cache->lock);
4704         if (reserve != RESERVE_FREE) {
4705                 if (cache->ro) {
4706                         ret = -EAGAIN;
4707                 } else {
4708                         cache->reserved += num_bytes;
4709                         space_info->bytes_reserved += num_bytes;
4710                         if (reserve == RESERVE_ALLOC) {
4711                                 trace_btrfs_space_reservation(cache->fs_info,
4712                                               "space_info",
4713                                               (u64)(unsigned long)space_info,
4714                                               num_bytes, 0);
4715                                 space_info->bytes_may_use -= num_bytes;
4716                         }
4717                 }
4718         } else {
4719                 if (cache->ro)
4720                         space_info->bytes_readonly += num_bytes;
4721                 cache->reserved -= num_bytes;
4722                 space_info->bytes_reserved -= num_bytes;
4723                 space_info->reservation_progress++;
4724         }
4725         spin_unlock(&cache->lock);
4726         spin_unlock(&space_info->lock);
4727         return ret;
4728 }
4729
4730 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4731                                 struct btrfs_root *root)
4732 {
4733         struct btrfs_fs_info *fs_info = root->fs_info;
4734         struct btrfs_caching_control *next;
4735         struct btrfs_caching_control *caching_ctl;
4736         struct btrfs_block_group_cache *cache;
4737
4738         down_write(&fs_info->extent_commit_sem);
4739
4740         list_for_each_entry_safe(caching_ctl, next,
4741                                  &fs_info->caching_block_groups, list) {
4742                 cache = caching_ctl->block_group;
4743                 if (block_group_cache_done(cache)) {
4744                         cache->last_byte_to_unpin = (u64)-1;
4745                         list_del_init(&caching_ctl->list);
4746                         put_caching_control(caching_ctl);
4747                 } else {
4748                         cache->last_byte_to_unpin = caching_ctl->progress;
4749                 }
4750         }
4751
4752         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4753                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4754         else
4755                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4756
4757         up_write(&fs_info->extent_commit_sem);
4758
4759         update_global_block_rsv(fs_info);
4760         return 0;
4761 }
4762
4763 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4764 {
4765         struct btrfs_fs_info *fs_info = root->fs_info;
4766         struct btrfs_block_group_cache *cache = NULL;
4767         u64 len;
4768
4769         while (start <= end) {
4770                 if (!cache ||
4771                     start >= cache->key.objectid + cache->key.offset) {
4772                         if (cache)
4773                                 btrfs_put_block_group(cache);
4774                         cache = btrfs_lookup_block_group(fs_info, start);
4775                         BUG_ON(!cache);
4776                 }
4777
4778                 len = cache->key.objectid + cache->key.offset - start;
4779                 len = min(len, end + 1 - start);
4780
4781                 if (start < cache->last_byte_to_unpin) {
4782                         len = min(len, cache->last_byte_to_unpin - start);
4783                         btrfs_add_free_space(cache, start, len);
4784                 }
4785
4786                 start += len;
4787
4788                 spin_lock(&cache->space_info->lock);
4789                 spin_lock(&cache->lock);
4790                 cache->pinned -= len;
4791                 cache->space_info->bytes_pinned -= len;
4792                 if (cache->ro)
4793                         cache->space_info->bytes_readonly += len;
4794                 spin_unlock(&cache->lock);
4795                 spin_unlock(&cache->space_info->lock);
4796         }
4797
4798         if (cache)
4799                 btrfs_put_block_group(cache);
4800         return 0;
4801 }
4802
4803 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4804                                struct btrfs_root *root)
4805 {
4806         struct btrfs_fs_info *fs_info = root->fs_info;
4807         struct extent_io_tree *unpin;
4808         u64 start;
4809         u64 end;
4810         int ret;
4811
4812         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4813                 unpin = &fs_info->freed_extents[1];
4814         else
4815                 unpin = &fs_info->freed_extents[0];
4816
4817         while (1) {
4818                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4819                                             EXTENT_DIRTY);
4820                 if (ret)
4821                         break;
4822
4823                 if (btrfs_test_opt(root, DISCARD))
4824                         ret = btrfs_discard_extent(root, start,
4825                                                    end + 1 - start, NULL);
4826
4827                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4828                 unpin_extent_range(root, start, end);
4829                 cond_resched();
4830         }
4831
4832         return 0;
4833 }
4834
4835 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4836                                 struct btrfs_root *root,
4837                                 u64 bytenr, u64 num_bytes, u64 parent,
4838                                 u64 root_objectid, u64 owner_objectid,
4839                                 u64 owner_offset, int refs_to_drop,
4840                                 struct btrfs_delayed_extent_op *extent_op)
4841 {
4842         struct btrfs_key key;
4843         struct btrfs_path *path;
4844         struct btrfs_fs_info *info = root->fs_info;
4845         struct btrfs_root *extent_root = info->extent_root;
4846         struct extent_buffer *leaf;
4847         struct btrfs_extent_item *ei;
4848         struct btrfs_extent_inline_ref *iref;
4849         int ret;
4850         int is_data;
4851         int extent_slot = 0;
4852         int found_extent = 0;
4853         int num_to_del = 1;
4854         u32 item_size;
4855         u64 refs;
4856
4857         path = btrfs_alloc_path();
4858         if (!path)
4859                 return -ENOMEM;
4860
4861         path->reada = 1;
4862         path->leave_spinning = 1;
4863
4864         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4865         BUG_ON(!is_data && refs_to_drop != 1);
4866
4867         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4868                                     bytenr, num_bytes, parent,
4869                                     root_objectid, owner_objectid,
4870                                     owner_offset);
4871         if (ret == 0) {
4872                 extent_slot = path->slots[0];
4873                 while (extent_slot >= 0) {
4874                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4875                                               extent_slot);
4876                         if (key.objectid != bytenr)
4877                                 break;
4878                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4879                             key.offset == num_bytes) {
4880                                 found_extent = 1;
4881                                 break;
4882                         }
4883                         if (path->slots[0] - extent_slot > 5)
4884                                 break;
4885                         extent_slot--;
4886                 }
4887 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4888                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4889                 if (found_extent && item_size < sizeof(*ei))
4890                         found_extent = 0;
4891 #endif
4892                 if (!found_extent) {
4893                         BUG_ON(iref);
4894                         ret = remove_extent_backref(trans, extent_root, path,
4895                                                     NULL, refs_to_drop,
4896                                                     is_data);
4897                         BUG_ON(ret);
4898                         btrfs_release_path(path);
4899                         path->leave_spinning = 1;
4900
4901                         key.objectid = bytenr;
4902                         key.type = BTRFS_EXTENT_ITEM_KEY;
4903                         key.offset = num_bytes;
4904
4905                         ret = btrfs_search_slot(trans, extent_root,
4906                                                 &key, path, -1, 1);
4907                         if (ret) {
4908                                 printk(KERN_ERR "umm, got %d back from search"
4909                                        ", was looking for %llu\n", ret,
4910                                        (unsigned long long)bytenr);
4911                                 if (ret > 0)
4912                                         btrfs_print_leaf(extent_root,
4913                                                          path->nodes[0]);
4914                         }
4915                         BUG_ON(ret);
4916                         extent_slot = path->slots[0];
4917                 }
4918         } else {
4919                 btrfs_print_leaf(extent_root, path->nodes[0]);
4920                 WARN_ON(1);
4921                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4922                        "parent %llu root %llu  owner %llu offset %llu\n",
4923                        (unsigned long long)bytenr,
4924                        (unsigned long long)parent,
4925                        (unsigned long long)root_objectid,
4926                        (unsigned long long)owner_objectid,
4927                        (unsigned long long)owner_offset);
4928         }
4929
4930         leaf = path->nodes[0];
4931         item_size = btrfs_item_size_nr(leaf, extent_slot);
4932 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4933         if (item_size < sizeof(*ei)) {
4934                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4935                 ret = convert_extent_item_v0(trans, extent_root, path,
4936                                              owner_objectid, 0);
4937                 BUG_ON(ret < 0);
4938
4939                 btrfs_release_path(path);
4940                 path->leave_spinning = 1;
4941
4942                 key.objectid = bytenr;
4943                 key.type = BTRFS_EXTENT_ITEM_KEY;
4944                 key.offset = num_bytes;
4945
4946                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4947                                         -1, 1);
4948                 if (ret) {
4949                         printk(KERN_ERR "umm, got %d back from search"
4950                                ", was looking for %llu\n", ret,
4951                                (unsigned long long)bytenr);
4952                         btrfs_print_leaf(extent_root, path->nodes[0]);
4953                 }
4954                 BUG_ON(ret);
4955                 extent_slot = path->slots[0];
4956                 leaf = path->nodes[0];
4957                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4958         }
4959 #endif
4960         BUG_ON(item_size < sizeof(*ei));
4961         ei = btrfs_item_ptr(leaf, extent_slot,
4962                             struct btrfs_extent_item);
4963         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4964                 struct btrfs_tree_block_info *bi;
4965                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4966                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4967                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4968         }
4969
4970         refs = btrfs_extent_refs(leaf, ei);
4971         BUG_ON(refs < refs_to_drop);
4972         refs -= refs_to_drop;
4973
4974         if (refs > 0) {
4975                 if (extent_op)
4976                         __run_delayed_extent_op(extent_op, leaf, ei);
4977                 /*
4978                  * In the case of inline back ref, reference count will
4979                  * be updated by remove_extent_backref
4980                  */
4981                 if (iref) {
4982                         BUG_ON(!found_extent);
4983                 } else {
4984                         btrfs_set_extent_refs(leaf, ei, refs);
4985                         btrfs_mark_buffer_dirty(leaf);
4986                 }
4987                 if (found_extent) {
4988                         ret = remove_extent_backref(trans, extent_root, path,
4989                                                     iref, refs_to_drop,
4990                                                     is_data);
4991                         BUG_ON(ret);
4992                 }
4993         } else {
4994                 if (found_extent) {
4995                         BUG_ON(is_data && refs_to_drop !=
4996                                extent_data_ref_count(root, path, iref));
4997                         if (iref) {
4998                                 BUG_ON(path->slots[0] != extent_slot);
4999                         } else {
5000                                 BUG_ON(path->slots[0] != extent_slot + 1);
5001                                 path->slots[0] = extent_slot;
5002                                 num_to_del = 2;
5003                         }
5004                 }
5005
5006                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5007                                       num_to_del);
5008                 BUG_ON(ret);
5009                 btrfs_release_path(path);
5010
5011                 if (is_data) {
5012                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5013                         BUG_ON(ret);
5014                 }
5015
5016                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5017                 BUG_ON(ret);
5018         }
5019         btrfs_free_path(path);
5020         return ret;
5021 }
5022
5023 /*
5024  * when we free a block, it is possible (and likely) that we free the last
5025  * delayed ref for that extent as well.  This searches the delayed ref tree for
5026  * a given extent, and if there are no other delayed refs to be processed, it
5027  * removes it from the tree.
5028  */
5029 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5030                                       struct btrfs_root *root, u64 bytenr)
5031 {
5032         struct btrfs_delayed_ref_head *head;
5033         struct btrfs_delayed_ref_root *delayed_refs;
5034         struct btrfs_delayed_ref_node *ref;
5035         struct rb_node *node;
5036         int ret = 0;
5037
5038         delayed_refs = &trans->transaction->delayed_refs;
5039         spin_lock(&delayed_refs->lock);
5040         head = btrfs_find_delayed_ref_head(trans, bytenr);
5041         if (!head)
5042                 goto out;
5043
5044         node = rb_prev(&head->node.rb_node);
5045         if (!node)
5046                 goto out;
5047
5048         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5049
5050         /* there are still entries for this ref, we can't drop it */
5051         if (ref->bytenr == bytenr)
5052                 goto out;
5053
5054         if (head->extent_op) {
5055                 if (!head->must_insert_reserved)
5056                         goto out;
5057                 kfree(head->extent_op);
5058                 head->extent_op = NULL;
5059         }
5060
5061         /*
5062          * waiting for the lock here would deadlock.  If someone else has it
5063          * locked they are already in the process of dropping it anyway
5064          */
5065         if (!mutex_trylock(&head->mutex))
5066                 goto out;
5067
5068         /*
5069          * at this point we have a head with no other entries.  Go
5070          * ahead and process it.
5071          */
5072         head->node.in_tree = 0;
5073         rb_erase(&head->node.rb_node, &delayed_refs->root);
5074
5075         delayed_refs->num_entries--;
5076         if (waitqueue_active(&delayed_refs->seq_wait))
5077                 wake_up(&delayed_refs->seq_wait);
5078
5079         /*
5080          * we don't take a ref on the node because we're removing it from the
5081          * tree, so we just steal the ref the tree was holding.
5082          */
5083         delayed_refs->num_heads--;
5084         if (list_empty(&head->cluster))
5085                 delayed_refs->num_heads_ready--;
5086
5087         list_del_init(&head->cluster);
5088         spin_unlock(&delayed_refs->lock);
5089
5090         BUG_ON(head->extent_op);
5091         if (head->must_insert_reserved)
5092                 ret = 1;
5093
5094         mutex_unlock(&head->mutex);
5095         btrfs_put_delayed_ref(&head->node);
5096         return ret;
5097 out:
5098         spin_unlock(&delayed_refs->lock);
5099         return 0;
5100 }
5101
5102 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5103                            struct btrfs_root *root,
5104                            struct extent_buffer *buf,
5105                            u64 parent, int last_ref, int for_cow)
5106 {
5107         struct btrfs_block_group_cache *cache = NULL;
5108         int ret;
5109
5110         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5111                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5112                                         buf->start, buf->len,
5113                                         parent, root->root_key.objectid,
5114                                         btrfs_header_level(buf),
5115                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5116                 BUG_ON(ret);
5117         }
5118
5119         if (!last_ref)
5120                 return;
5121
5122         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5123
5124         if (btrfs_header_generation(buf) == trans->transid) {
5125                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5126                         ret = check_ref_cleanup(trans, root, buf->start);
5127                         if (!ret)
5128                                 goto out;
5129                 }
5130
5131                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5132                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5133                         goto out;
5134                 }
5135
5136                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5137
5138                 btrfs_add_free_space(cache, buf->start, buf->len);
5139                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5140         }
5141 out:
5142         /*
5143          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5144          * anymore.
5145          */
5146         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5147         btrfs_put_block_group(cache);
5148 }
5149
5150 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5151                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5152                       u64 owner, u64 offset, int for_cow)
5153 {
5154         int ret;
5155         struct btrfs_fs_info *fs_info = root->fs_info;
5156
5157         /*
5158          * tree log blocks never actually go into the extent allocation
5159          * tree, just update pinning info and exit early.
5160          */
5161         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5162                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5163                 /* unlocks the pinned mutex */
5164                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5165                 ret = 0;
5166         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5167                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5168                                         num_bytes,
5169                                         parent, root_objectid, (int)owner,
5170                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5171                 BUG_ON(ret);
5172         } else {
5173                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5174                                                 num_bytes,
5175                                                 parent, root_objectid, owner,
5176                                                 offset, BTRFS_DROP_DELAYED_REF,
5177                                                 NULL, for_cow);
5178                 BUG_ON(ret);
5179         }
5180         return ret;
5181 }
5182
5183 static u64 stripe_align(struct btrfs_root *root, u64 val)
5184 {
5185         u64 mask = ((u64)root->stripesize - 1);
5186         u64 ret = (val + mask) & ~mask;
5187         return ret;
5188 }
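
/*
 * stripe_align() rounds val up to the next stripesize boundary and
 * relies on stripesize being a power of two.  For example, with a
 * 64KiB stripesize the mask is 0xffff and val = 0x12345 becomes
 * (0x12345 + 0xffff) & ~0xffff = 0x20000.
 */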
5189
5190 /*
5191  * when we wait for progress in the block group caching, it's because
5192  * our allocation attempt failed at least once.  So, we must sleep
5193  * and let some progress happen before we try again.
5194  *
5195  * This function will sleep at least once waiting for new free space to
5196  * show up, and then it will check the block group free space numbers
5197  * for our min num_bytes.  Another option is to have it go ahead
5198  * and look in the rbtree for a free extent of a given size, but this
5199  * is a good start.
5200  */
5201 static noinline int
5202 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5203                                 u64 num_bytes)
5204 {
5205         struct btrfs_caching_control *caching_ctl;
5206         DEFINE_WAIT(wait);
5207
5208         caching_ctl = get_caching_control(cache);
5209         if (!caching_ctl)
5210                 return 0;
5211
5212         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5213                    (cache->free_space_ctl->free_space >= num_bytes));
5214
5215         put_caching_control(caching_ctl);
5216         return 0;
5217 }
5218
5219 static noinline int
5220 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5221 {
5222         struct btrfs_caching_control *caching_ctl;
5223         DEFINE_WAIT(wait);
5224
5225         caching_ctl = get_caching_control(cache);
5226         if (!caching_ctl)
5227                 return 0;
5228
5229         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5230
5231         put_caching_control(caching_ctl);
5232         return 0;
5233 }
5234
5235 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5236 {
5237         int index;
5238         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
5239                 index = 0;
5240         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
5241                 index = 1;
5242         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
5243                 index = 2;
5244         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
5245                 index = 3;
5246         else
5247                 index = 4;
5248         return index;
5249 }
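
/*
 * The index above orders each space_info's block group lists from the
 * most redundant profile to the least: RAID10, RAID1, DUP, RAID0,
 * then single.  find_free_extent() walks the lists by index, so an
 * allocation in, say, a RAID1 filesystem tries index 1 groups before
 * falling through to any plain single groups that may also exist.
 */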
5250
5251 enum btrfs_loop_type {
5252         LOOP_CACHING_NOWAIT = 0,
5253         LOOP_CACHING_WAIT = 1,
5254         LOOP_ALLOC_CHUNK = 2,
5255         LOOP_NO_EMPTY_SIZE = 3,
5256 };
5257
5258 /*
5259  * walks the btree of allocated extents and finds a hole of a given size.
5260  * The key ins is changed to record the hole:
5261  * ins->objectid == block start
5262  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5263  * ins->offset == number of blocks
5264  * Any available blocks before search_start are skipped.
5265  */
5266 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5267                                      struct btrfs_root *orig_root,
5268                                      u64 num_bytes, u64 empty_size,
5269                                      u64 hint_byte, struct btrfs_key *ins,
5270                                      u64 data)
5271 {
5272         int ret = 0;
5273         struct btrfs_root *root = orig_root->fs_info->extent_root;
5274         struct btrfs_free_cluster *last_ptr = NULL;
5275         struct btrfs_block_group_cache *block_group = NULL;
5276         struct btrfs_block_group_cache *used_block_group;
5277         u64 search_start = 0;
5278         int empty_cluster = 2 * 1024 * 1024;
5279         int allowed_chunk_alloc = 0;
5280         int done_chunk_alloc = 0;
5281         struct btrfs_space_info *space_info;
5282         int loop = 0;
5283         int index = 0;
5284         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5285                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5286         bool found_uncached_bg = false;
5287         bool failed_cluster_refill = false;
5288         bool failed_alloc = false;
5289         bool use_cluster = true;
5290         bool have_caching_bg = false;
5291
5292         WARN_ON(num_bytes < root->sectorsize);
5293         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5294         ins->objectid = 0;
5295         ins->offset = 0;
5296
5297         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5298
5299         space_info = __find_space_info(root->fs_info, data);
5300         if (!space_info) {
5301                 printk(KERN_ERR "No space info for %llu\n", data);
5302                 return -ENOSPC;
5303         }
5304
5305         /*
5306          * If the space info is for both data and metadata it means we have a
5307          * small filesystem and we can't use the clustering stuff.
5308          */
5309         if (btrfs_mixed_space_info(space_info))
5310                 use_cluster = false;
5311
5312         if (orig_root->ref_cows || empty_size)
5313                 allowed_chunk_alloc = 1;
5314
5315         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5316                 last_ptr = &root->fs_info->meta_alloc_cluster;
5317                 if (!btrfs_test_opt(root, SSD))
5318                         empty_cluster = 64 * 1024;
5319         }
5320
5321         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5322             btrfs_test_opt(root, SSD)) {
5323                 last_ptr = &root->fs_info->data_alloc_cluster;
5324         }
5325
5326         if (last_ptr) {
5327                 spin_lock(&last_ptr->lock);
5328                 if (last_ptr->block_group)
5329                         hint_byte = last_ptr->window_start;
5330                 spin_unlock(&last_ptr->lock);
5331         }
5332
5333         search_start = max(search_start, first_logical_byte(root, 0));
5334         search_start = max(search_start, hint_byte);
5335
5336         if (!last_ptr)
5337                 empty_cluster = 0;
5338
5339         if (search_start == hint_byte) {
5340                 block_group = btrfs_lookup_block_group(root->fs_info,
5341                                                        search_start);
5342                 used_block_group = block_group;
5343                 /*
5344                  * we don't want to use the block group if it doesn't match our
5345          * allocation bits, or if it's not cached.
5346                  *
5347                  * However if we are re-searching with an ideal block group
5348                  * picked out then we don't care that the block group is cached.
5349                  */
5350                 if (block_group && block_group_bits(block_group, data) &&
5351                     block_group->cached != BTRFS_CACHE_NO) {
5352                         down_read(&space_info->groups_sem);
5353                         if (list_empty(&block_group->list) ||
5354                             block_group->ro) {
5355                                 /*
5356                                  * someone is removing this block group,
5357                                  * we can't jump into the have_block_group
5358                                  * target because our list pointers are not
5359                                  * valid
5360                                  */
5361                                 btrfs_put_block_group(block_group);
5362                                 up_read(&space_info->groups_sem);
5363                         } else {
5364                                 index = get_block_group_index(block_group);
5365                                 goto have_block_group;
5366                         }
5367                 } else if (block_group) {
5368                         btrfs_put_block_group(block_group);
5369                 }
5370         }
5371 search:
5372         have_caching_bg = false;
5373         down_read(&space_info->groups_sem);
5374         list_for_each_entry(block_group, &space_info->block_groups[index],
5375                             list) {
5376                 u64 offset;
5377                 int cached;
5378
5379                 used_block_group = block_group;
5380                 btrfs_get_block_group(block_group);
5381                 search_start = block_group->key.objectid;
5382
5383                 /*
5384                  * this can happen if we end up cycling through all the
5385                  * raid types, but we want to make sure we only allocate
5386                  * for the proper type.
5387                  */
5388                 if (!block_group_bits(block_group, data)) {
5389                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5390                                     BTRFS_BLOCK_GROUP_RAID1 |
5391                                     BTRFS_BLOCK_GROUP_RAID10;
5392
5393                         /*
5394                          * if they asked for extra copies and this block group
5395                          * doesn't provide them, bail.  This does allow us to
5396                          * fill raid0 from raid1.
5397                          */
5398                         if ((data & extra) && !(block_group->flags & extra))
5399                                 goto loop;
5400                 }
5401
5402 have_block_group:
5403                 cached = block_group_cache_done(block_group);
5404                 if (unlikely(!cached)) {
5405                         found_uncached_bg = true;
5406                         ret = cache_block_group(block_group, trans,
5407                                                 orig_root, 0);
5408                         BUG_ON(ret);
5409                 }
5410
5411                 if (unlikely(block_group->ro))
5412                         goto loop;
5413
5414                 /*
5415                  * Ok we want to try and use the cluster allocator, so
5416                  * let's look there
5417                  */
5418                 if (last_ptr) {
5419                         /*
5420                          * the refill lock keeps out other
5421                          * people trying to start a new cluster
5422                          */
5423                         spin_lock(&last_ptr->refill_lock);
5424                         used_block_group = last_ptr->block_group;
5425                         if (used_block_group != block_group &&
5426                             (!used_block_group ||
5427                              used_block_group->ro ||
5428                              !block_group_bits(used_block_group, data))) {
5429                                 used_block_group = block_group;
5430                                 goto refill_cluster;
5431                         }
5432
5433                         if (used_block_group != block_group)
5434                                 btrfs_get_block_group(used_block_group);
5435
5436                         offset = btrfs_alloc_from_cluster(used_block_group,
5437                           last_ptr, num_bytes, used_block_group->key.objectid);
5438                         if (offset) {
5439                                 /* we have a block, we're done */
5440                                 spin_unlock(&last_ptr->refill_lock);
5441                                 trace_btrfs_reserve_extent_cluster(root,
5442                                         block_group, search_start, num_bytes);
5443                                 goto checks;
5444                         }
5445
5446                         WARN_ON(last_ptr->block_group != used_block_group);
5447                         if (used_block_group != block_group) {
5448                                 btrfs_put_block_group(used_block_group);
5449                                 used_block_group = block_group;
5450                         }
5451 refill_cluster:
5452                         BUG_ON(used_block_group != block_group);
5453                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5454                          * set up a new cluster, so let's just skip it
5455                          * and let the allocator find whatever block
5456                          * it can find.  If we reach this point, we
5457                          * will have tried the cluster allocator
5458                          * plenty of times and not have found
5459                          * anything, so we are likely way too
5460                          * fragmented for the clustering stuff to find
5461                          * anything.
5462                          *
5463                          * However, if the cluster is taken from the
5464                          * current block group, release the cluster
5465                          * first, so that we stand a better chance of
5466                          * succeeding in the unclustered
5467                          * allocation.  */
5468                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5469                             last_ptr->block_group != block_group) {
5470                                 spin_unlock(&last_ptr->refill_lock);
5471                                 goto unclustered_alloc;
5472                         }
5473
5474                         /*
5475                          * this cluster didn't work out, free it and
5476                          * start over
5477                          */
5478                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5479
5480                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5481                                 spin_unlock(&last_ptr->refill_lock);
5482                                 goto unclustered_alloc;
5483                         }
5484
5485                         /* allocate a cluster in this block group */
5486                         ret = btrfs_find_space_cluster(trans, root,
5487                                                block_group, last_ptr,
5488                                                search_start, num_bytes,
5489                                                empty_cluster + empty_size);
5490                         if (ret == 0) {
5491                                 /*
5492                                  * now pull our allocation out of this
5493                                  * cluster
5494                                  */
5495                                 offset = btrfs_alloc_from_cluster(block_group,
5496                                                   last_ptr, num_bytes,
5497                                                   search_start);
5498                                 if (offset) {
5499                                         /* we found one, proceed */
5500                                         spin_unlock(&last_ptr->refill_lock);
5501                                         trace_btrfs_reserve_extent_cluster(root,
5502                                                 block_group, search_start,
5503                                                 num_bytes);
5504                                         goto checks;
5505                                 }
5506                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
5507                                    !failed_cluster_refill) {
5508                                 spin_unlock(&last_ptr->refill_lock);
5509
5510                                 failed_cluster_refill = true;
5511                                 wait_block_group_cache_progress(block_group,
5512                                        num_bytes + empty_cluster + empty_size);
5513                                 goto have_block_group;
5514                         }
5515
5516                         /*
5517                          * at this point we either didn't find a cluster
5518                          * or we weren't able to allocate a block from our
5519                          * cluster.  Free the cluster we've been trying
5520                          * to use, and go to the next block group
5521                          */
5522                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5523                         spin_unlock(&last_ptr->refill_lock);
5524                         goto loop;
5525                 }
5526
5527 unclustered_alloc:
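                /*
                 * quick check under the tree lock: once this block group is
                 * fully cached, free_space is exact, so if it can't cover
                 * num_bytes plus the slack we want there is no point in
                 * searching it.
                 */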
5528                 spin_lock(&block_group->free_space_ctl->tree_lock);
5529                 if (cached &&
5530                     block_group->free_space_ctl->free_space <
5531                     num_bytes + empty_cluster + empty_size) {
5532                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5533                         goto loop;
5534                 }
5535                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5536
5537                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5538                                                     num_bytes, empty_size);
5539                 /*
5540                  * If we didn't find a chunk, and we haven't failed on this
5541                  * block group before, and this block group is in the middle of
5542                  * caching and we are ok with waiting, then go ahead and wait
5543                  * for progress to be made, and set failed_alloc to true.
5544                  *
5545                  * If failed_alloc is true then we've already waited on this
5546                  * block group once and should move on to the next block group.
5547                  */
5548                 if (!offset && !failed_alloc && !cached &&
5549                     loop > LOOP_CACHING_NOWAIT) {
5550                         wait_block_group_cache_progress(block_group,
5551                                                 num_bytes + empty_size);
5552                         failed_alloc = true;
5553                         goto have_block_group;
5554                 } else if (!offset) {
5555                         if (!cached)
5556                                 have_caching_bg = true;
5557                         goto loop;
5558                 }
5559 checks:
5560                 search_start = stripe_align(root, offset);
5561
5562                 /* move on to the next group */
5563                 if (search_start + num_bytes >
5564                     used_block_group->key.objectid + used_block_group->key.offset) {
5565                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5566                         goto loop;
5567                 }
5568
5569                 if (offset < search_start)
5570                         btrfs_add_free_space(used_block_group, offset,
5571                                              search_start - offset);
5572                 BUG_ON(offset > search_start);
5573
5574                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5575                                                   alloc_type);
5576                 if (ret == -EAGAIN) {
5577                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5578                         goto loop;
5579                 }
5580
5581         /* we are all good, let's return */
5582                 ins->objectid = search_start;
5583                 ins->offset = num_bytes;
5584
5585                 trace_btrfs_reserve_extent(orig_root, block_group,
5586                                            search_start, num_bytes);
5591                 if (used_block_group != block_group)
5592                         btrfs_put_block_group(used_block_group);
5593                 btrfs_put_block_group(block_group);
5594                 break;
5595 loop:
5596                 failed_cluster_refill = false;
5597                 failed_alloc = false;
5598                 BUG_ON(index != get_block_group_index(block_group));
5599                 if (used_block_group != block_group)
5600                         btrfs_put_block_group(used_block_group);
5601                 btrfs_put_block_group(block_group);
5602         }
5603         up_read(&space_info->groups_sem);
5604
5605         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5606                 goto search;
5607
5608         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5609                 goto search;
5610
5611         /*
5612          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5613          *                      caching kthreads as we move along
5614          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5615          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5616          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5617          *                      again
5618          */
5619         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5620                 index = 0;
5621                 loop++;
5622                 if (loop == LOOP_ALLOC_CHUNK) {
5623                         if (allowed_chunk_alloc) {
5624                                 ret = do_chunk_alloc(trans, root, num_bytes +
5625                                                      2 * 1024 * 1024, data,
5626                                                      CHUNK_ALLOC_LIMITED);
5627                                 allowed_chunk_alloc = 0;
5628                                 if (ret == 1)
5629                                         done_chunk_alloc = 1;
5630                         } else if (!done_chunk_alloc &&
5631                                    space_info->force_alloc ==
5632                                    CHUNK_ALLOC_NO_FORCE) {
5633                                 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5634                         }
5635
5636                         /*
5637                          * We didn't allocate a chunk, go ahead and drop the
5638                          * empty size and loop again.
5639                          */
5640                         if (!done_chunk_alloc)
5641                                 loop = LOOP_NO_EMPTY_SIZE;
5642                 }
5643
5644                 if (loop == LOOP_NO_EMPTY_SIZE) {
5645                         empty_size = 0;
5646                         empty_cluster = 0;
5647                 }
5648
5649                 goto search;
5650         } else if (!ins->objectid) {
5651                 ret = -ENOSPC;
5652         } else {
5653                 ret = 0;
5654         }
5655
5656         return ret;
5657 }
5658
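/*
 * dump the usage counters of a space_info, and optionally of each of
 * its block groups, to the kernel log.  All values printed are byte
 * counts; this is only used for ENOSPC debugging.
 */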
5659 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5660                             int dump_block_groups)
5661 {
5662         struct btrfs_block_group_cache *cache;
5663         int index = 0;
5664
5665         spin_lock(&info->lock);
5666         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5667                (unsigned long long)info->flags,
5668                (unsigned long long)(info->total_bytes - info->bytes_used -
5669                                     info->bytes_pinned - info->bytes_reserved -
5670                                     info->bytes_readonly),
5671                (info->full) ? "" : "not ");
5672         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5673                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5674                (unsigned long long)info->total_bytes,
5675                (unsigned long long)info->bytes_used,
5676                (unsigned long long)info->bytes_pinned,
5677                (unsigned long long)info->bytes_reserved,
5678                (unsigned long long)info->bytes_may_use,
5679                (unsigned long long)info->bytes_readonly);
5680         spin_unlock(&info->lock);
5681
5682         if (!dump_block_groups)
5683                 return;
5684
5685         down_read(&info->groups_sem);
5686 again:
5687         list_for_each_entry(cache, &info->block_groups[index], list) {
5688                 spin_lock(&cache->lock);
5689                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5690                        "%llu pinned %llu reserved\n",
5691                        (unsigned long long)cache->key.objectid,
5692                        (unsigned long long)cache->key.offset,
5693                        (unsigned long long)btrfs_block_group_used(&cache->item),
5694                        (unsigned long long)cache->pinned,
5695                        (unsigned long long)cache->reserved);
5696                 btrfs_dump_free_space(cache, bytes);
5697                 spin_unlock(&cache->lock);
5698         }
5699         if (++index < BTRFS_NR_RAID_TYPES)
5700                 goto again;
5701         up_read(&info->groups_sem);
5702 }
5703
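/*
 * reserve an extent of at least min_alloc_size bytes.  We first try
 * for num_bytes; on ENOSPC the request is halved, rounded down to a
 * sectorsize boundary and clamped to min_alloc_size, then retried
 * after forcing a chunk allocation.  Once even min_alloc_size fails
 * we give up and return -ENOSPC.  The result is returned through ins
 * (objectid = start, offset = length).
 */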
5704 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5705                          struct btrfs_root *root,
5706                          u64 num_bytes, u64 min_alloc_size,
5707                          u64 empty_size, u64 hint_byte,
5708                          struct btrfs_key *ins, u64 data)
5709 {
5710         bool final_tried = false;
5711         int ret;
5712
5713         data = btrfs_get_alloc_profile(root, data);
5714 again:
5715         /*
5716          * the only place that sets empty_size is btrfs_realloc_node, which
5717          * is not called recursively on allocations
5718          */
5719         if (empty_size || root->ref_cows)
5720                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5721                                      num_bytes + 2 * 1024 * 1024, data,
5722                                      CHUNK_ALLOC_NO_FORCE);
5723
5724         WARN_ON(num_bytes < root->sectorsize);
5725         ret = find_free_extent(trans, root, num_bytes, empty_size,
5726                                hint_byte, ins, data);
5727
5728         if (ret == -ENOSPC) {
5729                 if (!final_tried) {
5730                         num_bytes = num_bytes >> 1;
5731                         num_bytes = num_bytes & ~((u64)root->sectorsize - 1);
5732                         num_bytes = max(num_bytes, min_alloc_size);
5733                         do_chunk_alloc(trans, root->fs_info->extent_root,
5734                                        num_bytes, data, CHUNK_ALLOC_FORCE);
5735                         if (num_bytes == min_alloc_size)
5736                                 final_tried = true;
5737                         goto again;
5738                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
5739                         struct btrfs_space_info *sinfo;
5740
5741                         sinfo = __find_space_info(root->fs_info, data);
5742                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
5743                                "wanted %llu\n", (unsigned long long)data,
5744                                (unsigned long long)num_bytes);
5745                         dump_space_info(sinfo, num_bytes, 1);
5746                 }
5747         }
5748
5749         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5750
5751         return ret;
5752 }
5753
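/*
 * common helper for the two wrappers below: discard the range if the
 * DISCARD mount option is set, then either pin the extent down so it
 * isn't reused before the running transaction commits, or hand it
 * back to the block group's free space cache and release the
 * reservation.
 */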
5754 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5755                                         u64 start, u64 len, int pin)
5756 {
5757         struct btrfs_block_group_cache *cache;
5758         int ret = 0;
5759
5760         cache = btrfs_lookup_block_group(root->fs_info, start);
5761         if (!cache) {
5762                 printk(KERN_ERR "Unable to find block group for %llu\n",
5763                        (unsigned long long)start);
5764                 return -ENOSPC;
5765         }
5766
5767         if (btrfs_test_opt(root, DISCARD))
5768                 ret = btrfs_discard_extent(root, start, len, NULL);
5769
5770         if (pin)
5771                 pin_down_extent(root, cache, start, len, 1);
5772         else {
5773                 btrfs_add_free_space(cache, start, len);
5774                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5775         }
5776         btrfs_put_block_group(cache);
5777
5778         trace_btrfs_reserved_extent_free(root, start, len);
5779
5780         return ret;
5781 }
5782
5783 int btrfs_free_reserved_extent(struct btrfs_root *root,
5784                                         u64 start, u64 len)
5785 {
5786         return __btrfs_free_reserved_extent(root, start, len, 0);
5787 }
5788
5789 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
5790                                        u64 start, u64 len)
5791 {
5792         return __btrfs_free_reserved_extent(root, start, len, 1);
5793 }
5794
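/*
 * insert the extent item and inline backref for a newly allocated
 * data extent, and account the bytes to the owning block group.  A
 * nonzero parent means the extent is shared and gets a shared data
 * ref; otherwise a full extent data ref (root/objectid/offset) is
 * embedded.
 */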
5795 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5796                                       struct btrfs_root *root,
5797                                       u64 parent, u64 root_objectid,
5798                                       u64 flags, u64 owner, u64 offset,
5799                                       struct btrfs_key *ins, int ref_mod)
5800 {
5801         int ret;
5802         struct btrfs_fs_info *fs_info = root->fs_info;
5803         struct btrfs_extent_item *extent_item;
5804         struct btrfs_extent_inline_ref *iref;
5805         struct btrfs_path *path;
5806         struct extent_buffer *leaf;
5807         int type;
5808         u32 size;
5809
5810         if (parent > 0)
5811                 type = BTRFS_SHARED_DATA_REF_KEY;
5812         else
5813                 type = BTRFS_EXTENT_DATA_REF_KEY;
5814
5815         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5816
5817         path = btrfs_alloc_path();
5818         if (!path)
5819                 return -ENOMEM;
5820
5821         path->leave_spinning = 1;
5822         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5823                                       ins, size);
5824         BUG_ON(ret);
5825
5826         leaf = path->nodes[0];
5827         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5828                                      struct btrfs_extent_item);
5829         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5830         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5831         btrfs_set_extent_flags(leaf, extent_item,
5832                                flags | BTRFS_EXTENT_FLAG_DATA);
5833
5834         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5835         btrfs_set_extent_inline_ref_type(leaf, iref, type);
5836         if (parent > 0) {
5837                 struct btrfs_shared_data_ref *ref;
5838                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5839                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5840                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5841         } else {
5842                 struct btrfs_extent_data_ref *ref;
5843                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5844                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5845                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5846                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5847                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5848         }
5849
5850         btrfs_mark_buffer_dirty(path->nodes[0]);
5851         btrfs_free_path(path);
5852
5853         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5854         if (ret) {
5855                 printk(KERN_ERR "btrfs update block group failed for %llu "
5856                        "%llu\n", (unsigned long long)ins->objectid,
5857                        (unsigned long long)ins->offset);
5858                 BUG();
5859         }
5860         return ret;
5861 }
5862
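/*
 * tree block counterpart of alloc_reserved_file_extent(): here the
 * extent item is followed by a btrfs_tree_block_info (key + level)
 * and a single inline ref, shared or keyed depending on whether
 * parent is set.
 */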
5863 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5864                                      struct btrfs_root *root,
5865                                      u64 parent, u64 root_objectid,
5866                                      u64 flags, struct btrfs_disk_key *key,
5867                                      int level, struct btrfs_key *ins)
5868 {
5869         int ret;
5870         struct btrfs_fs_info *fs_info = root->fs_info;
5871         struct btrfs_extent_item *extent_item;
5872         struct btrfs_tree_block_info *block_info;
5873         struct btrfs_extent_inline_ref *iref;
5874         struct btrfs_path *path;
5875         struct extent_buffer *leaf;
5876         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5877
5878         path = btrfs_alloc_path();
5879         if (!path)
5880                 return -ENOMEM;
5881
5882         path->leave_spinning = 1;
5883         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5884                                       ins, size);
5885         BUG_ON(ret);
5886
5887         leaf = path->nodes[0];
5888         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5889                                      struct btrfs_extent_item);
5890         btrfs_set_extent_refs(leaf, extent_item, 1);
5891         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5892         btrfs_set_extent_flags(leaf, extent_item,
5893                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5894         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5895
5896         btrfs_set_tree_block_key(leaf, block_info, key);
5897         btrfs_set_tree_block_level(leaf, block_info, level);
5898
5899         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5900         if (parent > 0) {
5901                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5902                 btrfs_set_extent_inline_ref_type(leaf, iref,
5903                                                  BTRFS_SHARED_BLOCK_REF_KEY);
5904                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5905         } else {
5906                 btrfs_set_extent_inline_ref_type(leaf, iref,
5907                                                  BTRFS_TREE_BLOCK_REF_KEY);
5908                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5909         }
5910
5911         btrfs_mark_buffer_dirty(leaf);
5912         btrfs_free_path(path);
5913
5914         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5915         if (ret) {
5916                 printk(KERN_ERR "btrfs update block group failed for %llu "
5917                        "%llu\n", (unsigned long long)ins->objectid,
5918                        (unsigned long long)ins->offset);
5919                 BUG();
5920         }
5921         return ret;
5922 }
5923
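/*
 * record the back reference for a file extent we just allocated; the
 * actual extent item insertion is deferred to delayed ref processing.
 * Never used for the tree log tree, which has its own path below.
 */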
5924 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5925                                      struct btrfs_root *root,
5926                                      u64 root_objectid, u64 owner,
5927                                      u64 offset, struct btrfs_key *ins)
5928 {
5929         int ret;
5930
5931         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5932
5933         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
5934                                          ins->offset, 0,
5935                                          root_objectid, owner, offset,
5936                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
5937         return ret;
5938 }
5939
5940 /*
5941  * this is used by the tree logging recovery code.  It records that
5942  * an extent has been allocated and makes sure to clear the free
5943  * space cache bits as well
5944  */
5945 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5946                                    struct btrfs_root *root,
5947                                    u64 root_objectid, u64 owner, u64 offset,
5948                                    struct btrfs_key *ins)
5949 {
5950         int ret;
5951         struct btrfs_block_group_cache *block_group;
5952         struct btrfs_caching_control *caching_ctl;
5953         u64 start = ins->objectid;
5954         u64 num_bytes = ins->offset;
5955
5956         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5957         cache_block_group(block_group, trans, NULL, 0);
5958         caching_ctl = get_caching_control(block_group);
5959
5960         if (!caching_ctl) {
5961                 BUG_ON(!block_group_cache_done(block_group));
5962                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5963                 BUG_ON(ret);
5964         } else {
5965                 mutex_lock(&caching_ctl->mutex);
5966
5967                 if (start >= caching_ctl->progress) {
5968                         ret = add_excluded_extent(root, start, num_bytes);
5969                         BUG_ON(ret);
5970                 } else if (start + num_bytes <= caching_ctl->progress) {
5971                         ret = btrfs_remove_free_space(block_group,
5972                                                       start, num_bytes);
5973                         BUG_ON(ret);
5974                 } else {
5975                         num_bytes = caching_ctl->progress - start;
5976                         ret = btrfs_remove_free_space(block_group,
5977                                                       start, num_bytes);
5978                         BUG_ON(ret);
5979
5980                         start = caching_ctl->progress;
5981                         num_bytes = ins->objectid + ins->offset -
5982                                     caching_ctl->progress;
5983                         ret = add_excluded_extent(root, start, num_bytes);
5984                         BUG_ON(ret);
5985                 }
5986
5987                 mutex_unlock(&caching_ctl->mutex);
5988                 put_caching_control(caching_ctl);
5989         }
5990
5991         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
5992                                           RESERVE_ALLOC_NO_ACCOUNT);
5993         BUG_ON(ret);
5994         btrfs_put_block_group(block_group);
5995         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5996                                          0, owner, offset, ins, 1);
5997         return ret;
5998 }
5999
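/*
 * set up the in-memory buffer for a freshly allocated tree block:
 * stamp the new transid, take the tree lock, wipe any stale content
 * and mark the block's range dirty in the right extent_io tree
 * (dirty_log_pages for log trees, the transaction's dirty_pages
 * otherwise).
 */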
6000 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6001                                             struct btrfs_root *root,
6002                                             u64 bytenr, u32 blocksize,
6003                                             int level)
6004 {
6005         struct extent_buffer *buf;
6006
6007         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6008         if (!buf)
6009                 return ERR_PTR(-ENOMEM);
6010         btrfs_set_header_generation(buf, trans->transid);
6011         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6012         btrfs_tree_lock(buf);
6013         clean_tree_block(trans, root, buf);
6014         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6015
6016         btrfs_set_lock_blocking(buf);
6017         btrfs_set_buffer_uptodate(buf);
6018
6019         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6020                 /*
6021                  * we allow two log transactions at a time, use different
6022                  * EXTENT bits to differentiate dirty pages.
6023                  */
6024                 if (root->log_transid % 2 == 0)
6025                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6026                                         buf->start + buf->len - 1, GFP_NOFS);
6027                 else
6028                         set_extent_new(&root->dirty_log_pages, buf->start,
6029                                         buf->start + buf->len - 1, GFP_NOFS);
6030         } else {
6031                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6032                          buf->start + buf->len - 1, GFP_NOFS);
6033         }
6034         trans->blocks_used++;
6035         /* this returns a buffer locked for blocking */
6036         return buf;
6037 }
6038
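/*
 * pick the block reserve a new tree block is charged to.  If the
 * root's reserve is empty or can't cover blocksize we try to refill
 * it, falling back to stealing from the global reserve; the caller
 * gets back whichever rsv the bytes were actually taken from.
 */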
6039 static struct btrfs_block_rsv *
6040 use_block_rsv(struct btrfs_trans_handle *trans,
6041               struct btrfs_root *root, u32 blocksize)
6042 {
6043         struct btrfs_block_rsv *block_rsv;
6044         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6045         int ret;
6046
6047         block_rsv = get_block_rsv(trans, root);
6048
6049         if (block_rsv->size == 0) {
6050                 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6051                 /*
6052                  * If we couldn't reserve metadata bytes try and use some from
6053                  * the global reserve.
6054                  */
6055                 if (ret && block_rsv != global_rsv) {
6056                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6057                         if (!ret)
6058                                 return global_rsv;
6059                         return ERR_PTR(ret);
6060                 } else if (ret) {
6061                         return ERR_PTR(ret);
6062                 }
6063                 return block_rsv;
6064         }
6065
6066         ret = block_rsv_use_bytes(block_rsv, blocksize);
6067         if (!ret)
6068                 return block_rsv;
6069         if (ret) {
6070                 static DEFINE_RATELIMIT_STATE(_rs,
6071                                 DEFAULT_RATELIMIT_INTERVAL,
6072                                 /*DEFAULT_RATELIMIT_BURST*/ 2);
6073                 if (__ratelimit(&_rs)) {
6074                         printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
6075                         WARN_ON(1);
6076                 }
6077                 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6078                 if (!ret) {
6079                         return block_rsv;
6080                 } else if (ret && block_rsv != global_rsv) {
6081                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6082                         if (!ret)
6083                                 return global_rsv;
6084                 }
6085         }
6086
6087         return ERR_PTR(-ENOSPC);
6088 }
6089
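/*
 * give back the bytes taken by use_block_rsv() when the allocation
 * they were reserved for didn't happen.
 */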
6090 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6091                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6092 {
6093         block_rsv_add_bytes(block_rsv, blocksize, 0);
6094         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6095 }
6096
6097 /*
6098  * finds a free extent and does all the dirty work required for allocation
6099  * returns the key for the extent through ins, and the tree buffer for
6100  * the first block of the extent as the return value.
6101  *
6102  * returns the tree buffer or an ERR_PTR on failure.
6103  */
6104 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6105                                         struct btrfs_root *root, u32 blocksize,
6106                                         u64 parent, u64 root_objectid,
6107                                         struct btrfs_disk_key *key, int level,
6108                                         u64 hint, u64 empty_size, int for_cow)
6109 {
6110         struct btrfs_key ins;
6111         struct btrfs_block_rsv *block_rsv;
6112         struct extent_buffer *buf;
6113         u64 flags = 0;
6114         int ret;
6115
6116
6117         block_rsv = use_block_rsv(trans, root, blocksize);
6118         if (IS_ERR(block_rsv))
6119                 return ERR_CAST(block_rsv);
6120
6121         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6122                                    empty_size, hint, &ins, 0);
6123         if (ret) {
6124                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6125                 return ERR_PTR(ret);
6126         }
6127
6128         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6129                                     blocksize, level);
6130         BUG_ON(IS_ERR(buf));
6131
6132         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6133                 if (parent == 0)
6134                         parent = ins.objectid;
6135                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6136         } else
6137                 BUG_ON(parent > 0);
6138
6139         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6140                 struct btrfs_delayed_extent_op *extent_op;
6141                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6142                 BUG_ON(!extent_op);
6143                 if (key)
6144                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6145                 else
6146                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6147                 extent_op->flags_to_set = flags;
6148                 extent_op->update_key = 1;
6149                 extent_op->update_flags = 1;
6150                 extent_op->is_data = 0;
6151
6152                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6153                                         ins.objectid,
6154                                         ins.offset, parent, root_objectid,
6155                                         level, BTRFS_ADD_DELAYED_EXTENT,
6156                                         extent_op, for_cow);
6157                 BUG_ON(ret);
6158         }
6159         return buf;
6160 }
6161
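/*
 * state carried through a snapshot deletion walk: per-level ref
 * counts and flags, the key to resume from after an UPDATE_BACKREF
 * pass, the current stage and level, and the readahead window.
 */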
6162 struct walk_control {
6163         u64 refs[BTRFS_MAX_LEVEL];
6164         u64 flags[BTRFS_MAX_LEVEL];
6165         struct btrfs_key update_progress;
6166         int stage;
6167         int level;
6168         int shared_level;
6169         int update_ref;
6170         int keep_locks;
6171         int reada_slot;
6172         int reada_count;
6173         int for_reloc;
6174 };
6175
6176 #define DROP_REFERENCE  1
6177 #define UPDATE_BACKREF  2
6178
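/*
 * read ahead the tree blocks the walk is about to descend into.  The
 * window (wc->reada_count) grows or shrinks depending on how far the
 * walk got through the previous window; blocks the walk would skip
 * anyway (see the checks in do_walk_down) are not read.
 */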
6179 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6180                                      struct btrfs_root *root,
6181                                      struct walk_control *wc,
6182                                      struct btrfs_path *path)
6183 {
6184         u64 bytenr;
6185         u64 generation;
6186         u64 refs;
6187         u64 flags;
6188         u32 nritems;
6189         u32 blocksize;
6190         struct btrfs_key key;
6191         struct extent_buffer *eb;
6192         int ret;
6193         int slot;
6194         int nread = 0;
6195
6196         if (path->slots[wc->level] < wc->reada_slot) {
6197                 wc->reada_count = wc->reada_count * 2 / 3;
6198                 wc->reada_count = max(wc->reada_count, 2);
6199         } else {
6200                 wc->reada_count = wc->reada_count * 3 / 2;
6201                 wc->reada_count = min_t(int, wc->reada_count,
6202                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6203         }
6204
6205         eb = path->nodes[wc->level];
6206         nritems = btrfs_header_nritems(eb);
6207         blocksize = btrfs_level_size(root, wc->level - 1);
6208
6209         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6210                 if (nread >= wc->reada_count)
6211                         break;
6212
6213                 cond_resched();
6214                 bytenr = btrfs_node_blockptr(eb, slot);
6215                 generation = btrfs_node_ptr_generation(eb, slot);
6216
6217                 if (slot == path->slots[wc->level])
6218                         goto reada;
6219
6220                 if (wc->stage == UPDATE_BACKREF &&
6221                     generation <= root->root_key.offset)
6222                         continue;
6223
6224                 /* We don't lock the tree block, it's OK to be racy here */
6225                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6226                                                &refs, &flags);
6227                 BUG_ON(ret);
6228                 BUG_ON(refs == 0);
6229
6230                 if (wc->stage == DROP_REFERENCE) {
6231                         if (refs == 1)
6232                                 goto reada;
6233
6234                         if (wc->level == 1 &&
6235                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6236                                 continue;
6237                         if (!wc->update_ref ||
6238                             generation <= root->root_key.offset)
6239                                 continue;
6240                         btrfs_node_key_to_cpu(eb, &key, slot);
6241                         ret = btrfs_comp_cpu_keys(&key,
6242                                                   &wc->update_progress);
6243                         if (ret < 0)
6244                                 continue;
6245                 } else {
6246                         if (wc->level == 1 &&
6247                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6248                                 continue;
6249                 }
6250 reada:
6251                 ret = readahead_tree_block(root, bytenr, blocksize,
6252                                            generation);
6253                 if (ret)
6254                         break;
6255                 nread++;
6256         }
6257         wc->reada_slot = slot;
6258 }
6259
6260 /*
6261  * helper to process tree block while walking down the tree.
6262  *
6263  * when wc->stage == UPDATE_BACKREF, this function updates
6264  * back refs for pointers in the block.
6265  *
6266  * NOTE: return value 1 means we should stop walking down.
6267  */
6268 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6269                                    struct btrfs_root *root,
6270                                    struct btrfs_path *path,
6271                                    struct walk_control *wc, int lookup_info)
6272 {
6273         int level = wc->level;
6274         struct extent_buffer *eb = path->nodes[level];
6275         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6276         int ret;
6277
6278         if (wc->stage == UPDATE_BACKREF &&
6279             btrfs_header_owner(eb) != root->root_key.objectid)
6280                 return 1;
6281
6282         /*
6283          * when the reference count of a tree block is 1, it won't increase
6284          * again. once the full backref flag is set, we never clear it.
6285          */
6286         if (lookup_info &&
6287             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6288              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6289                 BUG_ON(!path->locks[level]);
6290                 ret = btrfs_lookup_extent_info(trans, root,
6291                                                eb->start, eb->len,
6292                                                &wc->refs[level],
6293                                                &wc->flags[level]);
6294                 BUG_ON(ret);
6295                 BUG_ON(wc->refs[level] == 0);
6296         }
6297
6298         if (wc->stage == DROP_REFERENCE) {
6299                 if (wc->refs[level] > 1)
6300                         return 1;
6301
6302                 if (path->locks[level] && !wc->keep_locks) {
6303                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6304                         path->locks[level] = 0;
6305                 }
6306                 return 0;
6307         }
6308
6309         /* wc->stage == UPDATE_BACKREF */
6310         if (!(wc->flags[level] & flag)) {
6311                 BUG_ON(!path->locks[level]);
6312                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6313                 BUG_ON(ret);
6314                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6315                 BUG_ON(ret);
6316                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6317                                                   eb->len, flag, 0);
6318                 BUG_ON(ret);
6319                 wc->flags[level] |= flag;
6320         }
6321
6322         /*
6323          * the block is shared by multiple trees, so it's not good to
6324          * keep the tree lock
6325          */
6326         if (path->locks[level] && level > 0) {
6327                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6328                 path->locks[level] = 0;
6329         }
6330         return 0;
6331 }
6332
6333 /*
6334  * helper to process tree block pointer.
6335  *
6336  * when wc->stage == DROP_REFERENCE, this function checks
6337  * reference count of the block pointed to. if the block
6338  * is shared and we need update back refs for the subtree
6339  * rooted at the block, this function changes wc->stage to
6340  * UPDATE_BACKREF. if the block is shared and there is no
6341  * need to update back, this function drops the reference
6342  * to the block.
6343  *
6344  * NOTE: return value 1 means we should stop walking down.
6345  */
6346 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6347                                  struct btrfs_root *root,
6348                                  struct btrfs_path *path,
6349                                  struct walk_control *wc, int *lookup_info)
6350 {
6351         u64 bytenr;
6352         u64 generation;
6353         u64 parent;
6354         u32 blocksize;
6355         struct btrfs_key key;
6356         struct extent_buffer *next;
6357         int level = wc->level;
6358         int reada = 0;
6359         int ret = 0;
6360
6361         generation = btrfs_node_ptr_generation(path->nodes[level],
6362                                                path->slots[level]);
6363         /*
6364          * if the lower level block was created before the snapshot
6365          * was created, we know there is no need to update back refs
6366          * for the subtree
6367          */
6368         if (wc->stage == UPDATE_BACKREF &&
6369             generation <= root->root_key.offset) {
6370                 *lookup_info = 1;
6371                 return 1;
6372         }
6373
6374         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6375         blocksize = btrfs_level_size(root, level - 1);
6376
6377         next = btrfs_find_tree_block(root, bytenr, blocksize);
6378         if (!next) {
6379                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6380                 if (!next)
6381                         return -ENOMEM;
6382                 reada = 1;
6383         }
6384         btrfs_tree_lock(next);
6385         btrfs_set_lock_blocking(next);
6386
6387         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6388                                        &wc->refs[level - 1],
6389                                        &wc->flags[level - 1]);
6390         BUG_ON(ret);
6391         BUG_ON(wc->refs[level - 1] == 0);
6392         *lookup_info = 0;
6393
6394         if (wc->stage == DROP_REFERENCE) {
6395                 if (wc->refs[level - 1] > 1) {
6396                         if (level == 1 &&
6397                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6398                                 goto skip;
6399
6400                         if (!wc->update_ref ||
6401                             generation <= root->root_key.offset)
6402                                 goto skip;
6403
6404                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6405                                               path->slots[level]);
6406                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6407                         if (ret < 0)
6408                                 goto skip;
6409
6410                         wc->stage = UPDATE_BACKREF;
6411                         wc->shared_level = level - 1;
6412                 }
6413         } else {
6414                 if (level == 1 &&
6415                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6416                         goto skip;
6417         }
6418
6419         if (!btrfs_buffer_uptodate(next, generation)) {
6420                 btrfs_tree_unlock(next);
6421                 free_extent_buffer(next);
6422                 next = NULL;
6423                 *lookup_info = 1;
6424         }
6425
6426         if (!next) {
6427                 if (reada && level == 1)
6428                         reada_walk_down(trans, root, wc, path);
6429                 next = read_tree_block(root, bytenr, blocksize, generation);
6430                 if (!next)
6431                         return -EIO;
6432                 btrfs_tree_lock(next);
6433                 btrfs_set_lock_blocking(next);
6434         }
6435
6436         level--;
6437         BUG_ON(level != btrfs_header_level(next));
6438         path->nodes[level] = next;
6439         path->slots[level] = 0;
6440         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6441         wc->level = level;
6442         if (wc->level == 1)
6443                 wc->reada_slot = 0;
6444         return 0;
6445 skip:
6446         wc->refs[level - 1] = 0;
6447         wc->flags[level - 1] = 0;
6448         if (wc->stage == DROP_REFERENCE) {
6449                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6450                         parent = path->nodes[level]->start;
6451                 } else {
6452                         BUG_ON(root->root_key.objectid !=
6453                                btrfs_header_owner(path->nodes[level]));
6454                         parent = 0;
6455                 }
6456
6457                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6458                                 root->root_key.objectid, level - 1, 0, 0);
6459                 BUG_ON(ret);
6460         }
6461         btrfs_tree_unlock(next);
6462         free_extent_buffer(next);
6463         *lookup_info = 1;
6464         return 1;
6465 }
6466
6467 /*
6468  * helper to process tree block while walking up the tree.
6469  *
6470  * when wc->stage == DROP_REFERENCE, this function drops
6471  * reference count on the block.
6472  *
6473  * when wc->stage == UPDATE_BACKREF, this function changes
6474  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6475  * to UPDATE_BACKREF previously while processing the block.
6476  *
6477  * NOTE: return value 1 means we should stop walking up.
6478  */
6479 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6480                                  struct btrfs_root *root,
6481                                  struct btrfs_path *path,
6482                                  struct walk_control *wc)
6483 {
6484         int ret;
6485         int level = wc->level;
6486         struct extent_buffer *eb = path->nodes[level];
6487         u64 parent = 0;
6488
6489         if (wc->stage == UPDATE_BACKREF) {
6490                 BUG_ON(wc->shared_level < level);
6491                 if (level < wc->shared_level)
6492                         goto out;
6493
6494                 ret = find_next_key(path, level + 1, &wc->update_progress);
6495                 if (ret > 0)
6496                         wc->update_ref = 0;
6497
6498                 wc->stage = DROP_REFERENCE;
6499                 wc->shared_level = -1;
6500                 path->slots[level] = 0;
6501
6502                 /*
6503                  * check reference count again if the block isn't locked.
6504                  * we should start walking down the tree again if reference
6505                  * count is one.
6506                  */
6507                 if (!path->locks[level]) {
6508                         BUG_ON(level == 0);
6509                         btrfs_tree_lock(eb);
6510                         btrfs_set_lock_blocking(eb);
6511                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6512
6513                         ret = btrfs_lookup_extent_info(trans, root,
6514                                                        eb->start, eb->len,
6515                                                        &wc->refs[level],
6516                                                        &wc->flags[level]);
6517                         BUG_ON(ret);
6518                         BUG_ON(wc->refs[level] == 0);
6519                         if (wc->refs[level] == 1) {
6520                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6521                                 return 1;
6522                         }
6523                 }
6524         }
6525
6526         /* wc->stage == DROP_REFERENCE */
6527         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6528
6529         if (wc->refs[level] == 1) {
6530                 if (level == 0) {
6531                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6532                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6533                                                     wc->for_reloc);
6534                         else
6535                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6536                                                     wc->for_reloc);
6537                         BUG_ON(ret);
6538                 }
6539                 /* make block locked assertion in clean_tree_block happy */
6540                 if (!path->locks[level] &&
6541                     btrfs_header_generation(eb) == trans->transid) {
6542                         btrfs_tree_lock(eb);
6543                         btrfs_set_lock_blocking(eb);
6544                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6545                 }
6546                 clean_tree_block(trans, root, eb);
6547         }
6548
6549         if (eb == root->node) {
6550                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6551                         parent = eb->start;
6552                 else
6553                         BUG_ON(root->root_key.objectid !=
6554                                btrfs_header_owner(eb));
6555         } else {
6556                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6557                         parent = path->nodes[level + 1]->start;
6558                 else
6559                         BUG_ON(root->root_key.objectid !=
6560                                btrfs_header_owner(path->nodes[level + 1]));
6561         }
6562
6563         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
6564 out:
6565         wc->refs[level] = 0;
6566         wc->flags[level] = 0;
6567         return 0;
6568 }
6569
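/*
 * walk down one branch as far as the current stage allows, loading
 * child blocks with do_walk_down() until we reach a leaf, a block we
 * shouldn't descend into, or the end of the current node.
 */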
6570 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6571                                    struct btrfs_root *root,
6572                                    struct btrfs_path *path,
6573                                    struct walk_control *wc)
6574 {
6575         int level = wc->level;
6576         int lookup_info = 1;
6577         int ret;
6578
6579         while (level >= 0) {
6580                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6581                 if (ret > 0)
6582                         break;
6583
6584                 if (level == 0)
6585                         break;
6586
6587                 if (path->slots[level] >=
6588                     btrfs_header_nritems(path->nodes[level]))
6589                         break;
6590
6591                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6592                 if (ret > 0) {
6593                         path->slots[level]++;
6594                         continue;
6595                 } else if (ret < 0)
6596                         return ret;
6597                 level = wc->level;
6598         }
6599         return 0;
6600 }
6601
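/*
 * climb back up the path once a branch is finished: either step to
 * the next slot of a parent (return 0 so the caller walks back down)
 * or process and drop the finished node via walk_up_proc() and keep
 * climbing.  Returns 1 when the walk is complete.
 */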
6602 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6603                                  struct btrfs_root *root,
6604                                  struct btrfs_path *path,
6605                                  struct walk_control *wc, int max_level)
6606 {
6607         int level = wc->level;
6608         int ret;
6609
6610         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6611         while (level < max_level && path->nodes[level]) {
6612                 wc->level = level;
6613                 if (path->slots[level] + 1 <
6614                     btrfs_header_nritems(path->nodes[level])) {
6615                         path->slots[level]++;
6616                         return 0;
6617                 } else {
6618                         ret = walk_up_proc(trans, root, path, wc);
6619                         if (ret > 0)
6620                                 return 0;
6621
6622                         if (path->locks[level]) {
6623                                 btrfs_tree_unlock_rw(path->nodes[level],
6624                                                      path->locks[level]);
6625                                 path->locks[level] = 0;
6626                         }
6627                         free_extent_buffer(path->nodes[level]);
6628                         path->nodes[level] = NULL;
6629                         level++;
6630                 }
6631         }
6632         return 1;
6633 }
6634
6635 /*
6636  * drop a subvolume tree.
6637  *
6638  * this function traverses the tree freeing any blocks that are only
6639  * referenced by the tree.
6640  *
6641  * when a shared tree block is found, this function decreases its
6642  * reference count by one. if update_ref is true, this function
6643  * also makes sure backrefs for the shared block and all lower level
6644  * blocks are properly updated.
6645  */
6646 void btrfs_drop_snapshot(struct btrfs_root *root,
6647                          struct btrfs_block_rsv *block_rsv, int update_ref,
6648                          int for_reloc)
6649 {
6650         struct btrfs_path *path;
6651         struct btrfs_trans_handle *trans;
6652         struct btrfs_root *tree_root = root->fs_info->tree_root;
6653         struct btrfs_root_item *root_item = &root->root_item;
6654         struct walk_control *wc;
6655         struct btrfs_key key;
6656         int err = 0;
6657         int ret;
6658         int level;
6659
6660         path = btrfs_alloc_path();
6661         if (!path) {
6662                 err = -ENOMEM;
6663                 goto out;
6664         }
6665
6666         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6667         if (!wc) {
6668                 btrfs_free_path(path);
6669                 err = -ENOMEM;
6670                 goto out;
6671         }
6672
6673         trans = btrfs_start_transaction(tree_root, 0);
6674         BUG_ON(IS_ERR(trans));
6675
6676         if (block_rsv)
6677                 trans->block_rsv = block_rsv;
6678
6679         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6680                 level = btrfs_header_level(root->node);
6681                 path->nodes[level] = btrfs_lock_root_node(root);
6682                 btrfs_set_lock_blocking(path->nodes[level]);
6683                 path->slots[level] = 0;
6684                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6685                 memset(&wc->update_progress, 0,
6686                        sizeof(wc->update_progress));
6687         } else {
6688                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6689                 memcpy(&wc->update_progress, &key,
6690                        sizeof(wc->update_progress));
6691
6692                 level = root_item->drop_level;
6693                 BUG_ON(level == 0);
6694                 path->lowest_level = level;
6695                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6696                 path->lowest_level = 0;
6697                 if (ret < 0) {
6698                         err = ret;
6699                         goto out_free;
6700                 }
6701                 WARN_ON(ret > 0);
6702
6703                 /*
6704                  * unlock our path, this is safe because only this
6705                  * function is allowed to delete this snapshot
6706                  */
6707                 btrfs_unlock_up_safe(path, 0);
6708
6709                 level = btrfs_header_level(root->node);
6710                 while (1) {
6711                         btrfs_tree_lock(path->nodes[level]);
6712                         btrfs_set_lock_blocking(path->nodes[level]);
6713
6714                         ret = btrfs_lookup_extent_info(trans, root,
6715                                                 path->nodes[level]->start,
6716                                                 path->nodes[level]->len,
6717                                                 &wc->refs[level],
6718                                                 &wc->flags[level]);
6719                         BUG_ON(ret);
6720                         BUG_ON(wc->refs[level] == 0);
6721
6722                         if (level == root_item->drop_level)
6723                                 break;
6724
6725                         btrfs_tree_unlock(path->nodes[level]);
6726                         WARN_ON(wc->refs[level] != 1);
6727                         level--;
6728                 }
6729         }
6730
6731         wc->level = level;
6732         wc->shared_level = -1;
6733         wc->stage = DROP_REFERENCE;
6734         wc->update_ref = update_ref;
6735         wc->keep_locks = 0;
6736         wc->for_reloc = for_reloc;
6737         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6738
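        /*
         * Descriptive note (added): the main loop alternates walking down
         * and back up the tree, periodically recording drop_progress and
         * drop_level in the root item so that an interrupted drop can be
         * resumed from where it left off (see the resume path above).
         */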
6739         while (1) {
6740                 ret = walk_down_tree(trans, root, path, wc);
6741                 if (ret < 0) {
6742                         err = ret;
6743                         break;
6744                 }
6745
6746                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6747                 if (ret < 0) {
6748                         err = ret;
6749                         break;
6750                 }
6751
6752                 if (ret > 0) {
6753                         BUG_ON(wc->stage != DROP_REFERENCE);
6754                         break;
6755                 }
6756
6757                 if (wc->stage == DROP_REFERENCE) {
6758                         level = wc->level;
6759                         btrfs_node_key(path->nodes[level],
6760                                        &root_item->drop_progress,
6761                                        path->slots[level]);
6762                         root_item->drop_level = level;
6763                 }
6764
6765                 BUG_ON(wc->level == 0);
6766                 if (btrfs_should_end_transaction(trans, tree_root)) {
6767                         ret = btrfs_update_root(trans, tree_root,
6768                                                 &root->root_key,
6769                                                 root_item);
6770                         BUG_ON(ret);
6771
6772                         btrfs_end_transaction_throttle(trans, tree_root);
6773                         trans = btrfs_start_transaction(tree_root, 0);
6774                         BUG_ON(IS_ERR(trans));
6775                         if (block_rsv)
6776                                 trans->block_rsv = block_rsv;
6777                 }
6778         }
6779         btrfs_release_path(path);
6780         BUG_ON(err);
6781
6782         ret = btrfs_del_root(trans, tree_root, &root->root_key);
6783         BUG_ON(ret);
6784
6785         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6786                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6787                                            NULL, NULL);
6788                 BUG_ON(ret < 0);
6789                 if (ret > 0) {
6790                         /* if we fail to delete the orphan item this time
6791                          * around, it'll get picked up the next time.
6792                          *
6793                          * The most common failure here is just -ENOENT.
6794                          */
6795                         btrfs_del_orphan_item(trans, tree_root,
6796                                               root->root_key.objectid);
6797                 }
6798         }
6799
6800         if (root->in_radix) {
6801                 btrfs_free_fs_root(tree_root->fs_info, root);
6802         } else {
6803                 free_extent_buffer(root->node);
6804                 free_extent_buffer(root->commit_root);
6805                 kfree(root);
6806         }
6807 out_free:
6808         btrfs_end_transaction_throttle(trans, tree_root);
6809         kfree(wc);
6810         btrfs_free_path(path);
6811 out:
6812         if (err)
6813                 btrfs_std_error(root->fs_info, err);
6814         return;
6815 }
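/*
 * Illustrative sketch only (not part of the original file): how a caller
 * such as the snapshot cleaner might drive btrfs_drop_snapshot() for a
 * root that has been deleted.  example_drop_dead_root() is a hypothetical
 * name for this example; the real caller lives in transaction.c.
 */
static void example_drop_dead_root(struct btrfs_root *dead_root)
{
        /* no private block reservation, no backref updates, not relocation */
        btrfs_drop_snapshot(dead_root, NULL, 0, 0);
}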
6816
6817 /*
6818  * drop subtree rooted at tree block 'node'.
6819  *
6820  * NOTE: this function will unlock and release tree block 'node'.
6821  * Only used by relocation code.
6822  */
6823 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6824                         struct btrfs_root *root,
6825                         struct extent_buffer *node,
6826                         struct extent_buffer *parent)
6827 {
6828         struct btrfs_path *path;
6829         struct walk_control *wc;
6830         int level;
6831         int parent_level;
6832         int ret = 0;
6833         int wret;
6834
6835         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6836
6837         path = btrfs_alloc_path();
6838         if (!path)
6839                 return -ENOMEM;
6840
6841         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6842         if (!wc) {
6843                 btrfs_free_path(path);
6844                 return -ENOMEM;
6845         }
6846
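        /*
         * Descriptive note (added): seed the walk by hand.  The locked
         * parent sits at parent_level with its slot past the last item,
         * and 'node' is where the walk starts; wc->refs/wc->flags for the
         * parent are filled in below since we never look it up on disk.
         */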
6847         btrfs_assert_tree_locked(parent);
6848         parent_level = btrfs_header_level(parent);
6849         extent_buffer_get(parent);
6850         path->nodes[parent_level] = parent;
6851         path->slots[parent_level] = btrfs_header_nritems(parent);
6852
6853         btrfs_assert_tree_locked(node);
6854         level = btrfs_header_level(node);
6855         path->nodes[level] = node;
6856         path->slots[level] = 0;
6857         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6858
6859         wc->refs[parent_level] = 1;
6860         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6861         wc->level = level;
6862         wc->shared_level = -1;
6863         wc->stage = DROP_REFERENCE;
6864         wc->update_ref = 0;
6865         wc->keep_locks = 1;
6866         wc->for_reloc = 1;
6867         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6868
6869         while (1) {
6870                 wret = walk_down_tree(trans, root, path, wc);
6871                 if (wret < 0) {
6872                         ret = wret;
6873                         break;
6874                 }
6875
6876                 wret = walk_up_tree(trans, root, path, wc, parent_level);
6877                 if (wret < 0)
6878                         ret = wret;
6879                 if (wret != 0)
6880                         break;
6881         }
6882
6883         kfree(wc);
6884         btrfs_free_path(path);
6885         return ret;
6886 }
6887
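/*
 * Descriptive note (added): compute the allocation profile a block group
 * should be converted to when it is rewritten.  If a restriper balance is
 * running with a convert target for this block group type, that target
 * wins; otherwise redundancy is reduced to what the current device count
 * can honor (e.g. RAID1 becomes DUP on a single-device filesystem).
 */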
6888 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6889 {
6890         u64 num_devices;
6891         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6892                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6893
6894         if (root->fs_info->balance_ctl) {
6895                 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
6896                 u64 tgt = 0;
6897
6898                 /* pick restriper's target profile and return */
6899                 if (flags & BTRFS_BLOCK_GROUP_DATA &&
6900                     bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6901                         tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
6902                 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
6903                            bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6904                         tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
6905                 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
6906                            bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6907                         tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
6908                 }
6909
6910                 if (tgt)
6911                         return extended_to_chunk(tgt);
6912         }
6913
6914         /*
6915          * we add in the count of missing devices because we want
6916          * to make sure that any RAID levels on a degraded FS
6917          * continue to be honored.
6918          */
6919         num_devices = root->fs_info->fs_devices->rw_devices +
6920                 root->fs_info->fs_devices->missing_devices;
6921
6922         if (num_devices == 1) {
6923                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6924                 stripped = flags & ~stripped;
6925
6926                 /* turn raid0 into single device chunks */
6927                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6928                         return stripped;
6929
6930                 /* turn mirroring into duplication */
6931                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6932                              BTRFS_BLOCK_GROUP_RAID10))
6933                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6934         } else {
6935                 /* they already had raid on here, just return */
6936                 if (flags & stripped)
6937                         return flags;
6938
6939                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6940                 stripped = flags & ~stripped;
6941
6942                 /* switch duplicated blocks with raid1 */
6943                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6944                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6945
6946                 /* this is drive concat, leave it alone */
6947         }
6948
6949         return flags;
6950 }
6951
6952 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6953 {
6954         struct btrfs_space_info *sinfo = cache->space_info;
6955         u64 num_bytes;
6956         u64 min_allocable_bytes;
6957         int ret = -ENOSPC;
6958
6960         /*
6961          * We need some metadata space and system metadata space for
6962          * allocating chunks in some corner cases, so keep a minimum
6963          * reserve unless we are forced to set the group readonly.
6964          */
6965         if ((sinfo->flags &
6966              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
6967             !force)
6968                 min_allocable_bytes = 1 * 1024 * 1024;
6969         else
6970                 min_allocable_bytes = 0;
6971
6972         spin_lock(&sinfo->lock);
6973         spin_lock(&cache->lock);
6974
6975         if (cache->ro) {
6976                 ret = 0;
6977                 goto out;
6978         }
6979
6980         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6981                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6982
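        /*
         * Descriptive note (added): only flip the group read-only if every
         * byte already accounted in the space_info, plus this group's
         * unused bytes and the minimum reserve, still fits within the
         * space_info's total.
         */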
6983         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
6984             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
6985             min_allocable_bytes <= sinfo->total_bytes) {
6986                 sinfo->bytes_readonly += num_bytes;
6987                 cache->ro = 1;
6988                 ret = 0;
6989         }
6990 out:
6991         spin_unlock(&cache->lock);
6992         spin_unlock(&sinfo->lock);
6993         return ret;
6994 }
6995
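/*
 * Descriptive note (added): mark a block group read-only, force-allocating
 * a fresh chunk and retrying once if the space_info cannot absorb the
 * group's free bytes on the first attempt.
 */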
6996 int btrfs_set_block_group_ro(struct btrfs_root *root,
6997                              struct btrfs_block_group_cache *cache)
6999 {
7000         struct btrfs_trans_handle *trans;
7001         u64 alloc_flags;
7002         int ret;
7003
7004         BUG_ON(cache->ro);
7005
7006         trans = btrfs_join_transaction(root);
7007         BUG_ON(IS_ERR(trans));
7008
7009         alloc_flags = update_block_group_flags(root, cache->flags);
7010         if (alloc_flags != cache->flags)
7011                 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7012                                CHUNK_ALLOC_FORCE);
7013
7014         ret = set_block_group_ro(cache, 0);
7015         if (!ret)
7016                 goto out;
7017         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7018         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7019                              CHUNK_ALLOC_FORCE);
7020         if (ret < 0)
7021                 goto out;
7022         ret = set_block_group_ro(cache, 0);
7023 out:
7024         btrfs_end_transaction(trans, root);
7025         return ret;
7026 }
7027
7028 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7029                             struct btrfs_root *root, u64 type)
7030 {
7031         u64 alloc_flags = get_alloc_profile(root, type);
7032         return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7033                               CHUNK_ALLOC_FORCE);
7034 }
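/*
 * Sketch of a typical call (added for illustration, assuming a started
 * transaction handle 'trans'), e.g. to pre-create a data chunk:
 *
 *      ret = btrfs_force_chunk_alloc(trans, root, BTRFS_BLOCK_GROUP_DATA);
 */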
7035
7036 /*
7037  * helper to account for the unused space of all the readonly block groups in the
7038  * list. takes mirrors into account.
7039  */
7040 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7041 {
7042         struct btrfs_block_group_cache *block_group;
7043         u64 free_bytes = 0;
7044         int factor;
7045
7046         list_for_each_entry(block_group, groups_list, list) {
7047                 spin_lock(&block_group->lock);
7048
7049                 if (!block_group->ro) {
7050                         spin_unlock(&block_group->lock);
7051                         continue;
7052                 }
7053
7054                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7055                                           BTRFS_BLOCK_GROUP_RAID10 |
7056                                           BTRFS_BLOCK_GROUP_DUP))
7057                         factor = 2;
7058                 else
7059                         factor = 1;
7060
7061                 free_bytes += (block_group->key.offset -
7062                                btrfs_block_group_used(&block_group->item)) *
7063                                factor;
7064
7065                 spin_unlock(&block_group->lock);
7066         }
7067
7068         return free_bytes;
7069 }
7070
7071 /*
7072  * helper to account for the unused space of all the readonly block groups in the
7073  * space_info. takes mirrors into account.
7074  */
7075 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7076 {
7077         int i;
7078         u64 free_bytes = 0;
7079
7080         spin_lock(&sinfo->lock);
7081
7082         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7083                 if (!list_empty(&sinfo->block_groups[i]))
7084                         free_bytes += __btrfs_get_ro_block_group_free_space(
7085                                                 &sinfo->block_groups[i]);
7086
7087         spin_unlock(&sinfo->lock);
7088
7089         return free_bytes;
7090 }
7091
7092 int btrfs_set_block_group_rw(struct btrfs_root *root,
7093                               struct btrfs_block_group_cache *cache)
7094 {
7095         struct btrfs_space_info *sinfo = cache->space_info;
7096         u64 num_bytes;
7097
7098         BUG_ON(!cache->ro);
7099
7100         spin_lock(&sinfo->lock);
7101         spin_lock(&cache->lock);
7102         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7103                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7104         sinfo->bytes_readonly -= num_bytes;
7105         cache->ro = 0;
7106         spin_unlock(&cache->lock);
7107         spin_unlock(&sinfo->lock);
7108         return 0;
7109 }
7110
7111 /*
7112  * checks to see if it's even possible to relocate this block group.
7113  *
7114  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7115  * ok to go ahead and try.
7116  */
7117 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7118 {
7119         struct btrfs_block_group_cache *block_group;
7120         struct btrfs_space_info *space_info;
7121         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7122         struct btrfs_device *device;
7123         u64 min_free;
7124         u64 dev_min = 1;
7125         u64 dev_nr = 0;
7126         int index;
7127         int full = 0;
7128         int ret = 0;
7129
7130         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7131
7132         /* odd, couldn't find the block group, leave it alone */
7133         if (!block_group)
7134                 return -1;
7135
7136         min_free = btrfs_block_group_used(&block_group->item);
7137
7138         /* no bytes used, we're good */
7139         if (!min_free)
7140                 goto out;
7141
7142         space_info = block_group->space_info;
7143         spin_lock(&space_info->lock);
7144
7145         full = space_info->full;
7146
7147         /*
7148          * if this is the last block group we have in this space, we can't
7149          * relocate it unless we're able to allocate a new chunk below.
7150          *
7151          * Otherwise, we need to make sure we have room in the space to handle
7152          * all of the extents from this block group.  If we can, we're good.
7153          */
7154         if ((space_info->total_bytes != block_group->key.offset) &&
7155             (space_info->bytes_used + space_info->bytes_reserved +
7156              space_info->bytes_pinned + space_info->bytes_readonly +
7157              min_free < space_info->total_bytes)) {
7158                 spin_unlock(&space_info->lock);
7159                 goto out;
7160         }
7161         spin_unlock(&space_info->lock);
7162
7163         /*
7164          * ok we don't have enough space, but maybe we have free space on our
7165          * devices to allocate new chunks for relocation, so loop through our
7166          * alloc devices and guess if we have enough space.  However, if we
7167          * were marked as full, then we know there aren't enough chunks, and we
7168          * can just return.
7169          */
7170         ret = -1;
7171         if (full)
7172                 goto out;
7173
7174         /*
7175          * index:
7176          *      0: raid10
7177          *      1: raid1
7178          *      2: dup
7179          *      3: raid0
7180          *      4: single
7181          */
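        /*
         * Descriptive note (added): scale the per-device requirement to
         * the profile.  raid10 spreads a mirrored copy over at least four
         * devices (half of min_free each), raid1 needs a full copy on two
         * devices, dup needs twice min_free on one device, and raid0
         * divides min_free evenly across all writable devices.
         */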
7182         index = get_block_group_index(block_group);
7183         if (index == 0) {
7184                 dev_min = 4;
7185                 /* Divide by 2 */
7186                 min_free >>= 1;
7187         } else if (index == 1) {
7188                 dev_min = 2;
7189         } else if (index == 2) {
7190                 /* Multiply by 2 */
7191                 min_free <<= 1;
7192         } else if (index == 3) {
7193                 dev_min = fs_devices->rw_devices;
7194                 do_div(min_free, dev_min);
7195         }
7196
7197         mutex_lock(&root->fs_info->chunk_mutex);
7198         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7199                 u64 dev_offset;
7200
7201                 /*
7202                  * check to make sure we can actually find a chunk with enough
7203                  * space to fit our block group in.
7204                  */
7205                 if (device->total_bytes > device->bytes_used + min_free) {
7206                         ret = find_free_dev_extent(device, min_free,
7207                                                    &dev_offset, NULL);
7208                         if (!ret)
7209                                 dev_nr++;
7210
7211                         if (dev_nr >= dev_min)
7212                                 break;
7213
7214                         ret = -1;
7215                 }
7216         }
7217         mutex_unlock(&root->fs_info->chunk_mutex);
7218 out:
7219         btrfs_put_block_group(block_group);
7220         return ret;
7221 }
7222
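/*
 * Descriptive note (added): find the first BLOCK_GROUP_ITEM at or after
 * *key, walking forward across leaves as needed.  On success the path is
 * left positioned on the item and 0 is returned; > 0 means no such item
 * exists, < 0 is an error.
 */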
7223 static int find_first_block_group(struct btrfs_root *root,
7224                 struct btrfs_path *path, struct btrfs_key *key)
7225 {
7226         int ret = 0;
7227         struct btrfs_key found_key;
7228         struct extent_buffer *leaf;
7229         int slot;
7230
7231         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7232         if (ret < 0)
7233                 goto out;
7234
7235         while (1) {
7236                 slot = path->slots[0];
7237                 leaf = path->nodes[0];
7238                 if (slot >= btrfs_header_nritems(leaf)) {
7239                         ret = btrfs_next_leaf(root, path);
7240                         if (ret == 0)
7241                                 continue;
7242                         if (ret < 0)
7243                                 goto out;
7244                         break;
7245                 }
7246                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7247
7248                 if (found_key.objectid >= key->objectid &&
7249                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7250                         ret = 0;
7251                         goto out;
7252                 }
7253                 path->slots[0]++;
7254         }
7255 out:
7256         return ret;
7257 }
7258
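/*
 * Descriptive note (added): drop the free space cache inode reference
 * (iref) held by every block group and iput the inode, typically at
 * unmount, so the cached inodes can actually be released.
 */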
7259 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7260 {
7261         struct btrfs_block_group_cache *block_group;
7262         u64 last = 0;
7263
7264         while (1) {
7265                 struct inode *inode;
7266
7267                 block_group = btrfs_lookup_first_block_group(info, last);
7268                 while (block_group) {
7269                         spin_lock(&block_group->lock);
7270                         if (block_group->iref)
7271                                 break;
7272                         spin_unlock(&block_group->lock);
7273                         block_group = next_block_group(info->tree_root,
7274                                                        block_group);
7275                 }
7276                 if (!block_group) {
7277                         if (last == 0)
7278                                 break;
7279                         last = 0;
7280                         continue;
7281                 }
7282
7283                 inode = block_group->inode;
7284                 block_group->iref = 0;
7285                 block_group->inode = NULL;
7286                 spin_unlock(&block_group->lock);
7287                 iput(inode);
7288                 last = block_group->key.objectid + block_group->key.offset;
7289                 btrfs_put_block_group(block_group);
7290         }
7291 }
7292
7293 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7294 {
7295         struct btrfs_block_group_cache *block_group;
7296         struct btrfs_space_info *space_info;
7297         struct btrfs_caching_control *caching_ctl;
7298         struct rb_node *n;
7299
7300         down_write(&info->extent_commit_sem);
7301         while (!list_empty(&info->caching_block_groups)) {
7302                 caching_ctl = list_entry(info->caching_block_groups.next,
7303                                          struct btrfs_caching_control, list);
7304                 list_del(&caching_ctl->list);
7305                 put_caching_control(caching_ctl);
7306         }
7307         up_write(&info->extent_commit_sem);
7308
7309         spin_lock(&info->block_group_cache_lock);
7310         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7311                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7312                                        cache_node);
7313                 rb_erase(&block_group->cache_node,
7314                          &info->block_group_cache_tree);
7315                 spin_unlock(&info->block_group_cache_lock);
7316
7317                 down_write(&block_group->space_info->groups_sem);
7318                 list_del(&block_group->list);
7319                 up_write(&block_group->space_info->groups_sem);
7320
7321                 if (block_group->cached == BTRFS_CACHE_STARTED)
7322                         wait_block_group_cache_done(block_group);
7323
7324                 /*
7325                  * We haven't cached this block group, which means we could
7326                  * possibly have excluded extents on this block group.
7327                  */
7328                 if (block_group->cached == BTRFS_CACHE_NO)
7329                         free_excluded_extents(info->extent_root, block_group);
7330
7331                 btrfs_remove_free_space_cache(block_group);
7332                 btrfs_put_block_group(block_group);
7333
7334                 spin_lock(&info->block_group_cache_lock);
7335         }
7336         spin_unlock(&info->block_group_cache_lock);
7337
7338         /* now that all the block groups are freed, go through and
7339          * free all the space_info structs.  This is only called during
7340          * the final stages of unmount, and so we know nobody is
7341          * using them.  We call synchronize_rcu() once before we start,
7342          * just to be on the safe side.
7343          */
7344         synchronize_rcu();
7345
7346         release_global_block_rsv(info);
7347
7348         while (!list_empty(&info->space_info)) {
7349                 space_info = list_entry(info->space_info.next,
7350                                         struct btrfs_space_info,
7351                                         list);
7352                 if (space_info->bytes_pinned > 0 ||
7353                     space_info->bytes_reserved > 0 ||
7354                     space_info->bytes_may_use > 0) {
7355                         WARN_ON(1);
7356                         dump_space_info(space_info, 0, 0);
7357                 }
7358                 list_del(&space_info->list);
7359                 kfree(space_info);
7360         }
7361         return 0;
7362 }
7363
7364 static void __link_block_group(struct btrfs_space_info *space_info,
7365                                struct btrfs_block_group_cache *cache)
7366 {
7367         int index = get_block_group_index(cache);
7368
7369         down_write(&space_info->groups_sem);
7370         list_add_tail(&cache->list, &space_info->block_groups[index]);
7371         up_write(&space_info->groups_sem);
7372 }
7373
7374 int btrfs_read_block_groups(struct btrfs_root *root)
7375 {
7376         struct btrfs_path *path;
7377         int ret;
7378         struct btrfs_block_group_cache *cache;
7379         struct btrfs_fs_info *info = root->fs_info;
7380         struct btrfs_space_info *space_info;
7381         struct btrfs_key key;
7382         struct btrfs_key found_key;
7383         struct extent_buffer *leaf;
7384         int need_clear = 0;
7385         u64 cache_gen;
7386
7387         root = info->extent_root;
7388         key.objectid = 0;
7389         key.offset = 0;
7390         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7391         path = btrfs_alloc_path();
7392         if (!path)
7393                 return -ENOMEM;
7394         path->reada = 1;
7395
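        /*
         * Descriptive note (added): if the space cache generation recorded
         * in the super block doesn't match the filesystem generation, the
         * on-disk free space cache is stale, so every group is marked
         * BTRFS_DC_CLEAR and rebuilt; the clear_cache mount option forces
         * the same thing.
         */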
7396         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7397         if (btrfs_test_opt(root, SPACE_CACHE) &&
7398             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7399                 need_clear = 1;
7400         if (btrfs_test_opt(root, CLEAR_CACHE))
7401                 need_clear = 1;
7402
7403         while (1) {
7404                 ret = find_first_block_group(root, path, &key);
7405                 if (ret > 0)
7406                         break;
7407                 if (ret != 0)
7408                         goto error;
7409                 leaf = path->nodes[0];
7410                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7411                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7412                 if (!cache) {
7413                         ret = -ENOMEM;
7414                         goto error;
7415                 }
7416                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7417                                                 GFP_NOFS);
7418                 if (!cache->free_space_ctl) {
7419                         kfree(cache);
7420                         ret = -ENOMEM;
7421                         goto error;
7422                 }
7423
7424                 atomic_set(&cache->count, 1);
7425                 spin_lock_init(&cache->lock);
7426                 cache->fs_info = info;
7427                 INIT_LIST_HEAD(&cache->list);
7428                 INIT_LIST_HEAD(&cache->cluster_list);
7429
7430                 if (need_clear)
7431                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7432
7433                 read_extent_buffer(leaf, &cache->item,
7434                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7435                                    sizeof(cache->item));
7436                 memcpy(&cache->key, &found_key, sizeof(found_key));
7437
7438                 key.objectid = found_key.objectid + found_key.offset;
7439                 btrfs_release_path(path);
7440                 cache->flags = btrfs_block_group_flags(&cache->item);
7441                 cache->sectorsize = root->sectorsize;
7442
7443                 btrfs_init_free_space_ctl(cache);
7444
7445                 /*
7446                  * We need to exclude the super stripes now so that the space
7447                  * info has super bytes accounted for, otherwise we'll think
7448                  * we have more space than we actually do.
7449                  */
7450                 exclude_super_stripes(root, cache);
7451
7452                 /*
7453                  * check for two cases, either we are full, and therefore
7454                  * don't need to bother with the caching work since we won't
7455                  * find any space, or we are empty, and we can just add all
7456                  * the space in and be done with it.  This saves us a lot of
7457                  * time, particularly in the full case.
7458                  */
7459                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7460                         cache->last_byte_to_unpin = (u64)-1;
7461                         cache->cached = BTRFS_CACHE_FINISHED;
7462                         free_excluded_extents(root, cache);
7463                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7464                         cache->last_byte_to_unpin = (u64)-1;
7465                         cache->cached = BTRFS_CACHE_FINISHED;
7466                         add_new_free_space(cache, root->fs_info,
7467                                            found_key.objectid,
7468                                            found_key.objectid +
7469                                            found_key.offset);
7470                         free_excluded_extents(root, cache);
7471                 }
7472
7473                 ret = update_space_info(info, cache->flags, found_key.offset,
7474                                         btrfs_block_group_used(&cache->item),
7475                                         &space_info);
7476                 BUG_ON(ret);
7477                 cache->space_info = space_info;
7478                 spin_lock(&cache->space_info->lock);
7479                 cache->space_info->bytes_readonly += cache->bytes_super;
7480                 spin_unlock(&cache->space_info->lock);
7481
7482                 __link_block_group(space_info, cache);
7483
7484                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7485                 BUG_ON(ret);
7486
7487                 set_avail_alloc_bits(root->fs_info, cache->flags);
7488                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7489                         set_block_group_ro(cache, 1);
7490         }
7491
7492         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7493                 if (!(get_alloc_profile(root, space_info->flags) &
7494                       (BTRFS_BLOCK_GROUP_RAID10 |
7495                        BTRFS_BLOCK_GROUP_RAID1 |
7496                        BTRFS_BLOCK_GROUP_DUP)))
7497                         continue;
7498                 /*
7499                  * avoid allocating from un-mirrored block groups (raid0 and
7500                  * single, list indexes 3 and 4) if there are mirrored ones.
7501                  */
7502                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7503                         set_block_group_ro(cache, 1);
7504                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7505                         set_block_group_ro(cache, 1);
7506         }
7507
7508         init_global_block_rsv(info);
7509         ret = 0;
7510 error:
7511         btrfs_free_path(path);
7512         return ret;
7513 }
7514
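/*
 * Descriptive note (added): create the in-memory and on-disk state for a
 * freshly allocated chunk: the block group cache entry, its space_info
 * accounting, and the BLOCK_GROUP_ITEM in the extent tree.
 */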
7515 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7516                            struct btrfs_root *root, u64 bytes_used,
7517                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7518                            u64 size)
7519 {
7520         int ret;
7521         struct btrfs_root *extent_root;
7522         struct btrfs_block_group_cache *cache;
7523
7524         extent_root = root->fs_info->extent_root;
7525
7526         root->fs_info->last_trans_log_full_commit = trans->transid;
7527
7528         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7529         if (!cache)
7530                 return -ENOMEM;
7531         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7532                                         GFP_NOFS);
7533         if (!cache->free_space_ctl) {
7534                 kfree(cache);
7535                 return -ENOMEM;
7536         }
7537
7538         cache->key.objectid = chunk_offset;
7539         cache->key.offset = size;
7540         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7541         cache->sectorsize = root->sectorsize;
7542         cache->fs_info = root->fs_info;
7543
7544         atomic_set(&cache->count, 1);
7545         spin_lock_init(&cache->lock);
7546         INIT_LIST_HEAD(&cache->list);
7547         INIT_LIST_HEAD(&cache->cluster_list);
7548
7549         btrfs_init_free_space_ctl(cache);
7550
7551         btrfs_set_block_group_used(&cache->item, bytes_used);
7552         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7553         cache->flags = type;
7554         btrfs_set_block_group_flags(&cache->item, type);
7555
7556         cache->last_byte_to_unpin = (u64)-1;
7557         cache->cached = BTRFS_CACHE_FINISHED;
7558         exclude_super_stripes(root, cache);
7559
7560         add_new_free_space(cache, root->fs_info, chunk_offset,
7561                            chunk_offset + size);
7562
7563         free_excluded_extents(root, cache);
7564
7565         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7566                                 &cache->space_info);
7567         BUG_ON(ret);
7568         update_global_block_rsv(root->fs_info);
7569
7570         spin_lock(&cache->space_info->lock);
7571         cache->space_info->bytes_readonly += cache->bytes_super;
7572         spin_unlock(&cache->space_info->lock);
7573
7574         __link_block_group(cache->space_info, cache);
7575
7576         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7577         BUG_ON(ret);
7578
7579         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7580                                 sizeof(cache->item));
7581         BUG_ON(ret);
7582
7583         set_avail_alloc_bits(extent_root->fs_info, type);
7584
7585         return 0;
7586 }
7587
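/*
 * Descriptive note (added): inverse of set_avail_alloc_bits(); drops the
 * extended profile bits for a type once the last block group with that
 * profile is gone (see the list_empty() check in the caller below).
 */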
7588 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7589 {
7590         u64 extra_flags = chunk_to_extended(flags) &
7591                                 BTRFS_EXTENDED_PROFILE_MASK;
7592
7593         if (flags & BTRFS_BLOCK_GROUP_DATA)
7594                 fs_info->avail_data_alloc_bits &= ~extra_flags;
7595         if (flags & BTRFS_BLOCK_GROUP_METADATA)
7596                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7597         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7598                 fs_info->avail_system_alloc_bits &= ~extra_flags;
7599 }
7600
7601 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7602                              struct btrfs_root *root, u64 group_start)
7603 {
7604         struct btrfs_path *path;
7605         struct btrfs_block_group_cache *block_group;
7606         struct btrfs_free_cluster *cluster;
7607         struct btrfs_root *tree_root = root->fs_info->tree_root;
7608         struct btrfs_key key;
7609         struct inode *inode;
7610         int ret;
7611         int index;
7612         int factor;
7613
7614         root = root->fs_info->extent_root;
7615
7616         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7617         BUG_ON(!block_group);
7618         BUG_ON(!block_group->ro);
7619
7620         /*
7621          * Free the reserved super bytes from this block group before
7622          * removing it.
7623          */
7624         free_excluded_extents(root, block_group);
7625
7626         memcpy(&key, &block_group->key, sizeof(key));
7627         index = get_block_group_index(block_group);
7628         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7629                                   BTRFS_BLOCK_GROUP_RAID1 |
7630                                   BTRFS_BLOCK_GROUP_RAID10))
7631                 factor = 2;
7632         else
7633                 factor = 1;
7634
7635         /* make sure this block group isn't part of an allocation cluster */
7636         cluster = &root->fs_info->data_alloc_cluster;
7637         spin_lock(&cluster->refill_lock);
7638         btrfs_return_cluster_to_free_space(block_group, cluster);
7639         spin_unlock(&cluster->refill_lock);
7640
7641         /*
7642          * make sure this block group isn't part of a metadata
7643          * allocation cluster
7644          */
7645         cluster = &root->fs_info->meta_alloc_cluster;
7646         spin_lock(&cluster->refill_lock);
7647         btrfs_return_cluster_to_free_space(block_group, cluster);
7648         spin_unlock(&cluster->refill_lock);
7649
7650         path = btrfs_alloc_path();
7651         if (!path) {
7652                 ret = -ENOMEM;
7653                 goto out;
7654         }
7655
7656         inode = lookup_free_space_inode(tree_root, block_group, path);
7657         if (!IS_ERR(inode)) {
7658                 ret = btrfs_orphan_add(trans, inode);
7659                 BUG_ON(ret);
7660                 clear_nlink(inode);
7661                 /* One for the block group's ref */
7662                 spin_lock(&block_group->lock);
7663                 if (block_group->iref) {
7664                         block_group->iref = 0;
7665                         block_group->inode = NULL;
7666                         spin_unlock(&block_group->lock);
7667                         iput(inode);
7668                 } else {
7669                         spin_unlock(&block_group->lock);
7670                 }
7671                 /* One for our lookup ref */
7672                 btrfs_add_delayed_iput(inode);
7673         }
7674
7675         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7676         key.offset = block_group->key.objectid;
7677         key.type = 0;
7678
7679         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7680         if (ret < 0)
7681                 goto out;
7682         if (ret > 0)
7683                 btrfs_release_path(path);
7684         if (ret == 0) {
7685                 ret = btrfs_del_item(trans, tree_root, path);
7686                 if (ret)
7687                         goto out;
7688                 btrfs_release_path(path);
7689         }
7690
7691         spin_lock(&root->fs_info->block_group_cache_lock);
7692         rb_erase(&block_group->cache_node,
7693                  &root->fs_info->block_group_cache_tree);
7694         spin_unlock(&root->fs_info->block_group_cache_lock);
7695
7696         down_write(&block_group->space_info->groups_sem);
7697         /*
7698          * we must use list_del_init so people can check to see if they
7699          * are still on the list after taking the semaphore
7700          */
7701         list_del_init(&block_group->list);
7702         if (list_empty(&block_group->space_info->block_groups[index]))
7703                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
7704         up_write(&block_group->space_info->groups_sem);
7705
7706         if (block_group->cached == BTRFS_CACHE_STARTED)
7707                 wait_block_group_cache_done(block_group);
7708
7709         btrfs_remove_free_space_cache(block_group);
7710
7711         spin_lock(&block_group->space_info->lock);
7712         block_group->space_info->total_bytes -= block_group->key.offset;
7713         block_group->space_info->bytes_readonly -= block_group->key.offset;
7714         block_group->space_info->disk_total -= block_group->key.offset * factor;
7715         spin_unlock(&block_group->space_info->lock);
7716
7717         memcpy(&key, &block_group->key, sizeof(key));
7718
7719         btrfs_clear_space_info_full(root->fs_info);
7720
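        /*
         * Descriptive note (added): two puts are needed here, one for the
         * lookup reference taken at the top of this function and one for
         * the reference the rb-tree held before the group was erased
         * from it above.
         */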
7721         btrfs_put_block_group(block_group);
7722         btrfs_put_block_group(block_group);
7723
7724         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7725         if (ret > 0)
7726                 ret = -EIO;
7727         if (ret < 0)
7728                 goto out;
7729
7730         ret = btrfs_del_item(trans, root, path);
7731 out:
7732         btrfs_free_path(path);
7733         return ret;
7734 }
7735
7736 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7737 {
7738         struct btrfs_space_info *space_info;
7739         struct btrfs_super_block *disk_super;
7740         u64 features;
7741         u64 flags;
7742         int mixed = 0;
7743         int ret;
7744
7745         disk_super = fs_info->super_copy;
7746         if (!btrfs_super_root(disk_super))
7747                 return 1;
7748
7749         features = btrfs_super_incompat_flags(disk_super);
7750         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7751                 mixed = 1;
7752
7753         flags = BTRFS_BLOCK_GROUP_SYSTEM;
7754         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7755         if (ret)
7756                 goto out;
7757
7758         if (mixed) {
7759                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7760                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7761         } else {
7762                 flags = BTRFS_BLOCK_GROUP_METADATA;
7763                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7764                 if (ret)
7765                         goto out;
7766
7767                 flags = BTRFS_BLOCK_GROUP_DATA;
7768                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7769         }
7770 out:
7771         return ret;
7772 }
7773
7774 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7775 {
7776         return unpin_extent_range(root, start, end);
7777 }
7778
7779 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7780                                u64 num_bytes, u64 *actual_bytes)
7781 {
7782         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7783 }
7784
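/*
 * Descriptive note (added): FITRIM backend.  Walks every block group that
 * overlaps the requested range, trims its free space, and reports the
 * total number of bytes trimmed through range->len.
 */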
7785 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7786 {
7787         struct btrfs_fs_info *fs_info = root->fs_info;
7788         struct btrfs_block_group_cache *cache = NULL;
7789         u64 group_trimmed;
7790         u64 start;
7791         u64 end;
7792         u64 trimmed = 0;
7793         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
7794         int ret = 0;
7795
7796         /*
7797          * try to trim all FS space; our first block group may start at a non-zero offset.
7798          */
7799         if (range->len == total_bytes)
7800                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
7801         else
7802                 cache = btrfs_lookup_block_group(fs_info, range->start);
7803
7804         while (cache) {
7805                 if (cache->key.objectid >= (range->start + range->len)) {
7806                         btrfs_put_block_group(cache);
7807                         break;
7808                 }
7809
7810                 start = max(range->start, cache->key.objectid);
7811                 end = min(range->start + range->len,
7812                                 cache->key.objectid + cache->key.offset);
7813
7814                 if (end - start >= range->minlen) {
7815                         if (!block_group_cache_done(cache)) {
7816                                 ret = cache_block_group(cache, NULL, root, 0);
7817                                 if (!ret)
7818                                         wait_block_group_cache_done(cache);
7819                         }
7820                         ret = btrfs_trim_block_group(cache,
7821                                                      &group_trimmed,
7822                                                      start,
7823                                                      end,
7824                                                      range->minlen);
7825
7826                         trimmed += group_trimmed;
7827                         if (ret) {
7828                                 btrfs_put_block_group(cache);
7829                                 break;
7830                         }
7831                 }
7832
7833                 cache = next_block_group(fs_info->tree_root, cache);
7834         }
7835
7836         range->len = trimmed;
7837         return ret;
7838 }