/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_reserved_extents(struct btrfs_root *root,
                                   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                atomic_inc(&ret->count);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
/*
 * We always set EXTENT_LOCKED for the super mirror extents so we don't
 * overwrite them, so those bits need to be unset.  Also, if we are unmounting
 * with pinned extents still sitting there because a block group was still
 * caching, we need to clear those now, since we are done.
 */
void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
{
        u64 start, end, last = 0;
        int ret;

        while (1) {
                ret = find_first_extent_bit(&info->pinned_extents, last,
                                            &start, &end,
                                            EXTENT_LOCKED|EXTENT_DIRTY);
                if (ret)
                        break;

                clear_extent_bits(&info->pinned_extents, start, end,
                                  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
                last = end+1;
        }
}

static int remove_sb_from_cache(struct btrfs_root *root,
                                struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);
                while (nr--) {
                        try_lock_extent(&fs_info->pinned_extents,
                                        logical[nr],
                                        logical[nr] + stripe_len - 1, GFP_NOFS);
                }
                kfree(logical);
        }

        return 0;
}
/*
 * This is only called by the caching kthread.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(&info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY|EXTENT_LOCKED);
                if (ret)
                        break;

                if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

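/*
 * Scan the extent tree on behalf of a single block group: every gap
 * between EXTENT_ITEM keys inside the group becomes free space, waiters
 * on caching_q are woken every couple of megabytes found, and the group
 * is marked BTRFS_CACHE_FINISHED once the scan completes.
 */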
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        u64 last = 0;
        struct btrfs_path *path;
        int ret = 0;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        int slot;
        u64 total_found = 0;

        BUG_ON(!fs_info);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        atomic_inc(&block_group->space_info->caching_threads);
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
again:
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        while (1) {
                smp_mb();
                if (block_group->fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(fs_info->extent_root, path);
                        if (ret < 0)
                                goto err;
                        else if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_transaction_in_commit(fs_info)) {
                                leaf = path->nodes[0];

                                /* this shouldn't happen, but if the
                                 * leaf is empty just move on.
                                 */
                                if (btrfs_header_nritems(leaf) == 0)
                                        break;
                                /*
                                 * we need to copy the key out so that
                                 * we are sure the next search advances
                                 * us forward in the btree.
                                 */
                                btrfs_item_key_to_cpu(leaf, &key, 0);
                                btrfs_release_path(fs_info->extent_root, path);
                                up_read(&fs_info->extent_commit_sem);
                                schedule_timeout(1);
                                goto again;
                        }

                        continue;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
                        goto next;

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;
                }

                if (total_found > (1024 * 1024 * 2)) {
                        total_found = 0;
                        wake_up(&block_group->caching_q);
                }
next:
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);

        spin_lock(&block_group->lock);
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);
        atomic_dec(&block_group->space_info->caching_threads);
        wake_up(&block_group->caching_q);

        return 0;
}

static int cache_block_group(struct btrfs_block_group_cache *cache)
{
        struct task_struct *tsk;
        int ret = 0;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                return ret;
        }
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

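/* scale num by factor/10, e.g. div_factor(1024, 9) == 921 */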
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used. Their major
 * shortcoming is overhead: every time a tree block gets COWed, we have
 * to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts. The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for implicit back refs is the
 * objectid of the block's owner tree. The key offset for full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
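
/*
 * For example, using the key layouts set up in lookup_extent_data_ref()
 * and lookup_tree_block_ref() below: a data extent at bytenr B that is
 * referenced by inode 257 at file offset 0 in subvolume 5 carries the
 * implicit back ref key
 *
 *     (B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared parent leaf at
 * bytenr P instead carries the full back ref key
 *
 *     (B, BTRFS_SHARED_DATA_REF_KEY, P)
 */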

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

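/*
 * Hash (root_objectid, owner, offset) into the 64-bit key offset used
 * by implicit data back refs.  Two independent crc32c sums are folded
 * together; note that the high crc is shifted by 31, not 32, so the
 * two halves overlap by one bit.
 */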
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

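/*
 * Pick the back ref key type for an extent: tree blocks (owner below
 * BTRFS_FIRST_FREE_OBJECTID) get block ref keys, data extents get data
 * ref keys; a nonzero parent selects the shared (full) variant.
 */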
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

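/*
 * Return (in *key) the key that follows the current path position,
 * walking up the path until a node with a following slot is found;
 * returns 1 if the path already points at the last key in the tree.
 */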
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
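/*
 * The extent item layout scanned below is:
 *
 *   [btrfs_extent_item]
 *   [btrfs_tree_block_info]        (tree blocks only)
 *   [inline ref 0][inline ref 1]...
 *
 * so for tree blocks, ptr is advanced past the block info before the
 * inline refs are walked.
 */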
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;
        } else
                extra_size = -1;
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        BUG_ON(ret);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                if (!insert) {
                        err = -ENOENT;
                        goto out;
                }
                ret = convert_extent_item_v0(trans, root, path, owner,
                                             extra_size);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        WARN_ON(ptr > end);
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        err = -EAGAIN;
                        goto out;
                }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        err = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
}

/*
 * helper to add new inline back ref
 */
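/*
 * The item is first grown by the size of the new ref with
 * btrfs_extend_item(), then any inline refs at or after the insertion
 * point are shifted up with memmove_extent_buffer() so the new ref
 * lands in the sort position found by lookup_inline_extent_backref().
 */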
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path,
                                struct btrfs_extent_inline_ref *iref,
                                u64 parent, u64 root_objectid,
                                u64 owner, u64 offset, int refs_to_add,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        unsigned long ptr;
        unsigned long end;
        unsigned long item_offset;
        u64 refs;
        int size;
        int type;
        int ret;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        item_offset = (unsigned long)iref - (unsigned long)ei;

        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);

        ret = btrfs_extend_item(trans, root, path, size);
        BUG_ON(ret);

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        refs += refs_to_add;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        ptr = (unsigned long)ei + item_offset;
        end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
        if (ptr < end - size)
                memmove_extent_buffer(leaf, ptr + size, ptr,
                                      end - size - ptr);

        iref = (struct btrfs_extent_inline_ref *)ptr;
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                struct btrfs_extent_data_ref *dref;
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                struct btrfs_shared_data_ref *sref;
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else {
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        int ret;

        ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
                                           bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 0);
        if (ret != -ENOENT)
                return ret;

        btrfs_release_path(root, path);
        *ref_ret = NULL;

        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
                                            root_objectid);
        } else {
                ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
                                             root_objectid, owner, offset);
        }
        return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 int refs_to_mod,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_data_ref *dref = NULL;
        struct btrfs_shared_data_ref *sref = NULL;
        unsigned long ptr;
        unsigned long end;
        u32 item_size;
        int size;
        int type;
        int ret;
        u64 refs;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
        refs += refs_to_mod;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        type = btrfs_extent_inline_ref_type(leaf, iref);

        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                refs = btrfs_extent_data_ref_count(leaf, dref);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                refs = btrfs_shared_data_ref_count(leaf, sref);
        } else {
                refs = 1;
                BUG_ON(refs_to_mod != -1);
        }

        BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
        refs += refs_to_mod;

        if (refs > 0) {
                if (type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, dref, refs);
                else
                        btrfs_set_shared_data_ref_count(leaf, sref, refs);
        } else {
                size = btrfs_extent_inline_ref_size(type);
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                ptr = (unsigned long)iref;
                end = (unsigned long)ei + item_size;
                if (ptr + size < end)
                        memmove_extent_buffer(leaf, ptr, ptr + size,
                                              end - ptr - size);
1435                 item_size -= size;
1436                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1437                 BUG_ON(ret);
1438         }
1439         btrfs_mark_buffer_dirty(leaf);
1440         return 0;
1441 }
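/*
 * Worked example (editor's addition, using the sizes assumed above): an
 * extent item carrying two inline BTRFS_EXTENT_DATA_REF_KEY refs is
 * 24 + 29 + 29 = 82 bytes.  Dropping the first ref memmoves the trailing
 * 82 - 24 - 29 = 29 bytes down over it and truncates the item to 53 bytes.
 */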
1442
1443 static noinline_for_stack
1444 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1445                                  struct btrfs_root *root,
1446                                  struct btrfs_path *path,
1447                                  u64 bytenr, u64 num_bytes, u64 parent,
1448                                  u64 root_objectid, u64 owner,
1449                                  u64 offset, int refs_to_add,
1450                                  struct btrfs_delayed_extent_op *extent_op)
1451 {
1452         struct btrfs_extent_inline_ref *iref;
1453         int ret;
1454
1455         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1456                                            bytenr, num_bytes, parent,
1457                                            root_objectid, owner, offset, 1);
1458         if (ret == 0) {
1459                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1460                 ret = update_inline_extent_backref(trans, root, path, iref,
1461                                                    refs_to_add, extent_op);
1462         } else if (ret == -ENOENT) {
1463                 ret = setup_inline_extent_backref(trans, root, path, iref,
1464                                                   parent, root_objectid,
1465                                                   owner, offset, refs_to_add,
1466                                                   extent_op);
1467         }
1468         return ret;
1469 }
1470
1471 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1472                                  struct btrfs_root *root,
1473                                  struct btrfs_path *path,
1474                                  u64 bytenr, u64 parent, u64 root_objectid,
1475                                  u64 owner, u64 offset, int refs_to_add)
1476 {
1477         int ret;
1478         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1479                 BUG_ON(refs_to_add != 1);
1480                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1481                                             parent, root_objectid);
1482         } else {
1483                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1484                                              parent, root_objectid,
1485                                              owner, offset, refs_to_add);
1486         }
1487         return ret;
1488 }
1489
1490 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1491                                  struct btrfs_root *root,
1492                                  struct btrfs_path *path,
1493                                  struct btrfs_extent_inline_ref *iref,
1494                                  int refs_to_drop, int is_data)
1495 {
1496         int ret;
1497
1498         BUG_ON(!is_data && refs_to_drop != 1);
1499         if (iref) {
1500                 ret = update_inline_extent_backref(trans, root, path, iref,
1501                                                    -refs_to_drop, NULL);
1502         } else if (is_data) {
1503                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1504         } else {
1505                 ret = btrfs_del_item(trans, root, path);
1506         }
1507         return ret;
1508 }
1509
1510 #ifdef BIO_RW_DISCARD
1511 static void btrfs_issue_discard(struct block_device *bdev,
1512                                 u64 start, u64 len)
1513 {
1514         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
1515 }
1516 #endif
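/*
 * Editor's note: blkdev_issue_discard() works in 512-byte sector units,
 * hence the >> 9 conversions above; e.g. start = 1MiB, len = 128KiB maps
 * to sector 2048 with a length of 256 sectors.
 */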
1517
1518 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1519                                 u64 num_bytes)
1520 {
1521 #ifdef BIO_RW_DISCARD
1522         int ret;
1523         u64 map_length = num_bytes;
1524         struct btrfs_multi_bio *multi = NULL;
1525
1526         /* Tell the block device(s) that the sectors can be discarded */
1527         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1528                               bytenr, &map_length, &multi, 0);
1529         if (!ret) {
1530                 struct btrfs_bio_stripe *stripe = multi->stripes;
1531                 int i;
1532
1533                 if (map_length > num_bytes)
1534                         map_length = num_bytes;
1535
1536                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1537                         btrfs_issue_discard(stripe->dev->bdev,
1538                                             stripe->physical,
1539                                             map_length);
1540                 }
1541                 kfree(multi);
1542         }
1543
1544         return ret;
1545 #else
1546         return 0;
1547 #endif
1548 }
1549
1550 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1551                          struct btrfs_root *root,
1552                          u64 bytenr, u64 num_bytes, u64 parent,
1553                          u64 root_objectid, u64 owner, u64 offset)
1554 {
1555         int ret;
1556         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1557                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1558
1559         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1560                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1561                                         parent, root_objectid, (int)owner,
1562                                         BTRFS_ADD_DELAYED_REF, NULL);
1563         } else {
1564                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1565                                         parent, root_objectid, owner, offset,
1566                                         BTRFS_ADD_DELAYED_REF, NULL);
1567         }
1568         return ret;
1569 }
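/*
 * Usage sketch (editor's addition, guarded out; 'disk_bytenr',
 * 'disk_num_bytes', 'ino' and 'file_offset' are hypothetical values a
 * caller would have at hand): queue one more reference on a data extent
 * that a file in 'root' now points to.  parent is 0 because the new ref
 * is keyed by root rather than shared through a specific tree block.
 */
#if 0
        ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
                                   0, root->root_key.objectid,
                                   ino, file_offset);
#endif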
1570
1571 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1572                                   struct btrfs_root *root,
1573                                   u64 bytenr, u64 num_bytes,
1574                                   u64 parent, u64 root_objectid,
1575                                   u64 owner, u64 offset, int refs_to_add,
1576                                   struct btrfs_delayed_extent_op *extent_op)
1577 {
1578         struct btrfs_path *path;
1579         struct extent_buffer *leaf;
1580         struct btrfs_extent_item *item;
1581         u64 refs;
1582         int ret;
1583         int err = 0;
1584
1585         path = btrfs_alloc_path();
1586         if (!path)
1587                 return -ENOMEM;
1588
1589         path->reada = 1;
1590         path->leave_spinning = 1;
1591         /* this will set up the path even if it fails to insert the back ref */
1592         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1593                                            path, bytenr, num_bytes, parent,
1594                                            root_objectid, owner, offset,
1595                                            refs_to_add, extent_op);
1596         if (ret == 0)
1597                 goto out;
1598
1599         if (ret != -EAGAIN) {
1600                 err = ret;
1601                 goto out;
1602         }
1603
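        /*
         * Editor's note: -EAGAIN means the new ref could not be inlined;
         * the path is left positioned on the extent item, so bump its ref
         * count here and insert a separate keyed backref item below.
         */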
1604         leaf = path->nodes[0];
1605         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1606         refs = btrfs_extent_refs(leaf, item);
1607         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1608         if (extent_op)
1609                 __run_delayed_extent_op(extent_op, leaf, item);
1610
1611         btrfs_mark_buffer_dirty(leaf);
1612         btrfs_release_path(root->fs_info->extent_root, path);
1613
1614         path->reada = 1;
1615         path->leave_spinning = 1;
1616
1617         /* now insert the actual backref */
1618         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1619                                     path, bytenr, parent, root_objectid,
1620                                     owner, offset, refs_to_add);
1621         BUG_ON(ret);
1622 out:
1623         btrfs_free_path(path);
1624         return err;
1625 }
1626
1627 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1628                                 struct btrfs_root *root,
1629                                 struct btrfs_delayed_ref_node *node,
1630                                 struct btrfs_delayed_extent_op *extent_op,
1631                                 int insert_reserved)
1632 {
1633         int ret = 0;
1634         struct btrfs_delayed_data_ref *ref;
1635         struct btrfs_key ins;
1636         u64 parent = 0;
1637         u64 ref_root = 0;
1638         u64 flags = 0;
1639
1640         ins.objectid = node->bytenr;
1641         ins.offset = node->num_bytes;
1642         ins.type = BTRFS_EXTENT_ITEM_KEY;
1643
1644         ref = btrfs_delayed_node_to_data_ref(node);
1645         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1646                 parent = ref->parent;
1647         else
1648                 ref_root = ref->root;
1649
1650         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1651                 if (extent_op) {
1652                         BUG_ON(extent_op->update_key);
1653                         flags |= extent_op->flags_to_set;
1654                 }
1655                 ret = alloc_reserved_file_extent(trans, root,
1656                                                  parent, ref_root, flags,
1657                                                  ref->objectid, ref->offset,
1658                                                  &ins, node->ref_mod);
1659                 update_reserved_extents(root, ins.objectid, ins.offset, 0);
1660         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1661                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1662                                              node->num_bytes, parent,
1663                                              ref_root, ref->objectid,
1664                                              ref->offset, node->ref_mod,
1665                                              extent_op);
1666         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1667                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1668                                           node->num_bytes, parent,
1669                                           ref_root, ref->objectid,
1670                                           ref->offset, node->ref_mod,
1671                                           extent_op);
1672         } else {
1673                 BUG();
1674         }
1675         return ret;
1676 }
1677
1678 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1679                                     struct extent_buffer *leaf,
1680                                     struct btrfs_extent_item *ei)
1681 {
1682         u64 flags = btrfs_extent_flags(leaf, ei);
1683         if (extent_op->update_flags) {
1684                 flags |= extent_op->flags_to_set;
1685                 btrfs_set_extent_flags(leaf, ei, flags);
1686         }
1687
1688         if (extent_op->update_key) {
1689                 struct btrfs_tree_block_info *bi;
1690                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1691                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1692                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1693         }
1694 }
1695
1696 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1697                                  struct btrfs_root *root,
1698                                  struct btrfs_delayed_ref_node *node,
1699                                  struct btrfs_delayed_extent_op *extent_op)
1700 {
1701         struct btrfs_key key;
1702         struct btrfs_path *path;
1703         struct btrfs_extent_item *ei;
1704         struct extent_buffer *leaf;
1705         u32 item_size;
1706         int ret;
1707         int err = 0;
1708
1709         path = btrfs_alloc_path();
1710         if (!path)
1711                 return -ENOMEM;
1712
1713         key.objectid = node->bytenr;
1714         key.type = BTRFS_EXTENT_ITEM_KEY;
1715         key.offset = node->num_bytes;
1716
1717         path->reada = 1;
1718         path->leave_spinning = 1;
1719         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1720                                 path, 0, 1);
1721         if (ret < 0) {
1722                 err = ret;
1723                 goto out;
1724         }
1725         if (ret > 0) {
1726                 err = -EIO;
1727                 goto out;
1728         }
1729
1730         leaf = path->nodes[0];
1731         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1732 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1733         if (item_size < sizeof(*ei)) {
1734                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1735                                              path, (u64)-1, 0);
1736                 if (ret < 0) {
1737                         err = ret;
1738                         goto out;
1739                 }
1740                 leaf = path->nodes[0];
1741                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1742         }
1743 #endif
1744         BUG_ON(item_size < sizeof(*ei));
1745         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1746         __run_delayed_extent_op(extent_op, leaf, ei);
1747
1748         btrfs_mark_buffer_dirty(leaf);
1749 out:
1750         btrfs_free_path(path);
1751         return err;
1752 }
1753
1754 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1755                                 struct btrfs_root *root,
1756                                 struct btrfs_delayed_ref_node *node,
1757                                 struct btrfs_delayed_extent_op *extent_op,
1758                                 int insert_reserved)
1759 {
1760         int ret = 0;
1761         struct btrfs_delayed_tree_ref *ref;
1762         struct btrfs_key ins;
1763         u64 parent = 0;
1764         u64 ref_root = 0;
1765
1766         ins.objectid = node->bytenr;
1767         ins.offset = node->num_bytes;
1768         ins.type = BTRFS_EXTENT_ITEM_KEY;
1769
1770         ref = btrfs_delayed_node_to_tree_ref(node);
1771         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1772                 parent = ref->parent;
1773         else
1774                 ref_root = ref->root;
1775
1776         BUG_ON(node->ref_mod != 1);
1777         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1778                 BUG_ON(!extent_op || !extent_op->update_flags ||
1779                        !extent_op->update_key);
1780                 ret = alloc_reserved_tree_block(trans, root,
1781                                                 parent, ref_root,
1782                                                 extent_op->flags_to_set,
1783                                                 &extent_op->key,
1784                                                 ref->level, &ins);
1785                 update_reserved_extents(root, ins.objectid, ins.offset, 0);
1786         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1787                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1788                                              node->num_bytes, parent, ref_root,
1789                                              ref->level, 0, 1, extent_op);
1790         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1791                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1792                                           node->num_bytes, parent, ref_root,
1793                                           ref->level, 0, 1, extent_op);
1794         } else {
1795                 BUG();
1796         }
1797         return ret;
1798 }
1799
1800
1801 /* helper function to actually process a single delayed ref entry */
1802 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1803                                struct btrfs_root *root,
1804                                struct btrfs_delayed_ref_node *node,
1805                                struct btrfs_delayed_extent_op *extent_op,
1806                                int insert_reserved)
1807 {
1808         int ret;
1809         if (btrfs_delayed_ref_is_head(node)) {
1810                 struct btrfs_delayed_ref_head *head;
1811                 /*
1812                  * we've hit the end of the chain and we were supposed
1813                  * to insert this extent into the tree.  But, it got
1814                  * deleted before we ever needed to insert it, so all
1815                  * we have to do is clean up the accounting
1816                  */
1817                 BUG_ON(extent_op);
1818                 head = btrfs_delayed_node_to_head(node);
1819                 if (insert_reserved) {
1820                         if (head->is_data) {
1821                                 ret = btrfs_del_csums(trans, root,
1822                                                       node->bytenr,
1823                                                       node->num_bytes);
1824                                 BUG_ON(ret);
1825                         }
1826                         btrfs_update_pinned_extents(root, node->bytenr,
1827                                                     node->num_bytes, 1);
1828                         update_reserved_extents(root, node->bytenr,
1829                                                 node->num_bytes, 0);
1830                 }
1831                 mutex_unlock(&head->mutex);
1832                 return 0;
1833         }
1834
1835         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1836             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1837                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1838                                            insert_reserved);
1839         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1840                  node->type == BTRFS_SHARED_DATA_REF_KEY)
1841                 ret = run_delayed_data_ref(trans, root, node, extent_op,
1842                                            insert_reserved);
1843         else
1844                 BUG();
1845         return ret;
1846 }
1847
1848 static noinline struct btrfs_delayed_ref_node *
1849 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1850 {
1851         struct rb_node *node;
1852         struct btrfs_delayed_ref_node *ref;
1853         int action = BTRFS_ADD_DELAYED_REF;
1854 again:
1855         /*
1856          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
1857          * this prevents the ref count from going down to zero while
1858          * there are still pending delayed refs.
1859          */
1860         node = rb_prev(&head->node.rb_node);
1861         while (1) {
1862                 if (!node)
1863                         break;
1864                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1865                                 rb_node);
1866                 if (ref->bytenr != head->node.bytenr)
1867                         break;
1868                 if (ref->action == action)
1869                         return ref;
1870                 node = rb_prev(node);
1871         }
1872         if (action == BTRFS_ADD_DELAYED_REF) {
1873                 action = BTRFS_DROP_DELAYED_REF;
1874                 goto again;
1875         }
1876         return NULL;
1877 }
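/*
 * Worked example (editor's addition): if a head has a pending DROP(-1)
 * and a pending ADD(+1) and the extent currently holds one reference,
 * running the DROP first would momentarily drive the count to zero and
 * free the extent; picking the ADD first keeps the count positive the
 * whole way through, which is why BTRFS_ADD_DELAYED_REF wins above.
 */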
1878
1879 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1880                                        struct btrfs_root *root,
1881                                        struct list_head *cluster)
1882 {
1883         struct btrfs_delayed_ref_root *delayed_refs;
1884         struct btrfs_delayed_ref_node *ref;
1885         struct btrfs_delayed_ref_head *locked_ref = NULL;
1886         struct btrfs_delayed_extent_op *extent_op;
1887         int ret;
1888         int count = 0;
1889         int must_insert_reserved = 0;
1890
1891         delayed_refs = &trans->transaction->delayed_refs;
1892         while (1) {
1893                 if (!locked_ref) {
1894                         /* pick a new head ref from the cluster list */
1895                         if (list_empty(cluster))
1896                                 break;
1897
1898                         locked_ref = list_entry(cluster->next,
1899                                      struct btrfs_delayed_ref_head, cluster);
1900
1901                         /* grab the lock that says we are going to process
1902                          * all the refs for this head */
1903                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
1904
1905                         /*
1906                          * we may have dropped the spin lock to get the head
1907                          * mutex lock, and that might have given someone else
1908                          * time to free the head.  If that's true, it has been
1909                          * removed from our list and we can move on.
1910                          */
1911                         if (ret == -EAGAIN) {
1912                                 locked_ref = NULL;
1913                                 count++;
1914                                 continue;
1915                         }
1916                 }
1917
1918                 /*
1919                  * record the must insert reserved flag before we
1920                  * drop the spin lock.
1921                  */
1922                 must_insert_reserved = locked_ref->must_insert_reserved;
1923                 locked_ref->must_insert_reserved = 0;
1924
1925                 extent_op = locked_ref->extent_op;
1926                 locked_ref->extent_op = NULL;
1927
1928                 /*
1929                  * locked_ref is the head node, so we have to go one
1930                  * node back for any delayed ref updates
1931                  */
1932                 ref = select_delayed_ref(locked_ref);
1933                 if (!ref) {
1934                         /* all delayed refs have been processed; go ahead
1935                          * and send the head node to run_one_delayed_ref,
1936                          * so that any accounting fixes can happen
1937                          */
1938                         ref = &locked_ref->node;
1939
1940                         if (extent_op && must_insert_reserved) {
1941                                 kfree(extent_op);
1942                                 extent_op = NULL;
1943                         }
1944
1945                         if (extent_op) {
1946                                 spin_unlock(&delayed_refs->lock);
1947
1948                                 ret = run_delayed_extent_op(trans, root,
1949                                                             ref, extent_op);
1950                                 BUG_ON(ret);
1951                                 kfree(extent_op);
1952
1953                                 cond_resched();
1954                                 spin_lock(&delayed_refs->lock);
1955                                 continue;
1956                         }
1957
1958                         list_del_init(&locked_ref->cluster);
1959                         locked_ref = NULL;
1960                 }
1961
1962                 ref->in_tree = 0;
1963                 rb_erase(&ref->rb_node, &delayed_refs->root);
1964                 delayed_refs->num_entries--;
1965
1966                 spin_unlock(&delayed_refs->lock);
1967
1968                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
1969                                           must_insert_reserved);
1970                 BUG_ON(ret);
1971
1972                 btrfs_put_delayed_ref(ref);
1973                 kfree(extent_op);
1974                 count++;
1975
1976                 cond_resched();
1977                 spin_lock(&delayed_refs->lock);
1978         }
1979         return count;
1980 }
1981
1982 /*
1983  * this starts processing the delayed reference count updates and
1984  * extent insertions we have queued up so far.  count can be
1985  * 0, which means to process everything in the tree at the start
1986  * of the run (but not newly added entries), or it can be some target
1987  * number you'd like to process.
1988  */
1989 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1990                            struct btrfs_root *root, unsigned long count)
1991 {
1992         struct rb_node *node;
1993         struct btrfs_delayed_ref_root *delayed_refs;
1994         struct btrfs_delayed_ref_node *ref;
1995         struct list_head cluster;
1996         int ret;
1997         int run_all = count == (unsigned long)-1;
1998         int run_most = 0;
1999
2000         if (root == root->fs_info->extent_root)
2001                 root = root->fs_info->tree_root;
2002
2003         delayed_refs = &trans->transaction->delayed_refs;
2004         INIT_LIST_HEAD(&cluster);
2005 again:
2006         spin_lock(&delayed_refs->lock);
2007         if (count == 0) {
2008                 count = delayed_refs->num_entries * 2;
2009                 run_most = 1;
2010         }
2011         while (1) {
2012                 if (!(run_all || run_most) &&
2013                     delayed_refs->num_heads_ready < 64)
2014                         break;
2015
2016                 /*
2017                  * go find something we can process in the rbtree.  We start at
2018                  * the beginning of the tree, and then build a cluster
2019                  * of refs to process starting at the first one we are able to
2020                  * lock
2021                  */
2022                 ret = btrfs_find_ref_cluster(trans, &cluster,
2023                                              delayed_refs->run_delayed_start);
2024                 if (ret)
2025                         break;
2026
2027                 ret = run_clustered_refs(trans, root, &cluster);
2028                 BUG_ON(ret < 0);
2029
2030                 count -= min_t(unsigned long, ret, count);
2031
2032                 if (count == 0)
2033                         break;
2034         }
2035
2036         if (run_all) {
2037                 node = rb_first(&delayed_refs->root);
2038                 if (!node)
2039                         goto out;
2040                 count = (unsigned long)-1;
2041
2042                 while (node) {
2043                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2044                                        rb_node);
2045                         if (btrfs_delayed_ref_is_head(ref)) {
2046                                 struct btrfs_delayed_ref_head *head;
2047
2048                                 head = btrfs_delayed_node_to_head(ref);
2049                                 atomic_inc(&ref->refs);
2050
2051                                 spin_unlock(&delayed_refs->lock);
2052                                 mutex_lock(&head->mutex);
2053                                 mutex_unlock(&head->mutex);
2054
2055                                 btrfs_put_delayed_ref(ref);
2056                                 cond_resched();
2057                                 goto again;
2058                         }
2059                         node = rb_next(node);
2060                 }
2061                 spin_unlock(&delayed_refs->lock);
2062                 schedule_timeout(1);
2063                 goto again;
2064         }
2065 out:
2066         spin_unlock(&delayed_refs->lock);
2067         return 0;
2068 }
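/*
 * Usage sketch (editor's addition, guarded out): flush every delayed ref
 * queued so far, exactly as the block-group writeback path in this file
 * does before writing dirty block groups.
 */
#if 0
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
#endif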
2069
2070 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2071                                 struct btrfs_root *root,
2072                                 u64 bytenr, u64 num_bytes, u64 flags,
2073                                 int is_data)
2074 {
2075         struct btrfs_delayed_extent_op *extent_op;
2076         int ret;
2077
2078         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2079         if (!extent_op)
2080                 return -ENOMEM;
2081
2082         extent_op->flags_to_set = flags;
2083         extent_op->update_flags = 1;
2084         extent_op->update_key = 0;
2085         extent_op->is_data = is_data ? 1 : 0;
2086
2087         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2088         if (ret)
2089                 kfree(extent_op);
2090         return ret;
2091 }
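/*
 * Usage sketch (editor's addition, guarded out): a hypothetical caller
 * marking a COWed tree block 'buf' as fully backref'd; the flag is OR-ed
 * into the extent item's flags when the delayed op runs.
 */
#if 0
        ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
                                          BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
#endif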
2092
2093 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2094                                       struct btrfs_root *root,
2095                                       struct btrfs_path *path,
2096                                       u64 objectid, u64 offset, u64 bytenr)
2097 {
2098         struct btrfs_delayed_ref_head *head;
2099         struct btrfs_delayed_ref_node *ref;
2100         struct btrfs_delayed_data_ref *data_ref;
2101         struct btrfs_delayed_ref_root *delayed_refs;
2102         struct rb_node *node;
2103         int ret = 0;
2104
2105         ret = -ENOENT;
2106         delayed_refs = &trans->transaction->delayed_refs;
2107         spin_lock(&delayed_refs->lock);
2108         head = btrfs_find_delayed_ref_head(trans, bytenr);
2109         if (!head)
2110                 goto out;
2111
2112         if (!mutex_trylock(&head->mutex)) {
2113                 atomic_inc(&head->node.refs);
2114                 spin_unlock(&delayed_refs->lock);
2115
2116                 btrfs_release_path(root->fs_info->extent_root, path);
2117
2118                 mutex_lock(&head->mutex);
2119                 mutex_unlock(&head->mutex);
2120                 btrfs_put_delayed_ref(&head->node);
2121                 return -EAGAIN;
2122         }
2123
2124         node = rb_prev(&head->node.rb_node);
2125         if (!node)
2126                 goto out_unlock;
2127
2128         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2129
2130         if (ref->bytenr != bytenr)
2131                 goto out_unlock;
2132
2133         ret = 1;
2134         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2135                 goto out_unlock;
2136
2137         data_ref = btrfs_delayed_node_to_data_ref(ref);
2138
2139         node = rb_prev(node);
2140         if (node) {
2141                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2142                 if (ref->bytenr == bytenr)
2143                         goto out_unlock;
2144         }
2145
2146         if (data_ref->root != root->root_key.objectid ||
2147             data_ref->objectid != objectid || data_ref->offset != offset)
2148                 goto out_unlock;
2149
2150         ret = 0;
2151 out_unlock:
2152         mutex_unlock(&head->mutex);
2153 out:
2154         spin_unlock(&delayed_refs->lock);
2155         return ret;
2156 }
2157
2158 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2159                                         struct btrfs_root *root,
2160                                         struct btrfs_path *path,
2161                                         u64 objectid, u64 offset, u64 bytenr)
2162 {
2163         struct btrfs_root *extent_root = root->fs_info->extent_root;
2164         struct extent_buffer *leaf;
2165         struct btrfs_extent_data_ref *ref;
2166         struct btrfs_extent_inline_ref *iref;
2167         struct btrfs_extent_item *ei;
2168         struct btrfs_key key;
2169         u32 item_size;
2170         int ret;
2171
2172         key.objectid = bytenr;
2173         key.offset = (u64)-1;
2174         key.type = BTRFS_EXTENT_ITEM_KEY;
2175
2176         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2177         if (ret < 0)
2178                 goto out;
2179         BUG_ON(ret == 0);
2180
2181         ret = -ENOENT;
2182         if (path->slots[0] == 0)
2183                 goto out;
2184
2185         path->slots[0]--;
2186         leaf = path->nodes[0];
2187         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2188
2189         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2190                 goto out;
2191
2192         ret = 1;
2193         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2194 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2195         if (item_size < sizeof(*ei)) {
2196                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2197                 goto out;
2198         }
2199 #endif
2200         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2201
2202         if (item_size != sizeof(*ei) +
2203             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2204                 goto out;
2205
2206         if (btrfs_extent_generation(leaf, ei) <=
2207             btrfs_root_last_snapshot(&root->root_item))
2208                 goto out;
2209
2210         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2211         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2212             BTRFS_EXTENT_DATA_REF_KEY)
2213                 goto out;
2214
2215         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2216         if (btrfs_extent_refs(leaf, ei) !=
2217             btrfs_extent_data_ref_count(leaf, ref) ||
2218             btrfs_extent_data_ref_root(leaf, ref) !=
2219             root->root_key.objectid ||
2220             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2221             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2222                 goto out;
2223
2224         ret = 0;
2225 out:
2226         return ret;
2227 }
2228
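/*
 * Editor's note on the combined result below: 0 means the extent at
 * @bytenr is referenced only by (@root, @objectid, @offset); any nonzero
 * value (1 or an error) means a cross reference may exist, and callers
 * should treat the extent as shared.
 */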
2229 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2230                           struct btrfs_root *root,
2231                           u64 objectid, u64 offset, u64 bytenr)
2232 {
2233         struct btrfs_path *path;
2234         int ret;
2235         int ret2;
2236
2237         path = btrfs_alloc_path();
2238         if (!path)
2239                 return -ENOENT;
2240
2241         do {
2242                 ret = check_committed_ref(trans, root, path, objectid,
2243                                           offset, bytenr);
2244                 if (ret && ret != -ENOENT)
2245                         goto out;
2246
2247                 ret2 = check_delayed_ref(trans, root, path, objectid,
2248                                          offset, bytenr);
2249         } while (ret2 == -EAGAIN);
2250
2251         if (ret2 && ret2 != -ENOENT) {
2252                 ret = ret2;
2253                 goto out;
2254         }
2255
2256         if (ret != -ENOENT || ret2 != -ENOENT)
2257                 ret = 0;
2258 out:
2259         btrfs_free_path(path);
2260         return ret;
2261 }
2262
2263 #if 0
2264 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2265                     struct extent_buffer *buf, u32 nr_extents)
2266 {
2267         struct btrfs_key key;
2268         struct btrfs_file_extent_item *fi;
2269         u64 root_gen;
2270         u32 nritems;
2271         int i;
2272         int level;
2273         int ret = 0;
2274         int shared = 0;
2275
2276         if (!root->ref_cows)
2277                 return 0;
2278
2279         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2280                 shared = 0;
2281                 root_gen = root->root_key.offset;
2282         } else {
2283                 shared = 1;
2284                 root_gen = trans->transid - 1;
2285         }
2286
2287         level = btrfs_header_level(buf);
2288         nritems = btrfs_header_nritems(buf);
2289
2290         if (level == 0) {
2291                 struct btrfs_leaf_ref *ref;
2292                 struct btrfs_extent_info *info;
2293
2294                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2295                 if (!ref) {
2296                         ret = -ENOMEM;
2297                         goto out;
2298                 }
2299
2300                 ref->root_gen = root_gen;
2301                 ref->bytenr = buf->start;
2302                 ref->owner = btrfs_header_owner(buf);
2303                 ref->generation = btrfs_header_generation(buf);
2304                 ref->nritems = nr_extents;
2305                 info = ref->extents;
2306
2307                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2308                         u64 disk_bytenr;
2309                         btrfs_item_key_to_cpu(buf, &key, i);
2310                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2311                                 continue;
2312                         fi = btrfs_item_ptr(buf, i,
2313                                             struct btrfs_file_extent_item);
2314                         if (btrfs_file_extent_type(buf, fi) ==
2315                             BTRFS_FILE_EXTENT_INLINE)
2316                                 continue;
2317                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2318                         if (disk_bytenr == 0)
2319                                 continue;
2320
2321                         info->bytenr = disk_bytenr;
2322                         info->num_bytes =
2323                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2324                         info->objectid = key.objectid;
2325                         info->offset = key.offset;
2326                         info++;
2327                 }
2328
2329                 ret = btrfs_add_leaf_ref(root, ref, shared);
2330                 if (ret == -EEXIST && shared) {
2331                         struct btrfs_leaf_ref *old;
2332                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2333                         BUG_ON(!old);
2334                         btrfs_remove_leaf_ref(root, old);
2335                         btrfs_free_leaf_ref(root, old);
2336                         ret = btrfs_add_leaf_ref(root, ref, shared);
2337                 }
2338                 WARN_ON(ret);
2339                 btrfs_free_leaf_ref(root, ref);
2340         }
2341 out:
2342         return ret;
2343 }
2344
2345 /* when a block goes through cow, we update the reference counts of
2346  * everything that block points to.  The internal pointers of the block
2347  * can be in just about any order, and it is likely to have clusters of
2348  * things that are close together and clusters of things that are not.
2349  *
2350  * To help reduce the seeks that come with updating all of these reference
2351  * counts, sort them by byte number before actual updates are done.
2352  *
2353  * struct refsort is used to match byte number to slot in the btree block.
2354  * we sort based on the byte number and then use the slot to actually
2355  * find the item.
2356  *
2357  * struct refsort is smaller than struct btrfs_item and smaller than
2358  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2359  * for a btree block, there's no way for a kmalloc of refsorts for a
2360  * single node to be bigger than a page.
2361  */
2362 struct refsort {
2363         u64 bytenr;
2364         u32 slot;
2365 };
2366
2367 /*
2368  * for passing into sort()
2369  */
2370 static int refsort_cmp(const void *a_void, const void *b_void)
2371 {
2372         const struct refsort *a = a_void;
2373         const struct refsort *b = b_void;
2374
2375         if (a->bytenr < b->bytenr)
2376                 return -1;
2377         if (a->bytenr > b->bytenr)
2378                 return 1;
2379         return 0;
2380 }
2381 #endif
2382
2383 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2384                            struct btrfs_root *root,
2385                            struct extent_buffer *buf,
2386                            int full_backref, int inc)
2387 {
2388         u64 bytenr;
2389         u64 num_bytes;
2390         u64 parent;
2391         u64 ref_root;
2392         u32 nritems;
2393         struct btrfs_key key;
2394         struct btrfs_file_extent_item *fi;
2395         int i;
2396         int level;
2397         int ret = 0;
2398         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2399                             u64, u64, u64, u64, u64, u64);
2400
2401         ref_root = btrfs_header_owner(buf);
2402         nritems = btrfs_header_nritems(buf);
2403         level = btrfs_header_level(buf);
2404
2405         if (!root->ref_cows && level == 0)
2406                 return 0;
2407
2408         if (inc)
2409                 process_func = btrfs_inc_extent_ref;
2410         else
2411                 process_func = btrfs_free_extent;
2412
2413         if (full_backref)
2414                 parent = buf->start;
2415         else
2416                 parent = 0;
2417
2418         for (i = 0; i < nritems; i++) {
2419                 if (level == 0) {
2420                         btrfs_item_key_to_cpu(buf, &key, i);
2421                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2422                                 continue;
2423                         fi = btrfs_item_ptr(buf, i,
2424                                             struct btrfs_file_extent_item);
2425                         if (btrfs_file_extent_type(buf, fi) ==
2426                             BTRFS_FILE_EXTENT_INLINE)
2427                                 continue;
2428                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2429                         if (bytenr == 0)
2430                                 continue;
2431
2432                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2433                         key.offset -= btrfs_file_extent_offset(buf, fi);
2434                         ret = process_func(trans, root, bytenr, num_bytes,
2435                                            parent, ref_root, key.objectid,
2436                                            key.offset);
2437                         if (ret)
2438                                 goto fail;
2439                 } else {
2440                         bytenr = btrfs_node_blockptr(buf, i);
2441                         num_bytes = btrfs_level_size(root, level - 1);
2442                         ret = process_func(trans, root, bytenr, num_bytes,
2443                                            parent, ref_root, level - 1, 0);
2444                         if (ret)
2445                                 goto fail;
2446                 }
2447         }
2448         return 0;
2449 fail:
2450         BUG();
2451         return ret;
2452 }
2453
2454 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2455                   struct extent_buffer *buf, int full_backref)
2456 {
2457         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2458 }
2459
2460 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2461                   struct extent_buffer *buf, int full_backref)
2462 {
2463         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2464 }
2465
2466 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2467                                  struct btrfs_root *root,
2468                                  struct btrfs_path *path,
2469                                  struct btrfs_block_group_cache *cache)
2470 {
2471         int ret;
2472         struct btrfs_root *extent_root = root->fs_info->extent_root;
2473         unsigned long bi;
2474         struct extent_buffer *leaf;
2475
2476         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2477         if (ret < 0)
2478                 goto fail;
2479         BUG_ON(ret);
2480
2481         leaf = path->nodes[0];
2482         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2483         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2484         btrfs_mark_buffer_dirty(leaf);
2485         btrfs_release_path(extent_root, path);
2486 fail:
2487         if (ret)
2488                 return ret;
2489         return 0;
2491 }
2492
2493 static struct btrfs_block_group_cache *
2494 next_block_group(struct btrfs_root *root,
2495                  struct btrfs_block_group_cache *cache)
2496 {
2497         struct rb_node *node;
2498         spin_lock(&root->fs_info->block_group_cache_lock);
2499         node = rb_next(&cache->cache_node);
2500         btrfs_put_block_group(cache);
2501         if (node) {
2502                 cache = rb_entry(node, struct btrfs_block_group_cache,
2503                                  cache_node);
2504                 atomic_inc(&cache->count);
2505         } else
2506                 cache = NULL;
2507         spin_unlock(&root->fs_info->block_group_cache_lock);
2508         return cache;
2509 }
2510
2511 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2512                                    struct btrfs_root *root)
2513 {
2514         struct btrfs_block_group_cache *cache;
2515         int err = 0;
2516         struct btrfs_path *path;
2517         u64 last = 0;
2518
2519         path = btrfs_alloc_path();
2520         if (!path)
2521                 return -ENOMEM;
2522
2523         while (1) {
2524                 if (last == 0) {
2525                         err = btrfs_run_delayed_refs(trans, root,
2526                                                      (unsigned long)-1);
2527                         BUG_ON(err);
2528                 }
2529
2530                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2531                 while (cache) {
2532                         if (cache->dirty)
2533                                 break;
2534                         cache = next_block_group(root, cache);
2535                 }
2536                 if (!cache) {
2537                         if (last == 0)
2538                                 break;
2539                         last = 0;
2540                         continue;
2541                 }
2542
2543                 cache->dirty = 0;
2544                 last = cache->key.objectid + cache->key.offset;
2545
2546                 err = write_one_cache_group(trans, root, path, cache);
2547                 BUG_ON(err);
2548                 btrfs_put_block_group(cache);
2549         }
2550
2551         btrfs_free_path(path);
2552         return 0;
2553 }
2554
2555 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2556 {
2557         struct btrfs_block_group_cache *block_group;
2558         int readonly = 0;
2559
2560         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2561         if (!block_group || block_group->ro)
2562                 readonly = 1;
2563         if (block_group)
2564                 btrfs_put_block_group(block_group);
2565         return readonly;
2566 }
2567
2568 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2569                              u64 total_bytes, u64 bytes_used,
2570                              struct btrfs_space_info **space_info)
2571 {
2572         struct btrfs_space_info *found;
2573
2574         found = __find_space_info(info, flags);
2575         if (found) {
2576                 spin_lock(&found->lock);
2577                 found->total_bytes += total_bytes;
2578                 found->bytes_used += bytes_used;
2579                 found->full = 0;
2580                 spin_unlock(&found->lock);
2581                 *space_info = found;
2582                 return 0;
2583         }
2584         found = kzalloc(sizeof(*found), GFP_NOFS);
2585         if (!found)
2586                 return -ENOMEM;
2587
2588         INIT_LIST_HEAD(&found->block_groups);
2589         init_rwsem(&found->groups_sem);
2590         spin_lock_init(&found->lock);
2591         found->flags = flags;
2592         found->total_bytes = total_bytes;
2593         found->bytes_used = bytes_used;
2594         found->bytes_pinned = 0;
2595         found->bytes_reserved = 0;
2596         found->bytes_readonly = 0;
2597         found->bytes_delalloc = 0;
2598         found->full = 0;
2599         found->force_alloc = 0;
2600         *space_info = found;
2601         list_add_rcu(&found->list, &info->space_info);
2602         atomic_set(&found->caching_threads, 0);
2603         return 0;
2604 }
2605
2606 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2607 {
2608         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2609                                    BTRFS_BLOCK_GROUP_RAID1 |
2610                                    BTRFS_BLOCK_GROUP_RAID10 |
2611                                    BTRFS_BLOCK_GROUP_DUP);
2612         if (extra_flags) {
2613                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2614                         fs_info->avail_data_alloc_bits |= extra_flags;
2615                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2616                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2617                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2618                         fs_info->avail_system_alloc_bits |= extra_flags;
2619         }
2620 }
2621
2622 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2623 {
2624         spin_lock(&cache->space_info->lock);
2625         spin_lock(&cache->lock);
2626         if (!cache->ro) {
2627                 cache->space_info->bytes_readonly += cache->key.offset -
2628                                         btrfs_block_group_used(&cache->item);
2629                 cache->ro = 1;
2630         }
2631         spin_unlock(&cache->lock);
2632         spin_unlock(&cache->space_info->lock);
2633 }
2634
2635 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2636 {
2637         u64 num_devices = root->fs_info->fs_devices->rw_devices;
2638
2639         if (num_devices == 1)
2640                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2641         if (num_devices < 4)
2642                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2643
2644         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2645             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2646                       BTRFS_BLOCK_GROUP_RAID10))) {
2647                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2648         }
2649
2650         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2651             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2652                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2653         }
2654
2655         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2656             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2657              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2658              (flags & BTRFS_BLOCK_GROUP_DUP)))
2659                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2660         return flags;
2661 }
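/*
 * Worked example (editor's addition, guarded out): on a one-device
 * filesystem the RAID1 bit cannot be honoured, so the profile collapses
 * to plain DATA.
 */
#if 0
        u64 flags = btrfs_reduce_alloc_profile(root, BTRFS_BLOCK_GROUP_DATA |
                                               BTRFS_BLOCK_GROUP_RAID1);
        /* flags == BTRFS_BLOCK_GROUP_DATA here */
#endif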
2662
2663 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2664 {
2665         struct btrfs_fs_info *info = root->fs_info;
2666         u64 alloc_profile;
2667
2668         if (data) {
2669                 alloc_profile = info->avail_data_alloc_bits &
2670                         info->data_alloc_profile;
2671                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2672         } else if (root == root->fs_info->chunk_root) {
2673                 alloc_profile = info->avail_system_alloc_bits &
2674                         info->system_alloc_profile;
2675                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2676         } else {
2677                 alloc_profile = info->avail_metadata_alloc_bits &
2678                         info->metadata_alloc_profile;
2679                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2680         }
2681
2682         return btrfs_reduce_alloc_profile(root, data);
2683 }
2684
2685 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2686 {
2687         u64 alloc_target;
2688
2689         alloc_target = btrfs_get_alloc_profile(root, 1);
2690         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2691                                                        alloc_target);
2692 }
2693
2694 /*
2695  * for now this just makes sure we have at least 5% of our metadata
2696  * space free for use (20% while the space can still grow).
2697  */
2698 int btrfs_check_metadata_free_space(struct btrfs_root *root)
2699 {
2700         struct btrfs_fs_info *info = root->fs_info;
2701         struct btrfs_space_info *meta_sinfo;
2702         u64 alloc_target, thresh;
2703         int committed = 0, ret;
2704
2705         /* get the space info for where the metadata will live */
2706         alloc_target = btrfs_get_alloc_profile(root, 0);
2707         meta_sinfo = __find_space_info(info, alloc_target);
2708
2709 again:
2710         spin_lock(&meta_sinfo->lock);
2711         if (!meta_sinfo->full)
2712                 thresh = meta_sinfo->total_bytes * 80;
2713         else
2714                 thresh = meta_sinfo->total_bytes * 95;
2715
2716         do_div(thresh, 100);
2717
2718         if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2719             meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
2720                 struct btrfs_trans_handle *trans;
2721                 if (!meta_sinfo->full) {
2722                         meta_sinfo->force_alloc = 1;
2723                         spin_unlock(&meta_sinfo->lock);
2724
2725                         trans = btrfs_start_transaction(root, 1);
2726                         if (!trans)
2727                                 return -ENOMEM;
2728
2729                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2730                                              2 * 1024 * 1024, alloc_target, 0);
2731                         btrfs_end_transaction(trans, root);
2732                         goto again;
2733                 }
2734                 spin_unlock(&meta_sinfo->lock);
2735
2736                 if (!committed) {
2737                         committed = 1;
2738                         trans = btrfs_join_transaction(root, 1);
2739                         if (!trans)
2740                                 return -ENOMEM;
2741                         ret = btrfs_commit_transaction(trans, root);
2742                         if (ret)
2743                                 return ret;
2744                         goto again;
2745                 }
2746                 return -ENOSPC;
2747         }
2748         spin_unlock(&meta_sinfo->lock);
2749
2750         return 0;
2751 }
2752
2753 /*
2754  * This will check the space that the inode allocates from, to make sure we
2755  * have enough room for 'bytes' of new data.
2756  */
2757 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2758                                 u64 bytes)
2759 {
2760         struct btrfs_space_info *data_sinfo;
2761         int ret = 0, committed = 0;
2762
2763         /* make sure bytes are sectorsize aligned */
2764         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2765
2766         data_sinfo = BTRFS_I(inode)->space_info;
2767 again:
2768         /* make sure we have enough space to handle the data first */
2769         spin_lock(&data_sinfo->lock);
2770         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2771             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2772             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2773             data_sinfo->bytes_may_use < bytes) {
2774                 struct btrfs_trans_handle *trans;
2775
2776                 /*
2777                  * if we don't have enough free bytes in this space then we need
2778                  * to alloc a new chunk.
2779                  */
2780                 if (!data_sinfo->full) {
2781                         u64 alloc_target;
2782
2783                         data_sinfo->force_alloc = 1;
2784                         spin_unlock(&data_sinfo->lock);
2785
2786                         alloc_target = btrfs_get_alloc_profile(root, 1);
2787                         trans = btrfs_start_transaction(root, 1);
2788                         if (!trans)
2789                                 return -ENOMEM;
2790
2791                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2792                                              bytes + 2 * 1024 * 1024,
2793                                              alloc_target, 0);
2794                         btrfs_end_transaction(trans, root);
2795                         if (ret)
2796                                 return ret;
2797                         goto again;
2798                 }
2799                 spin_unlock(&data_sinfo->lock);
2800
2801                 /* commit the current transaction and try again */
2802                 if (!committed) {
2803                         committed = 1;
2804                         trans = btrfs_join_transaction(root, 1);
2805                         if (!trans)
2806                                 return -ENOMEM;
2807                         ret = btrfs_commit_transaction(trans, root);
2808                         if (ret)
2809                                 return ret;
2810                         goto again;
2811                 }
2812
2813                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2814                        ", %llu bytes_used, %llu bytes_reserved, "
2815                        "%llu bytes_pinned, %llu bytes_readonly, %llu may_use, "
2816                        "%llu total\n", (unsigned long long)bytes,
2817                        (unsigned long long)data_sinfo->bytes_delalloc,
2818                        (unsigned long long)data_sinfo->bytes_used,
2819                        (unsigned long long)data_sinfo->bytes_reserved,
2820                        (unsigned long long)data_sinfo->bytes_pinned,
2821                        (unsigned long long)data_sinfo->bytes_readonly,
2822                        (unsigned long long)data_sinfo->bytes_may_use,
2823                        (unsigned long long)data_sinfo->total_bytes);
2824                 return -ENOSPC;
2825         }
2826         data_sinfo->bytes_may_use += bytes;
2827         BTRFS_I(inode)->reserved_bytes += bytes;
2828         spin_unlock(&data_sinfo->lock);
2829
2830         return btrfs_check_metadata_free_space(root);
2831 }
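
/*
 * Illustrative pairing of these helpers (a sketch for readers, not code
 * from this file; do_the_write() is a made-up placeholder):
 *
 *        ret = btrfs_check_data_free_space(root, inode, bytes);
 *        if (ret)
 *                return ret;
 *        ret = do_the_write(inode, bytes);
 *        if (ret)
 *                btrfs_free_reserved_data_space(root, inode, bytes);
 *
 * once the write actually becomes delalloc, btrfs_delalloc_reserve_space
 * converts the may_use reservation into delalloc bytes, and
 * btrfs_delalloc_free_space drops it when the delalloc extent is cleared.
 */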
2832
2833 /*
2834  * if there was an error for whatever reason after calling
2835  * btrfs_check_data_free_space, call this so we can clean up the counters.
2836  */
2837 void btrfs_free_reserved_data_space(struct btrfs_root *root,
2838                                     struct inode *inode, u64 bytes)
2839 {
2840         struct btrfs_space_info *data_sinfo;
2841
2842         /* make sure bytes are sectorsize aligned */
2843         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2844
2845         data_sinfo = BTRFS_I(inode)->space_info;
2846         spin_lock(&data_sinfo->lock);
2847         data_sinfo->bytes_may_use -= bytes;
2848         BTRFS_I(inode)->reserved_bytes -= bytes;
2849         spin_unlock(&data_sinfo->lock);
2850 }
2851
2852 /* called when we are adding a delalloc extent to the inode's io_tree */
2853 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2854                                   u64 bytes)
2855 {
2856         struct btrfs_space_info *data_sinfo;
2857
2858         /* get the space info for where this inode will be storing its data */
2859         data_sinfo = BTRFS_I(inode)->space_info;
2860
2861         /* make sure we have enough space to handle the data first */
2862         spin_lock(&data_sinfo->lock);
2863         data_sinfo->bytes_delalloc += bytes;
2864
2865         /*
2866          * we are adding a delalloc extent without calling
2867          * btrfs_check_data_free_space first.  This happens on a weird
2868          * writepage condition, but shouldn't hurt our accounting
2869          */
2870         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2871                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2872                 BTRFS_I(inode)->reserved_bytes = 0;
2873         } else {
2874                 data_sinfo->bytes_may_use -= bytes;
2875                 BTRFS_I(inode)->reserved_bytes -= bytes;
2876         }
2877
2878         spin_unlock(&data_sinfo->lock);
2879 }
2880
2881 /* called when we are clearing a delalloc extent from the inode's io_tree */
2882 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2883                               u64 bytes)
2884 {
2885         struct btrfs_space_info *info;
2886
2887         info = BTRFS_I(inode)->space_info;
2888
2889         spin_lock(&info->lock);
2890         info->bytes_delalloc -= bytes;
2891         spin_unlock(&info->lock);
2892 }
2893
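/* flag every metadata space_info so the next allocation forces a new chunk */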
2894 static void force_metadata_allocation(struct btrfs_fs_info *info)
2895 {
2896         struct list_head *head = &info->space_info;
2897         struct btrfs_space_info *found;
2898
2899         rcu_read_lock();
2900         list_for_each_entry_rcu(found, head, list) {
2901                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2902                         found->force_alloc = 1;
2903         }
2904         rcu_read_unlock();
2905 }
2906
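/*
 * allocate a new chunk with the given flags if this space has crossed
 * roughly 60% usage of its writable bytes ('force', or a pending
 * space_info->force_alloc, skips the threshold check).  Data chunk
 * allocations periodically force a metadata chunk as well, based on
 * fs_info->metadata_ratio.  If btrfs_alloc_chunk fails we assume the
 * space is full.
 */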
2907 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
2908                           struct btrfs_root *extent_root, u64 alloc_bytes,
2909                           u64 flags, int force)
2910 {
2911         struct btrfs_space_info *space_info;
2912         struct btrfs_fs_info *fs_info = extent_root->fs_info;
2913         u64 thresh;
2914         int ret = 0;
2915
2916         mutex_lock(&fs_info->chunk_mutex);
2917
2918         flags = btrfs_reduce_alloc_profile(extent_root, flags);
2919
2920         space_info = __find_space_info(extent_root->fs_info, flags);
2921         if (!space_info) {
2922                 ret = update_space_info(extent_root->fs_info, flags,
2923                                         0, 0, &space_info);
2924                 BUG_ON(ret);
2925         }
2926         BUG_ON(!space_info);
2927
2928         spin_lock(&space_info->lock);
2929         if (space_info->force_alloc) {
2930                 force = 1;
2931                 space_info->force_alloc = 0;
2932         }
2933         if (space_info->full) {
2934                 spin_unlock(&space_info->lock);
2935                 goto out;
2936         }
2937
2938         thresh = space_info->total_bytes - space_info->bytes_readonly;
2939         thresh = div_factor(thresh, 6);
2940         if (!force &&
2941            (space_info->bytes_used + space_info->bytes_pinned +
2942             space_info->bytes_reserved + alloc_bytes) < thresh) {
2943                 spin_unlock(&space_info->lock);
2944                 goto out;
2945         }
2946         spin_unlock(&space_info->lock);
2947
2948         /*
2949          * if we're doing a data chunk, go ahead and make sure that
2950          * we keep a reasonable number of metadata chunks allocated in the
2951          * FS as well.
2952          */
2953         if (flags & BTRFS_BLOCK_GROUP_DATA) {
2954                 fs_info->data_chunk_allocations++;
2955                 if (!(fs_info->data_chunk_allocations %
2956                       fs_info->metadata_ratio))
2957                         force_metadata_allocation(fs_info);
2958         }
2959
2960         ret = btrfs_alloc_chunk(trans, extent_root, flags);
2961         if (ret)
2962                 space_info->full = 1;
2963 out:
2964         mutex_unlock(&extent_root->fs_info->chunk_mutex);
2965         return ret;
2966 }
2967
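/*
 * update the allocation accounting for [bytenr, bytenr + num_bytes): the
 * super block and root item byte counts, plus the used/readonly counters
 * of every block group the range intersects.  When freeing with mark_free
 * set, the space is also discarded and handed back to the free space cache.
 */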
2968 static int update_block_group(struct btrfs_trans_handle *trans,
2969                               struct btrfs_root *root,
2970                               u64 bytenr, u64 num_bytes, int alloc,
2971                               int mark_free)
2972 {
2973         struct btrfs_block_group_cache *cache;
2974         struct btrfs_fs_info *info = root->fs_info;
2975         u64 total = num_bytes;
2976         u64 old_val;
2977         u64 byte_in_group;
2978
2979         /* block accounting for super block */
2980         spin_lock(&info->delalloc_lock);
2981         old_val = btrfs_super_bytes_used(&info->super_copy);
2982         if (alloc)
2983                 old_val += num_bytes;
2984         else
2985                 old_val -= num_bytes;
2986         btrfs_set_super_bytes_used(&info->super_copy, old_val);
2987
2988         /* block accounting for root item */
2989         old_val = btrfs_root_used(&root->root_item);
2990         if (alloc)
2991                 old_val += num_bytes;
2992         else
2993                 old_val -= num_bytes;
2994         btrfs_set_root_used(&root->root_item, old_val);
2995         spin_unlock(&info->delalloc_lock);
2996
2997         while (total) {
2998                 cache = btrfs_lookup_block_group(info, bytenr);
2999                 if (!cache)
3000                         return -ENOENT;
3001                 byte_in_group = bytenr - cache->key.objectid;
3002                 WARN_ON(byte_in_group > cache->key.offset);
3003
3004                 spin_lock(&cache->space_info->lock);
3005                 spin_lock(&cache->lock);
3006                 cache->dirty = 1;
3007                 old_val = btrfs_block_group_used(&cache->item);
3008                 num_bytes = min(total, cache->key.offset - byte_in_group);
3009                 if (alloc) {
3010                         old_val += num_bytes;
3011                         cache->space_info->bytes_used += num_bytes;
3012                         if (cache->ro)
3013                                 cache->space_info->bytes_readonly -= num_bytes;
3014                         btrfs_set_block_group_used(&cache->item, old_val);
3015                         spin_unlock(&cache->lock);
3016                         spin_unlock(&cache->space_info->lock);
3017                 } else {
3018                         old_val -= num_bytes;
3019                         cache->space_info->bytes_used -= num_bytes;
3020                         if (cache->ro)
3021                                 cache->space_info->bytes_readonly += num_bytes;
3022                         btrfs_set_block_group_used(&cache->item, old_val);
3023                         spin_unlock(&cache->lock);
3024                         spin_unlock(&cache->space_info->lock);
3025                         if (mark_free) {
3026                                 int ret;
3027
3028                                 ret = btrfs_discard_extent(root, bytenr,
3029                                                            num_bytes);
3030                                 WARN_ON(ret);
3031
3032                                 ret = btrfs_add_free_space(cache, bytenr,
3033                                                            num_bytes);
3034                                 WARN_ON(ret);
3035                         }
3036                 }
3037                 btrfs_put_block_group(cache);
3038                 total -= num_bytes;
3039                 bytenr += num_bytes;
3040         }
3041         return 0;
3042 }
3043
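/*
 * return the logical start of the first block group at or after
 * search_start, or 0 if there is none
 */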
3044 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3045 {
3046         struct btrfs_block_group_cache *cache;
3047         u64 bytenr;
3048
3049         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3050         if (!cache)
3051                 return 0;
3052
3053         bytenr = cache->key.objectid;
3054         btrfs_put_block_group(cache);
3055
3056         return bytenr;
3057 }
3058
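/*
 * pin or unpin [bytenr, bytenr + num) in every block group it spans.
 * Pinning marks the range dirty in the pinned_extents tree and bumps the
 * pinned counters; unpinning only happens for block groups whose caching
 * has finished (see the comment in the unpin branch below), and returns
 * the space to the free space cache.
 */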
3059 int btrfs_update_pinned_extents(struct btrfs_root *root,
3060                                 u64 bytenr, u64 num, int pin)
3061 {
3062         u64 len;
3063         struct btrfs_block_group_cache *cache;
3064         struct btrfs_fs_info *fs_info = root->fs_info;
3065
3066         if (pin)
3067                 set_extent_dirty(&fs_info->pinned_extents,
3068                                 bytenr, bytenr + num - 1, GFP_NOFS);
3069
3070         while (num > 0) {
3071                 cache = btrfs_lookup_block_group(fs_info, bytenr);
3072                 BUG_ON(!cache);
3073                 len = min(num, cache->key.offset -
3074                           (bytenr - cache->key.objectid));
3075                 if (pin) {
3076                         spin_lock(&cache->space_info->lock);
3077                         spin_lock(&cache->lock);
3078                         cache->pinned += len;
3079                         cache->space_info->bytes_pinned += len;
3080                         spin_unlock(&cache->lock);
3081                         spin_unlock(&cache->space_info->lock);
3082                         fs_info->total_pinned += len;
3083                 } else {
3084                         int unpin = 0;
3085
3086                         /*
3087                          * in order to not race with the block group caching, we
3088                          * only want to unpin the extent if we are cached.  If
3089                          * we aren't cached, we want to start async caching this
3090                          * block group so we can free the extent the next time
3091                          * around.
3092                          */
3093                         spin_lock(&cache->space_info->lock);
3094                         spin_lock(&cache->lock);
3095                         unpin = (cache->cached == BTRFS_CACHE_FINISHED);
3096                         if (likely(unpin)) {
3097                                 cache->pinned -= len;
3098                                 cache->space_info->bytes_pinned -= len;
3099                                 fs_info->total_pinned -= len;
3100                         }
3101                         spin_unlock(&cache->lock);
3102                         spin_unlock(&cache->space_info->lock);
3103
3104                         if (likely(unpin))
3105                                 clear_extent_dirty(&fs_info->pinned_extents,
3106                                                    bytenr, bytenr + len - 1,
3107                                                    GFP_NOFS);
3108                         else
3109                                 cache_block_group(cache);
3110
3111                         if (unpin)
3112                                 btrfs_add_free_space(cache, bytenr, len);
3113                 }
3114                 btrfs_put_block_group(cache);
3115                 bytenr += len;
3116                 num -= len;
3117         }
3118         return 0;
3119 }
3120
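/*
 * add or remove 'num' bytes from the reserved counters of every block
 * group that [bytenr, bytenr + num) intersects
 */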
3121 static int update_reserved_extents(struct btrfs_root *root,
3122                                    u64 bytenr, u64 num, int reserve)
3123 {
3124         u64 len;
3125         struct btrfs_block_group_cache *cache;
3126         struct btrfs_fs_info *fs_info = root->fs_info;
3127
3128         while (num > 0) {
3129                 cache = btrfs_lookup_block_group(fs_info, bytenr);
3130                 BUG_ON(!cache);
3131                 len = min(num, cache->key.offset -
3132                           (bytenr - cache->key.objectid));
3133
3134                 spin_lock(&cache->space_info->lock);
3135                 spin_lock(&cache->lock);
3136                 if (reserve) {
3137                         cache->reserved += len;
3138                         cache->space_info->bytes_reserved += len;
3139                 } else {
3140                         cache->reserved -= len;
3141                         cache->space_info->bytes_reserved -= len;
3142                 }
3143                 spin_unlock(&cache->lock);
3144                 spin_unlock(&cache->space_info->lock);
3145                 btrfs_put_block_group(cache);
3146                 bytenr += len;
3147                 num -= len;
3148         }
3149         return 0;
3150 }
3151
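/*
 * copy everything currently dirty in the pinned_extents tree into 'copy',
 * giving the transaction commit a stable set of extents to unpin
 */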
3152 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
3153 {
3154         u64 last = 0;
3155         u64 start;
3156         u64 end;
3157         struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
3158         int ret;
3159
3160         while (1) {
3161                 ret = find_first_extent_bit(pinned_extents, last,
3162                                             &start, &end, EXTENT_DIRTY);
3163                 if (ret)
3164                         break;
3165
3166                 set_extent_dirty(copy, start, end, GFP_NOFS);
3167                 last = end + 1;
3168         }
3169         return 0;
3170 }
3171
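/*
 * walk the 'unpin' tree at commit time, discarding each range and
 * unpinning it so the space goes back to the free space caches
 */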
3172 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3173                                struct btrfs_root *root,
3174                                struct extent_io_tree *unpin)
3175 {
3176         u64 start;
3177         u64 end;
3178         int ret;
3179
3180         while (1) {
3181                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3182                                             EXTENT_DIRTY);
3183                 if (ret)
3184                         break;
3185
3186                 ret = btrfs_discard_extent(root, start, end + 1 - start);
3187
3188                 /* unlocks the pinned mutex */
3189                 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
3190                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3191
3192                 cond_resched();
3193         }
3194
3195         return ret;
3196 }
3197
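/*
 * decide what happens to the bytes of a freed extent: a tree block from
 * this transaction that was never written can be reused directly (it is
 * returned locked in *must_clean and we return 1), everything else gets
 * pinned until the transaction commits and we return 0
 */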
3198 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3199                           struct btrfs_root *root,
3200                           struct btrfs_path *path,
3201                           u64 bytenr, u64 num_bytes, int is_data,
3202                           struct extent_buffer **must_clean)
3203 {
3205         struct extent_buffer *buf;
3206
3207         if (is_data)
3208                 goto pinit;
3209
3210         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3211         if (!buf)
3212                 goto pinit;
3213
3214         /* we can reuse a block if it hasn't been written
3215          * and it is from this transaction.  We can't
3216          * reuse anything from the tree log root because
3217          * it has tiny sub-transactions.
3218          */
3219         if (btrfs_buffer_uptodate(buf, 0) &&
3220             btrfs_try_tree_lock(buf)) {
3221                 u64 header_owner = btrfs_header_owner(buf);
3222                 u64 header_transid = btrfs_header_generation(buf);
3223                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3224                     header_transid == trans->transid &&
3225                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3226                         *must_clean = buf;
3227                         return 1;
3228                 }
3229                 btrfs_tree_unlock(buf);
3230         }
3231         free_extent_buffer(buf);
3232 pinit:
3233         btrfs_set_path_blocking(path);
3234         /* unlocks the pinned mutex */
3235         btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
3238         return 0;
3239 }
3240
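/*
 * drop refs_to_drop references to an extent: the matching backref is
 * removed and, when the last reference goes away, the extent item itself
 * is deleted, the bytes are pinned (or immediately marked free), csums
 * are dropped for data extents, and block group accounting is updated
 */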
3242 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3243                                 struct btrfs_root *root,
3244                                 u64 bytenr, u64 num_bytes, u64 parent,
3245                                 u64 root_objectid, u64 owner_objectid,
3246                                 u64 owner_offset, int refs_to_drop,
3247                                 struct btrfs_delayed_extent_op *extent_op)
3248 {
3249         struct btrfs_key key;
3250         struct btrfs_path *path;
3251         struct btrfs_fs_info *info = root->fs_info;
3252         struct btrfs_root *extent_root = info->extent_root;
3253         struct extent_buffer *leaf;
3254         struct btrfs_extent_item *ei;
3255         struct btrfs_extent_inline_ref *iref;
3256         int ret;
3257         int is_data;
3258         int extent_slot = 0;
3259         int found_extent = 0;
3260         int num_to_del = 1;
3261         u32 item_size;
3262         u64 refs;
3263
3264         path = btrfs_alloc_path();
3265         if (!path)
3266                 return -ENOMEM;
3267
3268         path->reada = 1;
3269         path->leave_spinning = 1;
3270
3271         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3272         BUG_ON(!is_data && refs_to_drop != 1);
3273
3274         ret = lookup_extent_backref(trans, extent_root, path, &iref,
3275                                     bytenr, num_bytes, parent,
3276                                     root_objectid, owner_objectid,
3277                                     owner_offset);
3278         if (ret == 0) {
3279                 extent_slot = path->slots[0];
3280                 while (extent_slot >= 0) {
3281                         btrfs_item_key_to_cpu(path->nodes[0], &key,
3282                                               extent_slot);
3283                         if (key.objectid != bytenr)
3284                                 break;
3285                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3286                             key.offset == num_bytes) {
3287                                 found_extent = 1;
3288                                 break;
3289                         }
3290                         if (path->slots[0] - extent_slot > 5)
3291                                 break;
3292                         extent_slot--;
3293                 }
3294 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3295                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3296                 if (found_extent && item_size < sizeof(*ei))
3297                         found_extent = 0;
3298 #endif
3299                 if (!found_extent) {
3300                         BUG_ON(iref);
3301                         ret = remove_extent_backref(trans, extent_root, path,
3302                                                     NULL, refs_to_drop,
3303                                                     is_data);
3304                         BUG_ON(ret);
3305                         btrfs_release_path(extent_root, path);
3306                         path->leave_spinning = 1;
3307
3308                         key.objectid = bytenr;
3309                         key.type = BTRFS_EXTENT_ITEM_KEY;
3310                         key.offset = num_bytes;
3311
3312                         ret = btrfs_search_slot(trans, extent_root,
3313                                                 &key, path, -1, 1);
3314                         if (ret) {
3315                                 printk(KERN_ERR "umm, got %d back from search"
3316                                        ", was looking for %llu\n", ret,
3317                                        (unsigned long long)bytenr);
3318                                 btrfs_print_leaf(extent_root, path->nodes[0]);
3319                         }
3320                         BUG_ON(ret);
3321                         extent_slot = path->slots[0];
3322                 }
3323         } else {
3324                 btrfs_print_leaf(extent_root, path->nodes[0]);
3325                 WARN_ON(1);
3326                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3327                        "parent %llu root %llu owner %llu offset %llu\n",
3328                        (unsigned long long)bytenr,
3329                        (unsigned long long)parent,
3330                        (unsigned long long)root_objectid,
3331                        (unsigned long long)owner_objectid,
3332                        (unsigned long long)owner_offset);
3333         }
3334
3335         leaf = path->nodes[0];
3336         item_size = btrfs_item_size_nr(leaf, extent_slot);
3337 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3338         if (item_size < sizeof(*ei)) {
3339                 BUG_ON(found_extent || extent_slot != path->slots[0]);
3340                 ret = convert_extent_item_v0(trans, extent_root, path,
3341                                              owner_objectid, 0);
3342                 BUG_ON(ret < 0);
3343
3344                 btrfs_release_path(extent_root, path);
3345                 path->leave_spinning = 1;
3346
3347                 key.objectid = bytenr;
3348                 key.type = BTRFS_EXTENT_ITEM_KEY;
3349                 key.offset = num_bytes;
3350
3351                 ret = btrfs_search_slot(trans, extent_root, &key, path,
3352                                         -1, 1);
3353                 if (ret) {
3354                         printk(KERN_ERR "umm, got %d back from search"
3355                                ", was looking for %llu\n", ret,
3356                                (unsigned long long)bytenr);
3357                         btrfs_print_leaf(extent_root, path->nodes[0]);
3358                 }
3359                 BUG_ON(ret);
3360                 extent_slot = path->slots[0];
3361                 leaf = path->nodes[0];
3362                 item_size = btrfs_item_size_nr(leaf, extent_slot);
3363         }
3364 #endif
3365         BUG_ON(item_size < sizeof(*ei));
3366         ei = btrfs_item_ptr(leaf, extent_slot,
3367                             struct btrfs_extent_item);
3368         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3369                 struct btrfs_tree_block_info *bi;
3370                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3371                 bi = (struct btrfs_tree_block_info *)(ei + 1);
3372                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3373         }
3374
3375         refs = btrfs_extent_refs(leaf, ei);
3376         BUG_ON(refs < refs_to_drop);
3377         refs -= refs_to_drop;
3378
3379         if (refs > 0) {
3380                 if (extent_op)
3381                         __run_delayed_extent_op(extent_op, leaf, ei);
3382                 /*
3383                  * In the case of an inline back ref, the reference count
3384                  * will be updated by remove_extent_backref
3385                  */
3386                 if (iref) {
3387                         BUG_ON(!found_extent);
3388                 } else {
3389                         btrfs_set_extent_refs(leaf, ei, refs);
3390                         btrfs_mark_buffer_dirty(leaf);
3391                 }
3392                 if (found_extent) {
3393                         ret = remove_extent_backref(trans, extent_root, path,
3394                                                     iref, refs_to_drop,
3395                                                     is_data);
3396                         BUG_ON(ret);
3397                 }
3398         } else {
3399                 int mark_free = 0;
3400                 struct extent_buffer *must_clean = NULL;
3401
3402                 if (found_extent) {
3403                         BUG_ON(is_data && refs_to_drop !=
3404                                extent_data_ref_count(root, path, iref));
3405                         if (iref) {
3406                                 BUG_ON(path->slots[0] != extent_slot);
3407                         } else {
3408                                 BUG_ON(path->slots[0] != extent_slot + 1);
3409                                 path->slots[0] = extent_slot;
3410                                 num_to_del = 2;
3411                         }
3412                 }
3413
3414                 ret = pin_down_bytes(trans, root, path, bytenr,
3415                                      num_bytes, is_data, &must_clean);
3416                 if (ret > 0)
3417                         mark_free = 1;
3418                 BUG_ON(ret < 0);
3419                 /*
3420                  * it is going to be very rare for someone to be waiting
3421                  * on the block we're freeing.  del_items might need to
3422                  * schedule, so rather than get fancy, just force it
3423                  * to blocking here
3424                  */
3425                 if (must_clean)
3426                         btrfs_set_lock_blocking(must_clean);
3427
3428                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3429                                       num_to_del);
3430                 BUG_ON(ret);
3431                 btrfs_release_path(extent_root, path);
3432
3433                 if (must_clean) {
3434                         clean_tree_block(NULL, root, must_clean);
3435                         btrfs_tree_unlock(must_clean);
3436                         free_extent_buffer(must_clean);
3437                 }
3438
3439                 if (is_data) {
3440                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3441                         BUG_ON(ret);
3442                 } else {
3443                         invalidate_mapping_pages(info->btree_inode->i_mapping,
3444                              bytenr >> PAGE_CACHE_SHIFT,
3445                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3446                 }
3447
3448                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3449                                          mark_free);
3450                 BUG_ON(ret);
3451         }
3452         btrfs_free_path(path);
3453         return ret;
3454 }
3455
3456 /*
3457  * when we free an extent, it is possible (and likely) that we free the last
3458  * delayed ref for that extent as well.  This searches the delayed ref tree for
3459  * a given extent, and if there are no other delayed refs to be processed, the
3460  * ref head is removed from the tree and run directly.
3461  */
3462 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3463                                       struct btrfs_root *root, u64 bytenr)
3464 {
3465         struct btrfs_delayed_ref_head *head;
3466         struct btrfs_delayed_ref_root *delayed_refs;
3467         struct btrfs_delayed_ref_node *ref;
3468         struct rb_node *node;
3469         int ret;
3470
3471         delayed_refs = &trans->transaction->delayed_refs;
3472         spin_lock(&delayed_refs->lock);
3473         head = btrfs_find_delayed_ref_head(trans, bytenr);
3474         if (!head)
3475                 goto out;
3476
3477         node = rb_prev(&head->node.rb_node);
3478         if (!node)
3479                 goto out;
3480
3481         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3482
3483         /* there are still entries for this ref, we can't drop it */
3484         if (ref->bytenr == bytenr)
3485                 goto out;
3486
3487         if (head->extent_op) {
3488                 if (!head->must_insert_reserved)
3489                         goto out;
3490                 kfree(head->extent_op);
3491                 head->extent_op = NULL;
3492         }
3493
3494         /*
3495          * waiting for the lock here would deadlock.  If someone else has it
3496          * locked they are already in the process of dropping it anyway
3497          */
3498         if (!mutex_trylock(&head->mutex))
3499                 goto out;
3500
3501         /*
3502          * at this point we have a head with no other entries.  Go
3503          * ahead and process it.
3504          */
3505         head->node.in_tree = 0;
3506         rb_erase(&head->node.rb_node, &delayed_refs->root);
3507
3508         delayed_refs->num_entries--;
3509
3510         /*
3511          * we don't take a ref on the node because we're removing it from the
3512          * tree, so we just steal the ref the tree was holding.
3513          */
3514         delayed_refs->num_heads--;
3515         if (list_empty(&head->cluster))
3516                 delayed_refs->num_heads_ready--;
3517
3518         list_del_init(&head->cluster);
3519         spin_unlock(&delayed_refs->lock);
3520
3521         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
3522                                   &head->node, head->extent_op,
3523                                   head->must_insert_reserved);
3524         BUG_ON(ret);
3525         btrfs_put_delayed_ref(&head->node);
3526         return 0;
3527 out:
3528         spin_unlock(&delayed_refs->lock);
3529         return 0;
3530 }
3531
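/*
 * free an extent by queueing the appropriate delayed ref for it; tree log
 * blocks are special-cased and pinned directly (see below)
 */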
3532 int btrfs_free_extent(struct btrfs_trans_handle *trans,
3533                       struct btrfs_root *root,
3534                       u64 bytenr, u64 num_bytes, u64 parent,
3535                       u64 root_objectid, u64 owner, u64 offset)
3536 {
3537         int ret;
3538
3539         /*
3540          * tree log blocks never actually go into the extent allocation
3541          * tree, just update pinning info and exit early.
3542          */
3543         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
3544                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
3545                 /* unlocks the pinned mutex */
3546                 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
3547                 update_reserved_extents(root, bytenr, num_bytes, 0);
3548                 ret = 0;
3549         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
3550                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
3551                                         parent, root_objectid, (int)owner,
3552                                         BTRFS_DROP_DELAYED_REF, NULL);
3553                 BUG_ON(ret);
3554                 ret = check_ref_cleanup(trans, root, bytenr);
3555                 BUG_ON(ret);
3556         } else {
3557                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3558                                         parent, root_objectid, owner,
3559                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
3560                 BUG_ON(ret);
3561         }
3562         return ret;
3563 }
3564
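/*
 * round val up to a multiple of the stripe size: with a 64K stripesize,
 * 0x11000 becomes 0x20000
 */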
3565 static u64 stripe_align(struct btrfs_root *root, u64 val)
3566 {
3567         u64 mask = ((u64)root->stripesize - 1);
3568         u64 ret = (val + mask) & ~mask;
3569         return ret;
3570 }
3571
3572 /*
3573  * when we wait for progress in the block group caching, it's because
3574  * our allocation attempt failed at least once.  So, we must sleep
3575  * and let some progress happen before we try again.
3576  *
3577  * This function will sleep at least once waiting for new free space to
3578  * show up, and then it will check the block group free space numbers
3579  * for our min num_bytes.  Another option is to have it go ahead
3580  * and look in the rbtree for a free extent of a given size, but this
3581  * is a good start.
3582  */
3583 static noinline int
3584 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3585                                 u64 num_bytes)
3586 {
3587         DEFINE_WAIT(wait);
3588
3589         prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
3590
3591         if (block_group_cache_done(cache)) {
3592                 finish_wait(&cache->caching_q, &wait);
3593                 return 0;
3594         }
3595         schedule();
3596         finish_wait(&cache->caching_q, &wait);
3597
3598         wait_event(cache->caching_q, block_group_cache_done(cache) ||
3599                    (cache->free_space >= num_bytes));
3600         return 0;
3601 }
3602
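/*
 * stages for find_free_extent; each pass is allowed to do more work than
 * the last (see the block comment near the end of find_free_extent for
 * what each stage searches)
 */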
3603 enum btrfs_loop_type {
3604         LOOP_CACHED_ONLY = 0,
3605         LOOP_CACHING_NOWAIT = 1,
3606         LOOP_CACHING_WAIT = 2,
3607         LOOP_ALLOC_CHUNK = 3,
3608         LOOP_NO_EMPTY_SIZE = 4,
3609 };
3610
3611 /*
3612  * walks the btree of allocated extents and finds a hole of a given size.
3613  * The key ins is changed to record the hole:
3614  * ins->objectid == block start
3615  * ins->flags = BTRFS_EXTENT_ITEM_KEY
3616  * ins->offset == number of blocks
3617  * Any available blocks before search_start are skipped.
3618  */
3619 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3620                                      struct btrfs_root *orig_root,
3621                                      u64 num_bytes, u64 empty_size,
3622                                      u64 search_start, u64 search_end,
3623                                      u64 hint_byte, struct btrfs_key *ins,
3624                                      u64 exclude_start, u64 exclude_nr,
3625                                      int data)
3626 {
3627         int ret = 0;
3628         struct btrfs_root *root = orig_root->fs_info->extent_root;
3629         struct btrfs_free_cluster *last_ptr = NULL;
3630         struct btrfs_block_group_cache *block_group = NULL;
3631         int empty_cluster = 2 * 1024 * 1024;
3632         int allowed_chunk_alloc = 0;
3633         struct btrfs_space_info *space_info;
3634         int last_ptr_loop = 0;
3635         int loop = 0;
3636         bool found_uncached_bg = false;
3637
3638         WARN_ON(num_bytes < root->sectorsize);
3639         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
3640         ins->objectid = 0;
3641         ins->offset = 0;
3642
3643         space_info = __find_space_info(root->fs_info, data);
3644
3645         if (orig_root->ref_cows || empty_size)
3646                 allowed_chunk_alloc = 1;
3647
3648         if (data & BTRFS_BLOCK_GROUP_METADATA) {
3649                 last_ptr = &root->fs_info->meta_alloc_cluster;
3650                 if (!btrfs_test_opt(root, SSD))
3651                         empty_cluster = 64 * 1024;
3652         }
3653
3654         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
3655                 last_ptr = &root->fs_info->data_alloc_cluster;
3656         }
3657
3658         if (last_ptr) {
3659                 spin_lock(&last_ptr->lock);
3660                 if (last_ptr->block_group)
3661                         hint_byte = last_ptr->window_start;
3662                 spin_unlock(&last_ptr->lock);
3663         }
3664
3665         search_start = max(search_start, first_logical_byte(root, 0));
3666         search_start = max(search_start, hint_byte);
3667
3668         if (!last_ptr)
3669                 empty_cluster = 0;
3670
3671         if (search_start == hint_byte) {
3672                 block_group = btrfs_lookup_block_group(root->fs_info,
3673                                                        search_start);
3674                 /*
3675                  * we don't want to use the block group if it doesn't match our
3676  * allocation bits, or if it's not cached.
3677                  */
3678                 if (block_group && block_group_bits(block_group, data) &&
3679                     block_group_cache_done(block_group)) {
3680                         down_read(&space_info->groups_sem);
3681                         if (list_empty(&block_group->list) ||
3682                             block_group->ro) {
3683                                 /*
3684                                  * someone is removing this block group,
3685                                  * we can't jump into the have_block_group
3686                                  * target because our list pointers are not
3687                                  * valid
3688                                  */
3689                                 btrfs_put_block_group(block_group);
3690                                 up_read(&space_info->groups_sem);
3691                         } else
3692                                 goto have_block_group;
3693                 } else if (block_group) {
3694                         btrfs_put_block_group(block_group);
3695                 }
3696         }
3697
3698 search:
3699         down_read(&space_info->groups_sem);
3700         list_for_each_entry(block_group, &space_info->block_groups, list) {
3701                 u64 offset;
3702                 int cached;
3703
3704                 atomic_inc(&block_group->count);
3705                 search_start = block_group->key.objectid;
3706
3707 have_block_group:
3708                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3709                         /*
3710                          * we want to start caching kthreads, but not too many
3711                          * right off the bat so we don't overwhelm the system,
3712                          * so only start them if there are fewer than 2 and we're
3713                          * in the initial allocation phase.
3714                          */
3715                         if (loop > LOOP_CACHING_NOWAIT ||
3716                             atomic_read(&space_info->caching_threads) < 2) {
3717                                 ret = cache_block_group(block_group);
3718                                 BUG_ON(ret);
3719                         }
3720                 }
3721
3722                 cached = block_group_cache_done(block_group);
3723                 if (unlikely(!cached)) {
3724                         found_uncached_bg = true;
3725
3726                         /* if we only want cached bgs, loop */
3727                         if (loop == LOOP_CACHED_ONLY)
3728                                 goto loop;
3729                 }
3730
3731                 if (unlikely(block_group->ro))
3732                         goto loop;
3733
3734                 if (last_ptr) {
3735                         /*
3736                          * the refill lock keeps out other
3737                          * people trying to start a new cluster
3738                          */
3739                         spin_lock(&last_ptr->refill_lock);
3740                         if (last_ptr->block_group &&
3741                             (last_ptr->block_group->ro ||
3742                             !block_group_bits(last_ptr->block_group, data))) {
3743                                 offset = 0;
3744                                 goto refill_cluster;
3745                         }
3746
3747                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
3748                                                  num_bytes, search_start);
3749                         if (offset) {
3750                                 /* we have a block, we're done */
3751                                 spin_unlock(&last_ptr->refill_lock);
3752                                 goto checks;
3753                         }
3754
3755                         spin_lock(&last_ptr->lock);
3756                         /*
3757                          * whoops, this cluster doesn't actually point to
3758                          * this block group.  Get a ref on the block
3759                          * group it does point to and try again
3760                          */
3761                         if (!last_ptr_loop && last_ptr->block_group &&
3762                             last_ptr->block_group != block_group) {
3763
3764                                 btrfs_put_block_group(block_group);
3765                                 block_group = last_ptr->block_group;
3766                                 atomic_inc(&block_group->count);
3767                                 spin_unlock(&last_ptr->lock);
3768                                 spin_unlock(&last_ptr->refill_lock);
3769
3770                                 last_ptr_loop = 1;
3771                                 search_start = block_group->key.objectid;
3772                                 /*
3773                                  * we know this block group is properly
3774                                  * in the list because
3775                                  * btrfs_remove_block_group drops the
3776                                  * cluster before it removes the block
3777                                  * group from the list
3778                                  */
3779                                 goto have_block_group;
3780                         }
3781                         spin_unlock(&last_ptr->lock);
3782 refill_cluster:
3783                         /*
3784                          * this cluster didn't work out, free it and
3785                          * start over
3786                          */
3787                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
3788
3789                         last_ptr_loop = 0;
3790
3791                         /* allocate a cluster in this block group */
3792                         ret = btrfs_find_space_cluster(trans, root,
3793                                                block_group, last_ptr,
3794                                                offset, num_bytes,
3795                                                empty_cluster + empty_size);
3796                         if (ret == 0) {
3797                                 /*
3798                                  * now pull our allocation out of this
3799                                  * cluster
3800                                  */
3801                                 offset = btrfs_alloc_from_cluster(block_group,
3802                                                   last_ptr, num_bytes,
3803                                                   search_start);
3804                                 if (offset) {
3805                                         /* we found one, proceed */
3806                                         spin_unlock(&last_ptr->refill_lock);
3807                                         goto checks;
3808                                 }
3809                         } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
3810                                 spin_unlock(&last_ptr->refill_lock);
3811
3812                                 wait_block_group_cache_progress(block_group,
3813                                        num_bytes + empty_cluster + empty_size);
3814                                 goto have_block_group;
3815                         }
3816
3817                         /*
3818                          * at this point we either didn't find a cluster
3819                          * or we weren't able to allocate a block from our
3820                          * cluster.  Free the cluster we've been trying
3821                          * to use, and go to the next block group
3822                          */
3823                         if (loop < LOOP_NO_EMPTY_SIZE) {
3824                                 btrfs_return_cluster_to_free_space(NULL,
3825                                                                    last_ptr);
3826                                 spin_unlock(&last_ptr->refill_lock);
3827                                 goto loop;
3828                         }
3829                         spin_unlock(&last_ptr->refill_lock);
3830                 }
3831
3832                 offset = btrfs_find_space_for_alloc(block_group, search_start,
3833                                                     num_bytes, empty_size);
3834                 if (!offset && (cached || loop == LOOP_CACHING_NOWAIT)) {
3836                         goto loop;
3837                 } else if (!offset && !cached && loop > LOOP_CACHING_NOWAIT) {
3839                         wait_block_group_cache_progress(block_group,
3840                                         num_bytes + empty_size);
3841                         goto have_block_group;
3842                 }
3843 checks:
3844                 search_start = stripe_align(root, offset);
3845                 /* move on to the next group */
3846                 if (search_start + num_bytes >= search_end) {
3847                         btrfs_add_free_space(block_group, offset, num_bytes);
3848                         goto loop;
3849                 }
3850
3851                 /* move on to the next group */
3852                 if (search_start + num_bytes >
3853                     block_group->key.objectid + block_group->key.offset) {
3854                         btrfs_add_free_space(block_group, offset, num_bytes);
3855                         goto loop;
3856                 }
3857
3858                 if (exclude_nr > 0 &&
3859                     (search_start + num_bytes > exclude_start &&
3860                      search_start < exclude_start + exclude_nr)) {
3861                         search_start = exclude_start + exclude_nr;
3862
3863                         btrfs_add_free_space(block_group, offset, num_bytes);
3864                         /*
3865                          * if search_start is still in this block group
3866                          * then we just re-search this block group
3867                          */
3868                         if (search_start >= block_group->key.objectid &&
3869                             search_start < (block_group->key.objectid +
3870                                             block_group->key.offset))
3871                                 goto have_block_group;
3872                         goto loop;
3873                 }
3874
3875                 ins->objectid = search_start;
3876                 ins->offset = num_bytes;
3877
3878                 if (offset < search_start)
3879                         btrfs_add_free_space(block_group, offset,
3880                                              search_start - offset);
3881                 BUG_ON(offset > search_start);
3882
3883                 /* we are all good, let's return */
3884                 break;
3885 loop:
3886                 btrfs_put_block_group(block_group);
3887         }
3888         up_read(&space_info->groups_sem);
3889
3890         /* LOOP_CACHED_ONLY, only search fully cached block groups
3891          * LOOP_CACHING_NOWAIT, search partially cached block groups, but
3892  *                      don't wait for them to finish caching
3893          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3894          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3895          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3896          *                      again
3897          */
3898         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
3899             (found_uncached_bg || empty_size || empty_cluster ||
3900              allowed_chunk_alloc)) {
3901                 if (found_uncached_bg) {
3902                         found_uncached_bg = false;
3903                         if (loop < LOOP_CACHING_WAIT) {
3904                                 loop++;
3905                                 goto search;
3906                         }
3907                 }
3908
3909                 if (loop == LOOP_ALLOC_CHUNK) {
3910                         empty_size = 0;
3911                         empty_cluster = 0;
3912                 }
3913
3914                 if (allowed_chunk_alloc) {
3915                         ret = do_chunk_alloc(trans, root, num_bytes +
3916                                              2 * 1024 * 1024, data, 1);
3917                         allowed_chunk_alloc = 0;
3918                 } else {
3919                         space_info->force_alloc = 1;
3920                 }
3921
3922                 if (loop < LOOP_NO_EMPTY_SIZE) {
3923                         loop++;
3924                         goto search;
3925                 }
3926                 ret = -ENOSPC;
3927         } else if (!ins->objectid) {
3928                 ret = -ENOSPC;
3929         }
3930
3931         /* we found what we needed */
3932         if (ins->objectid) {
3933                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
3934                         trans->block_group = block_group->key.objectid;
3935
3936                 btrfs_put_block_group(block_group);
3937                 ret = 0;
3938         }
3939
3940         return ret;
3941 }
3942
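/* print a summary of a space_info and its block groups for ENOSPC debugging */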
3943 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3944 {
3945         struct btrfs_block_group_cache *cache;
3946
3947         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3948                (unsigned long long)(info->total_bytes - info->bytes_used -
3949                                     info->bytes_pinned - info->bytes_reserved),
3950                (info->full) ? "" : "not ");
3951         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
3952                " may_use=%llu, used=%llu\n",
3953                (unsigned long long)info->total_bytes,
3954                (unsigned long long)info->bytes_pinned,
3955                (unsigned long long)info->bytes_delalloc,
3956                (unsigned long long)info->bytes_may_use,
3957                (unsigned long long)info->bytes_used);
3958
3959         down_read(&info->groups_sem);
3960         list_for_each_entry(cache, &info->block_groups, list) {
3961                 spin_lock(&cache->lock);
3962                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3963                        "%llu pinned %llu reserved\n",
3964                        (unsigned long long)cache->key.objectid,
3965                        (unsigned long long)cache->key.offset,
3966                        (unsigned long long)btrfs_block_group_used(&cache->item),
3967                        (unsigned long long)cache->pinned,
3968                        (unsigned long long)cache->reserved);
3969                 btrfs_dump_free_space(cache, bytes);
3970                 spin_unlock(&cache->lock);
3971         }
3972         up_read(&info->groups_sem);
3973 }
3974
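/*
 * reserve an extent of num_bytes, pre-allocating chunks where allowed.  On
 * -ENOSPC the request is retried at progressively smaller sizes down to
 * min_alloc_size before we give up and dump the space_info
 */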
3975 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3976                                   struct btrfs_root *root,
3977                                   u64 num_bytes, u64 min_alloc_size,
3978                                   u64 empty_size, u64 hint_byte,
3979                                   u64 search_end, struct btrfs_key *ins,
3980                                   u64 data)
3981 {
3982         int ret;
3983         u64 search_start = 0;
3984         struct btrfs_fs_info *info = root->fs_info;
3985
3986         data = btrfs_get_alloc_profile(root, data);
3987 again:
3988         /*
3989          * the only place that sets empty_size is btrfs_realloc_node, which
3990          * is not called recursively on allocations
3991          */
3992         if (empty_size || root->ref_cows) {
3993                 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3994                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3995                                      2 * 1024 * 1024,
3996                                      BTRFS_BLOCK_GROUP_METADATA |
3997                                      (info->metadata_alloc_profile &
3998                                       info->avail_metadata_alloc_bits), 0);
3999                 }
4000                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4001                                      num_bytes + 2 * 1024 * 1024, data, 0);
4002         }
4003
4004         WARN_ON(num_bytes < root->sectorsize);
4005         ret = find_free_extent(trans, root, num_bytes, empty_size,
4006                                search_start, search_end, hint_byte, ins,
4007                                trans->alloc_exclude_start,
4008                                trans->alloc_exclude_nr, data);
4009
4010         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4011                 num_bytes = num_bytes >> 1;
4012                 num_bytes = num_bytes & ~(root->sectorsize - 1);
4013                 num_bytes = max(num_bytes, min_alloc_size);
4014                 do_chunk_alloc(trans, root->fs_info->extent_root,
4015                                num_bytes, data, 1);
4016                 goto again;
4017         }
4018         if (ret == -ENOSPC) {
4019                 struct btrfs_space_info *sinfo;
4020
4021                 sinfo = __find_space_info(root->fs_info, data);
4022                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4023                        "wanted %llu\n", (unsigned long long)data,
4024                        (unsigned long long)num_bytes);
4025                 dump_space_info(sinfo, num_bytes);
4026         }
4027
4028         return ret;
4029 }
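/*
 * A sketch of the -ENOSPC back-off arithmetic above, assuming a 4 KiB
 * sectorsize and a 64 KiB min_alloc_size (illustrative values, not
 * defaults taken from this file):
 *
 *     num_bytes = 1024 * 1024;                  1 MiB request fails
 *     num_bytes >>= 1;                          512 KiB
 *     num_bytes &= ~((u64)4096 - 1);            round down to a sector
 *     num_bytes = max(num_bytes, (u64)65536);   clamp to the minimum
 *
 * so a failing 1 MiB request retries at 512 KiB, 256 KiB, 128 KiB and
 * 64 KiB; once num_bytes reaches min_alloc_size the next -ENOSPC is
 * returned to the caller and dumped via dump_space_info.
 */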
4030
4031 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4032 {
4033         struct btrfs_block_group_cache *cache;
4034         int ret = 0;
4035
4036         cache = btrfs_lookup_block_group(root->fs_info, start);
4037         if (!cache) {
4038                 printk(KERN_ERR "Unable to find block group for %llu\n",
4039                        (unsigned long long)start);
4040                 return -ENOSPC;
4041         }
4042
4043         ret = btrfs_discard_extent(root, start, len);
4044
4045         btrfs_add_free_space(cache, start, len);
4046         btrfs_put_block_group(cache);
4047         update_reserved_extents(root, start, len, 0);
4048
4049         return ret;
4050 }
4051
4052 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4053                                   struct btrfs_root *root,
4054                                   u64 num_bytes, u64 min_alloc_size,
4055                                   u64 empty_size, u64 hint_byte,
4056                                   u64 search_end, struct btrfs_key *ins,
4057                                   u64 data)
4058 {
4059         int ret;
4060         ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
4061                                      empty_size, hint_byte, search_end, ins,
4062                                      data);
4063         if (!ret)
4064                 update_reserved_extents(root, ins->objectid, ins->offset, 1);
4065
4066         return ret;
4067 }
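/*
 * Reservation and release are symmetric: btrfs_reserve_extent bumps the
 * reserved count (update_reserved_extents(..., 1)) and
 * btrfs_free_reserved_extent above drops it (..., 0).  A hedged usage
 * sketch, with error handling trimmed and 'trans'/'root' assumed to
 * exist in the caller:
 *
 *     struct btrfs_key ins;
 *     int ret;
 *
 *     ret = btrfs_reserve_extent(trans, root, 4096, 4096, 0, 0,
 *                                (u64)-1, &ins, BTRFS_BLOCK_GROUP_DATA);
 *     if (ret)
 *             return ret;
 *     ... write into [ins.objectid, ins.objectid + ins.offset) ...
 *     and if the write is abandoned, hand the space back:
 *     btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
 */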
4068
4069 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4070                                       struct btrfs_root *root,
4071                                       u64 parent, u64 root_objectid,
4072                                       u64 flags, u64 owner, u64 offset,
4073                                       struct btrfs_key *ins, int ref_mod)
4074 {
4075         int ret;
4076         struct btrfs_fs_info *fs_info = root->fs_info;
4077         struct btrfs_extent_item *extent_item;
4078         struct btrfs_extent_inline_ref *iref;
4079         struct btrfs_path *path;
4080         struct extent_buffer *leaf;
4081         int type;
4082         u32 size;
4083
4084         if (parent > 0)
4085                 type = BTRFS_SHARED_DATA_REF_KEY;
4086         else
4087                 type = BTRFS_EXTENT_DATA_REF_KEY;
4088
4089         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4090
4091         path = btrfs_alloc_path();
4092         BUG_ON(!path);
4093
4094         path->leave_spinning = 1;
4095         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4096                                       ins, size);
4097         BUG_ON(ret);
4098
4099         leaf = path->nodes[0];
4100         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4101                                      struct btrfs_extent_item);
4102         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4103         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4104         btrfs_set_extent_flags(leaf, extent_item,
4105                                flags | BTRFS_EXTENT_FLAG_DATA);
4106
4107         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4108         btrfs_set_extent_inline_ref_type(leaf, iref, type);
4109         if (parent > 0) {
4110                 struct btrfs_shared_data_ref *ref;
4111                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4112                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4113                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4114         } else {
4115                 struct btrfs_extent_data_ref *ref;
4116                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4117                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4118                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4119                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4120                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4121         }
4122
4123         btrfs_mark_buffer_dirty(path->nodes[0]);
4124         btrfs_free_path(path);
4125
4126         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4127                                  1, 0);
4128         if (ret) {
4129                 printk(KERN_ERR "btrfs update block group failed for %llu "
4130                        "%llu\n", (unsigned long long)ins->objectid,
4131                        (unsigned long long)ins->offset);
4132                 BUG();
4133         }
4134         return ret;
4135 }
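/*
 * Layout sketch of the item inserted above (per this file's struct
 * usage; offsets are relative to the start of the item):
 *
 *     [ btrfs_extent_item: refs | generation | flags (FLAG_DATA) ]
 *     [ btrfs_extent_inline_ref: type | offset ]
 *       - SHARED_DATA_REF_KEY:  offset = parent bytenr, followed by a
 *                               btrfs_shared_data_ref { count }
 *       - EXTENT_DATA_REF_KEY:  a btrfs_extent_data_ref
 *                               { root, objectid, offset, count }
 *                               overlays the inline ref's offset field
 *
 * which is why 'size' above is sizeof(*extent_item) plus
 * btrfs_extent_inline_ref_size(type).
 */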
4136
4137 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4138                                      struct btrfs_root *root,
4139                                      u64 parent, u64 root_objectid,
4140                                      u64 flags, struct btrfs_disk_key *key,
4141                                      int level, struct btrfs_key *ins)
4142 {
4143         int ret;
4144         struct btrfs_fs_info *fs_info = root->fs_info;
4145         struct btrfs_extent_item *extent_item;
4146         struct btrfs_tree_block_info *block_info;
4147         struct btrfs_extent_inline_ref *iref;
4148         struct btrfs_path *path;
4149         struct extent_buffer *leaf;
4150         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
4151
4152         path = btrfs_alloc_path();
4153         BUG_ON(!path);
4154
4155         path->leave_spinning = 1;
4156         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4157                                       ins, size);
4158         BUG_ON(ret);
4159
4160         leaf = path->nodes[0];
4161         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4162                                      struct btrfs_extent_item);
4163         btrfs_set_extent_refs(leaf, extent_item, 1);
4164         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4165         btrfs_set_extent_flags(leaf, extent_item,
4166                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4167         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4168
4169         btrfs_set_tree_block_key(leaf, block_info, key);
4170         btrfs_set_tree_block_level(leaf, block_info, level);
4171
4172         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4173         if (parent > 0) {
4174                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4175                 btrfs_set_extent_inline_ref_type(leaf, iref,
4176                                                  BTRFS_SHARED_BLOCK_REF_KEY);
4177                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4178         } else {
4179                 btrfs_set_extent_inline_ref_type(leaf, iref,
4180                                                  BTRFS_TREE_BLOCK_REF_KEY);
4181                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4182         }
4183
4184         btrfs_mark_buffer_dirty(leaf);
4185         btrfs_free_path(path);
4186
4187         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4188                                  1, 0);
4189         if (ret) {
4190                 printk(KERN_ERR "btrfs update block group failed for %llu "
4191                        "%llu\n", (unsigned long long)ins->objectid,
4192                        (unsigned long long)ins->offset);
4193                 BUG();
4194         }
4195         return ret;
4196 }
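/*
 * Tree blocks carry one extra header between the extent item and the
 * inline ref.  Sketch of the layout built above:
 *
 *     [ btrfs_extent_item: refs=1 | generation | flags (FLAG_TREE_BLOCK) ]
 *     [ btrfs_tree_block_info: key | level ]
 *     [ btrfs_extent_inline_ref ]
 *       - parent > 0:  SHARED_BLOCK_REF_KEY, offset = parent bytenr
 *                      (only valid with BLOCK_FLAG_FULL_BACKREF set)
 *       - parent == 0: TREE_BLOCK_REF_KEY, offset = root_objectid
 */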
4197
4198 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4199                                      struct btrfs_root *root,
4200                                      u64 root_objectid, u64 owner,
4201                                      u64 offset, struct btrfs_key *ins)
4202 {
4203         int ret;
4204
4205         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4206
4207         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4208                                          0, root_objectid, owner, offset,
4209                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
4210         return ret;
4211 }
4212
4213 /*
4214  * this is used by the tree logging recovery code.  It records that
4215  * an extent has been allocated and makes sure to clear the free
4216  * space cache bits as well
4217  */
4218 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4219                                    struct btrfs_root *root,
4220                                    u64 root_objectid, u64 owner, u64 offset,
4221                                    struct btrfs_key *ins)
4222 {
4223         int ret;
4224         struct btrfs_block_group_cache *block_group;
4225
4226         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4227         cache_block_group(block_group);
4228         wait_event(block_group->caching_q,
4229                    block_group_cache_done(block_group));
4230
4231         ret = btrfs_remove_free_space(block_group, ins->objectid,
4232                                       ins->offset);
4233         BUG_ON(ret);
4234         btrfs_put_block_group(block_group);
4235         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4236                                          0, owner, offset, ins, 1);
4237         return ret;
4238 }
4239
4240 /*
4241  * finds a free extent and does all the dirty work required for allocation.
4242  * returns the key for the allocated extent through ins; the caller is
4243  * expected to create a tree buffer for it (see btrfs_init_new_buffer).
4244  *
4245  * returns 0 if everything worked, non-zero otherwise.
4246  */
4247 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4248                             struct btrfs_root *root,
4249                             u64 num_bytes, u64 parent, u64 root_objectid,
4250                             struct btrfs_disk_key *key, int level,
4251                             u64 empty_size, u64 hint_byte, u64 search_end,
4252                             struct btrfs_key *ins)
4253 {
4254         int ret;
4255         u64 flags = 0;
4256
4257         ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4258                                      empty_size, hint_byte, search_end,
4259                                      ins, 0);
4260         if (ret)
4261                 return ret;
4262
4263         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4264                 if (parent == 0)
4265                         parent = ins->objectid;
4266                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4267         } else
4268                 BUG_ON(parent > 0);
4269
4270         update_reserved_extents(root, ins->objectid, ins->offset, 1);
4271         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4272                 struct btrfs_delayed_extent_op *extent_op;
4273                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4274                 BUG_ON(!extent_op);
4275                 if (key)
4276                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
4277                 else
4278                         memset(&extent_op->key, 0, sizeof(extent_op->key));
4279                 extent_op->flags_to_set = flags;
4280                 extent_op->update_key = 1;
4281                 extent_op->update_flags = 1;
4282                 extent_op->is_data = 0;
4283
4284                 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4285                                         ins->offset, parent, root_objectid,
4286                                         level, BTRFS_ADD_DELAYED_EXTENT,
4287                                         extent_op);
4288                 BUG_ON(ret);
4289         }
4290         return ret;
4291 }
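/*
 * The btrfs_delayed_extent_op attached above defers the key/flags update
 * to delayed-ref processing.  Field meanings as used here (a reading of
 * this call site, not a general contract):
 *
 *     flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF or 0
 *     update_key   = 1   copy 'key' into the tree_block_info
 *     update_flags = 1   OR flags_to_set into the extent item flags
 *     is_data      = 0   tree block, so a tree_block_info is present
 */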
4292
4293 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4294                                             struct btrfs_root *root,
4295                                             u64 bytenr, u32 blocksize,
4296                                             int level)
4297 {
4298         struct extent_buffer *buf;
4299
4300         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4301         if (!buf)
4302                 return ERR_PTR(-ENOMEM);
4303         btrfs_set_header_generation(buf, trans->transid);
4304         btrfs_set_buffer_lockdep_class(buf, level);
4305         btrfs_tree_lock(buf);
4306         clean_tree_block(trans, root, buf);
4307
4308         btrfs_set_lock_blocking(buf);
4309         btrfs_set_buffer_uptodate(buf);
4310
4311         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4312                 set_extent_dirty(&root->dirty_log_pages, buf->start,
4313                          buf->start + buf->len - 1, GFP_NOFS);
4314         } else {
4315                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4316                          buf->start + buf->len - 1, GFP_NOFS);
4317         }
4318         trans->blocks_used++;
4319         /* this returns a buffer locked for blocking */
4320         return buf;
4321 }
4322
4323 /*
4324  * helper function to allocate a block for a given tree
4325  * returns the tree buffer or NULL.
4326  */
4327 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4328                                         struct btrfs_root *root, u32 blocksize,
4329                                         u64 parent, u64 root_objectid,
4330                                         struct btrfs_disk_key *key, int level,
4331                                         u64 hint, u64 empty_size)
4332 {
4333         struct btrfs_key ins;
4334         int ret;
4335         struct extent_buffer *buf;
4336
4337         ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4338                                key, level, empty_size, hint, (u64)-1, &ins);
4339         if (ret) {
4340                 BUG_ON(ret > 0);
4341                 return ERR_PTR(ret);
4342         }
4343
4344         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4345                                     blocksize, level);
4346         return buf;
4347 }
4348
4349 #if 0
4350 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
4351                         struct btrfs_root *root, struct extent_buffer *leaf)
4352 {
4353         u64 disk_bytenr;
4354         u64 num_bytes;
4355         struct btrfs_key key;
4356         struct btrfs_file_extent_item *fi;
4357         u32 nritems;
4358         int i;
4359         int ret;
4360
4361         BUG_ON(!btrfs_is_leaf(leaf));
4362         nritems = btrfs_header_nritems(leaf);
4363
4364         for (i = 0; i < nritems; i++) {
4365                 cond_resched();
4366                 btrfs_item_key_to_cpu(leaf, &key, i);
4367
4368                 /* only extents have references, skip everything else */
4369                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4370                         continue;
4371
4372                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4373
4374                 /* inline extents live in the btree, they don't have refs */
4375                 if (btrfs_file_extent_type(leaf, fi) ==
4376                     BTRFS_FILE_EXTENT_INLINE)
4377                         continue;
4378
4379                 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4380
4381                 /* holes don't have refs */
4382                 if (disk_bytenr == 0)
4383                         continue;
4384
4385                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4386                 ret = btrfs_free_extent(trans, root, disk_bytenr, num_bytes,
4387                                         leaf->start, 0, key.objectid, 0);
4388                 BUG_ON(ret);
4389         }
4390         return 0;
4391 }
4392
4393 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
4394                                         struct btrfs_root *root,
4395                                         struct btrfs_leaf_ref *ref)
4396 {
4397         int i;
4398         int ret;
4399         struct btrfs_extent_info *info;
4400         struct refsort *sorted;
4401
4402         if (ref->nritems == 0)
4403                 return 0;
4404
4405         sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
             if (!sorted)
                     return -ENOMEM;
4406         for (i = 0; i < ref->nritems; i++) {
4407                 sorted[i].bytenr = ref->extents[i].bytenr;
4408                 sorted[i].slot = i;
4409         }
4410         sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
4411
4412         /*
4413          * walk the sorted copy so the extents are freed in bytenr
4414          * order, which keeps the extent tree updates clustered
4415          */
4416         for (i = 0; i < ref->nritems; i++) {
4417                 info = ref->extents + sorted[i].slot;
4418                 ret = btrfs_free_extent(trans, root, info->bytenr,
4419                                           info->num_bytes, ref->bytenr,
4420                                           ref->owner, ref->generation,
4421                                           info->objectid, 0);
4422
4423                 atomic_inc(&root->fs_info->throttle_gen);
4424                 wake_up(&root->fs_info->transaction_throttle);
4425                 cond_resched();
4426
4427                 BUG_ON(ret);
4428                 info++;
4429         }
4430
4431         kfree(sorted);
4432         return 0;
4433 }
4434
4435
4436 static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
4437                                      struct btrfs_root *root, u64 start,
4438                                      u64 len, u32 *refs)
4439 {
4440         int ret;
4441
4442         ret = btrfs_lookup_extent_refs(trans, root, start, len, refs);
4443         BUG_ON(ret);
4444
4445 #if 0 /* some debugging code in case we see problems here */
4446         /* if the refs count is one, it won't get increased again.  But
4447          * if the ref count is > 1, someone may be decreasing it at
4448          * the same time we are.
4449          */
4450         if (*refs != 1) {
4451                 struct extent_buffer *eb = NULL;
4452                 eb = btrfs_find_create_tree_block(root, start, len);
4453                 if (eb)
4454                         btrfs_tree_lock(eb);
4455
4456                 mutex_lock(&root->fs_info->alloc_mutex);
4457                 ret = lookup_extent_ref(NULL, root, start, len, refs);
4458                 BUG_ON(ret);
4459                 mutex_unlock(&root->fs_info->alloc_mutex);
4460
4461                 if (eb) {
4462                         btrfs_tree_unlock(eb);
4463                         free_extent_buffer(eb);
4464                 }
4465                 if (*refs == 1) {
4466                         printk(KERN_ERR "btrfs block %llu went down to one "
4467                                "during drop_snap\n", (unsigned long long)start);
4468                 }
4469
4470         }
4471 #endif
4472
4473         cond_resched();
4474         return ret;
4475 }
4476
4477
4478 /*
4479  * this is used while deleting old snapshots, and it drops the refs
4480  * on a whole subtree starting from a level 1 node.
4481  *
4482  * The idea is to sort all the leaf pointers, and then drop the
4483  * ref on all the leaves in order.  Most of the time the leaves
4484  * will have ref cache entries, so no leaf IOs will be required to
4485  * find the extents they have references on.
4486  *
4487  * For each leaf, any references it has are also dropped in order
4488  *
4489  * This ends up dropping the references in something close to optimal
4490  * order for reading and modifying the extent allocation tree.
4491  */
4492 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
4493                                         struct btrfs_root *root,
4494                                         struct btrfs_path *path)
4495 {
4496         u64 bytenr;
4497         u64 root_owner;
4498         u64 root_gen;
4499         struct extent_buffer *eb = path->nodes[1];
4500         struct extent_buffer *leaf;
4501         struct btrfs_leaf_ref *ref;
4502         struct refsort *sorted = NULL;
4503         int nritems = btrfs_header_nritems(eb);
4504         int ret;
4505         int i;
4506         int refi = 0;
4507         int slot = path->slots[1];
4508         u32 blocksize = btrfs_level_size(root, 0);
4509         u32 refs;
4510
4511         if (nritems == 0)
4512                 goto out;
4513
4514         root_owner = btrfs_header_owner(eb);
4515         root_gen = btrfs_header_generation(eb);
4516         sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
             if (!sorted)
                     return -ENOMEM;
4517
4518         /*
4519          * step one, sort all the leaf pointers so we don't scribble
4520          * randomly into the extent allocation tree
4521          */
4522         for (i = slot; i < nritems; i++) {
4523                 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
4524                 sorted[refi].slot = i;
4525                 refi++;
4526         }
4527
4528         /*
4529          * nritems won't be zero, but if we're picking up drop_snapshot
4530          * after a crash, slot might be > 0, so double check things
4531          * just in case.
4532          */
4533         if (refi == 0)
4534                 goto out;
4535
4536         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
4537
4538         /*
4539          * the first loop frees everything the leaves point to
4540          */
4541         for (i = 0; i < refi; i++) {
4542                 u64 ptr_gen;
4543
4544                 bytenr = sorted[i].bytenr;
4545
4546                 /*
4547                  * check the reference count on this leaf.  If it is > 1
4548                  * we just decrement it below and don't update any
4549                  * of the refs the leaf points to.
4550                  */
4551                 ret = drop_snap_lookup_refcount(trans, root, bytenr,
4552                                                 blocksize, &refs);
4553                 BUG_ON(ret);
4554                 if (refs != 1)
4555                         continue;
4556
4557                 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
4558
4559                 /*
4560                  * the leaf only had one reference, which means the
4561                  * only thing pointing to this leaf is the snapshot
4562                  * we're deleting.  It isn't possible for the reference
4563                  * count to increase again later
4564                  *
4565                  * The reference cache is checked for the leaf,
4566                  * and if found we'll be able to drop any refs held by
4567                  * the leaf without needing to read it in.
4568                  */
4569                 ref = btrfs_lookup_leaf_ref(root, bytenr);
4570                 if (ref && ref->generation != ptr_gen) {
4571                         btrfs_free_leaf_ref(root, ref);
4572                         ref = NULL;
4573                 }
4574                 if (ref) {
4575                         ret = cache_drop_leaf_ref(trans, root, ref);
4576                         BUG_ON(ret);
4577                         btrfs_remove_leaf_ref(root, ref);
4578                         btrfs_free_leaf_ref(root, ref);
4579                 } else {
4580                         /*
4581                          * the leaf wasn't in the reference cache, so
4582                          * we have to read it.
4583                          */
4584                         leaf = read_tree_block(root, bytenr, blocksize,
4585                                                ptr_gen);
4586                         ret = btrfs_drop_leaf_ref(trans, root, leaf);
4587                         BUG_ON(ret);
4588                         free_extent_buffer(leaf);
4589                 }
4590                 atomic_inc(&root->fs_info->throttle_gen);
4591                 wake_up(&root->fs_info->transaction_throttle);
4592                 cond_resched();
4593         }
4594
4595         /*
4596          * run through the loop again to free the refs on the leaves.
4597          * This is faster than doing it in the loop above because
4598          * the leaves are likely to be clustered together.  We end up
4599          * working in nice chunks on the extent allocation tree.
4600          */
4601         for (i = 0; i < refi; i++) {
4602                 bytenr = sorted[i].bytenr;
4603                 ret = btrfs_free_extent(trans, root, bytenr,
4604                                         blocksize, eb->start,
4605                                         root_owner, root_gen, 0, 1);
4606                 BUG_ON(ret);
4607
4608                 atomic_inc(&root->fs_info->throttle_gen);
4609                 wake_up(&root->fs_info->transaction_throttle);
4610                 cond_resched();
4611         }
4612 out:
4613         kfree(sorted);
4614
4615         /*
4616          * update the path to show we've processed the entire level 1
4617          * node.  This will get saved into the root's drop_snapshot_progress
4618          * field so these drops are not repeated again if this transaction
4619          * commits.
4620          */
4621         path->slots[1] = nritems;
4622         return 0;
4623 }
4624
4625 /*
4626  * helper function for drop_snapshot, this walks down the tree dropping ref
4627  * counts as it goes.
4628  */
4629 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4630                                    struct btrfs_root *root,
4631                                    struct btrfs_path *path, int *level)
4632 {
4633         u64 root_owner;
4634         u64 root_gen;
4635         u64 bytenr;
4636         u64 ptr_gen;
4637         struct extent_buffer *next;
4638         struct extent_buffer *cur;
4639         struct extent_buffer *parent;
4640         u32 blocksize;
4641         int ret;
4642         u32 refs;
4643
4644         WARN_ON(*level < 0);
4645         WARN_ON(*level >= BTRFS_MAX_LEVEL);
4646         ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
4647                                 path->nodes[*level]->len, &refs);
4648         BUG_ON(ret);
4649         if (refs > 1)
4650                 goto out;
4651
4652         /*
4653          * walk down to the last node level and free all the leaves
4654          */
4655         while (*level >= 0) {
4656                 WARN_ON(*level < 0);
4657                 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4658                 cur = path->nodes[*level];
4659
4660                 if (btrfs_header_level(cur) != *level)
4661                         WARN_ON(1);
4662
4663                 if (path->slots[*level] >=
4664                     btrfs_header_nritems(cur))
4665                         break;
4666
4667                 /* the new code goes down to level 1 and does all the
4668                  * leaves pointed to by that node in bulk.  So, this check
4669                  * for level 0 will always be false.
4670                  *
4671                  * But, the disk format allows the drop_snapshot_progress
4672                  * field in the root to leave things in a state where
4673                  * a leaf will need cleaning up here.  If someone crashes
4674                  * with the old code and then boots with the new code,
4675                  * we might find a leaf here.
4676                  */
4677                 if (*level == 0) {
4678                         ret = btrfs_drop_leaf_ref(trans, root, cur);
4679                         BUG_ON(ret);
4680                         break;
4681                 }
4682
4683                 /*
4684                  * once we get to level one, process the whole node
4685                  * at once, including everything below it.
4686                  */
4687                 if (*level == 1) {
4688                         ret = drop_level_one_refs(trans, root, path);
4689                         BUG_ON(ret);
4690                         break;
4691                 }
4692
4693                 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
4694                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
4695                 blocksize = btrfs_level_size(root, *level - 1);
4696
4697                 ret = drop_snap_lookup_refcount(trans, root, bytenr,
4698                                                 blocksize, &refs);
4699                 BUG_ON(ret);
4700
4701                 /*
4702                  * if there is more than one reference, we don't need
4703                  * to read that node to drop any references it has.  We
4704                  * just drop the ref we hold on that node and move on to the
4705                  * next slot in this level.
4706                  */
4707                 if (refs != 1) {
4708                         parent = path->nodes[*level];
4709                         root_owner = btrfs_header_owner(parent);
4710                         root_gen = btrfs_header_generation(parent);
4711                         path->slots[*level]++;
4712
4713                         ret = btrfs_free_extent(trans, root, bytenr,
4714                                                 blocksize, parent->start,
4715                                                 root_owner, root_gen,
4716                                                 *level - 1, 1);
4717                         BUG_ON(ret);
4718
4719                         atomic_inc(&root->fs_info->throttle_gen);
4720                         wake_up(&root->fs_info->transaction_throttle);
4721                         cond_resched();
4722
4723                         continue;
4724                 }
4725
4726                 /*
4727                  * we need to keep freeing things in the next level down.
4728                  * read the block and loop around to process it
4729                  */
4730                 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
4731                 WARN_ON(*level <= 0);
4732                 if (path->nodes[*level-1])
4733                         free_extent_buffer(path->nodes[*level-1]);
4734                 path->nodes[*level-1] = next;
4735                 *level = btrfs_header_level(next);
4736                 path->slots[*level] = 0;
4737                 cond_resched();
4738         }
4739 out:
4740         WARN_ON(*level < 0);
4741         WARN_ON(*level >= BTRFS_MAX_LEVEL);
4742
4743         if (path->nodes[*level] == root->node) {
4744                 parent = path->nodes[*level];
4745                 bytenr = path->nodes[*level]->start;
4746         } else {
4747                 parent = path->nodes[*level + 1];
4748                 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
4749         }
4750
4751         blocksize = btrfs_level_size(root, *level);
4752         root_owner = btrfs_header_owner(parent);
4753         root_gen = btrfs_header_generation(parent);
4754
4755         /*
4756          * cleanup and free the reference on the last node
4757          * we processed
4758          */
4759         ret = btrfs_free_extent(trans, root, bytenr, blocksize,
4760                                   parent->start, root_owner, root_gen,
4761                                   *level, 1);
4762         free_extent_buffer(path->nodes[*level]);
4763         path->nodes[*level] = NULL;
4764
4765         *level += 1;
4766         BUG_ON(ret);
4767
4768         cond_resched();
4769         return 0;
4770 }
4771 #endif
4772
4773 struct walk_control {
4774         u64 refs[BTRFS_MAX_LEVEL];
4775         u64 flags[BTRFS_MAX_LEVEL];
4776         struct btrfs_key update_progress;
4777         int stage;
4778         int level;
4779         int shared_level;
4780         int update_ref;
4781         int keep_locks;
4782 };
4783
4784 #define DROP_REFERENCE  1
4785 #define UPDATE_BACKREF  2
4786
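/*
 * Editor's sketch of the two-stage walk these stage values drive; the
 * transitions are inferred from walk_down_proc/walk_up_proc below:
 *
 *     DROP_REFERENCE
 *         refs == 1             keep descending; blocks are freed on
 *                               the way back up
 *         refs > 1, update_ref  switch to UPDATE_BACKREF and remember
 *                               the level in wc->shared_level
 *         refs > 1, otherwise   stop descending (walk_down_proc
 *                               returns 1)
 *
 *     UPDATE_BACKREF
 *         add refs for every pointer in the shared block and set
 *         BTRFS_BLOCK_FLAG_FULL_BACKREF; when walk_up_proc reaches
 *         wc->shared_level again it records the next key in
 *         wc->update_progress and flips back to DROP_REFERENCE.
 */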
4787 /*
4788  * helper to process tree block while walking down the tree.
4789  *
4790  * when wc->stage == DROP_REFERENCE, this function checks the
4791  * reference count of the block. if the block is shared and
4792  * we need to update back refs for the subtree rooted at the
4793  * block, this function changes wc->stage to UPDATE_BACKREF
4794  *
4795  * when wc->stage == UPDATE_BACKREF, this function updates
4796  * back refs for pointers in the block.
4797  *
4798  * NOTE: return value 1 means we should stop walking down.
4799  */
4800 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4801                                    struct btrfs_root *root,
4802                                    struct btrfs_path *path,
4803                                    struct walk_control *wc)
4804 {
4805         int level = wc->level;
4806         struct extent_buffer *eb = path->nodes[level];
4807         struct btrfs_key key;
4808         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4809         int ret;
4810
4811         if (wc->stage == UPDATE_BACKREF &&
4812             btrfs_header_owner(eb) != root->root_key.objectid)
4813                 return 1;
4814
4815         /*
4816          * when reference count of tree block is 1, it won't increase
4817          * again. once full backref flag is set, we never clear it.
4818          */
4819         if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4820             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4821                 BUG_ON(!path->locks[level]);
4822                 ret = btrfs_lookup_extent_info(trans, root,
4823                                                eb->start, eb->len,
4824                                                &wc->refs[level],
4825                                                &wc->flags[level]);
4826                 BUG_ON(ret);
4827                 BUG_ON(wc->refs[level] == 0);
4828         }
4829
4830         if (wc->stage == DROP_REFERENCE &&
4831             wc->update_ref && wc->refs[level] > 1) {
4832                 BUG_ON(eb == root->node);
4833                 BUG_ON(path->slots[level] > 0);
4834                 if (level == 0)
4835                         btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
4836                 else
4837                         btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
4838                 if (btrfs_header_owner(eb) == root->root_key.objectid &&
4839                     btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
4840                         wc->stage = UPDATE_BACKREF;
4841                         wc->shared_level = level;
4842                 }
4843         }
4844
4845         if (wc->stage == DROP_REFERENCE) {
4846                 if (wc->refs[level] > 1)
4847                         return 1;
4848
4849                 if (path->locks[level] && !wc->keep_locks) {
4850                         btrfs_tree_unlock(eb);
4851                         path->locks[level] = 0;
4852                 }
4853                 return 0;
4854         }
4855
4856         /* wc->stage == UPDATE_BACKREF */
4857         if (!(wc->flags[level] & flag)) {
4858                 BUG_ON(!path->locks[level]);
4859                 ret = btrfs_inc_ref(trans, root, eb, 1);
4860                 BUG_ON(ret);
4861                 ret = btrfs_dec_ref(trans, root, eb, 0);
4862                 BUG_ON(ret);
4863                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4864                                                   eb->len, flag, 0);
4865                 BUG_ON(ret);
4866                 wc->flags[level] |= flag;
4867         }
4868
4869         /*
4870          * the block is shared by multiple trees, so it's not good to
4871          * keep the tree lock
4872          */
4873         if (path->locks[level] && level > 0) {
4874                 btrfs_tree_unlock(eb);
4875                 path->locks[level] = 0;
4876         }
4877         return 0;
4878 }
4879
4880 /*
4881  * helper to process tree block while walking up the tree.
4882  *
4883  * when wc->stage == DROP_REFERENCE, this function drops
4884  * reference count on the block.
4885  *
4886  * when wc->stage == UPDATE_BACKREF, this function changes
4887  * wc->stage back to DROP_REFERENCE if we changed wc->stage
4888  * to UPDATE_BACKREF previously while processing the block.
4889  *
4890  * NOTE: return value 1 means we should stop walking up.
4891  */
4892 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4893                                  struct btrfs_root *root,
4894                                  struct btrfs_path *path,
4895                                  struct walk_control *wc)
4896 {
4897         int ret = 0;
4898         int level = wc->level;
4899         struct extent_buffer *eb = path->nodes[level];
4900         u64 parent = 0;
4901
4902         if (wc->stage == UPDATE_BACKREF) {
4903                 BUG_ON(wc->shared_level < level);
4904                 if (level < wc->shared_level)
4905                         goto out;
4906
4907                 BUG_ON(wc->refs[level] <= 1);
4908                 ret = find_next_key(path, level + 1, &wc->update_progress);
4909                 if (ret > 0)
4910                         wc->update_ref = 0;
4911
4912                 wc->stage = DROP_REFERENCE;
4913                 wc->shared_level = -1;
4914                 path->slots[level] = 0;
4915
4916                 /*
4917                  * check reference count again if the block isn't locked.
4918                  * we should start walking down the tree again if reference
4919                  * count is one.
4920                  */
4921                 if (!path->locks[level]) {
4922                         BUG_ON(level == 0);
4923                         btrfs_tree_lock(eb);
4924                         btrfs_set_lock_blocking(eb);
4925                         path->locks[level] = 1;
4926
4927                         ret = btrfs_lookup_extent_info(trans, root,
4928                                                        eb->start, eb->len,
4929                                                        &wc->refs[level],
4930                                                        &wc->flags[level]);
4931                         BUG_ON(ret);
4932                         BUG_ON(wc->refs[level] == 0);
4933                         if (wc->refs[level] == 1) {
4934                                 btrfs_tree_unlock(eb);
4935                                 path->locks[level] = 0;
4936                                 return 1;
4937                         }
4938                 } else {
4939                         BUG_ON(level != 0);
4940                 }
4941         }
4942
4943         /* wc->stage == DROP_REFERENCE */
4944         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
4945
4946         if (wc->refs[level] == 1) {
4947                 if (level == 0) {
4948                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4949                                 ret = btrfs_dec_ref(trans, root, eb, 1);
4950                         else
4951                                 ret = btrfs_dec_ref(trans, root, eb, 0);
4952                         BUG_ON(ret);
4953                 }
4954                 /* make block locked assertion in clean_tree_block happy */
4955                 if (!path->locks[level] &&
4956                     btrfs_header_generation(eb) == trans->transid) {
4957                         btrfs_tree_lock(eb);
4958                         btrfs_set_lock_blocking(eb);
4959                         path->locks[level] = 1;
4960                 }
4961                 clean_tree_block(trans, root, eb);
4962         }
4963
4964         if (eb == root->node) {
4965                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4966                         parent = eb->start;
4967                 else
4968                         BUG_ON(root->root_key.objectid !=
4969                                btrfs_header_owner(eb));
4970         } else {
4971                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4972                         parent = path->nodes[level + 1]->start;
4973                 else
4974                         BUG_ON(root->root_key.objectid !=
4975                                btrfs_header_owner(path->nodes[level + 1]));
4976         }
4977
4978         ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
4979                                 root->root_key.objectid, level, 0);
4980         BUG_ON(ret);
4981 out:
4982         wc->refs[level] = 0;
4983         wc->flags[level] = 0;
4984         return ret;
4985 }
4986
4987 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4988                                    struct btrfs_root *root,
4989                                    struct btrfs_path *path,
4990                                    struct walk_control *wc)
4991 {
4992         struct extent_buffer *next;
4993         struct extent_buffer *cur;
4994         u64 bytenr;
4995         u64 ptr_gen;
4996         u32 blocksize;
4997         int level = wc->level;
4998         int ret;
4999
5000         while (level >= 0) {
5001                 cur = path->nodes[level];
5002                 BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
5003
5004                 ret = walk_down_proc(trans, root, path, wc);
5005                 if (ret > 0)
5006                         break;
5007
5008                 if (level == 0)
5009                         break;
5010
5011                 bytenr = btrfs_node_blockptr(cur, path->slots[level]);
5012                 blocksize = btrfs_level_size(root, level - 1);
5013                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
5014
5015                 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
                     if (!next)
                             return -EIO;
5016                 btrfs_tree_lock(next);
5017                 btrfs_set_lock_blocking(next);
5018
5019                 level--;
5020                 BUG_ON(level != btrfs_header_level(next));
5021                 path->nodes[level] = next;
5022                 path->slots[level] = 0;
5023                 path->locks[level] = 1;
5024                 wc->level = level;
5025         }
5026         return 0;
5027 }
5028
5029 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5030                                  struct btrfs_root *root,
5031                                  struct btrfs_path *path,
5032                                  struct walk_control *wc, int max_level)
5033 {
5034         int level = wc->level;
5035         int ret;
5036
5037         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5038         while (level < max_level && path->nodes[level]) {
5039                 wc->level = level;
5040                 if (path->slots[level] + 1 <
5041                     btrfs_header_nritems(path->nodes[level])) {
5042                         path->slots[level]++;
5043                         return 0;
5044                 } else {
5045                         ret = walk_up_proc(trans, root, path, wc);
5046                         if (ret > 0)
5047                                 return 0;
5048
5049                         if (path->locks[level]) {
5050                                 btrfs_tree_unlock(path->nodes[level]);
5051                                 path->locks[level] = 0;
5052                         }
5053                         free_extent_buffer(path->nodes[level]);
5054                         path->nodes[level] = NULL;
5055                         level++;
5056                 }
5057         }
5058         return 1;
5059 }
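/*
 * walk_down_tree and walk_up_tree are meant to be driven in a loop, as
 * btrfs_drop_snapshot does below.  A minimal sketch of the driver
 * (transaction throttling and progress saving omitted):
 *
 *     while (1) {
 *             ret = walk_down_tree(trans, root, path, wc);
 *             if (ret < 0)
 *                     break;          (error)
 *             ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *             if (ret < 0)
 *                     break;          (error)
 *             if (ret > 0)
 *                     break;          (entire tree processed)
 *     }
 */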
5060
5061 /*
5062  * drop a subvolume tree.
5063  *
5064  * this function traverses the tree freeing any blocks that are only
5065  * referenced by the tree.
5066  *
5067  * when a shared tree block is found, this function decreases its
5068  * reference count by one. if update_ref is true, this function
5069  * also makes sure backrefs for the shared block and all lower level
5070  * blocks are properly updated.
5071  */
5072 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5073 {
5074         struct btrfs_path *path;
5075         struct btrfs_trans_handle *trans;
5076         struct btrfs_root *tree_root = root->fs_info->tree_root;
5077         struct btrfs_root_item *root_item = &root->root_item;
5078         struct walk_control *wc;
5079         struct btrfs_key key;
5080         int err = 0;
5081         int ret;
5082         int level;
5083
5084         path = btrfs_alloc_path();
5085         BUG_ON(!path);
5086
5087         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5088         BUG_ON(!wc);
5089
5090         trans = btrfs_start_transaction(tree_root, 1);
5091
5092         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5093                 level = btrfs_header_level(root->node);
5094                 path->nodes[level] = btrfs_lock_root_node(root);
5095                 btrfs_set_lock_blocking(path->nodes[level]);
5096                 path->slots[level] = 0;
5097                 path->locks[level] = 1;
5098                 memset(&wc->update_progress, 0,
5099                        sizeof(wc->update_progress));
5100         } else {
5101                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5102                 memcpy(&wc->update_progress, &key,
5103                        sizeof(wc->update_progress));
5104
5105                 level = root_item->drop_level;
5106                 BUG_ON(level == 0);
5107                 path->lowest_level = level;
5108                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5109                 path->lowest_level = 0;
5110                 if (ret < 0) {
5111                         err = ret;
5112                         goto out;
5113                 }
5114                 btrfs_node_key_to_cpu(path->nodes[level], &key,
5115                                       path->slots[level]);
5116                 WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
5117
5118                 /*
5119                  * unlock our path, this is safe because only this
5120                  * function is allowed to delete this snapshot
5121                  */
5122                 btrfs_unlock_up_safe(path, 0);
5123
5124                 level = btrfs_header_level(root->node);
5125                 while (1) {
5126                         btrfs_tree_lock(path->nodes[level]);
5127                         btrfs_set_lock_blocking(path->nodes[level]);
5128
5129                         ret = btrfs_lookup_extent_info(trans, root,
5130                                                 path->nodes[level]->start,
5131                                                 path->nodes[level]->len,
5132                                                 &wc->refs[level],
5133                                                 &wc->flags[level]);
5134                         BUG_ON(ret);
5135                         BUG_ON(wc->refs[level] == 0);
5136
5137                         if (level == root_item->drop_level)
5138                                 break;
5139
5140                         btrfs_tree_unlock(path->nodes[level]);
5141                         WARN_ON(wc->refs[level] != 1);
5142                         level--;
5143                 }
5144         }
5145
5146         wc->level = level;
5147         wc->shared_level = -1;
5148         wc->stage = DROP_REFERENCE;
5149         wc->update_ref = update_ref;
5150         wc->keep_locks = 0;
5151
5152         while (1) {
5153                 ret = walk_down_tree(trans, root, path, wc);
5154                 if (ret < 0) {
5155                         err = ret;
5156                         break;
5157                 }
5158
5159                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5160                 if (ret < 0) {
5161                         err = ret;
5162                         break;
5163                 }
5164
5165                 if (ret > 0) {
5166                         BUG_ON(wc->stage != DROP_REFERENCE);
5167                         break;
5168                 }
5169
5170                 if (wc->stage == DROP_REFERENCE) {
5171                         level = wc->level;
5172                         btrfs_node_key(path->nodes[level],
5173                                        &root_item->drop_progress,
5174                                        path->slots[level]);
5175                         root_item->drop_level = level;
5176                 }
5177
5178                 BUG_ON(wc->level == 0);
5179                 if (trans->transaction->in_commit ||
5180                     trans->transaction->delayed_refs.flushing) {
5181                         ret = btrfs_update_root(trans, tree_root,
5182                                                 &root->root_key,
5183                                                 root_item);
5184                         BUG_ON(ret);
5185
5186                         btrfs_end_transaction(trans, tree_root);
5187                         trans = btrfs_start_transaction(tree_root, 1);
5188                 } else {
5189                         unsigned long update;
5190                         update = trans->delayed_ref_updates;
5191                         trans->delayed_ref_updates = 0;
5192                         if (update)
5193                                 btrfs_run_delayed_refs(trans, tree_root,
5194                                                        update);
5195                 }
5196         }
5197         btrfs_release_path(root, path);
5198         BUG_ON(err);
5199
5200         ret = btrfs_del_root(trans, tree_root, &root->root_key);
5201         BUG_ON(ret);
5202
5203         free_extent_buffer(root->node);
5204         free_extent_buffer(root->commit_root);
5205         kfree(root);
5206 out:
5207         btrfs_end_transaction(trans, tree_root);
5208         kfree(wc);
5209         btrfs_free_path(path);
5210         return err;
5211 }
5212
5213 /*
5214  * drop subtree rooted at tree block 'node'.
5215  *
5216  * NOTE: this function will unlock and release tree block 'node'
5217  */
5218 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5219                         struct btrfs_root *root,
5220                         struct extent_buffer *node,
5221                         struct extent_buffer *parent)
5222 {
5223         struct btrfs_path *path;
5224         struct walk_control *wc;
5225         int level;
5226         int parent_level;
5227         int ret = 0;
5228         int wret;
5229
5230         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5231
5232         path = btrfs_alloc_path();
5233         BUG_ON(!path);
5234
5235         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5236         BUG_ON(!wc);
5237
5238         btrfs_assert_tree_locked(parent);
5239         parent_level = btrfs_header_level(parent);
5240         extent_buffer_get(parent);
5241         path->nodes[parent_level] = parent;
5242         path->slots[parent_level] = btrfs_header_nritems(parent);
5243
5244         btrfs_assert_tree_locked(node);
5245         level = btrfs_header_level(node);
5246         path->nodes[level] = node;
5247         path->slots[level] = 0;
5248         path->locks[level] = 1;
5249
5250         wc->refs[parent_level] = 1;
5251         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5252         wc->level = level;
5253         wc->shared_level = -1;
5254         wc->stage = DROP_REFERENCE;
5255         wc->update_ref = 0;
5256         wc->keep_locks = 1;
5257
5258         while (1) {
5259                 wret = walk_down_tree(trans, root, path, wc);
5260                 if (wret < 0) {
5261                         ret = wret;
5262                         break;
5263                 }
5264
5265                 wret = walk_up_tree(trans, root, path, wc, parent_level);
5266                 if (wret < 0)
5267                         ret = wret;
5268                 if (wret != 0)
5269                         break;
5270         }
5271
5272         kfree(wc);
5273         btrfs_free_path(path);
5274         return ret;
5275 }
5276
5277 #if 0
5278 static unsigned long calc_ra(unsigned long start, unsigned long last,
5279                              unsigned long nr)
5280 {
5281         return min(last, start + nr - 1);
5282 }
5283
5284 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5285                                          u64 len)
5286 {
5287         u64 page_start;
5288         u64 page_end;
5289         unsigned long first_index;
5290         unsigned long last_index;
5291         unsigned long i;
5292         struct page *page;
5293         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5294         struct file_ra_state *ra;
5295         struct btrfs_ordered_extent *ordered;
5296         unsigned int total_read = 0;
5297         unsigned int total_dirty = 0;
5298         int ret = 0;
5299
5300         ra = kzalloc(sizeof(*ra), GFP_NOFS);
             if (!ra)
                     return -ENOMEM;
5301
5302         mutex_lock(&inode->i_mutex);
5303         first_index = start >> PAGE_CACHE_SHIFT;
5304         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5305
5306         /* make sure the dirty trick played by the caller works */
5307         ret = invalidate_inode_pages2_range(inode->i_mapping,
5308                                             first_index, last_index);
5309         if (ret)
5310                 goto out_unlock;
5311
5312         file_ra_state_init(ra, inode->i_mapping);
5313
5314         for (i = first_index ; i <= last_index; i++) {
5315                 if (total_read % ra->ra_pages == 0) {
5316                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5317                                        calc_ra(i, last_index, ra->ra_pages));
5318                 }
5319                 total_read++;
5320 again:
5321                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
5322                         BUG_ON(1);
5323                 page = grab_cache_page(inode->i_mapping, i);
5324                 if (!page) {
5325                         ret = -ENOMEM;
5326                         goto out_unlock;
5327                 }
5328                 if (!PageUptodate(page)) {
5329                         btrfs_readpage(NULL, page);
5330                         lock_page(page);
5331                         if (!PageUptodate(page)) {
5332                                 unlock_page(page);
5333                                 page_cache_release(page);
5334                                 ret = -EIO;
5335                                 goto out_unlock;
5336                         }
5337                 }
5338                 wait_on_page_writeback(page);
5339
5340                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5341                 page_end = page_start + PAGE_CACHE_SIZE - 1;
5342                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5343
5344                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5345                 if (ordered) {
5346                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5347                         unlock_page(page);
5348                         page_cache_release(page);
5349                         btrfs_start_ordered_extent(inode, ordered, 1);
5350                         btrfs_put_ordered_extent(ordered);
5351                         goto again;
5352                 }
5353                 set_page_extent_mapped(page);
5354
5355                 if (i == first_index)
5356                         set_extent_bits(io_tree, page_start, page_end,
5357                                         EXTENT_BOUNDARY, GFP_NOFS);
5358                 btrfs_set_extent_delalloc(inode, page_start, page_end);
5359
5360                 set_page_dirty(page);
5361                 total_dirty++;
5362
5363                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5364                 unlock_page(page);
5365                 page_cache_release(page);
5366         }
5367
5368 out_unlock:
5369         kfree(ra);
5370         mutex_unlock(&inode->i_mutex);
5371         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5372         return ret;
5373 }
5374
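     /*
      * Set up a pinned extent map that points the file range covering
      * @extent_key at the old disk location, so btrfs_readpage reads the
      * existing data, then dirty the pages so writeback re-allocates the
      * data at its new location.
      */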
5375 static noinline int relocate_data_extent(struct inode *reloc_inode,
5376                                          struct btrfs_key *extent_key,
5377                                          u64 offset)
5378 {
5379         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5380         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5381         struct extent_map *em;
5382         u64 start = extent_key->objectid - offset;
5383         u64 end = start + extent_key->offset - 1;
5384
5385         em = alloc_extent_map(GFP_NOFS);
             if (!em)
                     return -ENOMEM;
5387
5388         em->start = start;
5389         em->len = extent_key->offset;
5390         em->block_len = extent_key->offset;
5391         em->block_start = extent_key->objectid;
5392         em->bdev = root->fs_info->fs_devices->latest_bdev;
5393         set_bit(EXTENT_FLAG_PINNED, &em->flags);
5394
5395         /* set up an extent map to cheat btrfs_readpage */
5396         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5397         while (1) {
5398                 int ret;
5399                 spin_lock(&em_tree->lock);
5400                 ret = add_extent_mapping(em_tree, em);
5401                 spin_unlock(&em_tree->lock);
5402                 if (ret != -EEXIST) {
5403                         free_extent_map(em);
5404                         break;
5405                 }
5406                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5407         }
5408         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5409
5410         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
5411 }
5412
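     /*
      * btrfs_ref_path describes one chain of back references from an extent
      * up to a tree root: nodes[] holds the block pointer found at each
      * level, root_objectid/root_generation identify the root the chain
      * ends in, and lowest/current/shared_level track the state of the walk.
      */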
5413 struct btrfs_ref_path {
5414         u64 extent_start;
5415         u64 nodes[BTRFS_MAX_LEVEL];
5416         u64 root_objectid;
5417         u64 root_generation;
5418         u64 owner_objectid;
5419         u32 num_refs;
5420         int lowest_level;
5421         int current_level;
5422         int shared_level;
5423
5424         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5425         u64 new_nodes[BTRFS_MAX_LEVEL];
5426 };
5427
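     /*
      * in-memory copy of the location and encoding fields of a file extent
      * item, describing one on-disk extent
      */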
5428 struct disk_extent {
5429         u64 ram_bytes;
5430         u64 disk_bytenr;
5431         u64 disk_num_bytes;
5432         u64 offset;
5433         u64 num_bytes;
5434         u8 compression;
5435         u8 encryption;
5436         u16 other_encoding;
5437 };
5438
5439 static int is_cowonly_root(u64 root_objectid)
5440 {
5441         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5442             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5443             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5444             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5445             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5446             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
5447                 return 1;
5448         return 0;
5449 }
5450
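     /*
      * Step to the next back reference path of ref_path->extent_start.
      * walk_up follows back references level by level towards a tree root,
      * walk_down backtracks to the next sibling reference.  Returns 0 when
      * a complete path was found, 1 when all paths are exhausted and a
      * negative errno on error.
      */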
5451 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5452                                     struct btrfs_root *extent_root,
5453                                     struct btrfs_ref_path *ref_path,
5454                                     int first_time)
5455 {
5456         struct extent_buffer *leaf;
5457         struct btrfs_path *path;
5458         struct btrfs_extent_ref *ref;
5459         struct btrfs_key key;
5460         struct btrfs_key found_key;
5461         u64 bytenr;
5462         u32 nritems;
5463         int level;
5464         int ret = 1;
5465
5466         path = btrfs_alloc_path();
5467         if (!path)
5468                 return -ENOMEM;
5469
5470         if (first_time) {
5471                 ref_path->lowest_level = -1;
5472                 ref_path->current_level = -1;
5473                 ref_path->shared_level = -1;
5474                 goto walk_up;
5475         }
5476 walk_down:
5477         level = ref_path->current_level - 1;
5478         while (level >= -1) {
5479                 u64 parent;
5480                 if (level < ref_path->lowest_level)
5481                         break;
5482
5483                 if (level >= 0)
5484                         bytenr = ref_path->nodes[level];
5485                 else
5486                         bytenr = ref_path->extent_start;
5487                 BUG_ON(bytenr == 0);
5488
5489                 parent = ref_path->nodes[level + 1];
5490                 ref_path->nodes[level + 1] = 0;
5491                 ref_path->current_level = level;
5492                 BUG_ON(parent == 0);
5493
5494                 key.objectid = bytenr;
5495                 key.offset = parent + 1;
5496                 key.type = BTRFS_EXTENT_REF_KEY;
5497
5498                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5499                 if (ret < 0)
5500                         goto out;
5501                 BUG_ON(ret == 0);
5502
5503                 leaf = path->nodes[0];
5504                 nritems = btrfs_header_nritems(leaf);
5505                 if (path->slots[0] >= nritems) {
5506                         ret = btrfs_next_leaf(extent_root, path);
5507                         if (ret < 0)
5508                                 goto out;
5509                         if (ret > 0)
5510                                 goto next;
5511                         leaf = path->nodes[0];
5512                 }
5513
5514                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5515                 if (found_key.objectid == bytenr &&
5516                     found_key.type == BTRFS_EXTENT_REF_KEY) {
5517                         if (level < ref_path->shared_level)
5518                                 ref_path->shared_level = level;
5519                         goto found;
5520                 }
5521 next:
5522                 level--;
5523                 btrfs_release_path(extent_root, path);
5524                 cond_resched();
5525         }
5526         /* reached lowest level */
5527         ret = 1;
5528         goto out;
5529 walk_up:
5530         level = ref_path->current_level;
5531         while (level < BTRFS_MAX_LEVEL - 1) {
5532                 u64 ref_objectid;
5533
5534                 if (level >= 0)
5535                         bytenr = ref_path->nodes[level];
5536                 else
5537                         bytenr = ref_path->extent_start;
5538
5539                 BUG_ON(bytenr == 0);
5540
5541                 key.objectid = bytenr;
5542                 key.offset = 0;
5543                 key.type = BTRFS_EXTENT_REF_KEY;
5544
5545                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5546                 if (ret < 0)
5547                         goto out;
5548
5549                 leaf = path->nodes[0];
5550                 nritems = btrfs_header_nritems(leaf);
5551                 if (path->slots[0] >= nritems) {
5552                         ret = btrfs_next_leaf(extent_root, path);
5553                         if (ret < 0)
5554                                 goto out;
5555                         if (ret > 0) {
5556                                 /* the extent was freed by someone */
5557                                 if (ref_path->lowest_level == level)
5558                                         goto out;
5559                                 btrfs_release_path(extent_root, path);
5560                                 goto walk_down;
5561                         }
5562                         leaf = path->nodes[0];
5563                 }
5564
5565                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5566                 if (found_key.objectid != bytenr ||
5567                     found_key.type != BTRFS_EXTENT_REF_KEY) {
5568                         /* the extent was freed by someone */
5569                         if (ref_path->lowest_level == level) {
5570                                 ret = 1;
5571                                 goto out;
5572                         }
5573                         btrfs_release_path(extent_root, path);
5574                         goto walk_down;
5575                 }
5576 found:
5577                 ref = btrfs_item_ptr(leaf, path->slots[0],
5578                                 struct btrfs_extent_ref);
5579                 ref_objectid = btrfs_ref_objectid(leaf, ref);
5580                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5581                         if (first_time) {
5582                                 level = (int)ref_objectid;
5583                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
5584                                 ref_path->lowest_level = level;
5585                                 ref_path->current_level = level;
5586                                 ref_path->nodes[level] = bytenr;
5587                         } else {
5588                                 WARN_ON(ref_objectid != level);
5589                         }
5590                 } else {
5591                         WARN_ON(level != -1);
5592                 }
5593                 first_time = 0;
5594
5595                 if (ref_path->lowest_level == level) {
5596                         ref_path->owner_objectid = ref_objectid;
5597                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
5598                 }
5599
5600                 /*
5601                  * the block is a tree root or the block isn't in a
5602                  * reference counted tree.
5603                  */
5604                 if (found_key.objectid == found_key.offset ||
5605                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
5606                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5607                         ref_path->root_generation =
5608                                 btrfs_ref_generation(leaf, ref);
5609                         if (level < 0) {
5610                                 /* special reference from the tree log */
5611                                 ref_path->nodes[0] = found_key.offset;
5612                                 ref_path->current_level = 0;
5613                         }
5614                         ret = 0;
5615                         goto out;
5616                 }
5617
5618                 level++;
5619                 BUG_ON(ref_path->nodes[level] != 0);
5620                 ref_path->nodes[level] = found_key.offset;
5621                 ref_path->current_level = level;
5622
5623                 /*
5624                  * the reference was created in the running transaction,
5625                  * no need to continue walking up.
5626                  */
5627                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
5628                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5629                         ref_path->root_generation =
5630                                 btrfs_ref_generation(leaf, ref);
5631                         ret = 0;
5632                         goto out;
5633                 }
5634
5635                 btrfs_release_path(extent_root, path);
5636                 cond_resched();
5637         }
5638         /* reached max tree level, but no tree root found. */
5639         BUG();
5640 out:
5641         btrfs_free_path(path);
5642         return ret;
5643 }
5644
5645 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
5646                                 struct btrfs_root *extent_root,
5647                                 struct btrfs_ref_path *ref_path,
5648                                 u64 extent_start)
5649 {
5650         memset(ref_path, 0, sizeof(*ref_path));
5651         ref_path->extent_start = extent_start;
5652
5653         return __next_ref_path(trans, extent_root, ref_path, 1);
5654 }
5655
5656 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
5657                                struct btrfs_root *extent_root,
5658                                struct btrfs_ref_path *ref_path)
5659 {
5660         return __next_ref_path(trans, extent_root, ref_path, 0);
5661 }
5662
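     /*
      * Collect the file extents of the relocation inode that now hold the
      * data of @extent_key and return them as an array of disk_extent.
      * If @no_fragment is set and the data was split across more than one
      * new extent, 1 is returned and the caller skips the extent.
      */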
5663 static noinline int get_new_locations(struct inode *reloc_inode,
5664                                       struct btrfs_key *extent_key,
5665                                       u64 offset, int no_fragment,
5666                                       struct disk_extent **extents,
5667                                       int *nr_extents)
5668 {
5669         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5670         struct btrfs_path *path;
5671         struct btrfs_file_extent_item *fi;
5672         struct extent_buffer *leaf;
5673         struct disk_extent *exts = *extents;
5674         struct btrfs_key found_key;
5675         u64 cur_pos;
5676         u64 last_byte;
5677         u32 nritems;
5678         int nr = 0;
5679         int max = *nr_extents;
5680         int ret;
5681
5682         WARN_ON(!no_fragment && *extents);
5683         if (!exts) {
5684                 max = 1;
5685                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
5686                 if (!exts)
5687                         return -ENOMEM;
5688         }
5689
5690         path = btrfs_alloc_path();
             if (!path) {
                     if (exts != *extents)
                             kfree(exts);
                     return -ENOMEM;
             }
5692
5693         cur_pos = extent_key->objectid - offset;
5694         last_byte = extent_key->objectid + extent_key->offset;
5695         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
5696                                        cur_pos, 0);
5697         if (ret < 0)
5698                 goto out;
5699         if (ret > 0) {
5700                 ret = -ENOENT;
5701                 goto out;
5702         }
5703
5704         while (1) {
5705                 leaf = path->nodes[0];
5706                 nritems = btrfs_header_nritems(leaf);
5707                 if (path->slots[0] >= nritems) {
5708                         ret = btrfs_next_leaf(root, path);
5709                         if (ret < 0)
5710                                 goto out;
5711                         if (ret > 0)
5712                                 break;
5713                         leaf = path->nodes[0];
5714                 }
5715
5716                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5717                 if (found_key.offset != cur_pos ||
5718                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
5719                     found_key.objectid != reloc_inode->i_ino)
5720                         break;
5721
5722                 fi = btrfs_item_ptr(leaf, path->slots[0],
5723                                     struct btrfs_file_extent_item);
5724                 if (btrfs_file_extent_type(leaf, fi) !=
5725                     BTRFS_FILE_EXTENT_REG ||
5726                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5727                         break;
5728
5729                 if (nr == max) {
5730                         struct disk_extent *old = exts;
5731                         max *= 2;
5732                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                             if (!exts) {
                                     exts = old;
                                     ret = -ENOMEM;
                                     goto out;
                             }
5733                         memcpy(exts, old, sizeof(*exts) * nr);
5734                         if (old != *extents)
5735                                 kfree(old);
5736                 }
5737
5738                 exts[nr].disk_bytenr =
5739                         btrfs_file_extent_disk_bytenr(leaf, fi);
5740                 exts[nr].disk_num_bytes =
5741                         btrfs_file_extent_disk_num_bytes(leaf, fi);
5742                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
5743                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5744                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
5745                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
5746                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
5747                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
5748                                                                            fi);
5749                 BUG_ON(exts[nr].offset > 0);
5750                 BUG_ON(exts[nr].compression || exts[nr].encryption);
5751                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
5752
5753                 cur_pos += exts[nr].num_bytes;
5754                 nr++;
5755
5756                 if (cur_pos + offset >= last_byte)
5757                         break;
5758
5759                 if (no_fragment) {
5760                         ret = 1;
5761                         goto out;
5762                 }
5763                 path->slots[0]++;
5764         }
5765
5766         BUG_ON(cur_pos + offset > last_byte);
5767         if (cur_pos + offset < last_byte) {
5768                 ret = -ENOENT;
5769                 goto out;
5770         }
5771         ret = 0;
5772 out:
5773         btrfs_free_path(path);
5774         if (ret) {
5775                 if (exts != *extents)
5776                         kfree(exts);
5777         } else {
5778                 *extents = exts;
5779                 *nr_extents = nr;
5780         }
5781         return ret;
5782 }
5783
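     /*
      * Find the file extent items in @root that still point at the extent
      * being relocated and switch them over to the new location, holding
      * the extent io lock over each file range while its pointer is
      * rewritten.
      */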
5784 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
5785                                         struct btrfs_root *root,
5786                                         struct btrfs_path *path,
5787                                         struct btrfs_key *extent_key,
5788                                         struct btrfs_key *leaf_key,
5789                                         struct btrfs_ref_path *ref_path,
5790                                         struct disk_extent *new_extents,
5791                                         int nr_extents)
5792 {
5793         struct extent_buffer *leaf;
5794         struct btrfs_file_extent_item *fi;
5795         struct inode *inode = NULL;
5796         struct btrfs_key key;
5797         u64 lock_start = 0;
5798         u64 lock_end = 0;
5799         u64 num_bytes;
5800         u64 ext_offset;
5801         u64 search_end = (u64)-1;
5802         u32 nritems;
5803         int nr_scanned = 0;
5804         int extent_locked = 0;
5805         int extent_type;
5806         int ret;
5807
5808         memcpy(&key, leaf_key, sizeof(key));
5809         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5810                 if (key.objectid < ref_path->owner_objectid ||
5811                     (key.objectid == ref_path->owner_objectid &&
5812                      key.type < BTRFS_EXTENT_DATA_KEY)) {
5813                         key.objectid = ref_path->owner_objectid;
5814                         key.type = BTRFS_EXTENT_DATA_KEY;
5815                         key.offset = 0;
5816                 }
5817         }
5818
5819         while (1) {
5820                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
5821                 if (ret < 0)
5822                         goto out;
5823
5824                 leaf = path->nodes[0];
5825                 nritems = btrfs_header_nritems(leaf);
5826 next:
5827                 if (extent_locked && ret > 0) {
5828                         /*
5829                          * the file extent item was modified by someone
5830                          * before the extent got locked.
5831                          */
5832                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5833                                       lock_end, GFP_NOFS);
5834                         extent_locked = 0;
5835                 }
5836
5837                 if (path->slots[0] >= nritems) {
5838                         if (++nr_scanned > 2)
5839                                 break;
5840
5841                         BUG_ON(extent_locked);
5842                         ret = btrfs_next_leaf(root, path);
5843                         if (ret < 0)
5844                                 goto out;
5845                         if (ret > 0)
5846                                 break;
5847                         leaf = path->nodes[0];
5848                         nritems = btrfs_header_nritems(leaf);
5849                 }
5850
5851                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5852
5853                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5854                         if ((key.objectid > ref_path->owner_objectid) ||
5855                             (key.objectid == ref_path->owner_objectid &&
5856                              key.type > BTRFS_EXTENT_DATA_KEY) ||
5857                             key.offset >= search_end)
5858                                 break;
5859                 }
5860
5861                 if (inode && key.objectid != inode->i_ino) {
5862                         BUG_ON(extent_locked);
5863                         btrfs_release_path(root, path);
5864                         mutex_unlock(&inode->i_mutex);
5865                         iput(inode);
5866                         inode = NULL;
5867                         continue;
5868                 }
5869
5870                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
5871                         path->slots[0]++;
5872                         ret = 1;
5873                         goto next;
5874                 }
5875                 fi = btrfs_item_ptr(leaf, path->slots[0],
5876                                     struct btrfs_file_extent_item);
5877                 extent_type = btrfs_file_extent_type(leaf, fi);
5878                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
5879                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
5880                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
5881                      extent_key->objectid)) {
5882                         path->slots[0]++;
5883                         ret = 1;
5884                         goto next;
5885                 }
5886
5887                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5888                 ext_offset = btrfs_file_extent_offset(leaf, fi);
5889
5890                 if (search_end == (u64)-1) {
5891                         search_end = key.offset - ext_offset +
5892                                 btrfs_file_extent_ram_bytes(leaf, fi);
5893                 }
5894
5895                 if (!extent_locked) {
5896                         lock_start = key.offset;
5897                         lock_end = lock_start + num_bytes - 1;
5898                 } else {
5899                         if (lock_start > key.offset ||
5900                             lock_end + 1 < key.offset + num_bytes) {
5901                                 unlock_extent(&BTRFS_I(inode)->io_tree,
5902                                               lock_start, lock_end, GFP_NOFS);
5903                                 extent_locked = 0;
5904                         }
5905                 }
5906
5907                 if (!inode) {
5908                         btrfs_release_path(root, path);
5909
5910                         inode = btrfs_iget_locked(root->fs_info->sb,
5911                                                   key.objectid, root);
                             if (!inode) {
                                     key.offset = (u64)-1;
                                     goto skip;
                             }
5912                         if (inode->i_state & I_NEW) {
5913                                 BTRFS_I(inode)->root = root;
5914                                 BTRFS_I(inode)->location.objectid =
5915                                         key.objectid;
5916                                 BTRFS_I(inode)->location.type =
5917                                         BTRFS_INODE_ITEM_KEY;
5918                                 BTRFS_I(inode)->location.offset = 0;
5919                                 btrfs_read_locked_inode(inode);
5920                                 unlock_new_inode(inode);
5921                         }
5922                         /*
5923                          * some code calls btrfs_commit_transaction while
5924                          * holding the i_mutex, so we can't block on
5925                          * mutex_lock here.
5926                          */
5927                         if (is_bad_inode(inode) ||
5928                             !mutex_trylock(&inode->i_mutex)) {
5929                                 iput(inode);
5930                                 inode = NULL;
5931                                 key.offset = (u64)-1;
5932                                 goto skip;
5933                         }
5934                 }
5935
5936                 if (!extent_locked) {
5937                         struct btrfs_ordered_extent *ordered;
5938
5939                         btrfs_release_path(root, path);
5940
5941                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5942                                     lock_end, GFP_NOFS);
5943                         ordered = btrfs_lookup_first_ordered_extent(inode,
5944                                                                     lock_end);
5945                         if (ordered &&
5946                             ordered->file_offset <= lock_end &&
5947                             ordered->file_offset + ordered->len > lock_start) {
5948                                 unlock_extent(&BTRFS_I(inode)->io_tree,
5949                                               lock_start, lock_end, GFP_NOFS);
5950                                 btrfs_start_ordered_extent(inode, ordered, 1);
5951                                 btrfs_put_ordered_extent(ordered);
5952                                 key.offset += num_bytes;
5953                                 goto skip;
5954                         }
5955                         if (ordered)
5956                                 btrfs_put_ordered_extent(ordered);
5957
5958                         extent_locked = 1;
5959                         continue;
5960                 }
5961
5962                 if (nr_extents == 1) {
5963                         /* update extent pointer in place */
5964                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
5965                                                 new_extents[0].disk_bytenr);
5966                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5967                                                 new_extents[0].disk_num_bytes);
5968                         btrfs_mark_buffer_dirty(leaf);
5969
5970                         btrfs_drop_extent_cache(inode, key.offset,
5971                                                 key.offset + num_bytes - 1, 0);
5972
5973                         ret = btrfs_inc_extent_ref(trans, root,
5974                                                 new_extents[0].disk_bytenr,
5975                                                 new_extents[0].disk_num_bytes,
5976                                                 leaf->start,
5977                                                 root->root_key.objectid,
5978                                                 trans->transid,
5979                                                 key.objectid);
5980                         BUG_ON(ret);
5981
5982                         ret = btrfs_free_extent(trans, root,
5983                                                 extent_key->objectid,
5984                                                 extent_key->offset,
5985                                                 leaf->start,
5986                                                 btrfs_header_owner(leaf),
5987                                                 btrfs_header_generation(leaf),
5988                                                 key.objectid, 0);
5989                         BUG_ON(ret);
5990
5991                         btrfs_release_path(root, path);
5992                         key.offset += num_bytes;
5993                 } else {
5994                         BUG_ON(1);
5995 #if 0
5996                         u64 alloc_hint;
5997                         u64 extent_len;
5998                         int i;
5999                         /*
6000                          * drop the old extent pointer first, then insert
6001                          * the new pointers one by one
6002                          */
6003                         btrfs_release_path(root, path);
6004                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
6005                                                  key.offset + num_bytes,
6006                                                  key.offset, &alloc_hint);
6007                         BUG_ON(ret);
6008
6009                         for (i = 0; i < nr_extents; i++) {
6010                                 if (ext_offset >= new_extents[i].num_bytes) {
6011                                         ext_offset -= new_extents[i].num_bytes;
6012                                         continue;
6013                                 }
6014                                 extent_len = min(new_extents[i].num_bytes -
6015                                                  ext_offset, num_bytes);
6016
6017                                 ret = btrfs_insert_empty_item(trans, root,
6018                                                               path, &key,
6019                                                               sizeof(*fi));
6020                                 BUG_ON(ret);
6021
6022                                 leaf = path->nodes[0];
6023                                 fi = btrfs_item_ptr(leaf, path->slots[0],
6024                                                 struct btrfs_file_extent_item);
6025                                 btrfs_set_file_extent_generation(leaf, fi,
6026                                                         trans->transid);
6027                                 btrfs_set_file_extent_type(leaf, fi,
6028                                                         BTRFS_FILE_EXTENT_REG);
6029                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6030                                                 new_extents[i].disk_bytenr);
6031                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6032                                                 new_extents[i].disk_num_bytes);
6033                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
6034                                                 new_extents[i].ram_bytes);
6035
6036                                 btrfs_set_file_extent_compression(leaf, fi,
6037                                                 new_extents[i].compression);
6038                                 btrfs_set_file_extent_encryption(leaf, fi,
6039                                                 new_extents[i].encryption);
6040                                 btrfs_set_file_extent_other_encoding(leaf, fi,
6041                                                 new_extents[i].other_encoding);
6042
6043                                 btrfs_set_file_extent_num_bytes(leaf, fi,
6044                                                         extent_len);
6045                                 ext_offset += new_extents[i].offset;
6046                                 btrfs_set_file_extent_offset(leaf, fi,
6047                                                         ext_offset);
6048                                 btrfs_mark_buffer_dirty(leaf);
6049
6050                                 btrfs_drop_extent_cache(inode, key.offset,
6051                                                 key.offset + extent_len - 1, 0);
6052
6053                                 ret = btrfs_inc_extent_ref(trans, root,
6054                                                 new_extents[i].disk_bytenr,
6055                                                 new_extents[i].disk_num_bytes,
6056                                                 leaf->start,
6057                                                 root->root_key.objectid,
6058                                                 trans->transid, key.objectid);
6059                                 BUG_ON(ret);
6060                                 btrfs_release_path(root, path);
6061
6062                                 inode_add_bytes(inode, extent_len);
6063
6064                                 ext_offset = 0;
6065                                 num_bytes -= extent_len;
6066                                 key.offset += extent_len;
6067
6068                                 if (num_bytes == 0)
6069                                         break;
6070                         }
6071                         BUG_ON(i >= nr_extents);
6072 #endif
6073                 }
6074
6075                 if (extent_locked) {
6076                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6077                                       lock_end, GFP_NOFS);
6078                         extent_locked = 0;
6079                 }
6080 skip:
6081                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
6082                     key.offset >= search_end)
6083                         break;
6084
6085                 cond_resched();
6086         }
6087         ret = 0;
6088 out:
6089         btrfs_release_path(root, path);
6090         if (inode) {
6091                 mutex_unlock(&inode->i_mutex);
6092                 if (extent_locked) {
6093                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6094                                       lock_end, GFP_NOFS);
6095                 }
6096                 iput(inode);
6097         }
6098         return ret;
6099 }
6100
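     /*
      * A leaf COWed into the reloc tree keeps the same file extent pointers
      * as the original leaf, so duplicate the cached leaf ref of the
      * original block for the new block to keep the ref cache usable.
      */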
6101 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6102                                struct btrfs_root *root,
6103                                struct extent_buffer *buf, u64 orig_start)
6104 {
6105         int level;
6106         int ret;
6107
6108         BUG_ON(btrfs_header_generation(buf) != trans->transid);
6109         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6110
6111         level = btrfs_header_level(buf);
6112         if (level == 0) {
6113                 struct btrfs_leaf_ref *ref;
6114                 struct btrfs_leaf_ref *orig_ref;
6115
6116                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6117                 if (!orig_ref)
6118                         return -ENOENT;
6119
6120                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6121                 if (!ref) {
6122                         btrfs_free_leaf_ref(root, orig_ref);
6123                         return -ENOMEM;
6124                 }
6125
6126                 ref->nritems = orig_ref->nritems;
6127                 memcpy(ref->extents, orig_ref->extents,
6128                         sizeof(ref->extents[0]) * ref->nritems);
6129
6130                 btrfs_free_leaf_ref(root, orig_ref);
6131
6132                 ref->root_gen = trans->transid;
6133                 ref->bytenr = buf->start;
6134                 ref->owner = btrfs_header_owner(buf);
6135                 ref->generation = btrfs_header_generation(buf);
6136
6137                 ret = btrfs_add_leaf_ref(root, ref, 0);
6138                 WARN_ON(ret);
6139                 btrfs_free_leaf_ref(root, ref);
6140         }
6141         return 0;
6142 }
6143
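     /*
      * Drop the cached extent mappings for every file extent in @leaf from
      * the corresponding inodes of @target_root, so stale mappings that
      * point at the old location are not used after relocation.
      */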
6144 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6145                                         struct extent_buffer *leaf,
6146                                         struct btrfs_block_group_cache *group,
6147                                         struct btrfs_root *target_root)
6148 {
6149         struct btrfs_key key;
6150         struct inode *inode = NULL;
6151         struct btrfs_file_extent_item *fi;
6152         u64 num_bytes;
6153         u64 skip_objectid = 0;
6154         u32 nritems;
6155         u32 i;
6156
6157         nritems = btrfs_header_nritems(leaf);
6158         for (i = 0; i < nritems; i++) {
6159                 btrfs_item_key_to_cpu(leaf, &key, i);
6160                 if (key.objectid == skip_objectid ||
6161                     key.type != BTRFS_EXTENT_DATA_KEY)
6162                         continue;
6163                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6164                 if (btrfs_file_extent_type(leaf, fi) ==
6165                     BTRFS_FILE_EXTENT_INLINE)
6166                         continue;
6167                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6168                         continue;
6169                 if (!inode || inode->i_ino != key.objectid) {
6170                         iput(inode);
6171                         inode = btrfs_ilookup(target_root->fs_info->sb,
6172                                               key.objectid, target_root, 1);
6173                 }
6174                 if (!inode) {
6175                         skip_objectid = key.objectid;
6176                         continue;
6177                 }
6178                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6179
6180                 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6181                             key.offset + num_bytes - 1, GFP_NOFS);
6182                 btrfs_drop_extent_cache(inode, key.offset,
6183                                         key.offset + num_bytes - 1, 1);
6184                 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6185                               key.offset + num_bytes - 1, GFP_NOFS);
6186                 cond_resched();
6187         }
6188         iput(inode);
6189         return 0;
6190 }
6191
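     /*
      * Rewrite every file extent pointer in @leaf that falls inside the
      * block group being relocated so that it points at the data's new
      * location, updating the cached leaf ref and the extent reference
      * counts to match.
      */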
6192 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6193                                         struct btrfs_root *root,
6194                                         struct extent_buffer *leaf,
6195                                         struct btrfs_block_group_cache *group,
6196                                         struct inode *reloc_inode)
6197 {
6198         struct btrfs_key key;
6199         struct btrfs_key extent_key;
6200         struct btrfs_file_extent_item *fi;
6201         struct btrfs_leaf_ref *ref;
6202         struct disk_extent *new_extent;
6203         u64 bytenr;
6204         u64 num_bytes;
6205         u32 nritems;
6206         u32 i;
6207         int ext_index;
6208         int nr_extent;
6209         int ret;
6210
6211         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
             if (!new_extent)
                     return -ENOMEM;
6213
6214         ref = btrfs_lookup_leaf_ref(root, leaf->start);
             if (!ref) {
                     kfree(new_extent);
                     return -ENOENT;
             }
6216
6217         ext_index = -1;
6218         nritems = btrfs_header_nritems(leaf);
6219         for (i = 0; i < nritems; i++) {
6220                 btrfs_item_key_to_cpu(leaf, &key, i);
6221                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6222                         continue;
6223                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6224                 if (btrfs_file_extent_type(leaf, fi) ==
6225                     BTRFS_FILE_EXTENT_INLINE)
6226                         continue;
6227                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6228                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6229                 if (bytenr == 0)
6230                         continue;
6231
6232                 ext_index++;
6233                 if (bytenr >= group->key.objectid + group->key.offset ||
6234                     bytenr + num_bytes <= group->key.objectid)
6235                         continue;
6236
6237                 extent_key.objectid = bytenr;
6238                 extent_key.offset = num_bytes;
6239                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6240                 nr_extent = 1;
6241                 ret = get_new_locations(reloc_inode, &extent_key,
6242                                         group->key.objectid, 1,
6243                                         &new_extent, &nr_extent);
6244                 if (ret > 0)
6245                         continue;
6246                 BUG_ON(ret < 0);
6247
6248                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6249                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6250                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6251                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6252
6253                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6254                                                 new_extent->disk_bytenr);
6255                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6256                                                 new_extent->disk_num_bytes);
6257                 btrfs_mark_buffer_dirty(leaf);
6258
6259                 ret = btrfs_inc_extent_ref(trans, root,
6260                                         new_extent->disk_bytenr,
6261                                         new_extent->disk_num_bytes,
6262                                         leaf->start,
6263                                         root->root_key.objectid,
6264                                         trans->transid, key.objectid);
6265                 BUG_ON(ret);
6266
6267                 ret = btrfs_free_extent(trans, root,
6268                                         bytenr, num_bytes, leaf->start,
6269                                         btrfs_header_owner(leaf),
6270                                         btrfs_header_generation(leaf),
6271                                         key.objectid, 0);
6272                 BUG_ON(ret);
6273                 cond_resched();
6274         }
6275         kfree(new_extent);
6276         BUG_ON(ext_index + 1 != ref->nritems);
6277         btrfs_free_leaf_ref(root, ref);
6278         return 0;
6279 }
6280
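     /*
      * Detach the reloc tree from @root and put it on the dead list; the
      * tree itself is deleted later by btrfs_drop_dead_reloc_roots().
      */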
6281 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6282                           struct btrfs_root *root)
6283 {
6284         struct btrfs_root *reloc_root;
6285         int ret;
6286
6287         if (root->reloc_root) {
6288                 reloc_root = root->reloc_root;
6289                 root->reloc_root = NULL;
6290                 list_add(&reloc_root->dead_list,
6291                          &root->fs_info->dead_reloc_roots);
6292
6293                 btrfs_set_root_bytenr(&reloc_root->root_item,
6294                                       reloc_root->node->start);
6295                 btrfs_set_root_level(&reloc_root->root_item,
6296                                      btrfs_header_level(reloc_root->node));
6297                 memset(&reloc_root->root_item.drop_progress, 0,
6298                         sizeof(struct btrfs_disk_key));
6299                 reloc_root->root_item.drop_level = 0;
6300
6301                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6302                                         &reloc_root->root_key,
6303                                         &reloc_root->root_item);
6304                 BUG_ON(ret);
6305         }
6306         return 0;
6307 }
6308
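     /*
      * Delete all reloc trees on the dead list.  Each tree is dropped a
      * chunk at a time, ending the transaction between chunks so a single
      * transaction is not pinned for too long.
      */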
6309 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6310 {
6311         struct btrfs_trans_handle *trans;
6312         struct btrfs_root *reloc_root;
6313         struct btrfs_root *prev_root = NULL;
6314         struct list_head dead_roots;
6315         int ret;
6316         unsigned long nr;
6317
6318         INIT_LIST_HEAD(&dead_roots);
6319         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6320
6321         while (!list_empty(&dead_roots)) {
6322                 reloc_root = list_entry(dead_roots.prev,
6323                                         struct btrfs_root, dead_list);
6324                 list_del_init(&reloc_root->dead_list);
6325
6326                 BUG_ON(reloc_root->commit_root != NULL);
6327                 while (1) {
6328                         trans = btrfs_join_transaction(root, 1);
6329                         BUG_ON(!trans);
6330
6331                         mutex_lock(&root->fs_info->drop_mutex);
6332                         ret = btrfs_drop_snapshot(trans, reloc_root);
6333                         if (ret != -EAGAIN)
6334                                 break;
6335                         mutex_unlock(&root->fs_info->drop_mutex);
6336
6337                         nr = trans->blocks_used;
6338                         ret = btrfs_end_transaction(trans, root);
6339                         BUG_ON(ret);
6340                         btrfs_btree_balance_dirty(root, nr);
6341                 }
6342
6343                 free_extent_buffer(reloc_root->node);
6344
6345                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6346                                      &reloc_root->root_key);
6347                 BUG_ON(ret);
6348                 mutex_unlock(&root->fs_info->drop_mutex);
6349
6350                 nr = trans->blocks_used;
6351                 ret = btrfs_end_transaction(trans, root);
6352                 BUG_ON(ret);
6353                 btrfs_btree_balance_dirty(root, nr);
6354
6355                 kfree(prev_root);
6356                 prev_root = reloc_root;
6357         }
6358         if (prev_root) {
6359                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6360                 kfree(prev_root);
6361         }
6362         return 0;
6363 }
6364
6365 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6366 {
6367         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6368         return 0;
6369 }
6370
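     /*
      * Called after mount: look for reloc trees left over by a crashed
      * balance; if any are found, commit a transaction so they are dropped,
      * then run orphan cleanup on the data relocation tree.
      */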
6371 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6372 {
6373         struct btrfs_root *reloc_root;
6374         struct btrfs_trans_handle *trans;
6375         struct btrfs_key location;
6376         int found;
6377         int ret;
6378
6379         mutex_lock(&root->fs_info->tree_reloc_mutex);
6380         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6381         BUG_ON(ret);
6382         found = !list_empty(&root->fs_info->dead_reloc_roots);
6383         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6384
6385         if (found) {
6386                 trans = btrfs_start_transaction(root, 1);
6387                 BUG_ON(!trans);
6388                 ret = btrfs_commit_transaction(trans, root);
6389                 BUG_ON(ret);
6390         }
6391
6392         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6393         location.offset = (u64)-1;
6394         location.type = BTRFS_ROOT_ITEM_KEY;
6395
6396         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6397         BUG_ON(!reloc_root);
6398         btrfs_orphan_cleanup(reloc_root);
6399         return 0;
6400 }
6401
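     /*
      * Create the reloc tree for @root if it does not exist yet: snapshot
      * root->commit_root and insert the copy as a root item with objectid
      * BTRFS_TREE_RELOC_OBJECTID and zero refs.
      */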
6402 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6403                                     struct btrfs_root *root)
6404 {
6405         struct btrfs_root *reloc_root;
6406         struct extent_buffer *eb;
6407         struct btrfs_root_item *root_item;
6408         struct btrfs_key root_key;
6409         int ret;
6410
6411         BUG_ON(!root->ref_cows);
6412         if (root->reloc_root)
6413                 return 0;
6414
6415         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6416         BUG_ON(!root_item);
6417
6418         ret = btrfs_copy_root(trans, root, root->commit_root,
6419                               &eb, BTRFS_TREE_RELOC_OBJECTID);
6420         BUG_ON(ret);
6421
6422         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6423         root_key.offset = root->root_key.objectid;
6424         root_key.type = BTRFS_ROOT_ITEM_KEY;
6425
6426         memcpy(root_item, &root->root_item, sizeof(*root_item));
6427         btrfs_set_root_refs(root_item, 0);
6428         btrfs_set_root_bytenr(root_item, eb->start);
6429         btrfs_set_root_level(root_item, btrfs_header_level(eb));
6430         btrfs_set_root_generation(root_item, trans->transid);
6431
6432         btrfs_tree_unlock(eb);
6433         free_extent_buffer(eb);
6434
6435         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6436                                 &root_key, root_item);
6437         BUG_ON(ret);
6438         kfree(root_item);
6439
6440         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6441                                                  &root_key);
6442         BUG_ON(!reloc_root);
6443         reloc_root->last_trans = trans->transid;
6444         reloc_root->commit_root = NULL;
6445         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6446
6447         root->reloc_root = reloc_root;
6448         return 0;
6449 }
6450
6451 /*
6452  * Core function of space balance.
6453  *
6454  * The idea is to use reloc trees to relocate tree blocks in reference
6455  * counted roots. There is one reloc tree for each subvol, and all
6456  * reloc trees share the same root key objectid. Reloc trees are
6457  * snapshots of the latest committed roots of subvols (root->commit_root).
6458  *
6459  * Relocating a tree block referenced by a subvol takes two steps:
6460  * COW the block through the subvol's reloc tree, then update the block
6461  * pointer in the subvol to point to the new block. Since all reloc
6462  * trees share the same root key objectid, special handling for tree
6463  * blocks owned by them is easy. Once a tree block has been COWed in
6464  * one reloc tree, we can use the resulting new block directly when the
6465  * same block needs to be COWed again through another reloc tree. This
6466  * way, relocated tree blocks are shared between reloc trees, and so
6467  * they are also shared between subvols.
6468  */
6469 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
6470                                       struct btrfs_root *root,
6471                                       struct btrfs_path *path,
6472                                       struct btrfs_key *first_key,
6473                                       struct btrfs_ref_path *ref_path,
6474                                       struct btrfs_block_group_cache *group,
6475                                       struct inode *reloc_inode)
6476 {
6477         struct btrfs_root *reloc_root;
6478         struct extent_buffer *eb = NULL;
6479         struct btrfs_key *keys;
6480         u64 *nodes;
6481         int level;
6482         int shared_level;
6483         int lowest_level = 0;
6484         int ret;
6485
6486         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6487                 lowest_level = ref_path->owner_objectid;
6488
6489         if (!root->ref_cows) {
6490                 path->lowest_level = lowest_level;
6491                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6492                 BUG_ON(ret < 0);
6493                 path->lowest_level = 0;
6494                 btrfs_release_path(root, path);
6495                 return 0;
6496         }
6497
6498         mutex_lock(&root->fs_info->tree_reloc_mutex);
6499         ret = init_reloc_tree(trans, root);
6500         BUG_ON(ret);
6501         reloc_root = root->reloc_root;
6502
6503         shared_level = ref_path->shared_level;
6504         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
6505
6506         keys = ref_path->node_keys;
6507         nodes = ref_path->new_nodes;
6508         memset(&keys[shared_level + 1], 0,
6509                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6510         memset(&nodes[shared_level + 1], 0,
6511                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
6512
6513         if (nodes[lowest_level] == 0) {
6514                 path->lowest_level = lowest_level;
6515                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6516                                         0, 1);
6517                 BUG_ON(ret);
6518                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6519                         eb = path->nodes[level];
6520                         if (!eb || eb == reloc_root->node)
6521                                 break;
6522                         nodes[level] = eb->start;
6523                         if (level == 0)
6524                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6525                         else
6526                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6527                 }
6528                 if (nodes[0] &&
6529                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6530                         eb = path->nodes[0];
6531                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
6532                                                       group, reloc_inode);
6533                         BUG_ON(ret);
6534                 }
6535                 btrfs_release_path(reloc_root, path);
6536         } else {
6537                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
6538                                        lowest_level);
6539                 BUG_ON(ret);
6540         }
6541
6542         /*
6543          * replace tree blocks in the fs tree with tree blocks in
6544          * the reloc tree.
6545          */
6546         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6547         BUG_ON(ret < 0);
6548
6549         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6550                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6551                                         0, 0);
6552                 BUG_ON(ret);
6553                 extent_buffer_get(path->nodes[0]);
6554                 eb = path->nodes[0];
6555                 btrfs_release_path(reloc_root, path);
6556                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6557                 BUG_ON(ret);
6558                 free_extent_buffer(eb);
6559         }
6560
6561         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6562         path->lowest_level = 0;
6563         return 0;
6564 }
6565
6566 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
6567                                         struct btrfs_root *root,
6568                                         struct btrfs_path *path,
6569                                         struct btrfs_key *first_key,
6570                                         struct btrfs_ref_path *ref_path)
6571 {
6572         int ret;
6573
6574         ret = relocate_one_path(trans, root, path, first_key,
6575                                 ref_path, NULL, NULL);
6576         BUG_ON(ret);
6577
6578         return 0;
6579 }
6580
6581 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
6582                                     struct btrfs_root *extent_root,
6583                                     struct btrfs_path *path,
6584                                     struct btrfs_key *extent_key)
6585 {
6586         int ret;
6587
6588         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
6589         if (ret)
6590                 goto out;
6591         ret = btrfs_del_item(trans, extent_root, path);
6592 out:
6593         btrfs_release_path(extent_root, path);
6594         return ret;
6595 }
6596
6597 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
6598                                                 struct btrfs_ref_path *ref_path)
6599 {
6600         struct btrfs_key root_key;
6601
6602         root_key.objectid = ref_path->root_objectid;
6603         root_key.type = BTRFS_ROOT_ITEM_KEY;
6604         if (is_cowonly_root(ref_path->root_objectid))
6605                 root_key.offset = 0;
6606         else
6607                 root_key.offset = (u64)-1;
6608
6609         return btrfs_read_fs_root_no_name(fs_info, &root_key);
6610 }
6611
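     /*
      * Relocate all references to one extent: walk its back reference
      * paths and, depending on the owner, either copy the data into the
      * relocation inode (pass 0 for data extents) or relocate the
      * referencing tree blocks through the owning root's reloc tree.
      */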
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
                                        struct btrfs_path *path,
                                        struct btrfs_key *extent_key,
                                        struct btrfs_block_group_cache *group,
                                        struct inode *reloc_inode, int pass)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *found_root;
        struct btrfs_ref_path *ref_path = NULL;
        struct disk_extent *new_extents = NULL;
        int nr_extents = 0;
        int loops;
        int ret;
        int level;
        struct btrfs_key first_key;
        u64 prev_block = 0;

        trans = btrfs_start_transaction(extent_root, 1);
        BUG_ON(!trans);

        if (extent_key->objectid == 0) {
                ret = del_extent_zero(trans, extent_root, path, extent_key);
                goto out;
        }

        ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
        if (!ref_path) {
                ret = -ENOMEM;
                goto out;
        }

        for (loops = 0; ; loops++) {
                if (loops == 0) {
                        ret = btrfs_first_ref_path(trans, extent_root, ref_path,
                                                   extent_key->objectid);
                } else {
                        ret = btrfs_next_ref_path(trans, extent_root, ref_path);
                }
                if (ret < 0)
                        goto out;
                if (ret > 0)
                        break;

                if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
                    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                        continue;

                found_root = read_ref_root(extent_root->fs_info, ref_path);
                BUG_ON(!found_root);
                /*
                 * for reference counted trees, only process reference
                 * paths rooted at the latest committed root.
                 */
                if (found_root->ref_cows &&
                    ref_path->root_generation != found_root->root_key.offset)
                        continue;

                if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                        if (pass == 0) {
                                /*
                                 * copy data extents to new locations
                                 */
                                u64 group_start = group->key.objectid;
                                ret = relocate_data_extent(reloc_inode,
                                                           extent_key,
                                                           group_start);
                                if (ret < 0)
                                        goto out;
                                break;
                        }
                        level = 0;
                } else {
                        level = ref_path->owner_objectid;
                }

                if (prev_block != ref_path->nodes[level]) {
                        struct extent_buffer *eb;
                        u64 block_start = ref_path->nodes[level];
                        u64 block_size = btrfs_level_size(found_root, level);

                        eb = read_tree_block(found_root, block_start,
                                             block_size, 0);
                        BUG_ON(!eb);
                        btrfs_tree_lock(eb);
                        BUG_ON(level != btrfs_header_level(eb));

                        if (level == 0)
                                btrfs_item_key_to_cpu(eb, &first_key, 0);
                        else
                                btrfs_node_key_to_cpu(eb, &first_key, 0);

                        btrfs_tree_unlock(eb);
                        free_extent_buffer(eb);
                        prev_block = block_start;
                }

                mutex_lock(&extent_root->fs_info->trans_mutex);
                btrfs_record_root_in_trans(found_root);
                mutex_unlock(&extent_root->fs_info->trans_mutex);
                if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                        /*
                         * try to update data extent references while
                         * keeping metadata shared between snapshots.
                         */
                        if (pass == 1) {
                                ret = relocate_one_path(trans, found_root,
                                                path, &first_key, ref_path,
                                                group, reloc_inode);
                                if (ret < 0)
                                        goto out;
                                continue;
                        }
                        /*
                         * use fallback method to process the remaining
                         * references.
                         */
                        if (!new_extents) {
                                u64 group_start = group->key.objectid;
                                new_extents = kmalloc(sizeof(*new_extents),
                                                      GFP_NOFS);
                                if (!new_extents) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                nr_extents = 1;
                                ret = get_new_locations(reloc_inode,
                                                        extent_key,
                                                        group_start, 1,
                                                        &new_extents,
                                                        &nr_extents);
                                if (ret)
                                        goto out;
                        }
                        ret = replace_one_extent(trans, found_root,
                                                path, extent_key,
                                                &first_key, ref_path,
                                                new_extents, nr_extents);
                } else {
                        ret = relocate_tree_block(trans, found_root, path,
                                                  &first_key, ref_path);
                }
                if (ret < 0)
                        goto out;
        }
        ret = 0;
out:
        btrfs_end_transaction(trans, extent_root);
        kfree(new_extents);
        kfree(ref_path);
        return ret;
}
#endif

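/*
 * pick the target profile when a block group has to be rewritten for the
 * current number of writable devices: with a single device, RAID0 becomes
 * a plain single-device profile and RAID1/RAID10 become DUP; with multiple
 * devices, DUP becomes RAID1 and single-device chunks become RAID0.
 */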
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
        u64 num_devices;
        u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

        num_devices = root->fs_info->fs_devices->rw_devices;
        if (num_devices == 1) {
                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;

                /* turn raid0 into single device chunks */
                if (flags & BTRFS_BLOCK_GROUP_RAID0)
                        return stripped;

                /* turn mirroring into duplication */
                if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                             BTRFS_BLOCK_GROUP_RAID10))
                        return stripped | BTRFS_BLOCK_GROUP_DUP;
                return flags;
        } else {
                /* they already had raid on here, just return */
                if (flags & stripped)
                        return flags;

                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;

                /* switch duplicated blocks with raid1 */
                if (flags & BTRFS_BLOCK_GROUP_DUP)
                        return stripped | BTRFS_BLOCK_GROUP_RAID1;

                /* turn single device chunks into raid0 */
                return stripped | BTRFS_BLOCK_GROUP_RAID0;
        }
}

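/*
 * before extents are moved out of a block group that is being shrunk or
 * relocated, make sure a chunk with the appropriate target profile exists
 * so the allocator has somewhere to put the copies.
 */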
static int __alloc_chunk_for_shrink(struct btrfs_root *root,
                     struct btrfs_block_group_cache *shrink_block_group,
                     int force)
{
        struct btrfs_trans_handle *trans;
        u64 new_alloc_flags;
        u64 calc;

        spin_lock(&shrink_block_group->lock);
        if (btrfs_block_group_used(&shrink_block_group->item) +
            shrink_block_group->reserved > 0) {
                spin_unlock(&shrink_block_group->lock);

                trans = btrfs_start_transaction(root, 1);
                spin_lock(&shrink_block_group->lock);

                new_alloc_flags = update_block_group_flags(root,
                                                   shrink_block_group->flags);
                if (new_alloc_flags != shrink_block_group->flags) {
                        calc =
                             btrfs_block_group_used(&shrink_block_group->item);
                } else {
                        calc = shrink_block_group->key.offset;
                }
                spin_unlock(&shrink_block_group->lock);

                do_chunk_alloc(trans, root->fs_info->extent_root,
                               calc + 2 * 1024 * 1024, new_alloc_flags, force);

                btrfs_end_transaction(trans, root);
        } else
                spin_unlock(&shrink_block_group->lock);
        return 0;
}

int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
                                         struct btrfs_block_group_cache *group)
{
        __alloc_chunk_for_shrink(root, group, 1);
        set_block_group_readonly(group);
        return 0;
}

#if 0
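/*
 * insert a bare inode item (a regular 0600 file with compression disabled)
 * of the given size.  create_reloc_inode() uses this to back the inode
 * that relocated data extents are staged in.
 */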
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 u64 objectid, u64 size)
{
        struct btrfs_path *path;
        struct btrfs_inode_item *item;
        struct extent_buffer *leaf;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_inode(trans, root, path, objectid);
        if (ret)
                goto out;

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
        memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
        btrfs_set_inode_generation(leaf, item, 1);
        btrfs_set_inode_size(leaf, item, size);
        btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
        btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(root, path);
out:
        btrfs_free_path(path);
        return ret;
}

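/*
 * create an orphan inode in the data relocation tree that spans the whole
 * block group.  index_cnt records the start of the block group, so file
 * offsets in this inode map directly back to the old disk byte numbers.
 */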
static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
                                        struct btrfs_block_group_cache *group)
{
        struct inode *inode = NULL;
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root;
        struct btrfs_key root_key;
        u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
        int err = 0;

        root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(root))
                return ERR_CAST(root);

        trans = btrfs_start_transaction(root, 1);
        BUG_ON(!trans);

        err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
        if (err)
                goto out;

        err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
        BUG_ON(err);

        err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
                                       group->key.offset, 0, group->key.offset,
                                       0, 0, 0);
        BUG_ON(err);

        inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
        if (inode->i_state & I_NEW) {
                BTRFS_I(inode)->root = root;
                BTRFS_I(inode)->location.objectid = objectid;
                BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
                BTRFS_I(inode)->location.offset = 0;
                btrfs_read_locked_inode(inode);
                unlock_new_inode(inode);
                BUG_ON(is_bad_inode(inode));
        } else {
                BUG_ON(1);
        }
        BTRFS_I(inode)->index_cnt = group->key.objectid;

        err = btrfs_orphan_add(trans, inode);
out:
        btrfs_end_transaction(trans, root);
        if (err) {
                if (inode)
                        iput(inode);
                inode = ERR_PTR(err);
        }
        return inode;
}

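/*
 * copy the checksums of a relocated data extent to its new location.
 * The old disk bytenr is recovered from the file offset plus index_cnt,
 * the csums for that range are looked up in the csum tree, and each
 * sector sum is rebased onto the start of the new ordered extent.
 */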
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
        struct btrfs_ordered_sum *sums;
        struct btrfs_sector_sum *sector_sum;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct list_head list;
        size_t offset;
        int ret;
        u64 disk_bytenr;

        INIT_LIST_HEAD(&list);

        ordered = btrfs_lookup_ordered_extent(inode, file_pos);
        BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

        disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
                                       disk_bytenr + len - 1, &list);
        BUG_ON(ret);

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del_init(&sums->list);

                sector_sum = sums->sums;
                sums->bytenr = ordered->start;

                offset = 0;
                while (offset < sums->len) {
                        sector_sum->bytenr += ordered->start - disk_bytenr;
                        sector_sum++;
                        offset += root->sectorsize;
                }

                btrfs_add_ordered_sum(inode, ordered, sums);
        }
        btrfs_put_ordered_extent(ordered);
        return 0;
}

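/*
 * move every extent out of a block group.  The group is made read-only
 * and then scanned repeatedly: pass 0 copies data extents into the
 * relocation inode, later passes update the references.  Scanning repeats
 * until a pass finds no extents; if several passes make no progress at
 * all, a fresh relocation inode is created and the passes start over.
 */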
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_fs_info *info = root->fs_info;
        struct extent_buffer *leaf;
        struct inode *reloc_inode;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_key key;
        u64 skipped;
        u64 cur_byte;
        u64 total_found;
        u32 nritems;
        int ret;
        int progress;
        int pass = 0;

        root = root->fs_info->extent_root;

        block_group = btrfs_lookup_block_group(info, group_start);
        BUG_ON(!block_group);

        printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
               (unsigned long long)block_group->key.objectid,
               (unsigned long long)block_group->flags);

        path = btrfs_alloc_path();
        BUG_ON(!path);

        reloc_inode = create_reloc_inode(info, block_group);
        BUG_ON(IS_ERR(reloc_inode));

        __alloc_chunk_for_shrink(root, block_group, 1);
        set_block_group_readonly(block_group);

        btrfs_start_delalloc_inodes(info->tree_root);
        btrfs_wait_ordered_extents(info->tree_root, 0);
again:
        skipped = 0;
        total_found = 0;
        progress = 0;
        key.objectid = block_group->key.objectid;
        key.offset = 0;
        key.type = 0;
        cur_byte = key.objectid;

        trans = btrfs_start_transaction(info->tree_root, 1);
        btrfs_commit_transaction(trans, info->tree_root);

        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_clean_old_snapshots(info->tree_root);
        btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
        mutex_unlock(&root->fs_info->cleaner_mutex);

        trans = btrfs_start_transaction(info->tree_root, 1);
        btrfs_commit_transaction(trans, info->tree_root);

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;
next:
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;
                        if (ret == 1) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (progress && need_resched()) {
                        btrfs_release_path(root, path);
                        cond_resched();
                        progress = 0;
                        continue;
                }
                progress = 1;

                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
                    key.objectid + key.offset <= cur_byte) {
                        path->slots[0]++;
                        goto next;
                }

                total_found++;
                cur_byte = key.objectid + key.offset;
                btrfs_release_path(root, path);

                __alloc_chunk_for_shrink(root, block_group, 0);
                ret = relocate_one_extent(root, path, &key, block_group,
                                          reloc_inode, pass);
                BUG_ON(ret < 0);
                if (ret > 0)
                        skipped++;

                key.objectid = cur_byte;
                key.type = 0;
                key.offset = 0;
        }

        btrfs_release_path(root, path);

        if (pass == 0) {
                btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
                invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
        }

        if (total_found > 0) {
                printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
                       (unsigned long long)total_found, pass);
                pass++;
                if (total_found == skipped && pass > 2) {
                        iput(reloc_inode);
                        reloc_inode = create_reloc_inode(info, block_group);
                        pass = 0;
                }
                goto again;
        }

        /* delete reloc_inode */
        iput(reloc_inode);

        /* unpin extents in this range */
        trans = btrfs_start_transaction(info->tree_root, 1);
        btrfs_commit_transaction(trans, info->tree_root);

        spin_lock(&block_group->lock);
        WARN_ON(block_group->pinned > 0);
        WARN_ON(block_group->reserved > 0);
        WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
        spin_unlock(&block_group->lock);
        btrfs_put_block_group(block_group);
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
#endif

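/*
 * walk forward from *key in the extent tree and leave the path pointing
 * at the first block group item at or beyond it.  Returns 0 on success
 * and -ENOENT when no more block groups exist.
 */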
static int find_first_block_group(struct btrfs_root *root,
                struct btrfs_path *path, struct btrfs_key *key)
{
        int ret = 0;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int slot;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.objectid >= key->objectid &&
                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        ret = 0;
                        goto out;
                }
                path->slots[0]++;
        }
        ret = -ENOENT;
out:
        return ret;
}

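/*
 * tear down all in-memory block group caches and space_info structures.
 * Only called during the final stages of unmount, after any in-progress
 * caching threads have been waited on.
 */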
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct rb_node *n;

        spin_lock(&info->block_group_cache_lock);
        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
                block_group = rb_entry(n, struct btrfs_block_group_cache,
                                       cache_node);
                rb_erase(&block_group->cache_node,
                         &info->block_group_cache_tree);
                spin_unlock(&info->block_group_cache_lock);

                down_write(&block_group->space_info->groups_sem);
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);

                if (block_group->cached == BTRFS_CACHE_STARTED)
                        wait_event(block_group->caching_q,
                                   block_group_cache_done(block_group));

                btrfs_remove_free_space_cache(block_group);

                WARN_ON(atomic_read(&block_group->count) != 1);
                kfree(block_group);

                spin_lock(&info->block_group_cache_lock);
        }
        spin_unlock(&info->block_group_cache_lock);

        /* now that all the block groups are freed, go through and
         * free all the space_info structs.  This is only called during
         * the final stages of unmount, and so we know nobody is
         * using them.  We call synchronize_rcu() once before we start,
         * just to be on the safe side.
         */
        synchronize_rcu();

        while (!list_empty(&info->space_info)) {
                space_info = list_entry(info->space_info.next,
                                        struct btrfs_space_info,
                                        list);

                list_del(&space_info->list);
                kfree(space_info);
        }
        return 0;
}

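/*
 * read every block group item out of the extent tree at mount time,
 * build the in-memory caches and attach each one to its space_info.
 * Completely full and completely empty groups are marked cached right
 * away so they never need the expensive caching pass.
 */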
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;

        root = info->extent_root;
        key.objectid = 0;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = find_first_block_group(root, path, &key);
                if (ret > 0)
                        break;
                if (ret != 0)
                        goto error;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                cache = kzalloc(sizeof(*cache), GFP_NOFS);
                if (!cache) {
                        ret = -ENOMEM;
                        goto error;
                }

                atomic_set(&cache->count, 1);
                spin_lock_init(&cache->lock);
                spin_lock_init(&cache->tree_lock);
                cache->fs_info = info;
                init_waitqueue_head(&cache->caching_q);
                INIT_LIST_HEAD(&cache->list);
                INIT_LIST_HEAD(&cache->cluster_list);

                /*
                 * we only want to have 32k of ram per block group for keeping
                 * track of free space, and if we pass 1/2 of that we want to
                 * start converting things over to using bitmaps
                 */
                cache->extents_thresh = ((1024 * 32) / 2) /
                        sizeof(struct btrfs_free_space);

                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
                memcpy(&cache->key, &found_key, sizeof(found_key));

                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, path);
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;

                remove_sb_from_cache(root, cache);

                /*
                 * check for two cases, either we are full, and therefore
                 * don't need to bother with the caching work since we won't
                 * find any space, or we are empty, and we can just add all
                 * the space in and be done with it.  This saves us a lot of
                 * time, particularly in the full case.
                 */
                if (found_key.offset == btrfs_block_group_used(&cache->item)) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                } else if (btrfs_block_group_used(&cache->item) == 0) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        add_new_free_space(cache, root->fs_info,
                                           found_key.objectid,
                                           found_key.objectid +
                                           found_key.offset);
                }

                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
                                        &space_info);
                BUG_ON(ret);
                cache->space_info = space_info;
                down_write(&space_info->groups_sem);
                list_add_tail(&cache->list, &space_info->block_groups);
                up_write(&space_info->groups_sem);

                ret = btrfs_add_block_group_cache(root->fs_info, cache);
                BUG_ON(ret);

                set_avail_alloc_bits(root->fs_info, cache->flags);
                if (btrfs_chunk_readonly(root, cache->key.objectid))
                        set_block_group_readonly(cache);
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

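/*
 * create the in-memory cache and on-disk item for a brand new block group
 * backing a freshly allocated chunk.  The group starts out fully cached
 * since its free space is simply the unused part of the chunk.
 */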
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,
                           u64 size)
{
        int ret;
        struct btrfs_root *extent_root;
        struct btrfs_block_group_cache *cache;

        extent_root = root->fs_info->extent_root;

        root->fs_info->last_trans_log_full_commit = trans->transid;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return -ENOMEM;

        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        cache->sectorsize = root->sectorsize;

        /*
         * we only want to have 32k of ram per block group for keeping track
         * of free space, and if we pass 1/2 of that we want to start
         * converting things over to using bitmaps
         */
        cache->extents_thresh = ((1024 * 32) / 2) /
                sizeof(struct btrfs_free_space);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        spin_lock_init(&cache->tree_lock);
        init_waitqueue_head(&cache->caching_q);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);

        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
        btrfs_set_block_group_flags(&cache->item, type);

        cache->cached = BTRFS_CACHE_FINISHED;
        remove_sb_from_cache(root, cache);

        add_new_free_space(cache, root->fs_info, chunk_offset,
                           chunk_offset + size);

        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);
        down_write(&cache->space_info->groups_sem);
        list_add_tail(&cache->list, &cache->space_info->block_groups);
        up_write(&cache->space_info->groups_sem);

        ret = btrfs_add_block_group_cache(root->fs_info, cache);
        BUG_ON(ret);

        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                                sizeof(cache->item));
        BUG_ON(ret);

        set_avail_alloc_bits(extent_root->fs_info, type);

        return 0;
}

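/*
 * remove an empty, read-only block group: detach it from any allocation
 * clusters and the block group cache, drop it from its space_info and
 * delete its item from the extent tree.
 */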
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
{
        struct btrfs_path *path;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_free_cluster *cluster;
        struct btrfs_key key;
        int ret;

        root = root->fs_info->extent_root;

        block_group = btrfs_lookup_block_group(root->fs_info, group_start);
        BUG_ON(!block_group);
        BUG_ON(!block_group->ro);

        memcpy(&key, &block_group->key, sizeof(key));

        /* make sure this block group isn't part of an allocation cluster */
        cluster = &root->fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * make sure this block group isn't part of a metadata
         * allocation cluster
         */
        cluster = &root->fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        path = btrfs_alloc_path();
        BUG_ON(!path);

        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
        spin_unlock(&root->fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
        up_write(&block_group->space_info->groups_sem);

        if (block_group->cached == BTRFS_CACHE_STARTED)
                wait_event(block_group->caching_q,
                           block_group_cache_done(block_group));

        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        spin_unlock(&block_group->space_info->lock);

        btrfs_clear_space_info_full(root->fs_info);

        /* once for the rbtree reference dropped above, once for our lookup */
        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}