/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

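/*
 * Racy check (hence the barrier) for whether this block group's free
 * space cache has finished loading; callers that need certainty wait on
 * caching_ctl->wait instead.
 */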
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the given bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

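/*
 * Account the superblock mirrors that live inside this block group as
 * excluded space so the caching code never adds them to the free space
 * cache.
 */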
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

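/*
 * Worker thread for the slow caching path: scan the extent tree (via
 * the commit root) for this block group and feed the gaps between
 * allocated extents into its free space cache.
 */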
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        caching_ctl->progress = last;
                        btrfs_release_path(extent_root, path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
                                schedule_timeout(1);
                        else
                                cond_resched();
                        goto again;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

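/*
 * Begin caching a block group's free space: first try loading the
 * on-disk free space cache, and if that isn't possible fall back to
 * scanning the extent tree from a caching kthread.
 */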
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        /*
         * We can't read from the on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (!trans->transaction->in_commit &&
            (root && root != root->fs_info->tree_root)) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
                        return 0;
                }
                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);

                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
                if (ret == 1) {
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        }

        if (load_cache_only)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for the caching kthread, one for the caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
                 BTRFS_BLOCK_GROUP_METADATA;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

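/*
 * Scale num by factor/10 (div_factor) or factor/100 (div_factor_fine)
 * with 64-bit division, e.g. div_factor(1024, 9) == 921.
 */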
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * what the reference count and extent flags will be once all of the
 * queued delayed refs are processed, without actually processing them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(root->fs_info->extent_root, path);

                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs are generic, and
 * can be used in all cases where the implicit back refs are used. The major
 * shortcoming of the full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increasing the lower level extents' reference counts. The
 * original implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increasing the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */

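/*
 * Illustrative example (hypothetical values): a data extent at bytenr B
 * referenced from inode 257 at file offset 0 in the fs tree gets an
 * implicit back ref keyed as
 *
 *     (B, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(BTRFS_FS_TREE_OBJECTID, 257, 0))
 *
 * while a full back ref for the same extent from a leaf at bytenr P is
 * keyed as (B, BTRFS_SHARED_DATA_REF_KEY, P).
 */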
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

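/*
 * Hash (root_objectid, owner, offset) into the 64-bit key offset used
 * for implicit data back refs: two crc32c sums, with the root's crc in
 * the high bits so refs from the same tree sort near each other.
 */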
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

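/*
 * Find the data back ref item for an extent.  A shared ref is keyed
 * directly by its parent block, while implicit refs share a hashed key
 * offset and have to be walked until match_extent_data_ref() hits.
 */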
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

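/*
 * Insert a new data back ref item or bump the count on an existing one.
 * Implicit refs that collide on the hashed key offset are resolved by
 * linear probing: bump key.offset and retry the insert.
 */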
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

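/*
 * Drop refs_to_drop references from the data back ref item the path
 * points at, deleting the item once its count reaches zero.
 */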
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

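/*
 * Look up a tree block back ref item: shared refs are keyed by the
 * parent block, non-shared refs by the owning root's objectid.
 */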
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

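/*
 * Insert a tree block back ref as an empty item; all of the information
 * is carried by the key itself.
 */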
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

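/*
 * Pick the back ref key type: owners below BTRFS_FIRST_FREE_OBJECTID
 * are tree blocks, everything else is data, and a non-zero parent
 * selects the shared variant of each.
 */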
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

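/*
 * Return in *key the key immediately after the current path position,
 * walking up the levels as needed; returns 1 if the path is already at
 * the last key of the tree.
 */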
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

1349
1350 /*
1351  * look for inline back ref. if back ref is found, *ref_ret is set
1352  * to the address of inline back ref, and 0 is returned.
1353  *
1354  * if back ref isn't found, *ref_ret is set to the address where it
1355  * should be inserted, and -ENOENT is returned.
1356  *
1357  * if insert is true and there are too many inline back refs, the path
1358  * points to the extent item, and -EAGAIN is returned.
1359  *
1360  * NOTE: inline back refs are ordered in the same way that back ref
1361  *       items in the tree are ordered.
1362  */
1363 static noinline_for_stack
1364 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1365                                  struct btrfs_root *root,
1366                                  struct btrfs_path *path,
1367                                  struct btrfs_extent_inline_ref **ref_ret,
1368                                  u64 bytenr, u64 num_bytes,
1369                                  u64 parent, u64 root_objectid,
1370                                  u64 owner, u64 offset, int insert)
1371 {
1372         struct btrfs_key key;
1373         struct extent_buffer *leaf;
1374         struct btrfs_extent_item *ei;
1375         struct btrfs_extent_inline_ref *iref;
1376         u64 flags;
1377         u64 item_size;
1378         unsigned long ptr;
1379         unsigned long end;
1380         int extra_size;
1381         int type;
1382         int want;
1383         int ret;
1384         int err = 0;
1385
1386         key.objectid = bytenr;
1387         key.type = BTRFS_EXTENT_ITEM_KEY;
1388         key.offset = num_bytes;
1389
1390         want = extent_ref_type(parent, owner);
1391         if (insert) {
1392                 extra_size = btrfs_extent_inline_ref_size(want);
1393                 path->keep_locks = 1;
1394         } else
1395                 extra_size = -1;
1396         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1397         if (ret < 0) {
1398                 err = ret;
1399                 goto out;
1400         }
1401         BUG_ON(ret);
1402
1403         leaf = path->nodes[0];
1404         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1405 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1406         if (item_size < sizeof(*ei)) {
1407                 if (!insert) {
1408                         err = -ENOENT;
1409                         goto out;
1410                 }
1411                 ret = convert_extent_item_v0(trans, root, path, owner,
1412                                              extra_size);
1413                 if (ret < 0) {
1414                         err = ret;
1415                         goto out;
1416                 }
1417                 leaf = path->nodes[0];
1418                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1419         }
1420 #endif
1421         BUG_ON(item_size < sizeof(*ei));
1422
1423         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1424         flags = btrfs_extent_flags(leaf, ei);
1425
1426         ptr = (unsigned long)(ei + 1);
1427         end = (unsigned long)ei + item_size;
1428
1429         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1430                 ptr += sizeof(struct btrfs_tree_block_info);
1431                 BUG_ON(ptr > end);
1432         } else {
1433                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1434         }
1435
1436         err = -ENOENT;
1437         while (1) {
1438                 if (ptr >= end) {
1439                         WARN_ON(ptr > end);
1440                         break;
1441                 }
1442                 iref = (struct btrfs_extent_inline_ref *)ptr;
1443                 type = btrfs_extent_inline_ref_type(leaf, iref);
1444                 if (want < type)
1445                         break;
1446                 if (want > type) {
1447                         ptr += btrfs_extent_inline_ref_size(type);
1448                         continue;
1449                 }
1450
1451                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1452                         struct btrfs_extent_data_ref *dref;
1453                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1454                         if (match_extent_data_ref(leaf, dref, root_objectid,
1455                                                   owner, offset)) {
1456                                 err = 0;
1457                                 break;
1458                         }
1459                         if (hash_extent_data_ref_item(leaf, dref) <
1460                             hash_extent_data_ref(root_objectid, owner, offset))
1461                                 break;
1462                 } else {
1463                         u64 ref_offset;
1464                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1465                         if (parent > 0) {
1466                                 if (parent == ref_offset) {
1467                                         err = 0;
1468                                         break;
1469                                 }
1470                                 if (ref_offset < parent)
1471                                         break;
1472                         } else {
1473                                 if (root_objectid == ref_offset) {
1474                                         err = 0;
1475                                         break;
1476                                 }
1477                                 if (ref_offset < root_objectid)
1478                                         break;
1479                         }
1480                 }
1481                 ptr += btrfs_extent_inline_ref_size(type);
1482         }
1483         if (err == -ENOENT && insert) {
1484                 if (item_size + extra_size >=
1485                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1486                         err = -EAGAIN;
1487                         goto out;
1488                 }
1489                 /*
1490                  * To add a new inline back ref, we have to make sure
1491                  * there is no corresponding back ref item.
1492                  * For simplicity, we just do not add a new inline back
1493                  * ref if there is any kind of item for this block.
1494                  */
1495                 if (find_next_key(path, 0, &key) == 0 &&
1496                     key.objectid == bytenr &&
1497                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1498                         err = -EAGAIN;
1499                         goto out;
1500                 }
1501         }
1502         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1503 out:
1504         if (insert) {
1505                 path->keep_locks = 0;
1506                 btrfs_unlock_up_safe(path, 1);
1507         }
1508         return err;
1509 }
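/*
 * Sketch of the extent item layout walked by the loop above (non-v0
 * items only):
 *
 *   btrfs_extent_item                     <- ei
 *   [btrfs_tree_block_info]               only if FLAG_TREE_BLOCK set
 *   inline ref, inline ref, ...           <- ptr steps through these
 *                                         <- end (end of item)
 *
 * A minimal caller sketch following the contract documented above
 * (hypothetical surrounding code):
 *
 *   ret = lookup_inline_extent_backref(trans, root, path, &iref,
 *                                      bytenr, num_bytes, parent,
 *                                      root_objectid, owner, offset, 1);
 *   if (ret == 0)            update the ref that *iref points at
 *   else if (ret == -ENOENT) insert a new inline ref at *iref
 *   else if (ret == -EAGAIN) no room inline, add a keyed ref item
 */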
1510
1511 /*
1512  * helper to add new inline back ref
1513  */
1514 static noinline_for_stack
1515 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1516                                 struct btrfs_root *root,
1517                                 struct btrfs_path *path,
1518                                 struct btrfs_extent_inline_ref *iref,
1519                                 u64 parent, u64 root_objectid,
1520                                 u64 owner, u64 offset, int refs_to_add,
1521                                 struct btrfs_delayed_extent_op *extent_op)
1522 {
1523         struct extent_buffer *leaf;
1524         struct btrfs_extent_item *ei;
1525         unsigned long ptr;
1526         unsigned long end;
1527         unsigned long item_offset;
1528         u64 refs;
1529         int size;
1530         int type;
1531         int ret;
1532
1533         leaf = path->nodes[0];
1534         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1535         item_offset = (unsigned long)iref - (unsigned long)ei;
1536
1537         type = extent_ref_type(parent, owner);
1538         size = btrfs_extent_inline_ref_size(type);
1539
1540         ret = btrfs_extend_item(trans, root, path, size);
1541         BUG_ON(ret);
1542
1543         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1544         refs = btrfs_extent_refs(leaf, ei);
1545         refs += refs_to_add;
1546         btrfs_set_extent_refs(leaf, ei, refs);
1547         if (extent_op)
1548                 __run_delayed_extent_op(extent_op, leaf, ei);
1549
1550         ptr = (unsigned long)ei + item_offset;
1551         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1552         if (ptr < end - size)
1553                 memmove_extent_buffer(leaf, ptr + size, ptr,
1554                                       end - size - ptr);
1555
1556         iref = (struct btrfs_extent_inline_ref *)ptr;
1557         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1558         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1559                 struct btrfs_extent_data_ref *dref;
1560                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1561                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1562                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1563                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1564                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1565         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1566                 struct btrfs_shared_data_ref *sref;
1567                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1568                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1569                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1570         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1571                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1572         } else {
1573                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1574         }
1575         btrfs_mark_buffer_dirty(leaf);
1576         return 0;
1577 }
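/*
 * Illustration of the insertion above: the memmove opens a 'size' byte
 * gap at the sorted position and the new ref is written into it, e.g.
 *
 *   before:  | extent item | ref A | ref C |
 *   after:   | extent item | ref A | ref B | ref C |
 *
 * keeping the inline refs in the same order as keyed back ref items.
 */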
1578
1579 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1580                                  struct btrfs_root *root,
1581                                  struct btrfs_path *path,
1582                                  struct btrfs_extent_inline_ref **ref_ret,
1583                                  u64 bytenr, u64 num_bytes, u64 parent,
1584                                  u64 root_objectid, u64 owner, u64 offset)
1585 {
1586         int ret;
1587
1588         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1589                                            bytenr, num_bytes, parent,
1590                                            root_objectid, owner, offset, 0);
1591         if (ret != -ENOENT)
1592                 return ret;
1593
1594         btrfs_release_path(root, path);
1595         *ref_ret = NULL;
1596
1597         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1598                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1599                                             root_objectid);
1600         } else {
1601                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1602                                              root_objectid, owner, offset);
1603         }
1604         return ret;
1605 }
1606
1607 /*
1608  * helper to update/remove inline back ref
1609  */
1610 static noinline_for_stack
1611 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1612                                  struct btrfs_root *root,
1613                                  struct btrfs_path *path,
1614                                  struct btrfs_extent_inline_ref *iref,
1615                                  int refs_to_mod,
1616                                  struct btrfs_delayed_extent_op *extent_op)
1617 {
1618         struct extent_buffer *leaf;
1619         struct btrfs_extent_item *ei;
1620         struct btrfs_extent_data_ref *dref = NULL;
1621         struct btrfs_shared_data_ref *sref = NULL;
1622         unsigned long ptr;
1623         unsigned long end;
1624         u32 item_size;
1625         int size;
1626         int type;
1627         int ret;
1628         u64 refs;
1629
1630         leaf = path->nodes[0];
1631         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1632         refs = btrfs_extent_refs(leaf, ei);
1633         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1634         refs += refs_to_mod;
1635         btrfs_set_extent_refs(leaf, ei, refs);
1636         if (extent_op)
1637                 __run_delayed_extent_op(extent_op, leaf, ei);
1638
1639         type = btrfs_extent_inline_ref_type(leaf, iref);
1640
1641         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1642                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1643                 refs = btrfs_extent_data_ref_count(leaf, dref);
1644         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1645                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1646                 refs = btrfs_shared_data_ref_count(leaf, sref);
1647         } else {
1648                 refs = 1;
1649                 BUG_ON(refs_to_mod != -1);
1650         }
1651
1652         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1653         refs += refs_to_mod;
1654
1655         if (refs > 0) {
1656                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1657                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1658                 else
1659                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1660         } else {
1661                 size = btrfs_extent_inline_ref_size(type);
1662                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1663                 ptr = (unsigned long)iref;
1664                 end = (unsigned long)ei + item_size;
1665                 if (ptr + size < end)
1666                         memmove_extent_buffer(leaf, ptr, ptr + size,
1667                                               end - ptr - size);
1668                 item_size -= size;
1669                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1670                 BUG_ON(ret);
1671         }
1672         btrfs_mark_buffer_dirty(leaf);
1673         return 0;
1674 }
1675
1676 static noinline_for_stack
1677 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1678                                  struct btrfs_root *root,
1679                                  struct btrfs_path *path,
1680                                  u64 bytenr, u64 num_bytes, u64 parent,
1681                                  u64 root_objectid, u64 owner,
1682                                  u64 offset, int refs_to_add,
1683                                  struct btrfs_delayed_extent_op *extent_op)
1684 {
1685         struct btrfs_extent_inline_ref *iref;
1686         int ret;
1687
1688         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1689                                            bytenr, num_bytes, parent,
1690                                            root_objectid, owner, offset, 1);
1691         if (ret == 0) {
1692                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1693                 ret = update_inline_extent_backref(trans, root, path, iref,
1694                                                    refs_to_add, extent_op);
1695         } else if (ret == -ENOENT) {
1696                 ret = setup_inline_extent_backref(trans, root, path, iref,
1697                                                   parent, root_objectid,
1698                                                   owner, offset, refs_to_add,
1699                                                   extent_op);
1700         }
1701         return ret;
1702 }
1703
1704 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1705                                  struct btrfs_root *root,
1706                                  struct btrfs_path *path,
1707                                  u64 bytenr, u64 parent, u64 root_objectid,
1708                                  u64 owner, u64 offset, int refs_to_add)
1709 {
1710         int ret;
1711         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1712                 BUG_ON(refs_to_add != 1);
1713                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1714                                             parent, root_objectid);
1715         } else {
1716                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1717                                              parent, root_objectid,
1718                                              owner, offset, refs_to_add);
1719         }
1720         return ret;
1721 }
1722
1723 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1724                                  struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref *iref,
1727                                  int refs_to_drop, int is_data)
1728 {
1729         int ret;
1730
1731         BUG_ON(!is_data && refs_to_drop != 1);
1732         if (iref) {
1733                 ret = update_inline_extent_backref(trans, root, path, iref,
1734                                                    -refs_to_drop, NULL);
1735         } else if (is_data) {
1736                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1737         } else {
1738                 ret = btrfs_del_item(trans, root, path);
1739         }
1740         return ret;
1741 }
1742
1743 static void btrfs_issue_discard(struct block_device *bdev,
1744                                 u64 start, u64 len)
1745 {
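        /* the block layer addresses 512-byte sectors, hence the shifts by 9 */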
1746         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
1747 }
1748
1749 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1750                                 u64 num_bytes)
1751 {
1752         int ret;
1753         u64 map_length = num_bytes;
1754         struct btrfs_multi_bio *multi = NULL;
1755
1756         if (!btrfs_test_opt(root, DISCARD))
1757                 return 0;
1758
1759         /* Tell the block device(s) that the sectors can be discarded */
1760         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1761                               bytenr, &map_length, &multi, 0);
1762         if (!ret) {
1763                 struct btrfs_bio_stripe *stripe = multi->stripes;
1764                 int i;
1765
1766                 if (map_length > num_bytes)
1767                         map_length = num_bytes;
1768
1769                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1770                         btrfs_issue_discard(stripe->dev->bdev,
1771                                             stripe->physical,
1772                                             map_length);
1773                 }
1774                 kfree(multi);
1775         }
1776
1777         return ret;
1778 }
1779
1780 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1781                          struct btrfs_root *root,
1782                          u64 bytenr, u64 num_bytes, u64 parent,
1783                          u64 root_objectid, u64 owner, u64 offset)
1784 {
1785         int ret;
1786         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1787                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1788
1789         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1790                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1791                                         parent, root_objectid, (int)owner,
1792                                         BTRFS_ADD_DELAYED_REF, NULL);
1793         } else {
1794                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1795                                         parent, root_objectid, owner, offset,
1796                                         BTRFS_ADD_DELAYED_REF, NULL);
1797         }
1798         return ret;
1799 }
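/*
 * Illustrative call (hypothetical values): adding a ref for a file
 * extent only queues a delayed ref here; the extent tree itself is
 * updated later by btrfs_run_delayed_refs():
 *
 *   ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *                              root->root_key.objectid, inode_objectid,
 *                              file_offset);
 */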
1800
1801 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1802                                   struct btrfs_root *root,
1803                                   u64 bytenr, u64 num_bytes,
1804                                   u64 parent, u64 root_objectid,
1805                                   u64 owner, u64 offset, int refs_to_add,
1806                                   struct btrfs_delayed_extent_op *extent_op)
1807 {
1808         struct btrfs_path *path;
1809         struct extent_buffer *leaf;
1810         struct btrfs_extent_item *item;
1811         u64 refs;
1812         int ret;
1813         int err = 0;
1814
1815         path = btrfs_alloc_path();
1816         if (!path)
1817                 return -ENOMEM;
1818
1819         path->reada = 1;
1820         path->leave_spinning = 1;
1821         /* this will set up the path even if it fails to insert the back ref */
1822         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1823                                            path, bytenr, num_bytes, parent,
1824                                            root_objectid, owner, offset,
1825                                            refs_to_add, extent_op);
1826         if (ret == 0)
1827                 goto out;
1828
1829         if (ret != -EAGAIN) {
1830                 err = ret;
1831                 goto out;
1832         }
1833
1834         leaf = path->nodes[0];
1835         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1836         refs = btrfs_extent_refs(leaf, item);
1837         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1838         if (extent_op)
1839                 __run_delayed_extent_op(extent_op, leaf, item);
1840
1841         btrfs_mark_buffer_dirty(leaf);
1842         btrfs_release_path(root->fs_info->extent_root, path);
1843
1844         path->reada = 1;
1845         path->leave_spinning = 1;
1846
1847         /* now insert the actual backref */
1848         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1849                                     path, bytenr, parent, root_objectid,
1850                                     owner, offset, refs_to_add);
1851         BUG_ON(ret);
1852 out:
1853         btrfs_free_path(path);
1854         return err;
1855 }
1856
1857 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1858                                 struct btrfs_root *root,
1859                                 struct btrfs_delayed_ref_node *node,
1860                                 struct btrfs_delayed_extent_op *extent_op,
1861                                 int insert_reserved)
1862 {
1863         int ret = 0;
1864         struct btrfs_delayed_data_ref *ref;
1865         struct btrfs_key ins;
1866         u64 parent = 0;
1867         u64 ref_root = 0;
1868         u64 flags = 0;
1869
1870         ins.objectid = node->bytenr;
1871         ins.offset = node->num_bytes;
1872         ins.type = BTRFS_EXTENT_ITEM_KEY;
1873
1874         ref = btrfs_delayed_node_to_data_ref(node);
1875         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1876                 parent = ref->parent;
1877         else
1878                 ref_root = ref->root;
1879
1880         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1881                 if (extent_op) {
1882                         BUG_ON(extent_op->update_key);
1883                         flags |= extent_op->flags_to_set;
1884                 }
1885                 ret = alloc_reserved_file_extent(trans, root,
1886                                                  parent, ref_root, flags,
1887                                                  ref->objectid, ref->offset,
1888                                                  &ins, node->ref_mod);
1889         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1890                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1891                                              node->num_bytes, parent,
1892                                              ref_root, ref->objectid,
1893                                              ref->offset, node->ref_mod,
1894                                              extent_op);
1895         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1896                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1897                                           node->num_bytes, parent,
1898                                           ref_root, ref->objectid,
1899                                           ref->offset, node->ref_mod,
1900                                           extent_op);
1901         } else {
1902                 BUG();
1903         }
1904         return ret;
1905 }
1906
1907 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1908                                     struct extent_buffer *leaf,
1909                                     struct btrfs_extent_item *ei)
1910 {
1911         u64 flags = btrfs_extent_flags(leaf, ei);
1912         if (extent_op->update_flags) {
1913                 flags |= extent_op->flags_to_set;
1914                 btrfs_set_extent_flags(leaf, ei, flags);
1915         }
1916
1917         if (extent_op->update_key) {
1918                 struct btrfs_tree_block_info *bi;
1919                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1920                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1921                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1922         }
1923 }
1924
1925 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1926                                  struct btrfs_root *root,
1927                                  struct btrfs_delayed_ref_node *node,
1928                                  struct btrfs_delayed_extent_op *extent_op)
1929 {
1930         struct btrfs_key key;
1931         struct btrfs_path *path;
1932         struct btrfs_extent_item *ei;
1933         struct extent_buffer *leaf;
1934         u32 item_size;
1935         int ret;
1936         int err = 0;
1937
1938         path = btrfs_alloc_path();
1939         if (!path)
1940                 return -ENOMEM;
1941
1942         key.objectid = node->bytenr;
1943         key.type = BTRFS_EXTENT_ITEM_KEY;
1944         key.offset = node->num_bytes;
1945
1946         path->reada = 1;
1947         path->leave_spinning = 1;
1948         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1949                                 path, 0, 1);
1950         if (ret < 0) {
1951                 err = ret;
1952                 goto out;
1953         }
1954         if (ret > 0) {
1955                 err = -EIO;
1956                 goto out;
1957         }
1958
1959         leaf = path->nodes[0];
1960         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1961 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1962         if (item_size < sizeof(*ei)) {
1963                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1964                                              path, (u64)-1, 0);
1965                 if (ret < 0) {
1966                         err = ret;
1967                         goto out;
1968                 }
1969                 leaf = path->nodes[0];
1970                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1971         }
1972 #endif
1973         BUG_ON(item_size < sizeof(*ei));
1974         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1975         __run_delayed_extent_op(extent_op, leaf, ei);
1976
1977         btrfs_mark_buffer_dirty(leaf);
1978 out:
1979         btrfs_free_path(path);
1980         return err;
1981 }
1982
1983 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1984                                 struct btrfs_root *root,
1985                                 struct btrfs_delayed_ref_node *node,
1986                                 struct btrfs_delayed_extent_op *extent_op,
1987                                 int insert_reserved)
1988 {
1989         int ret = 0;
1990         struct btrfs_delayed_tree_ref *ref;
1991         struct btrfs_key ins;
1992         u64 parent = 0;
1993         u64 ref_root = 0;
1994
1995         ins.objectid = node->bytenr;
1996         ins.offset = node->num_bytes;
1997         ins.type = BTRFS_EXTENT_ITEM_KEY;
1998
1999         ref = btrfs_delayed_node_to_tree_ref(node);
2000         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2001                 parent = ref->parent;
2002         else
2003                 ref_root = ref->root;
2004
2005         BUG_ON(node->ref_mod != 1);
2006         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2007                 BUG_ON(!extent_op || !extent_op->update_flags ||
2008                        !extent_op->update_key);
2009                 ret = alloc_reserved_tree_block(trans, root,
2010                                                 parent, ref_root,
2011                                                 extent_op->flags_to_set,
2012                                                 &extent_op->key,
2013                                                 ref->level, &ins);
2014         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2015                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2016                                              node->num_bytes, parent, ref_root,
2017                                              ref->level, 0, 1, extent_op);
2018         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2019                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2020                                           node->num_bytes, parent, ref_root,
2021                                           ref->level, 0, 1, extent_op);
2022         } else {
2023                 BUG();
2024         }
2025         return ret;
2026 }
2027
2028 /* helper function to actually process a single delayed ref entry */
2029 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2030                                struct btrfs_root *root,
2031                                struct btrfs_delayed_ref_node *node,
2032                                struct btrfs_delayed_extent_op *extent_op,
2033                                int insert_reserved)
2034 {
2035         int ret;
2036         if (btrfs_delayed_ref_is_head(node)) {
2037                 struct btrfs_delayed_ref_head *head;
2038                 /*
2039                  * we've hit the end of the chain and we were supposed
2040                  * to insert this extent into the tree.  But it got
2041                  * deleted before we ever needed to insert it, so all
2042                  * we have to do is clean up the accounting.
2043                  */
2044                 BUG_ON(extent_op);
2045                 head = btrfs_delayed_node_to_head(node);
2046                 if (insert_reserved) {
2047                         btrfs_pin_extent(root, node->bytenr,
2048                                          node->num_bytes, 1);
2049                         if (head->is_data) {
2050                                 ret = btrfs_del_csums(trans, root,
2051                                                       node->bytenr,
2052                                                       node->num_bytes);
2053                                 BUG_ON(ret);
2054                         }
2055                 }
2056                 mutex_unlock(&head->mutex);
2057                 return 0;
2058         }
2059
2060         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2061             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2062                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2063                                            insert_reserved);
2064         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2065                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2066                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2067                                            insert_reserved);
2068         else
2069                 BUG();
2070         return ret;
2071 }
2072
2073 static noinline struct btrfs_delayed_ref_node *
2074 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2075 {
2076         struct rb_node *node;
2077         struct btrfs_delayed_ref_node *ref;
2078         int action = BTRFS_ADD_DELAYED_REF;
2079 again:
2080         /*
2081          * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2082          * this prevents the ref count from going down to zero while
2083          * there are still pending delayed refs.
2084          */
2085         node = rb_prev(&head->node.rb_node);
2086         while (1) {
2087                 if (!node)
2088                         break;
2089                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2090                                 rb_node);
2091                 if (ref->bytenr != head->node.bytenr)
2092                         break;
2093                 if (ref->action == action)
2094                         return ref;
2095                 node = rb_prev(node);
2096         }
2097         if (action == BTRFS_ADD_DELAYED_REF) {
2098                 action = BTRFS_DROP_DELAYED_REF;
2099                 goto again;
2100         }
2101         return NULL;
2102 }
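/*
 * Example of the ordering above (illustrative): if a head has pending
 * refs [DROP, ADD], running the DROP first could take the refcount to
 * zero and free an extent the ADD would immediately resurrect.
 * Returning ADDs first keeps the count positive for the whole run.
 */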
2103
2104 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2105                                        struct btrfs_root *root,
2106                                        struct list_head *cluster)
2107 {
2108         struct btrfs_delayed_ref_root *delayed_refs;
2109         struct btrfs_delayed_ref_node *ref;
2110         struct btrfs_delayed_ref_head *locked_ref = NULL;
2111         struct btrfs_delayed_extent_op *extent_op;
2112         int ret;
2113         int count = 0;
2114         int must_insert_reserved = 0;
2115
2116         delayed_refs = &trans->transaction->delayed_refs;
2117         while (1) {
2118                 if (!locked_ref) {
2119                         /* pick a new head ref from the cluster list */
2120                         if (list_empty(cluster))
2121                                 break;
2122
2123                         locked_ref = list_entry(cluster->next,
2124                                      struct btrfs_delayed_ref_head, cluster);
2125
2126                         /* grab the lock that says we are going to process
2127                          * all the refs for this head */
2128                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2129
2130                         /*
2131                          * we may have dropped the spin lock to get the head
2132                          * mutex lock, and that might have given someone else
2133                          * time to free the head.  If that's true, it has been
2134                          * removed from our list and we can move on.
2135                          */
2136                         if (ret == -EAGAIN) {
2137                                 locked_ref = NULL;
2138                                 count++;
2139                                 continue;
2140                         }
2141                 }
2142
2143                 /*
2144                  * record the must insert reserved flag before we
2145                  * drop the spin lock.
2146                  */
2147                 must_insert_reserved = locked_ref->must_insert_reserved;
2148                 locked_ref->must_insert_reserved = 0;
2149
2150                 extent_op = locked_ref->extent_op;
2151                 locked_ref->extent_op = NULL;
2152
2153                 /*
2154                  * locked_ref is the head node, so we have to go one
2155                  * node back for any delayed ref updates
2156                  */
2157                 ref = select_delayed_ref(locked_ref);
2158                 if (!ref) {
2159                         /* All delayed refs have been processed, go ahead
2160                          * and send the head node to run_one_delayed_ref
2161                          * so that any accounting fixes can happen.
2162                          */
2163                         ref = &locked_ref->node;
2164
2165                         if (extent_op && must_insert_reserved) {
2166                                 kfree(extent_op);
2167                                 extent_op = NULL;
2168                         }
2169
2170                         if (extent_op) {
2171                                 spin_unlock(&delayed_refs->lock);
2172
2173                                 ret = run_delayed_extent_op(trans, root,
2174                                                             ref, extent_op);
2175                                 BUG_ON(ret);
2176                                 kfree(extent_op);
2177
2178                                 cond_resched();
2179                                 spin_lock(&delayed_refs->lock);
2180                                 continue;
2181                         }
2182
2183                         list_del_init(&locked_ref->cluster);
2184                         locked_ref = NULL;
2185                 }
2186
2187                 ref->in_tree = 0;
2188                 rb_erase(&ref->rb_node, &delayed_refs->root);
2189                 delayed_refs->num_entries--;
2190
2191                 spin_unlock(&delayed_refs->lock);
2192
2193                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2194                                           must_insert_reserved);
2195                 BUG_ON(ret);
2196
2197                 btrfs_put_delayed_ref(ref);
2198                 kfree(extent_op);
2199                 count++;
2200
2201                 cond_resched();
2202                 spin_lock(&delayed_refs->lock);
2203         }
2204         return count;
2205 }
2206
2207 /*
2208  * this starts processing the delayed reference count updates and
2209  * extent insertions we have queued up so far.  count can be
2210  * 0, which means to process everything in the tree at the start
2211  * of the run (but not newly added entries), or it can be some target
2212  * number you'd like to process.
2213  */
2214 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2215                            struct btrfs_root *root, unsigned long count)
2216 {
2217         struct rb_node *node;
2218         struct btrfs_delayed_ref_root *delayed_refs;
2219         struct btrfs_delayed_ref_node *ref;
2220         struct list_head cluster;
2221         int ret;
2222         int run_all = count == (unsigned long)-1;
2223         int run_most = 0;
2224
2225         if (root == root->fs_info->extent_root)
2226                 root = root->fs_info->tree_root;
2227
2228         delayed_refs = &trans->transaction->delayed_refs;
2229         INIT_LIST_HEAD(&cluster);
2230 again:
2231         spin_lock(&delayed_refs->lock);
2232         if (count == 0) {
2233                 count = delayed_refs->num_entries * 2;
2234                 run_most = 1;
2235         }
2236         while (1) {
2237                 if (!(run_all || run_most) &&
2238                     delayed_refs->num_heads_ready < 64)
2239                         break;
2240
2241                 /*
2242                  * go find something we can process in the rbtree.  We start at
2243                  * the beginning of the tree, and then build a cluster
2244                  * of refs to process starting at the first one we are able to
2245                  * lock.
2246                  */
2247                 ret = btrfs_find_ref_cluster(trans, &cluster,
2248                                              delayed_refs->run_delayed_start);
2249                 if (ret)
2250                         break;
2251
2252                 ret = run_clustered_refs(trans, root, &cluster);
2253                 BUG_ON(ret < 0);
2254
2255                 count -= min_t(unsigned long, ret, count);
2256
2257                 if (count == 0)
2258                         break;
2259         }
2260
2261         if (run_all) {
2262                 node = rb_first(&delayed_refs->root);
2263                 if (!node)
2264                         goto out;
2265                 count = (unsigned long)-1;
2266
2267                 while (node) {
2268                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2269                                        rb_node);
2270                         if (btrfs_delayed_ref_is_head(ref)) {
2271                                 struct btrfs_delayed_ref_head *head;
2272
2273                                 head = btrfs_delayed_node_to_head(ref);
2274                                 atomic_inc(&ref->refs);
2275
2276                                 spin_unlock(&delayed_refs->lock);
2277                                 mutex_lock(&head->mutex);
2278                                 mutex_unlock(&head->mutex);
2279
2280                                 btrfs_put_delayed_ref(ref);
2281                                 cond_resched();
2282                                 goto again;
2283                         }
2284                         node = rb_next(node);
2285                 }
2286                 spin_unlock(&delayed_refs->lock);
2287                 schedule_timeout(1);
2288                 goto again;
2289         }
2290 out:
2291         spin_unlock(&delayed_refs->lock);
2292         return 0;
2293 }
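/*
 * Usage sketch (illustrative): throttled callers pass a target count,
 * while commit-time callers ask for everything:
 *
 *   btrfs_run_delayed_refs(trans, root, 0);    process what is queued now
 *   btrfs_run_delayed_refs(trans, root, (unsigned long)-1);   run it all
 */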
2294
2295 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2296                                 struct btrfs_root *root,
2297                                 u64 bytenr, u64 num_bytes, u64 flags,
2298                                 int is_data)
2299 {
2300         struct btrfs_delayed_extent_op *extent_op;
2301         int ret;
2302
2303         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2304         if (!extent_op)
2305                 return -ENOMEM;
2306
2307         extent_op->flags_to_set = flags;
2308         extent_op->update_flags = 1;
2309         extent_op->update_key = 0;
2310         extent_op->is_data = is_data ? 1 : 0;
2311
2312         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2313         if (ret)
2314                 kfree(extent_op);
2315         return ret;
2316 }
2317
2318 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2319                                       struct btrfs_root *root,
2320                                       struct btrfs_path *path,
2321                                       u64 objectid, u64 offset, u64 bytenr)
2322 {
2323         struct btrfs_delayed_ref_head *head;
2324         struct btrfs_delayed_ref_node *ref;
2325         struct btrfs_delayed_data_ref *data_ref;
2326         struct btrfs_delayed_ref_root *delayed_refs;
2327         struct rb_node *node;
2328         int ret = 0;
2329
2330         ret = -ENOENT;
2331         delayed_refs = &trans->transaction->delayed_refs;
2332         spin_lock(&delayed_refs->lock);
2333         head = btrfs_find_delayed_ref_head(trans, bytenr);
2334         if (!head)
2335                 goto out;
2336
2337         if (!mutex_trylock(&head->mutex)) {
2338                 atomic_inc(&head->node.refs);
2339                 spin_unlock(&delayed_refs->lock);
2340
2341                 btrfs_release_path(root->fs_info->extent_root, path);
2342
2343                 mutex_lock(&head->mutex);
2344                 mutex_unlock(&head->mutex);
2345                 btrfs_put_delayed_ref(&head->node);
2346                 return -EAGAIN;
2347         }
2348
2349         node = rb_prev(&head->node.rb_node);
2350         if (!node)
2351                 goto out_unlock;
2352
2353         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2354
2355         if (ref->bytenr != bytenr)
2356                 goto out_unlock;
2357
2358         ret = 1;
2359         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2360                 goto out_unlock;
2361
2362         data_ref = btrfs_delayed_node_to_data_ref(ref);
2363
2364         node = rb_prev(node);
2365         if (node) {
2366                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2367                 if (ref->bytenr == bytenr)
2368                         goto out_unlock;
2369         }
2370
2371         if (data_ref->root != root->root_key.objectid ||
2372             data_ref->objectid != objectid || data_ref->offset != offset)
2373                 goto out_unlock;
2374
2375         ret = 0;
2376 out_unlock:
2377         mutex_unlock(&head->mutex);
2378 out:
2379         spin_unlock(&delayed_refs->lock);
2380         return ret;
2381 }
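/*
 * Note the -EAGAIN contract above: when the head mutex is contended the
 * path is released and the caller is expected to retry, as
 * btrfs_cross_ref_exist() does below:
 *
 *   do {
 *           ret2 = check_delayed_ref(trans, root, path, objectid,
 *                                    offset, bytenr);
 *   } while (ret2 == -EAGAIN);
 */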
2382
2383 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2384                                         struct btrfs_root *root,
2385                                         struct btrfs_path *path,
2386                                         u64 objectid, u64 offset, u64 bytenr)
2387 {
2388         struct btrfs_root *extent_root = root->fs_info->extent_root;
2389         struct extent_buffer *leaf;
2390         struct btrfs_extent_data_ref *ref;
2391         struct btrfs_extent_inline_ref *iref;
2392         struct btrfs_extent_item *ei;
2393         struct btrfs_key key;
2394         u32 item_size;
2395         int ret;
2396
2397         key.objectid = bytenr;
2398         key.offset = (u64)-1;
2399         key.type = BTRFS_EXTENT_ITEM_KEY;
2400
2401         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2402         if (ret < 0)
2403                 goto out;
2404         BUG_ON(ret == 0);
2405
2406         ret = -ENOENT;
2407         if (path->slots[0] == 0)
2408                 goto out;
2409
2410         path->slots[0]--;
2411         leaf = path->nodes[0];
2412         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2413
2414         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2415                 goto out;
2416
2417         ret = 1;
2418         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2419 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2420         if (item_size < sizeof(*ei)) {
2421                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2422                 goto out;
2423         }
2424 #endif
2425         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2426
2427         if (item_size != sizeof(*ei) +
2428             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2429                 goto out;
2430
2431         if (btrfs_extent_generation(leaf, ei) <=
2432             btrfs_root_last_snapshot(&root->root_item))
2433                 goto out;
2434
2435         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2436         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2437             BTRFS_EXTENT_DATA_REF_KEY)
2438                 goto out;
2439
2440         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2441         if (btrfs_extent_refs(leaf, ei) !=
2442             btrfs_extent_data_ref_count(leaf, ref) ||
2443             btrfs_extent_data_ref_root(leaf, ref) !=
2444             root->root_key.objectid ||
2445             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2446             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2447                 goto out;
2448
2449         ret = 0;
2450 out:
2451         return ret;
2452 }
2453
2454 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2455                           struct btrfs_root *root,
2456                           u64 objectid, u64 offset, u64 bytenr)
2457 {
2458         struct btrfs_path *path;
2459         int ret;
2460         int ret2;
2461
2462         path = btrfs_alloc_path();
2463         if (!path)
2464                 return -ENOMEM;
2465
2466         do {
2467                 ret = check_committed_ref(trans, root, path, objectid,
2468                                           offset, bytenr);
2469                 if (ret && ret != -ENOENT)
2470                         goto out;
2471
2472                 ret2 = check_delayed_ref(trans, root, path, objectid,
2473                                          offset, bytenr);
2474         } while (ret2 == -EAGAIN);
2475
2476         if (ret2 && ret2 != -ENOENT) {
2477                 ret = ret2;
2478                 goto out;
2479         }
2480
2481         if (ret != -ENOENT || ret2 != -ENOENT)
2482                 ret = 0;
2483 out:
2484         btrfs_free_path(path);
2485         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2486                 WARN_ON(ret > 0);
2487         return ret;
2488 }
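/*
 * Caller's view (illustrative, 'ino' is a hypothetical inode number):
 * zero means the extent is referenced only by this root and may be
 * overwritten in place; anything nonzero (a cross reference or an
 * error) is treated as "must COW":
 *
 *   if (btrfs_cross_ref_exist(trans, root, ino, offset, bytenr))
 *           fall back to a COW write;
 */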
2489
2490 #if 0
2491 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2492                     struct extent_buffer *buf, u32 nr_extents)
2493 {
2494         struct btrfs_key key;
2495         struct btrfs_file_extent_item *fi;
2496         u64 root_gen;
2497         u32 nritems;
2498         int i;
2499         int level;
2500         int ret = 0;
2501         int shared = 0;
2502
2503         if (!root->ref_cows)
2504                 return 0;
2505
2506         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2507                 shared = 0;
2508                 root_gen = root->root_key.offset;
2509         } else {
2510                 shared = 1;
2511                 root_gen = trans->transid - 1;
2512         }
2513
2514         level = btrfs_header_level(buf);
2515         nritems = btrfs_header_nritems(buf);
2516
2517         if (level == 0) {
2518                 struct btrfs_leaf_ref *ref;
2519                 struct btrfs_extent_info *info;
2520
2521                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2522                 if (!ref) {
2523                         ret = -ENOMEM;
2524                         goto out;
2525                 }
2526
2527                 ref->root_gen = root_gen;
2528                 ref->bytenr = buf->start;
2529                 ref->owner = btrfs_header_owner(buf);
2530                 ref->generation = btrfs_header_generation(buf);
2531                 ref->nritems = nr_extents;
2532                 info = ref->extents;
2533
2534                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2535                         u64 disk_bytenr;
2536                         btrfs_item_key_to_cpu(buf, &key, i);
2537                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2538                                 continue;
2539                         fi = btrfs_item_ptr(buf, i,
2540                                             struct btrfs_file_extent_item);
2541                         if (btrfs_file_extent_type(buf, fi) ==
2542                             BTRFS_FILE_EXTENT_INLINE)
2543                                 continue;
2544                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2545                         if (disk_bytenr == 0)
2546                                 continue;
2547
2548                         info->bytenr = disk_bytenr;
2549                         info->num_bytes =
2550                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2551                         info->objectid = key.objectid;
2552                         info->offset = key.offset;
2553                         info++;
2554                 }
2555
2556                 ret = btrfs_add_leaf_ref(root, ref, shared);
2557                 if (ret == -EEXIST && shared) {
2558                         struct btrfs_leaf_ref *old;
2559                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2560                         BUG_ON(!old);
2561                         btrfs_remove_leaf_ref(root, old);
2562                         btrfs_free_leaf_ref(root, old);
2563                         ret = btrfs_add_leaf_ref(root, ref, shared);
2564                 }
2565                 WARN_ON(ret);
2566                 btrfs_free_leaf_ref(root, ref);
2567         }
2568 out:
2569         return ret;
2570 }
2571
2572 /* when a block goes through cow, we update the reference counts of
2573  * everything that block points to.  The internal pointers of the block
2574  * can be in just about any order, and it is likely to have clusters of
2575  * things that are close together and clusters of things that are not.
2576  *
2577  * To help reduce the seeks that come with updating all of these reference
2578  * counts, sort them by byte number before actual updates are done.
2579  *
2580  * struct refsort is used to match byte number to slot in the btree block.
2581  * we sort based on the byte number and then use the slot to actually
2582  * find the item.
2583  *
2584  * struct refsort is smaller than struct btrfs_item and smaller than
2585  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2586  * for a btree block, there's no way for a kmalloc of refsorts for a
2587  * single node to be bigger than a page.
2588  */
2589 struct refsort {
2590         u64 bytenr;
2591         u32 slot;
2592 };
2593
2594 /*
2595  * for passing into sort()
2596  */
2597 static int refsort_cmp(const void *a_void, const void *b_void)
2598 {
2599         const struct refsort *a = a_void;
2600         const struct refsort *b = b_void;
2601
2602         if (a->bytenr < b->bytenr)
2603                 return -1;
2604         if (a->bytenr > b->bytenr)
2605                 return 1;
2606         return 0;
2607 }
2608 #endif
2609
2610 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2611                            struct btrfs_root *root,
2612                            struct extent_buffer *buf,
2613                            int full_backref, int inc)
2614 {
2615         u64 bytenr;
2616         u64 num_bytes;
2617         u64 parent;
2618         u64 ref_root;
2619         u32 nritems;
2620         struct btrfs_key key;
2621         struct btrfs_file_extent_item *fi;
2622         int i;
2623         int level;
2624         int ret = 0;
2625         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2626                             u64, u64, u64, u64, u64, u64);
2627
2628         ref_root = btrfs_header_owner(buf);
2629         nritems = btrfs_header_nritems(buf);
2630         level = btrfs_header_level(buf);
2631
2632         if (!root->ref_cows && level == 0)
2633                 return 0;
2634
2635         if (inc)
2636                 process_func = btrfs_inc_extent_ref;
2637         else
2638                 process_func = btrfs_free_extent;
2639
2640         if (full_backref)
2641                 parent = buf->start;
2642         else
2643                 parent = 0;
2644
2645         for (i = 0; i < nritems; i++) {
2646                 if (level == 0) {
2647                         btrfs_item_key_to_cpu(buf, &key, i);
2648                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2649                                 continue;
2650                         fi = btrfs_item_ptr(buf, i,
2651                                             struct btrfs_file_extent_item);
2652                         if (btrfs_file_extent_type(buf, fi) ==
2653                             BTRFS_FILE_EXTENT_INLINE)
2654                                 continue;
2655                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2656                         if (bytenr == 0)
2657                                 continue;
2658
2659                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2660                         key.offset -= btrfs_file_extent_offset(buf, fi);
2661                         ret = process_func(trans, root, bytenr, num_bytes,
2662                                            parent, ref_root, key.objectid,
2663                                            key.offset);
2664                         if (ret)
2665                                 goto fail;
2666                 } else {
2667                         bytenr = btrfs_node_blockptr(buf, i);
2668                         num_bytes = btrfs_level_size(root, level - 1);
2669                         ret = process_func(trans, root, bytenr, num_bytes,
2670                                            parent, ref_root, level - 1, 0);
2671                         if (ret)
2672                                 goto fail;
2673                 }
2674         }
2675         return 0;
2676 fail:
2677         BUG();
2678         return ret;
2679 }
2680
2681 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2682                   struct extent_buffer *buf, int full_backref)
2683 {
2684         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2685 }
2686
2687 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2688                   struct extent_buffer *buf, int full_backref)
2689 {
2690         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2691 }
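/*
 * These two are typically paired when a block is COWed (hypothetical
 * call site): the new copy takes refs on everything it points to and
 * the old copy drops its refs:
 *
 *   btrfs_inc_ref(trans, root, cow, 1);    new block, full backref
 *   btrfs_dec_ref(trans, root, buf, 1);    old block
 */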
2692
2693 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2694                                  struct btrfs_root *root,
2695                                  struct btrfs_path *path,
2696                                  struct btrfs_block_group_cache *cache)
2697 {
2698         int ret;
2699         struct btrfs_root *extent_root = root->fs_info->extent_root;
2700         unsigned long bi;
2701         struct extent_buffer *leaf;
2702
2703         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2704         if (ret < 0)
2705                 goto fail;
2706         BUG_ON(ret);
2707
2708         leaf = path->nodes[0];
2709         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2710         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2711         btrfs_mark_buffer_dirty(leaf);
2712         btrfs_release_path(extent_root, path);
2713 fail:
2714         return ret;
2715 }
2719
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
                 struct btrfs_block_group_cache *cache)
{
        struct rb_node *node;
        spin_lock(&root->fs_info->block_group_cache_lock);
        node = rb_next(&cache->cache_node);
        btrfs_put_block_group(cache);
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group_cache,
                                 cache_node);
                btrfs_get_block_group(cache);
        } else {
                cache = NULL;
        }
        spin_unlock(&root->fs_info->block_group_cache_lock);
        return cache;
}

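/*
 * Get the free space cache inode for this block group ready for the
 * commit: create the inode if it does not exist yet, zero its
 * generation, truncate any stale contents and preallocate room for
 * the cache that will be written out later.  The resulting state is
 * recorded in block_group->disk_cache_state.
 */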
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path)
{
        struct btrfs_root *root = block_group->fs_info->tree_root;
        struct inode *inode = NULL;
        u64 alloc_hint = 0;
        int dcs = BTRFS_DC_ERROR;
        int num_pages = 0;
        int retries = 0;
        int ret = 0;

        /*
         * If this block group is smaller than 100 megs don't bother caching
         * the block group.
         */
        if (block_group->key.offset < (100 * 1024 * 1024)) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
                return 0;
        }

again:
        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
                ret = PTR_ERR(inode);
                btrfs_release_path(root, path);
                goto out;
        }

        if (IS_ERR(inode)) {
                BUG_ON(retries);
                retries++;

                if (block_group->ro)
                        goto out_free;

                ret = create_free_space_inode(root, trans, block_group, path);
                if (ret)
                        goto out_free;
                goto again;
        }

        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up
         * next time.
         */
        BTRFS_I(inode)->generation = 0;
        ret = btrfs_update_inode(trans, root, inode);
        WARN_ON(ret);

        if (i_size_read(inode) > 0) {
                ret = btrfs_truncate_free_space_cache(root, trans, path,
                                                      inode);
                if (ret)
                        goto out_put;
        }

        spin_lock(&block_group->lock);
        if (block_group->cached != BTRFS_CACHE_FINISHED) {
                /* We're not cached, don't bother trying to write stuff out */
                dcs = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
                goto out_put;
        }
        spin_unlock(&block_group->lock);

        num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
        if (!num_pages)
                num_pages = 1;

        /*
         * Just to make absolutely sure we have enough space, we're going to
         * preallocate 16 pages worth of space for each gigabyte of the block
         * group.  In practice we ought to use at most 8, but we need extra
         * space so we can add our header and have a terminator between the
         * extents and the bitmaps.
         */
        num_pages *= 16;
        num_pages *= PAGE_CACHE_SIZE;
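        /*
         * After the final multiply num_pages actually holds a byte count.
         * Worked example, assuming the common 4KiB PAGE_CACHE_SIZE: a 1GiB
         * block group gives num_pages = 1, so we preallocate
         * 16 * 4096 = 64KiB; a 1TiB block group scales that by 1024,
         * i.e. 64MiB.
         */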

        ret = btrfs_check_data_free_space(inode, num_pages);
        if (ret)
                goto out_put;

        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
                                              num_pages, num_pages,
                                              &alloc_hint);
        if (!ret)
                dcs = BTRFS_DC_SETUP;
        btrfs_free_reserved_data_space(inode, num_pages);
out_put:
        iput(inode);
out_free:
        btrfs_release_path(root, path);
out:
        spin_lock(&block_group->lock);
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);

        return ret;
}

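/*
 * Write out all dirty block group items and their free space caches.
 * This runs in three passes: first set up a cache inode for every
 * group still in the DC_CLEAR state, then write the dirty block group
 * items (restarting from the top if a group went back to DC_CLEAR
 * underneath us), and finally write out the caches that were marked
 * DC_NEED_WRITE.
 */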
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache;
        int err = 0;
        struct btrfs_path *path;
        u64 last = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        if (cache->disk_cache_state == BTRFS_DC_CLEAR)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }
                err = cache_save_setup(cache, trans, path);
                last = cache->key.objectid + cache->key.offset;
                btrfs_put_block_group(cache);
        }

        while (1) {
                if (last == 0) {
                        err = btrfs_run_delayed_refs(trans, root,
                                                     (unsigned long)-1);
                        BUG_ON(err);
                }

                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
                                btrfs_put_block_group(cache);
                                goto again;
                        }

                        if (cache->dirty)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                if (cache->disk_cache_state == BTRFS_DC_SETUP)
                        cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
                cache->dirty = 0;
                last = cache->key.objectid + cache->key.offset;

                err = write_one_cache_group(trans, root, path, cache);
                BUG_ON(err);
                btrfs_put_block_group(cache);
        }

        while (1) {
                /*
                 * This shouldn't be needed, since we're just marking our
                 * preallocated extent as written, but it can't hurt just
                 * in case.
                 */
                if (last == 0) {
                        err = btrfs_run_delayed_refs(trans, root,
                                                     (unsigned long)-1);
                        BUG_ON(err);
                }

                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        /*
                         * Really this shouldn't happen, but it could if we
                         * couldn't write the entire preallocated extent and
                         * splitting the extent resulted in a new block.
                         */
                        if (cache->dirty) {
                                btrfs_put_block_group(cache);
                                goto again;
                        }
                        if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                btrfs_write_out_cache(root, trans, cache, path);

                /*
                 * If we didn't have an error then the cache state is still
                 * NEED_WRITE, so we can set it to WRITTEN.
                 */
                if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
                        cache->disk_cache_state = BTRFS_DC_WRITTEN;
                last = cache->key.objectid + cache->key.offset;
                btrfs_put_block_group(cache);
        }

        btrfs_free_path(path);
        return 0;
}

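/*
 * Return 1 if the extent at 'bytenr' lives in a read-only block group,
 * or in no known block group at all; return 0 otherwise.
 */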
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
        struct btrfs_block_group_cache *block_group;
        int readonly = 0;

        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
        if (!block_group || block_group->ro)
                readonly = 1;
        if (block_group)
                btrfs_put_block_group(block_group);
        return readonly;
}

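/*
 * Fold total_bytes/bytes_used into the space_info that matches the
 * allocation type in 'flags', creating a fresh space_info the first
 * time a given type (data, metadata or system) is seen.  The matching
 * space_info is returned through 'space_info'.
 */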
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;
        int i;
        int factor;

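        /*
         * DUP, RAID1 and RAID10 store two copies of every block, so each
         * logical byte consumes two bytes of raw disk space.
         */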
        if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
                     BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        found = __find_space_info(info, flags);
        if (found) {
                spin_lock(&found->lock);
                found->total_bytes += total_bytes;
                found->disk_total += total_bytes * factor;
                found->bytes_used += bytes_used;
                found->disk_used += bytes_used * factor;
                found->full = 0;
                spin_unlock(&found->lock);
                *space_info = found;
                return 0;
        }
        found = kzalloc(sizeof(*found), GFP_NOFS);
        if (!found)
                return -ENOMEM;

        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&found->block_groups[i]);
        init_rwsem(&found->groups_sem);
        spin_lock_init(&found->lock);
        found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
                                BTRFS_BLOCK_GROUP_SYSTEM |
                                BTRFS_BLOCK_GROUP_METADATA);
        found->total_bytes = total_bytes;
        found->disk_total = total_bytes * factor;
        found->bytes_used = bytes_used;
        found->disk_used = bytes_used * factor;
        found->bytes_pinned = 0;
        found->bytes_reserved = 0;
        found->bytes_readonly = 0;
        found->bytes_may_use = 0;
        found->full = 0;
        found->force_alloc = 0;
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
        atomic_set(&found->caching_threads, 0);
        return 0;
}

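/*
 * Remember which striping/mirroring profiles are in use for the
 * allocation types named in 'flags', so later chunk allocations can
 * reuse a profile that is already on disk.
 */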
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
                                   BTRFS_BLOCK_GROUP_RAID1 |
                                   BTRFS_BLOCK_GROUP_RAID10 |
                                   BTRFS_BLOCK_GROUP_DUP);
        if (extra_flags) {
