Btrfs: finish ordered extents in their own thread
pandora-kernel.git: fs/btrfs/extent_io.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22 #include "locking.h"
23
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29
30 #define LEAK_DEBUG 0
31 #if LEAK_DEBUG
32 static DEFINE_SPINLOCK(leak_lock);
33 #endif
34
35 #define BUFFER_LRU_MAX 64
36
37 struct tree_entry {
38         u64 start;
39         u64 end;
40         struct rb_node rb_node;
41 };
42
43 struct extent_page_data {
44         struct bio *bio;
45         struct extent_io_tree *tree;
46         get_extent_t *get_extent;
47
48         /* tells writepage not to lock the state bits for this range;
49          * it still does the unlocking
50          */
51         unsigned int extent_locked:1;
52
53         /* tells the submit_bio code to use a WRITE_SYNC */
54         unsigned int sync_io:1;
55 };
56
57 static noinline void flush_write_bio(void *data);
58 static inline struct btrfs_fs_info *
59 tree_fs_info(struct extent_io_tree *tree)
60 {
61         return btrfs_sb(tree->mapping->host->i_sb);
62 }
63
64 int __init extent_io_init(void)
65 {
66         extent_state_cache = kmem_cache_create("extent_state",
67                         sizeof(struct extent_state), 0,
68                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
69         if (!extent_state_cache)
70                 return -ENOMEM;
71
72         extent_buffer_cache = kmem_cache_create("extent_buffers",
73                         sizeof(struct extent_buffer), 0,
74                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
75         if (!extent_buffer_cache)
76                 goto free_state_cache;
77         return 0;
78
79 free_state_cache:
80         kmem_cache_destroy(extent_state_cache);
81         return -ENOMEM;
82 }
83
84 void extent_io_exit(void)
85 {
86         struct extent_state *state;
87         struct extent_buffer *eb;
88
89         while (!list_empty(&states)) {
90                 state = list_entry(states.next, struct extent_state, leak_list);
91                 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
92                        "state %lu in tree %p refs %d\n",
93                        (unsigned long long)state->start,
94                        (unsigned long long)state->end,
95                        state->state, state->tree, atomic_read(&state->refs));
96                 list_del(&state->leak_list);
97                 kmem_cache_free(extent_state_cache, state);
98
99         }
100
101         while (!list_empty(&buffers)) {
102                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
103                 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
104                        "refs %d\n", (unsigned long long)eb->start,
105                        eb->len, atomic_read(&eb->refs));
106                 list_del(&eb->leak_list);
107                 kmem_cache_free(extent_buffer_cache, eb);
108         }
109         if (extent_state_cache)
110                 kmem_cache_destroy(extent_state_cache);
111         if (extent_buffer_cache)
112                 kmem_cache_destroy(extent_buffer_cache);
113 }
114
115 void extent_io_tree_init(struct extent_io_tree *tree,
116                          struct address_space *mapping)
117 {
118         tree->state = RB_ROOT;
119         INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
120         tree->ops = NULL;
121         tree->dirty_bytes = 0;
122         spin_lock_init(&tree->lock);
123         spin_lock_init(&tree->buffer_lock);
124         tree->mapping = mapping;
125 }
126
127 static struct extent_state *alloc_extent_state(gfp_t mask)
128 {
129         struct extent_state *state;
130 #if LEAK_DEBUG
131         unsigned long flags;
132 #endif
133
134         state = kmem_cache_alloc(extent_state_cache, mask);
135         if (!state)
136                 return state;
137         state->state = 0;
138         state->private = 0;
139         state->tree = NULL;
140 #if LEAK_DEBUG
141         spin_lock_irqsave(&leak_lock, flags);
142         list_add(&state->leak_list, &states);
143         spin_unlock_irqrestore(&leak_lock, flags);
144 #endif
145         atomic_set(&state->refs, 1);
146         init_waitqueue_head(&state->wq);
147         trace_alloc_extent_state(state, mask, _RET_IP_);
148         return state;
149 }
150
151 void free_extent_state(struct extent_state *state)
152 {
153         if (!state)
154                 return;
155         if (atomic_dec_and_test(&state->refs)) {
156 #if LEAK_DEBUG
157                 unsigned long flags;
158 #endif
159                 WARN_ON(state->tree);
160 #if LEAK_DEBUG
161                 spin_lock_irqsave(&leak_lock, flags);
162                 list_del(&state->leak_list);
163                 spin_unlock_irqrestore(&leak_lock, flags);
164 #endif
165                 trace_free_extent_state(state, _RET_IP_);
166                 kmem_cache_free(extent_state_cache, state);
167         }
168 }
169
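/*
 * insert 'node' into the rb tree keyed by byte offset.  If an existing
 * entry already covers 'offset', that entry's node is returned and the
 * new node is not inserted; otherwise NULL is returned.
 */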
170 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
171                                    struct rb_node *node)
172 {
173         struct rb_node **p = &root->rb_node;
174         struct rb_node *parent = NULL;
175         struct tree_entry *entry;
176
177         while (*p) {
178                 parent = *p;
179                 entry = rb_entry(parent, struct tree_entry, rb_node);
180
181                 if (offset < entry->start)
182                         p = &(*p)->rb_left;
183                 else if (offset > entry->end)
184                         p = &(*p)->rb_right;
185                 else
186                         return parent;
187         }
188
189         rb_link_node(node, parent, p);
190         rb_insert_color(node, root);
191         return NULL;
192 }
193
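/*
 * search the tree for the entry containing 'offset'.  If no such entry
 * exists, NULL is returned and *prev_ret/*next_ret (when non-NULL) are
 * set to the entries on either side of the gap.
 */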
194 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
195                                      struct rb_node **prev_ret,
196                                      struct rb_node **next_ret)
197 {
198         struct rb_root *root = &tree->state;
199         struct rb_node *n = root->rb_node;
200         struct rb_node *prev = NULL;
201         struct rb_node *orig_prev = NULL;
202         struct tree_entry *entry;
203         struct tree_entry *prev_entry = NULL;
204
205         while (n) {
206                 entry = rb_entry(n, struct tree_entry, rb_node);
207                 prev = n;
208                 prev_entry = entry;
209
210                 if (offset < entry->start)
211                         n = n->rb_left;
212                 else if (offset > entry->end)
213                         n = n->rb_right;
214                 else
215                         return n;
216         }
217
218         if (prev_ret) {
219                 orig_prev = prev;
220                 while (prev && offset > prev_entry->end) {
221                         prev = rb_next(prev);
222                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
223                 }
224                 *prev_ret = prev;
225                 prev = orig_prev;
226         }
227
228         if (next_ret) {
229                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
230                 while (prev && offset < prev_entry->start) {
231                         prev = rb_prev(prev);
232                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
233                 }
234                 *next_ret = prev;
235         }
236         return NULL;
237 }
238
239 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
240                                           u64 offset)
241 {
242         struct rb_node *prev = NULL;
243         struct rb_node *ret;
244
245         ret = __etree_search(tree, offset, &prev, NULL);
246         if (!ret)
247                 return prev;
248         return ret;
249 }
250
251 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
252                      struct extent_state *other)
253 {
254         if (tree->ops && tree->ops->merge_extent_hook)
255                 tree->ops->merge_extent_hook(tree->mapping->host, new,
256                                              other);
257 }
258
259 /*
260  * utility function to look for merge candidates inside a given range.
261  * Any extents with matching state are merged together into a single
262  * extent in the tree.  Extents with EXTENT_IOBITS in their state field
263  * are not merged because the end_io handlers need to be able to do
264  * operations on them without sleeping (or doing allocations/splits).
265  *
266  * This should be called with the tree lock held.
267  */
268 static void merge_state(struct extent_io_tree *tree,
269                         struct extent_state *state)
270 {
271         struct extent_state *other;
272         struct rb_node *other_node;
273
274         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
275                 return;
276
277         other_node = rb_prev(&state->rb_node);
278         if (other_node) {
279                 other = rb_entry(other_node, struct extent_state, rb_node);
280                 if (other->end == state->start - 1 &&
281                     other->state == state->state) {
282                         merge_cb(tree, state, other);
283                         state->start = other->start;
284                         other->tree = NULL;
285                         rb_erase(&other->rb_node, &tree->state);
286                         free_extent_state(other);
287                 }
288         }
289         other_node = rb_next(&state->rb_node);
290         if (other_node) {
291                 other = rb_entry(other_node, struct extent_state, rb_node);
292                 if (other->start == state->end + 1 &&
293                     other->state == state->state) {
294                         merge_cb(tree, state, other);
295                         state->end = other->end;
296                         other->tree = NULL;
297                         rb_erase(&other->rb_node, &tree->state);
298                         free_extent_state(other);
299                 }
300         }
301 }
302
303 static void set_state_cb(struct extent_io_tree *tree,
304                          struct extent_state *state, int *bits)
305 {
306         if (tree->ops && tree->ops->set_bit_hook)
307                 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
308 }
309
310 static void clear_state_cb(struct extent_io_tree *tree,
311                            struct extent_state *state, int *bits)
312 {
313         if (tree->ops && tree->ops->clear_bit_hook)
314                 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
315 }
316
317 static void set_state_bits(struct extent_io_tree *tree,
318                            struct extent_state *state, int *bits);
319
320 /*
321  * insert an extent_state struct into the tree.  'bits' are set on the
322  * struct before it is inserted.
323  *
324  * This may return -EEXIST if the extent is already there, in which case the
325  * state struct is freed.
326  *
327  * The tree lock is not taken internally.  This is a utility function and
328  * probably isn't what you want to call (see set/clear_extent_bit).
329  */
330 static int insert_state(struct extent_io_tree *tree,
331                         struct extent_state *state, u64 start, u64 end,
332                         int *bits)
333 {
334         struct rb_node *node;
335
336         if (end < start) {
337                 printk(KERN_ERR "btrfs end < start %llu %llu\n",
338                        (unsigned long long)end,
339                        (unsigned long long)start);
340                 WARN_ON(1);
341         }
342         state->start = start;
343         state->end = end;
344
345         set_state_bits(tree, state, bits);
346
347         node = tree_insert(&tree->state, end, &state->rb_node);
348         if (node) {
349                 struct extent_state *found;
350                 found = rb_entry(node, struct extent_state, rb_node);
351                 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
352                        "%llu %llu\n", (unsigned long long)found->start,
353                        (unsigned long long)found->end,
354                        (unsigned long long)start, (unsigned long long)end);
355                 return -EEXIST;
356         }
357         state->tree = tree;
358         merge_state(tree, state);
359         return 0;
360 }
361
362 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
363                      u64 split)
364 {
365         if (tree->ops && tree->ops->split_extent_hook)
366                 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
367 }
368
369 /*
370  * split a given extent state struct in two, inserting the preallocated
371  * struct 'prealloc' as the newly created second half.  'split' indicates an
372  * offset inside 'orig' where it should be split.
373  *
374  * Before calling,
375  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
376  * are two extent state structs in the tree:
377  * prealloc: [orig->start, split - 1]
378  * orig: [ split, orig->end ]
379  *
380  * The tree locks are not taken by this function. They need to be held
381  * by the caller.
382  */
383 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
384                        struct extent_state *prealloc, u64 split)
385 {
386         struct rb_node *node;
387
388         split_cb(tree, orig, split);
389
390         prealloc->start = orig->start;
391         prealloc->end = split - 1;
392         prealloc->state = orig->state;
393         orig->start = split;
394
395         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
396         if (node) {
397                 free_extent_state(prealloc);
398                 return -EEXIST;
399         }
400         prealloc->tree = tree;
401         return 0;
402 }
403
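/* return the state that immediately follows 'state' in the tree, or NULL */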
404 static struct extent_state *next_state(struct extent_state *state)
405 {
406         struct rb_node *next = rb_next(&state->rb_node);
407         if (next)
408                 return rb_entry(next, struct extent_state, rb_node);
409         else
410                 return NULL;
411 }
412
413 /*
414  * utility function to clear some bits in an extent state struct.
415  * it will optionally wake up anyone waiting on this state (wake == 1).
416  *
417  * If no bits are set on the state struct after clearing things, the
418  * struct is freed and removed from the tree
419  */
420 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
421                                             struct extent_state *state,
422                                             int *bits, int wake)
423 {
424         struct extent_state *next;
425         int bits_to_clear = *bits & ~EXTENT_CTLBITS;
426
427         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
428                 u64 range = state->end - state->start + 1;
429                 WARN_ON(range > tree->dirty_bytes);
430                 tree->dirty_bytes -= range;
431         }
432         clear_state_cb(tree, state, bits);
433         state->state &= ~bits_to_clear;
434         if (wake)
435                 wake_up(&state->wq);
436         if (state->state == 0) {
437                 next = next_state(state);
438                 if (state->tree) {
439                         rb_erase(&state->rb_node, &tree->state);
440                         state->tree = NULL;
441                         free_extent_state(state);
442                 } else {
443                         WARN_ON(1);
444                 }
445         } else {
446                 merge_state(tree, state);
447                 next = next_state(state);
448         }
449         return next;
450 }
451
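/*
 * make sure a preallocated extent_state is available, falling back to an
 * atomic allocation if the caller did not pass one in.
 */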
452 static struct extent_state *
453 alloc_extent_state_atomic(struct extent_state *prealloc)
454 {
455         if (!prealloc)
456                 prealloc = alloc_extent_state(GFP_ATOMIC);
457
458         return prealloc;
459 }
460
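/*
 * the extent tree was modified by another thread while we held the lock;
 * this is a fatal logic error.
 */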
461 void extent_io_tree_panic(struct extent_io_tree *tree, int err)
462 {
463         btrfs_panic(tree_fs_info(tree), err, "Locking error: "
464                     "Extent tree was modified by another "
465                     "thread while locked.");
466 }
467
468 /*
469  * clear some bits on a range in the tree.  This may require splitting
470  * or inserting elements in the tree, so the gfp mask is used to
471  * indicate which allocations or sleeping are allowed.
472  *
473  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
474  * the given range from the tree regardless of state (ie for truncate).
475  *
476  * the range [start, end] is inclusive.
477  *
478  * This takes the tree lock, and returns 0 on success and < 0 on error.
479  */
480 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
481                      int bits, int wake, int delete,
482                      struct extent_state **cached_state,
483                      gfp_t mask)
484 {
485         struct extent_state *state;
486         struct extent_state *cached;
487         struct extent_state *prealloc = NULL;
488         struct rb_node *node;
489         u64 last_end;
490         int err;
491         int clear = 0;
492
493         if (delete)
494                 bits |= ~EXTENT_CTLBITS;
495         bits |= EXTENT_FIRST_DELALLOC;
496
497         if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
498                 clear = 1;
499 again:
500         if (!prealloc && (mask & __GFP_WAIT)) {
501                 prealloc = alloc_extent_state(mask);
502                 if (!prealloc)
503                         return -ENOMEM;
504         }
505
506         spin_lock(&tree->lock);
507         if (cached_state) {
508                 cached = *cached_state;
509
510                 if (clear) {
511                         *cached_state = NULL;
512                         cached_state = NULL;
513                 }
514
515                 if (cached && cached->tree && cached->start <= start &&
516                     cached->end > start) {
517                         if (clear)
518                                 atomic_dec(&cached->refs);
519                         state = cached;
520                         goto hit_next;
521                 }
522                 if (clear)
523                         free_extent_state(cached);
524         }
525         /*
526          * this search will find the extents that end after
527          * our range starts
528          */
529         node = tree_search(tree, start);
530         if (!node)
531                 goto out;
532         state = rb_entry(node, struct extent_state, rb_node);
533 hit_next:
534         if (state->start > end)
535                 goto out;
536         WARN_ON(state->end < start);
537         last_end = state->end;
538
539         /* the state doesn't have the wanted bits, go ahead */
540         if (!(state->state & bits)) {
541                 state = next_state(state);
542                 goto next;
543         }
544
545         /*
546          *     | ---- desired range ---- |
547          *  | state | or
548          *  | ------------- state -------------- |
549          *
550          * We need to split the extent we found, and may flip
551          * bits on second half.
552          *
553          * If the extent we found extends past our range, we
554          * just split and search again.  It'll get split again
555          * the next time though.
556          *
557          * If the extent we found is inside our range, we clear
558          * the desired bit on it.
559          */
560
561         if (state->start < start) {
562                 prealloc = alloc_extent_state_atomic(prealloc);
563                 BUG_ON(!prealloc);
564                 err = split_state(tree, state, prealloc, start);
565                 if (err)
566                         extent_io_tree_panic(tree, err);
567
568                 prealloc = NULL;
569                 if (err)
570                         goto out;
571                 if (state->end <= end) {
572                         clear_state_bit(tree, state, &bits, wake);
573                         if (last_end == (u64)-1)
574                                 goto out;
575                         start = last_end + 1;
576                 }
577                 goto search_again;
578         }
579         /*
580          * | ---- desired range ---- |
581          *                        | state |
582          * We need to split the extent, and clear the bit
583          * on the first half
584          */
585         if (state->start <= end && state->end > end) {
586                 prealloc = alloc_extent_state_atomic(prealloc);
587                 BUG_ON(!prealloc);
588                 err = split_state(tree, state, prealloc, end + 1);
589                 if (err)
590                         extent_io_tree_panic(tree, err);
591
592                 if (wake)
593                         wake_up(&state->wq);
594
595                 clear_state_bit(tree, prealloc, &bits, wake);
596
597                 prealloc = NULL;
598                 goto out;
599         }
600
601         state = clear_state_bit(tree, state, &bits, wake);
602 next:
603         if (last_end == (u64)-1)
604                 goto out;
605         start = last_end + 1;
606         if (start <= end && state && !need_resched())
607                 goto hit_next;
608         goto search_again;
609
610 out:
611         spin_unlock(&tree->lock);
612         if (prealloc)
613                 free_extent_state(prealloc);
614
615         return 0;
616
617 search_again:
618         if (start > end)
619                 goto out;
620         spin_unlock(&tree->lock);
621         if (mask & __GFP_WAIT)
622                 cond_resched();
623         goto again;
624 }
625
626 static void wait_on_state(struct extent_io_tree *tree,
627                           struct extent_state *state)
628                 __releases(tree->lock)
629                 __acquires(tree->lock)
630 {
631         DEFINE_WAIT(wait);
632         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
633         spin_unlock(&tree->lock);
634         schedule();
635         spin_lock(&tree->lock);
636         finish_wait(&state->wq, &wait);
637 }
638
639 /*
640  * waits for one or more bits to clear on a range in the state tree.
641  * The range [start, end] is inclusive.
642  * The tree lock is taken by this function
643  */
644 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
645 {
646         struct extent_state *state;
647         struct rb_node *node;
648
649         spin_lock(&tree->lock);
650 again:
651         while (1) {
652                 /*
653                  * this search will find all the extents that end after
654                  * our range starts
655                  */
656                 node = tree_search(tree, start);
657                 if (!node)
658                         break;
659
660                 state = rb_entry(node, struct extent_state, rb_node);
661
662                 if (state->start > end)
663                         goto out;
664
665                 if (state->state & bits) {
666                         start = state->start;
667                         atomic_inc(&state->refs);
668                         wait_on_state(tree, state);
669                         free_extent_state(state);
670                         goto again;
671                 }
672                 start = state->end + 1;
673
674                 if (start > end)
675                         break;
676
677                 cond_resched_lock(&tree->lock);
678         }
679 out:
680         spin_unlock(&tree->lock);
681 }
682
683 static void set_state_bits(struct extent_io_tree *tree,
684                            struct extent_state *state,
685                            int *bits)
686 {
687         int bits_to_set = *bits & ~EXTENT_CTLBITS;
688
689         set_state_cb(tree, state, bits);
690         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
691                 u64 range = state->end - state->start + 1;
692                 tree->dirty_bytes += range;
693         }
694         state->state |= bits_to_set;
695 }
696
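/*
 * stash a referenced pointer to 'state' in *cached_ptr so later operations
 * on the same range can skip the tree search.  Only states carrying IO or
 * boundary bits are worth caching.
 */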
697 static void cache_state(struct extent_state *state,
698                         struct extent_state **cached_ptr)
699 {
700         if (cached_ptr && !(*cached_ptr)) {
701                 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
702                         *cached_ptr = state;
703                         atomic_inc(&state->refs);
704                 }
705         }
706 }
707
708 static void uncache_state(struct extent_state **cached_ptr)
709 {
710         if (cached_ptr && (*cached_ptr)) {
711                 struct extent_state *state = *cached_ptr;
712                 *cached_ptr = NULL;
713                 free_extent_state(state);
714         }
715 }
716
717 /*
718  * set some bits on a range in the tree.  This may require allocations or
719  * sleeping, so the gfp mask is used to indicate what is allowed.
720  *
721  * If any of the exclusive bits are set, this will fail with -EEXIST if some
722  * part of the range already has the desired bits set.  The start of the
723  * existing range is returned in failed_start in this case.
724  *
725  * [start, end] is inclusive.  This takes the tree lock.
726  */
727
728 static int __must_check
729 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
730                  int bits, int exclusive_bits, u64 *failed_start,
731                  struct extent_state **cached_state, gfp_t mask)
732 {
733         struct extent_state *state;
734         struct extent_state *prealloc = NULL;
735         struct rb_node *node;
736         int err = 0;
737         u64 last_start;
738         u64 last_end;
739
740         bits |= EXTENT_FIRST_DELALLOC;
741 again:
742         if (!prealloc && (mask & __GFP_WAIT)) {
743                 prealloc = alloc_extent_state(mask);
744                 BUG_ON(!prealloc);
745         }
746
747         spin_lock(&tree->lock);
748         if (cached_state && *cached_state) {
749                 state = *cached_state;
750                 if (state->start <= start && state->end > start &&
751                     state->tree) {
752                         node = &state->rb_node;
753                         goto hit_next;
754                 }
755         }
756         /*
757          * this search will find all the extents that end after
758          * our range starts.
759          */
760         node = tree_search(tree, start);
761         if (!node) {
762                 prealloc = alloc_extent_state_atomic(prealloc);
763                 BUG_ON(!prealloc);
764                 err = insert_state(tree, prealloc, start, end, &bits);
765                 if (err)
766                         extent_io_tree_panic(tree, err);
767
768                 prealloc = NULL;
769                 goto out;
770         }
771         state = rb_entry(node, struct extent_state, rb_node);
772 hit_next:
773         last_start = state->start;
774         last_end = state->end;
775
776         /*
777          * | ---- desired range ---- |
778          * | state |
779          *
780          * Just lock what we found and keep going
781          */
782         if (state->start == start && state->end <= end) {
783                 struct rb_node *next_node;
784                 if (state->state & exclusive_bits) {
785                         *failed_start = state->start;
786                         err = -EEXIST;
787                         goto out;
788                 }
789
790                 set_state_bits(tree, state, &bits);
791
792                 cache_state(state, cached_state);
793                 merge_state(tree, state);
794                 if (last_end == (u64)-1)
795                         goto out;
796
797                 start = last_end + 1;
798                 next_node = rb_next(&state->rb_node);
799                 if (next_node && start < end && prealloc && !need_resched()) {
800                         state = rb_entry(next_node, struct extent_state,
801                                          rb_node);
802                         if (state->start == start)
803                                 goto hit_next;
804                 }
805                 goto search_again;
806         }
807
808         /*
809          *     | ---- desired range ---- |
810          * | state |
811          *   or
812          * | ------------- state -------------- |
813          *
814          * We need to split the extent we found, and may flip bits on
815          * second half.
816          *
817          * If the extent we found extends past our
818          * range, we just split and search again.  It'll get split
819          * again the next time though.
820          *
821          * If the extent we found is inside our range, we set the
822          * desired bit on it.
823          */
824         if (state->start < start) {
825                 if (state->state & exclusive_bits) {
826                         *failed_start = start;
827                         err = -EEXIST;
828                         goto out;
829                 }
830
831                 prealloc = alloc_extent_state_atomic(prealloc);
832                 BUG_ON(!prealloc);
833                 err = split_state(tree, state, prealloc, start);
834                 if (err)
835                         extent_io_tree_panic(tree, err);
836
837                 prealloc = NULL;
838                 if (err)
839                         goto out;
840                 if (state->end <= end) {
841                         set_state_bits(tree, state, &bits);
842                         cache_state(state, cached_state);
843                         merge_state(tree, state);
844                         if (last_end == (u64)-1)
845                                 goto out;
846                         start = last_end + 1;
847                 }
848                 goto search_again;
849         }
850         /*
851          * | ---- desired range ---- |
852          *     | state | or               | state |
853          *
854          * There's a hole, we need to insert something in it and
855          * ignore the extent we found.
856          */
857         if (state->start > start) {
858                 u64 this_end;
859                 if (end < last_start)
860                         this_end = end;
861                 else
862                         this_end = last_start - 1;
863
864                 prealloc = alloc_extent_state_atomic(prealloc);
865                 BUG_ON(!prealloc);
866
867                 /*
868                  * Avoid freeing 'prealloc' if it can be merged with
869                  * the later extent.
870                  */
871                 err = insert_state(tree, prealloc, start, this_end,
872                                    &bits);
873                 if (err)
874                         extent_io_tree_panic(tree, err);
875
876                 cache_state(prealloc, cached_state);
877                 prealloc = NULL;
878                 start = this_end + 1;
879                 goto search_again;
880         }
881         /*
882          * | ---- desired range ---- |
883          *                        | state |
884          * We need to split the extent, and set the bit
885          * on the first half
886          */
887         if (state->start <= end && state->end > end) {
888                 if (state->state & exclusive_bits) {
889                         *failed_start = start;
890                         err = -EEXIST;
891                         goto out;
892                 }
893
894                 prealloc = alloc_extent_state_atomic(prealloc);
895                 BUG_ON(!prealloc);
896                 err = split_state(tree, state, prealloc, end + 1);
897                 if (err)
898                         extent_io_tree_panic(tree, err);
899
900                 set_state_bits(tree, prealloc, &bits);
901                 cache_state(prealloc, cached_state);
902                 merge_state(tree, prealloc);
903                 prealloc = NULL;
904                 goto out;
905         }
906
907         goto search_again;
908
909 out:
910         spin_unlock(&tree->lock);
911         if (prealloc)
912                 free_extent_state(prealloc);
913
914         return err;
915
916 search_again:
917         if (start > end)
918                 goto out;
919         spin_unlock(&tree->lock);
920         if (mask & __GFP_WAIT)
921                 cond_resched();
922         goto again;
923 }
924
925 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
926                    u64 *failed_start, struct extent_state **cached_state,
927                    gfp_t mask)
928 {
929         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
930                                 cached_state, mask);
931 }
932
933
934 /**
935  * convert_extent_bit - convert all bits in a given range from one bit to another
936  * @tree:       the io tree to search
937  * @start:      the start offset in bytes
938  * @end:        the end offset in bytes (inclusive)
939  * @bits:       the bits to set in this range
940  * @clear_bits: the bits to clear in this range
941  * @mask:       the allocation mask
942  *
943  * This will go through and set bits for the given range.  If any states exist
944  * already in this range they are set with the given bit and cleared of the
945  * clear_bits.  This is only meant to be used by things that are mergeable, ie
946  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
947  * boundary bits like LOCK.
948  */
949 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
950                        int bits, int clear_bits, gfp_t mask)
951 {
952         struct extent_state *state;
953         struct extent_state *prealloc = NULL;
954         struct rb_node *node;
955         int err = 0;
956         u64 last_start;
957         u64 last_end;
958
959 again:
960         if (!prealloc && (mask & __GFP_WAIT)) {
961                 prealloc = alloc_extent_state(mask);
962                 if (!prealloc)
963                         return -ENOMEM;
964         }
965
966         spin_lock(&tree->lock);
967         /*
968          * this search will find all the extents that end after
969          * our range starts.
970          */
971         node = tree_search(tree, start);
972         if (!node) {
973                 prealloc = alloc_extent_state_atomic(prealloc);
974                 if (!prealloc) {
975                         err = -ENOMEM;
976                         goto out;
977                 }
978                 err = insert_state(tree, prealloc, start, end, &bits);
979                 prealloc = NULL;
980                 if (err)
981                         extent_io_tree_panic(tree, err);
982                 goto out;
983         }
984         state = rb_entry(node, struct extent_state, rb_node);
985 hit_next:
986         last_start = state->start;
987         last_end = state->end;
988
989         /*
990          * | ---- desired range ---- |
991          * | state |
992          *
993          * Just lock what we found and keep going
994          */
995         if (state->start == start && state->end <= end) {
996                 struct rb_node *next_node;
997
998                 set_state_bits(tree, state, &bits);
999                 clear_state_bit(tree, state, &clear_bits, 0);
1000                 if (last_end == (u64)-1)
1001                         goto out;
1002
1003                 start = last_end + 1;
1004                 next_node = rb_next(&state->rb_node);
1005                 if (next_node && start < end && prealloc && !need_resched()) {
1006                         state = rb_entry(next_node, struct extent_state,
1007                                          rb_node);
1008                         if (state->start == start)
1009                                 goto hit_next;
1010                 }
1011                 goto search_again;
1012         }
1013
1014         /*
1015          *     | ---- desired range ---- |
1016          * | state |
1017          *   or
1018          * | ------------- state -------------- |
1019          *
1020          * We need to split the extent we found, and may flip bits on
1021          * second half.
1022          *
1023          * If the extent we found extends past our
1024          * range, we just split and search again.  It'll get split
1025          * again the next time though.
1026          *
1027          * If the extent we found is inside our range, we set the
1028          * desired bit on it.
1029          */
1030         if (state->start < start) {
1031                 prealloc = alloc_extent_state_atomic(prealloc);
1032                 if (!prealloc) {
1033                         err = -ENOMEM;
1034                         goto out;
1035                 }
1036                 err = split_state(tree, state, prealloc, start);
1037                 if (err)
1038                         extent_io_tree_panic(tree, err);
1039                 prealloc = NULL;
1040                 if (err)
1041                         goto out;
1042                 if (state->end <= end) {
1043                         set_state_bits(tree, state, &bits);
1044                         clear_state_bit(tree, state, &clear_bits, 0);
1045                         if (last_end == (u64)-1)
1046                                 goto out;
1047                         start = last_end + 1;
1048                 }
1049                 goto search_again;
1050         }
1051         /*
1052          * | ---- desired range ---- |
1053          *     | state | or               | state |
1054          *
1055          * There's a hole, we need to insert something in it and
1056          * ignore the extent we found.
1057          */
1058         if (state->start > start) {
1059                 u64 this_end;
1060                 if (end < last_start)
1061                         this_end = end;
1062                 else
1063                         this_end = last_start - 1;
1064
1065                 prealloc = alloc_extent_state_atomic(prealloc);
1066                 if (!prealloc) {
1067                         err = -ENOMEM;
1068                         goto out;
1069                 }
1070
1071                 /*
1072                  * Avoid freeing 'prealloc' if it can be merged with
1073                  * the later extent.
1074                  */
1075                 err = insert_state(tree, prealloc, start, this_end,
1076                                    &bits);
1077                 if (err)
1078                         extent_io_tree_panic(tree, err);
1079                 prealloc = NULL;
1080                 start = this_end + 1;
1081                 goto search_again;
1082         }
1083         /*
1084          * | ---- desired range ---- |
1085          *                        | state |
1086          * We need to split the extent, and set the bit
1087          * on the first half
1088          */
1089         if (state->start <= end && state->end > end) {
1090                 prealloc = alloc_extent_state_atomic(prealloc);
1091                 if (!prealloc) {
1092                         err = -ENOMEM;
1093                         goto out;
1094                 }
1095
1096                 err = split_state(tree, state, prealloc, end + 1);
1097                 if (err)
1098                         extent_io_tree_panic(tree, err);
1099
1100                 set_state_bits(tree, prealloc, &bits);
1101                 clear_state_bit(tree, prealloc, &clear_bits, 0);
1102                 prealloc = NULL;
1103                 goto out;
1104         }
1105
1106         goto search_again;
1107
1108 out:
1109         spin_unlock(&tree->lock);
1110         if (prealloc)
1111                 free_extent_state(prealloc);
1112
1113         return err;
1114
1115 search_again:
1116         if (start > end)
1117                 goto out;
1118         spin_unlock(&tree->lock);
1119         if (mask & __GFP_WAIT)
1120                 cond_resched();
1121         goto again;
1122 }
1123
1124 /* wrappers around set/clear extent bit */
1125 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1126                      gfp_t mask)
1127 {
1128         return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1129                               NULL, mask);
1130 }
1131
1132 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1133                     int bits, gfp_t mask)
1134 {
1135         return set_extent_bit(tree, start, end, bits, NULL,
1136                               NULL, mask);
1137 }
1138
1139 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1140                       int bits, gfp_t mask)
1141 {
1142         return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1143 }
1144
1145 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1146                         struct extent_state **cached_state, gfp_t mask)
1147 {
1148         return set_extent_bit(tree, start, end,
1149                               EXTENT_DELALLOC | EXTENT_UPTODATE,
1150                               NULL, cached_state, mask);
1151 }
1152
1153 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1154                        gfp_t mask)
1155 {
1156         return clear_extent_bit(tree, start, end,
1157                                 EXTENT_DIRTY | EXTENT_DELALLOC |
1158                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1159 }
1160
1161 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1162                      gfp_t mask)
1163 {
1164         return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1165                               NULL, mask);
1166 }
1167
1168 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1169                         struct extent_state **cached_state, gfp_t mask)
1170 {
1171         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1172                               cached_state, mask);
1173 }
1174
1175 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1176                           struct extent_state **cached_state, gfp_t mask)
1177 {
1178         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1179                                 cached_state, mask);
1180 }
1181
1182 /*
1183  * either insert or lock the state struct covering [start, end], sleeping
1184  * until the whole range can be locked.
1185  */
1186 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1187                      int bits, struct extent_state **cached_state)
1188 {
1189         int err;
1190         u64 failed_start;
1191         while (1) {
1192                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1193                                        EXTENT_LOCKED, &failed_start,
1194                                        cached_state, GFP_NOFS);
1195                 if (err == -EEXIST) {
1196                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1197                         start = failed_start;
1198                 } else
1199                         break;
1200                 WARN_ON(start > end);
1201         }
1202         return err;
1203 }
1204
1205 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1206 {
1207         return lock_extent_bits(tree, start, end, 0, NULL);
1208 }
1209
1210 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1211 {
1212         int err;
1213         u64 failed_start;
1214
1215         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1216                                &failed_start, NULL, GFP_NOFS);
1217         if (err == -EEXIST) {
1218                 if (failed_start > start)
1219                         clear_extent_bit(tree, start, failed_start - 1,
1220                                          EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1221                 return 0;
1222         }
1223         return 1;
1224 }
1225
1226 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1227                          struct extent_state **cached, gfp_t mask)
1228 {
1229         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1230                                 mask);
1231 }
1232
1233 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1234 {
1235         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1236                                 GFP_NOFS);
1237 }
1238
1239 /*
1240  * helper function to set both pages and extents in the tree writeback
1241  */
1242 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1243 {
1244         unsigned long index = start >> PAGE_CACHE_SHIFT;
1245         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1246         struct page *page;
1247
1248         while (index <= end_index) {
1249                 page = find_get_page(tree->mapping, index);
1250                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1251                 set_page_writeback(page);
1252                 page_cache_release(page);
1253                 index++;
1254         }
1255         return 0;
1256 }
1257
1258 /* find the first state struct with 'bits' set after 'start', and
1259  * return it.  tree->lock must be held.  NULL will be returned if
1260  * nothing was found after 'start'
1261  */
1262 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1263                                                  u64 start, int bits)
1264 {
1265         struct rb_node *node;
1266         struct extent_state *state;
1267
1268         /*
1269          * this search will find all the extents that end after
1270          * our range starts.
1271          */
1272         node = tree_search(tree, start);
1273         if (!node)
1274                 goto out;
1275
1276         while (1) {
1277                 state = rb_entry(node, struct extent_state, rb_node);
1278                 if (state->end >= start && (state->state & bits))
1279                         return state;
1280
1281                 node = rb_next(node);
1282                 if (!node)
1283                         break;
1284         }
1285 out:
1286         return NULL;
1287 }
1288
1289 /*
1290  * find the first offset in the io tree with 'bits' set. zero is
1291  * returned if we find something, and *start_ret and *end_ret are
1292  * set to reflect the state struct that was found.
1293  *
1294  * If nothing was found, 1 is returned; if something was found, 0 is returned.
1295  */
1296 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1297                           u64 *start_ret, u64 *end_ret, int bits)
1298 {
1299         struct extent_state *state;
1300         int ret = 1;
1301
1302         spin_lock(&tree->lock);
1303         state = find_first_extent_bit_state(tree, start, bits);
1304         if (state) {
1305                 *start_ret = state->start;
1306                 *end_ret = state->end;
1307                 ret = 0;
1308         }
1309         spin_unlock(&tree->lock);
1310         return ret;
1311 }
1312
1313 /*
1314  * find a contiguous range of bytes in the file marked as delalloc, not
1315  * more than 'max_bytes'.  start and end are used to return the range.
1316  *
1317  * 1 is returned if we find something, 0 if nothing was in the tree
1318  */
1319 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1320                                         u64 *start, u64 *end, u64 max_bytes,
1321                                         struct extent_state **cached_state)
1322 {
1323         struct rb_node *node;
1324         struct extent_state *state;
1325         u64 cur_start = *start;
1326         u64 found = 0;
1327         u64 total_bytes = 0;
1328
1329         spin_lock(&tree->lock);
1330
1331         /*
1332          * this search will find all the extents that end after
1333          * our range starts.
1334          */
1335         node = tree_search(tree, cur_start);
1336         if (!node) {
1337                 if (!found)
1338                         *end = (u64)-1;
1339                 goto out;
1340         }
1341
1342         while (1) {
1343                 state = rb_entry(node, struct extent_state, rb_node);
1344                 if (found && (state->start != cur_start ||
1345                               (state->state & EXTENT_BOUNDARY))) {
1346                         goto out;
1347                 }
1348                 if (!(state->state & EXTENT_DELALLOC)) {
1349                         if (!found)
1350                                 *end = state->end;
1351                         goto out;
1352                 }
1353                 if (!found) {
1354                         *start = state->start;
1355                         *cached_state = state;
1356                         atomic_inc(&state->refs);
1357                 }
1358                 found++;
1359                 *end = state->end;
1360                 cur_start = state->end + 1;
1361                 node = rb_next(node);
1362                 if (!node)
1363                         break;
1364                 total_bytes += state->end - state->start + 1;
1365                 if (total_bytes >= max_bytes)
1366                         break;
1367         }
1368 out:
1369         spin_unlock(&tree->lock);
1370         return found;
1371 }
1372
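/* unlock all pages in [start, end] except locked_page, which the caller holds */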
1373 static noinline void __unlock_for_delalloc(struct inode *inode,
1374                                            struct page *locked_page,
1375                                            u64 start, u64 end)
1376 {
1377         int ret;
1378         struct page *pages[16];
1379         unsigned long index = start >> PAGE_CACHE_SHIFT;
1380         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1381         unsigned long nr_pages = end_index - index + 1;
1382         int i;
1383
1384         if (index == locked_page->index && end_index == index)
1385                 return;
1386
1387         while (nr_pages > 0) {
1388                 ret = find_get_pages_contig(inode->i_mapping, index,
1389                                      min_t(unsigned long, nr_pages,
1390                                      ARRAY_SIZE(pages)), pages);
1391                 for (i = 0; i < ret; i++) {
1392                         if (pages[i] != locked_page)
1393                                 unlock_page(pages[i]);
1394                         page_cache_release(pages[i]);
1395                 }
1396                 nr_pages -= ret;
1397                 index += ret;
1398                 cond_resched();
1399         }
1400 }
1401
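/*
 * lock every page in [delalloc_start, delalloc_end] except locked_page,
 * which the caller already holds.  Returns -EAGAIN (after unlocking what
 * was locked) if any page has gone away or is no longer dirty.
 */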
1402 static noinline int lock_delalloc_pages(struct inode *inode,
1403                                         struct page *locked_page,
1404                                         u64 delalloc_start,
1405                                         u64 delalloc_end)
1406 {
1407         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1408         unsigned long start_index = index;
1409         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1410         unsigned long pages_locked = 0;
1411         struct page *pages[16];
1412         unsigned long nrpages;
1413         int ret;
1414         int i;
1415
1416         /* the caller is responsible for locking the start index */
1417         if (index == locked_page->index && index == end_index)
1418                 return 0;
1419
1420         /* skip the page at the start index */
1421         nrpages = end_index - index + 1;
1422         while (nrpages > 0) {
1423                 ret = find_get_pages_contig(inode->i_mapping, index,
1424                                      min_t(unsigned long,
1425                                      nrpages, ARRAY_SIZE(pages)), pages);
1426                 if (ret == 0) {
1427                         ret = -EAGAIN;
1428                         goto done;
1429                 }
1430                 /* now we have an array of pages, lock them all */
1431                 for (i = 0; i < ret; i++) {
1432                         /*
1433                          * the caller is taking responsibility for
1434                          * locked_page
1435                          */
1436                         if (pages[i] != locked_page) {
1437                                 lock_page(pages[i]);
1438                                 if (!PageDirty(pages[i]) ||
1439                                     pages[i]->mapping != inode->i_mapping) {
1440                                         ret = -EAGAIN;
1441                                         unlock_page(pages[i]);
1442                                         page_cache_release(pages[i]);
1443                                         goto done;
1444                                 }
1445                         }
1446                         page_cache_release(pages[i]);
1447                         pages_locked++;
1448                 }
1449                 nrpages -= ret;
1450                 index += ret;
1451                 cond_resched();
1452         }
1453         ret = 0;
1454 done:
1455         if (ret && pages_locked) {
1456                 __unlock_for_delalloc(inode, locked_page,
1457                               delalloc_start,
1458                               ((u64)(start_index + pages_locked - 1)) <<
1459                               PAGE_CACHE_SHIFT);
1460         }
1461         return ret;
1462 }
1463
1464 /*
1465  * find a contiguous range of bytes in the file marked as delalloc, not
1466  * more than 'max_bytes'.  start and end are used to return the range.
1467  *
1468  * 1 is returned if we find something, 0 if nothing was in the tree
1469  */
1470 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1471                                              struct extent_io_tree *tree,
1472                                              struct page *locked_page,
1473                                              u64 *start, u64 *end,
1474                                              u64 max_bytes)
1475 {
1476         u64 delalloc_start;
1477         u64 delalloc_end;
1478         u64 found;
1479         struct extent_state *cached_state = NULL;
1480         int ret;
1481         int loops = 0;
1482
1483 again:
1484         /* step one, find a bunch of delalloc bytes starting at start */
1485         delalloc_start = *start;
1486         delalloc_end = 0;
1487         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1488                                     max_bytes, &cached_state);
1489         if (!found || delalloc_end <= *start) {
1490                 *start = delalloc_start;
1491                 *end = delalloc_end;
1492                 free_extent_state(cached_state);
1493                 return found;
1494         }
1495
1496         /*
1497          * start comes from the offset of locked_page.  We have to lock
1498          * pages in order, so we can't process delalloc bytes before
1499          * locked_page
1500          */
1501         if (delalloc_start < *start)
1502                 delalloc_start = *start;
1503
1504         /*
1505          * make sure to limit the number of pages we try to lock down
1506          * if we're looping.
1507          */
1508         if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1509                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1510
1511         /* step two, lock all the pages after the page that has start */
1512         ret = lock_delalloc_pages(inode, locked_page,
1513                                   delalloc_start, delalloc_end);
1514         if (ret == -EAGAIN) {
1515                 /* some of the pages are gone, let's avoid looping by
1516                  * shortening the size of the delalloc range we're searching
1517                  */
1518                 free_extent_state(cached_state);
1519                 if (!loops) {
1520                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1521                         max_bytes = PAGE_CACHE_SIZE - offset;
1522                         loops = 1;
1523                         goto again;
1524                 } else {
1525                         found = 0;
1526                         goto out_failed;
1527                 }
1528         }
1529         BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1530
1531         /* step three, lock the state bits for the whole range */
1532         lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1533
1534         /* then test to make sure it is all still delalloc */
1535         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1536                              EXTENT_DELALLOC, 1, cached_state);
1537         if (!ret) {
1538                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1539                                      &cached_state, GFP_NOFS);
1540                 __unlock_for_delalloc(inode, locked_page,
1541                               delalloc_start, delalloc_end);
1542                 cond_resched();
1543                 goto again;
1544         }
1545         free_extent_state(cached_state);
1546         *start = delalloc_start;
1547         *end = delalloc_end;
1548 out_failed:
1549         return found;
1550 }
1551
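/*
 * helper for the writeback paths: clear the given bits in the extent io
 * tree for [start, end] and then walk every page in that range, applying
 * the requested page operations (clear dirty, set/end writeback, unlock).
 * locked_page only gets Private2 set when requested; it is otherwise
 * skipped and stays locked for the caller.
 */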
1552 int extent_clear_unlock_delalloc(struct inode *inode,
1553                                 struct extent_io_tree *tree,
1554                                 u64 start, u64 end, struct page *locked_page,
1555                                 unsigned long op)
1556 {
1557         int ret;
1558         struct page *pages[16];
1559         unsigned long index = start >> PAGE_CACHE_SHIFT;
1560         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1561         unsigned long nr_pages = end_index - index + 1;
1562         int i;
1563         int clear_bits = 0;
1564
1565         if (op & EXTENT_CLEAR_UNLOCK)
1566                 clear_bits |= EXTENT_LOCKED;
1567         if (op & EXTENT_CLEAR_DIRTY)
1568                 clear_bits |= EXTENT_DIRTY;
1569
1570         if (op & EXTENT_CLEAR_DELALLOC)
1571                 clear_bits |= EXTENT_DELALLOC;
1572
1573         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1574         if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1575                     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1576                     EXTENT_SET_PRIVATE2)))
1577                 return 0;
1578
1579         while (nr_pages > 0) {
1580                 ret = find_get_pages_contig(inode->i_mapping, index,
1581                                      min_t(unsigned long,
1582                                      nr_pages, ARRAY_SIZE(pages)), pages);
1583                 for (i = 0; i < ret; i++) {
1584
1585                         if (op & EXTENT_SET_PRIVATE2)
1586                                 SetPagePrivate2(pages[i]);
1587
1588                         if (pages[i] == locked_page) {
1589                                 page_cache_release(pages[i]);
1590                                 continue;
1591                         }
1592                         if (op & EXTENT_CLEAR_DIRTY)
1593                                 clear_page_dirty_for_io(pages[i]);
1594                         if (op & EXTENT_SET_WRITEBACK)
1595                                 set_page_writeback(pages[i]);
1596                         if (op & EXTENT_END_WRITEBACK)
1597                                 end_page_writeback(pages[i]);
1598                         if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1599                                 unlock_page(pages[i]);
1600                         page_cache_release(pages[i]);
1601                 }
1602                 nr_pages -= ret;
1603                 index += ret;
1604                 cond_resched();
1605         }
1606         return 0;
1607 }
1608
1609 /*
1610  * count the number of bytes in the tree that have the given bits
1611  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1612  * cached.  The total number found is returned.
1613  */
1614 u64 count_range_bits(struct extent_io_tree *tree,
1615                      u64 *start, u64 search_end, u64 max_bytes,
1616                      unsigned long bits, int contig)
1617 {
1618         struct rb_node *node;
1619         struct extent_state *state;
1620         u64 cur_start = *start;
1621         u64 total_bytes = 0;
1622         u64 last = 0;
1623         int found = 0;
1624
1625         if (search_end <= cur_start) {
1626                 WARN_ON(1);
1627                 return 0;
1628         }
1629
1630         spin_lock(&tree->lock);
1631         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1632                 total_bytes = tree->dirty_bytes;
1633                 goto out;
1634         }
1635         /*
1636          * this search will find all the extents that end after
1637          * our range starts.
1638          */
1639         node = tree_search(tree, cur_start);
1640         if (!node)
1641                 goto out;
1642
1643         while (1) {
1644                 state = rb_entry(node, struct extent_state, rb_node);
1645                 if (state->start > search_end)
1646                         break;
1647                 if (contig && found && state->start > last + 1)
1648                         break;
1649                 if (state->end >= cur_start && (state->state & bits) == bits) {
1650                         total_bytes += min(search_end, state->end) + 1 -
1651                                        max(cur_start, state->start);
1652                         if (total_bytes >= max_bytes)
1653                                 break;
1654                         if (!found) {
1655                                 *start = max(cur_start, state->start);
1656                                 found = 1;
1657                         }
1658                         last = state->end;
1659                 } else if (contig && found) {
1660                         break;
1661                 }
1662                 node = rb_next(node);
1663                 if (!node)
1664                         break;
1665         }
1666 out:
1667         spin_unlock(&tree->lock);
1668         return total_bytes;
1669 }
1670
1671 /*
1672  * set the private field for a given byte offset in the tree.  If there isn't
1673  * an extent_state starting at that exact offset, -ENOENT is returned.
1674  */
1675 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1676 {
1677         struct rb_node *node;
1678         struct extent_state *state;
1679         int ret = 0;
1680
1681         spin_lock(&tree->lock);
1682         /*
1683          * this search will find all the extents that end after
1684          * our range starts.
1685          */
1686         node = tree_search(tree, start);
1687         if (!node) {
1688                 ret = -ENOENT;
1689                 goto out;
1690         }
1691         state = rb_entry(node, struct extent_state, rb_node);
1692         if (state->start != start) {
1693                 ret = -ENOENT;
1694                 goto out;
1695         }
1696         state->private = private;
1697 out:
1698         spin_unlock(&tree->lock);
1699         return ret;
1700 }
1701
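/*
 * read back the private field stored for a given byte offset in the tree.
 * Returns -ENOENT if no extent_state starts at that exact offset.
 */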
1702 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1703 {
1704         struct rb_node *node;
1705         struct extent_state *state;
1706         int ret = 0;
1707
1708         spin_lock(&tree->lock);
1709         /*
1710          * this search will find all the extents that end after
1711          * our range starts.
1712          */
1713         node = tree_search(tree, start);
1714         if (!node) {
1715                 ret = -ENOENT;
1716                 goto out;
1717         }
1718         state = rb_entry(node, struct extent_state, rb_node);
1719         if (state->start != start) {
1720                 ret = -ENOENT;
1721                 goto out;
1722         }
1723         *private = state->private;
1724 out:
1725         spin_unlock(&tree->lock);
1726         return ret;
1727 }
1728
1729 /*
1730  * searches a range in the state tree for a given mask.
1731  * If 'filled' == 1, this returns 1 only if the whole range is covered
1732  * by extents that have the bits set.  Otherwise, 1 is returned if any
1733  * bit in the range is found set.
1734  */
1735 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1736                    int bits, int filled, struct extent_state *cached)
1737 {
1738         struct extent_state *state = NULL;
1739         struct rb_node *node;
1740         int bitset = 0;
1741
1742         spin_lock(&tree->lock);
1743         if (cached && cached->tree && cached->start <= start &&
1744             cached->end > start)
1745                 node = &cached->rb_node;
1746         else
1747                 node = tree_search(tree, start);
1748         while (node && start <= end) {
1749                 state = rb_entry(node, struct extent_state, rb_node);
1750
1751                 if (filled && state->start > start) {
1752                         bitset = 0;
1753                         break;
1754                 }
1755
1756                 if (state->start > end)
1757                         break;
1758
1759                 if (state->state & bits) {
1760                         bitset = 1;
1761                         if (!filled)
1762                                 break;
1763                 } else if (filled) {
1764                         bitset = 0;
1765                         break;
1766                 }
1767
1768                 if (state->end == (u64)-1)
1769                         break;
1770
1771                 start = state->end + 1;
1772                 if (start > end)
1773                         break;
1774                 node = rb_next(node);
1775                 if (!node) {
1776                         if (filled)
1777                                 bitset = 0;
1778                         break;
1779                 }
1780         }
1781         spin_unlock(&tree->lock);
1782         return bitset;
1783 }
1784
1785 /*
1786  * helper function to set a given page up to date if all the
1787  * extents in the tree for that page are up to date
1788  */
1789 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1790 {
1791         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1792         u64 end = start + PAGE_CACHE_SIZE - 1;
1793         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1794                 SetPageUptodate(page);
1795 }
1796
1797 /*
1798  * helper function to unlock a page if all the extents in the tree
1799  * for that page are unlocked
1800  */
1801 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1802 {
1803         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1804         u64 end = start + PAGE_CACHE_SIZE - 1;
1805         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1806                 unlock_page(page);
1807 }
1808
1809 /*
1810  * helper called when a partial-page write completes; it simply
1811  * ends writeback on the page
1812  */
1813 static void check_page_writeback(struct extent_io_tree *tree,
1814                                  struct page *page)
1815 {
1816         end_page_writeback(page);
1817 }
1818
1819 /*
1820  * When IO fails, either with EIO or a csum verification failure, we
1821  * try other mirrors that might have a good copy of the data.  This
1822  * io_failure_record is used to record state as we go through all the
1823  * mirrors.  If another mirror has good data, the page is set up to date
1824  * and things continue.  If a good mirror can't be found, the original
1825  * bio end_io callback is called to indicate things have failed.
1826  */
1827 struct io_failure_record {
1828         struct page *page;
1829         u64 start;
1830         u64 len;
1831         u64 logical;
1832         unsigned long bio_flags;
1833         int this_mirror;
1834         int failed_mirror;
1835         int in_validation;
1836 };
1837
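/*
 * release an io_failure_record: drop its bookkeeping bits from the
 * inode's io_failure_tree, clear EXTENT_DAMAGED from the io_tree when the
 * repair actually happened, and free the record itself.
 */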
1838 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1839                                 int did_repair)
1840 {
1841         int ret;
1842         int err = 0;
1843         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1844
1845         set_state_private(failure_tree, rec->start, 0);
1846         ret = clear_extent_bits(failure_tree, rec->start,
1847                                 rec->start + rec->len - 1,
1848                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1849         if (ret)
1850                 err = ret;
1851
1852         if (did_repair) {
1853                 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1854                                         rec->start + rec->len - 1,
1855                                         EXTENT_DAMAGED, GFP_NOFS);
1856                 if (ret && !err)
1857                         err = ret;
1858         }
1859
1860         kfree(rec);
1861         return err;
1862 }
1863
1864 static void repair_io_failure_callback(struct bio *bio, int err)
1865 {
1866         complete(bio->bi_private);
1867 }
1868
1869 /*
1870  * this bypasses the standard btrfs submit functions deliberately, as
1871  * the standard behavior is to write all copies in a raid setup. here we only
1872  * want to write the one bad copy. so we do the mapping for ourselves and issue
1873  * submit_bio directly.
1874  * to avoid any synchronization issues, wait for the write to finish,
1875  * which also keeps the read that triggered the error from completing.
1876  * currently, there can be no more than two copies of every data bit. thus,
1877  * exactly one rewrite is required.
1878  */
1879 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1880                         u64 length, u64 logical, struct page *page,
1881                         int mirror_num)
1882 {
1883         struct bio *bio;
1884         struct btrfs_device *dev;
1885         DECLARE_COMPLETION_ONSTACK(compl);
1886         u64 map_length = 0;
1887         u64 sector;
1888         struct btrfs_bio *bbio = NULL;
1889         int ret;
1890
1891         BUG_ON(!mirror_num);
1892
1893         bio = bio_alloc(GFP_NOFS, 1);
1894         if (!bio)
1895                 return -EIO;
1896         bio->bi_private = &compl;
1897         bio->bi_end_io = repair_io_failure_callback;
1898         bio->bi_size = 0;
1899         map_length = length;
1900
1901         ret = btrfs_map_block(map_tree, WRITE, logical,
1902                               &map_length, &bbio, mirror_num);
1903         if (ret) {
1904                 bio_put(bio);
1905                 return -EIO;
1906         }
1907         BUG_ON(mirror_num != bbio->mirror_num);
1908         sector = bbio->stripes[mirror_num-1].physical >> 9;
1909         bio->bi_sector = sector;
1910         dev = bbio->stripes[mirror_num-1].dev;
1911         kfree(bbio);
1912         if (!dev || !dev->bdev || !dev->writeable) {
1913                 bio_put(bio);
1914                 return -EIO;
1915         }
1916         bio->bi_bdev = dev->bdev;
1917         bio_add_page(bio, page, length, start-page_offset(page));
1918         btrfsic_submit_bio(WRITE_SYNC, bio);
1919         wait_for_completion(&compl);
1920
1921         if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1922                 /* try to remap that extent elsewhere? */
1923                 bio_put(bio);
1924                 return -EIO;
1925         }
1926
1927         printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1928                         "sector %llu)\n", page->mapping->host->i_ino, start,
1929                         dev->name, sector);
1930
1931         bio_put(bio);
1932         return 0;
1933 }
1934
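/*
 * rewrite each page of a metadata extent buffer onto the given mirror,
 * one page at a time, via repair_io_failure.
 */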
1935 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1936                          int mirror_num)
1937 {
1938         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1939         u64 start = eb->start;
1940         unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1941         int ret = 0;
1942
1943         for (i = 0; i < num_pages; i++) {
1944                 struct page *p = extent_buffer_page(eb, i);
1945                 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1946                                         start, p, mirror_num);
1947                 if (ret)
1948                         break;
1949                 start += PAGE_CACHE_SIZE;
1950         }
1951
1952         return ret;
1953 }
1954
1955 /*
1956  * each time an IO finishes, we do a fast check in the IO failure tree
1957  * to see if we need to process or clean up an io_failure_record
1958  */
1959 static int clean_io_failure(u64 start, struct page *page)
1960 {
1961         u64 private;
1962         u64 private_failure;
1963         struct io_failure_record *failrec;
1964         struct btrfs_mapping_tree *map_tree;
1965         struct extent_state *state;
1966         int num_copies;
1967         int did_repair = 0;
1968         int ret;
1969         struct inode *inode = page->mapping->host;
1970
1971         private = 0;
1972         ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1973                                 (u64)-1, 1, EXTENT_DIRTY, 0);
1974         if (!ret)
1975                 return 0;
1976
1977         ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1978                                 &private_failure);
1979         if (ret)
1980                 return 0;
1981
1982         failrec = (struct io_failure_record *)(unsigned long) private_failure;
1983         BUG_ON(!failrec->this_mirror);
1984
1985         if (failrec->in_validation) {
1986                 /* there was no real error, just free the record */
1987                 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1988                          failrec->start);
1989                 did_repair = 1;
1990                 goto out;
1991         }
1992
1993         spin_lock(&BTRFS_I(inode)->io_tree.lock);
1994         state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1995                                             failrec->start,
1996                                             EXTENT_LOCKED);
1997         spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1998
1999         if (state && state->start == failrec->start) {
2000                 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
2001                 num_copies = btrfs_num_copies(map_tree, failrec->logical,
2002                                                 failrec->len);
2003                 if (num_copies > 1)  {
2004                         ret = repair_io_failure(map_tree, start, failrec->len,
2005                                                 failrec->logical, page,
2006                                                 failrec->failed_mirror);
2007                         did_repair = !ret;
2008                 }
2009         }
2010
2011 out:
2012         if (!ret)
2013                 ret = free_io_failure(inode, failrec, did_repair);
2014
2015         return ret;
2016 }
2017
2018 /*
2019  * this is a generic handler for readpage errors (default
2020  * readpage_io_failed_hook). if other copies exist, read those and write back
2021  * good data to the failed position. it does not try to remap the failed
2022  * extent elsewhere, hoping the device is smart enough to do this as
2023  * needed
2024  */
2025
2026 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2027                                 u64 start, u64 end, int failed_mirror,
2028                                 struct extent_state *state)
2029 {
2030         struct io_failure_record *failrec = NULL;
2031         u64 private;
2032         struct extent_map *em;
2033         struct inode *inode = page->mapping->host;
2034         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2035         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2036         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2037         struct bio *bio;
2038         int num_copies;
2039         int ret;
2040         int read_mode;
2041         u64 logical;
2042
2043         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2044
2045         ret = get_state_private(failure_tree, start, &private);
2046         if (ret) {
2047                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2048                 if (!failrec)
2049                         return -ENOMEM;
2050                 failrec->start = start;
2051                 failrec->len = end - start + 1;
2052                 failrec->this_mirror = 0;
2053                 failrec->bio_flags = 0;
2054                 failrec->in_validation = 0;
2055
2056                 read_lock(&em_tree->lock);
2057                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2058                 if (!em) {
2059                         read_unlock(&em_tree->lock);
2060                         kfree(failrec);
2061                         return -EIO;
2062                 }
2063
2064                 if (em->start > start || em->start + em->len < start) {
2065                         free_extent_map(em);
2066                         em = NULL;
2067                 }
2068                 read_unlock(&em_tree->lock);
2069
2070                 if (!em || IS_ERR(em)) {
2071                         kfree(failrec);
2072                         return -EIO;
2073                 }
2074                 logical = start - em->start;
2075                 logical = em->block_start + logical;
2076                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2077                         logical = em->block_start;
2078                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2079                         extent_set_compress_type(&failrec->bio_flags,
2080                                                  em->compress_type);
2081                 }
2082                 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2083                          "len=%llu\n", logical, start, failrec->len);
2084                 failrec->logical = logical;
2085                 free_extent_map(em);
2086
2087                 /* set the bits in the private failure tree */
2088                 ret = set_extent_bits(failure_tree, start, end,
2089                                         EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2090                 if (ret >= 0)
2091                         ret = set_state_private(failure_tree, start,
2092                                                 (u64)(unsigned long)failrec);
2093                 /* set the bits in the inode's tree */
2094                 if (ret >= 0)
2095                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2096                                                 GFP_NOFS);
2097                 if (ret < 0) {
2098                         kfree(failrec);
2099                         return ret;
2100                 }
2101         } else {
2102                 failrec = (struct io_failure_record *)(unsigned long)private;
2103                 pr_debug("bio_readpage_error: (found) logical=%llu, "
2104                          "start=%llu, len=%llu, validation=%d\n",
2105                          failrec->logical, failrec->start, failrec->len,
2106                          failrec->in_validation);
2107                 /*
2108                  * when there can be more than two copies of the data on disk,
2109                  * add to failrec here (e.g. with a list for failed_mirror) to make
2110                  * clean_io_failure() clean all those errors at once.
2111                  */
2112         }
2113         num_copies = btrfs_num_copies(
2114                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
2115                               failrec->logical, failrec->len);
2116         if (num_copies == 1) {
2117                 /*
2118                  * we only have a single copy of the data, so don't bother with
2119                  * all the retry and error correction code that follows. no
2120                  * matter what the error is, it is very likely to persist.
2121                  */
2122                 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2123                          "state=%p, num_copies=%d, next_mirror %d, "
2124                          "failed_mirror %d\n", state, num_copies,
2125                          failrec->this_mirror, failed_mirror);
2126                 free_io_failure(inode, failrec, 0);
2127                 return -EIO;
2128         }
2129
2130         if (!state) {
2131                 spin_lock(&tree->lock);
2132                 state = find_first_extent_bit_state(tree, failrec->start,
2133                                                     EXTENT_LOCKED);
2134                 if (state && state->start != failrec->start)
2135                         state = NULL;
2136                 spin_unlock(&tree->lock);
2137         }
2138
2139         /*
2140          * there are two goals here:
2141          *      a) deliver good data to the caller
2142          *      b) correct the bad sectors on disk
2143          */
2144         if (failed_bio->bi_vcnt > 1) {
2145                 /*
2146                  * to fulfill b), we need to know the exact failing sectors, as
2147                  * we don't want to rewrite any more than the failed ones. thus,
2148                  * we need separate read requests for the failed bio
2149                  *
2150                  * if the following BUG_ON triggers, our validation request got
2151                  * merged. we need separate requests for our algorithm to work.
2152                  */
2153                 BUG_ON(failrec->in_validation);
2154                 failrec->in_validation = 1;
2155                 failrec->this_mirror = failed_mirror;
2156                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2157         } else {
2158                 /*
2159                  * we're ready to fulfill a) and b) together. get a good copy
2160                  * of the failed sector and if we succeed, we have set up
2161                  * everything for repair_io_failure to do the rest for us.
2162                  */
2163                 if (failrec->in_validation) {
2164                         BUG_ON(failrec->this_mirror != failed_mirror);
2165                         failrec->in_validation = 0;
2166                         failrec->this_mirror = 0;
2167                 }
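                /*
                 * record which mirror failed and advance this_mirror past
                 * it, so the retry reads from a different copy.
                 */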
2168                 failrec->failed_mirror = failed_mirror;
2169                 failrec->this_mirror++;
2170                 if (failrec->this_mirror == failed_mirror)
2171                         failrec->this_mirror++;
2172                 read_mode = READ_SYNC;
2173         }
2174
2175         if (!state || failrec->this_mirror > num_copies) {
2176                 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2177                          "next_mirror %d, failed_mirror %d\n", state,
2178                          num_copies, failrec->this_mirror, failed_mirror);
2179                 free_io_failure(inode, failrec, 0);
2180                 return -EIO;
2181         }
2182
2183         bio = bio_alloc(GFP_NOFS, 1);
2184         if (!bio) {
2185                 free_io_failure(inode, failrec, 0);
2186                 return -EIO;
2187         }
2188         bio->bi_private = state;
2189         bio->bi_end_io = failed_bio->bi_end_io;
2190         bio->bi_sector = failrec->logical >> 9;
2191         bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2192         bio->bi_size = 0;
2193
2194         bio_add_page(bio, page, failrec->len, start - page_offset(page));
2195
2196         pr_debug("bio_readpage_error: submitting new read[%#x] to "
2197                  "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2198                  failrec->this_mirror, num_copies, failrec->in_validation);
2199
2200         ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2201                                          failrec->this_mirror,
2202                                          failrec->bio_flags, 0);
2203         return ret;
2204 }
2205
2206 /* lots and lots of room for performance fixes in the end_bio funcs */
2207
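/*
 * run the per-inode writepage_end_io_hook for [start, end] and mark the
 * page with an error if either the hook or the bio reported a failure.
 */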
2208 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2209 {
2210         int uptodate = (err == 0);
2211         struct extent_io_tree *tree;
2212         int ret;
2213
2214         tree = &BTRFS_I(page->mapping->host)->io_tree;
2215
2216         if (tree->ops && tree->ops->writepage_end_io_hook) {
2217                 ret = tree->ops->writepage_end_io_hook(page, start,
2218                                                end, NULL, uptodate);
2219                 if (ret)
2220                         uptodate = 0;
2221         }
2222
2223         if (!uptodate) {
2224                 ClearPageUptodate(page);
2225                 SetPageError(page);
2226         }
2227         return 0;
2228 }
2229
2230 /*
2231  * after a writepage IO is done, we need to:
2232  * clear the uptodate bits on error
2233  * run the writepage_end_io_hook for the range
2234  * end_page_writeback if the page has no more pending IO
2235  *
2236  * Scheduling is not allowed, so the extent state tree is expected
2237  * to have one and only one object corresponding to this IO.
2238  */
2239 static void end_bio_extent_writepage(struct bio *bio, int err)
2240 {
2241         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2242         struct extent_io_tree *tree;
2243         u64 start;
2244         u64 end;
2245         int whole_page;
2246
2247         do {
2248                 struct page *page = bvec->bv_page;
2249                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2250
2251                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2252                          bvec->bv_offset;
2253                 end = start + bvec->bv_len - 1;
2254
2255                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2256                         whole_page = 1;
2257                 else
2258                         whole_page = 0;
2259
2260                 if (--bvec >= bio->bi_io_vec)
2261                         prefetchw(&bvec->bv_page->flags);
2262
2263                 if (end_extent_writepage(page, err, start, end))
2264                         continue;
2265
2266                 if (whole_page)
2267                         end_page_writeback(page);
2268                 else
2269                         check_page_writeback(tree, page);
2270         } while (bvec >= bio->bi_io_vec);
2271
2272         bio_put(bio);
2273 }
2274
2275 /*
2276  * after a readpage IO is done, we need to:
2277  * clear the uptodate bits on error
2278  * set the uptodate bits if things worked
2279  * set the page up to date if all extents in the tree are uptodate
2280  * clear the lock bit in the extent tree
2281  * unlock the page if there are no other extents locked for it
2282  *
2283  * Scheduling is not allowed, so the extent state tree is expected
2284  * to have one and only one object corresponding to this IO.
2285  */
2286 static void end_bio_extent_readpage(struct bio *bio, int err)
2287 {
2288         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2289         struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2290         struct bio_vec *bvec = bio->bi_io_vec;
2291         struct extent_io_tree *tree;
2292         u64 start;
2293         u64 end;
2294         int whole_page;
2295         int mirror;
2296         int ret;
2297
2298         if (err)
2299                 uptodate = 0;
2300
2301         do {
2302                 struct page *page = bvec->bv_page;
2303                 struct extent_state *cached = NULL;
2304                 struct extent_state *state;
2305
2306                 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2307                          "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2308                          (long int)bio->bi_bdev);
2309                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2310
2311                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2312                         bvec->bv_offset;
2313                 end = start + bvec->bv_len - 1;
2314
2315                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2316                         whole_page = 1;
2317                 else
2318                         whole_page = 0;
2319
2320                 if (++bvec <= bvec_end)
2321                         prefetchw(&bvec->bv_page->flags);
2322
2323                 spin_lock(&tree->lock);
2324                 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2325                 if (state && state->start == start) {
2326                         /*
2327                          * take a reference on the state, unlock will drop
2328                          * the ref
2329                          */
2330                         cache_state(state, &cached);
2331                 }
2332                 spin_unlock(&tree->lock);
2333
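                /*
                 * the read submission path stashes the mirror number in
                 * bi_bdev for the endio handler, so pull it back out here.
                 */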
2334                 mirror = (int)(unsigned long)bio->bi_bdev;
2335                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2336                         ret = tree->ops->readpage_end_io_hook(page, start, end,
2337                                                               state, mirror);
2338                         if (ret)
2339                                 uptodate = 0;
2340                         else
2341                                 clean_io_failure(start, page);
2342                 }
2343
2344                 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2345                         ret = tree->ops->readpage_io_failed_hook(page, mirror);
2346                         if (!ret && !err &&
2347                             test_bit(BIO_UPTODATE, &bio->bi_flags))
2348                                 uptodate = 1;
2349                 } else if (!uptodate) {
2350                         /*
2351                          * The generic bio_readpage_error handles errors the
2352                          * following way: If possible, new read requests are
2353                          * created and submitted and will end up in
2354                          * end_bio_extent_readpage as well (if we're lucky, not
2355                          * in the !uptodate case). In that case it returns 0 and
2356                          * we just go on with the next page in our bio. If it
2357                          * can't handle the error it will return -EIO and we
2358                          * remain responsible for that page.
2359                          */
2360                         ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2361                         if (ret == 0) {
2362                                 uptodate =
2363                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
2364                                 if (err)
2365                                         uptodate = 0;
2366                                 uncache_state(&cached);
2367                                 continue;
2368                         }
2369                 }
2370
2371                 if (uptodate && tree->track_uptodate) {
2372                         set_extent_uptodate(tree, start, end, &cached,
2373                                             GFP_ATOMIC);
2374                 }
2375                 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2376
2377                 if (whole_page) {
2378                         if (uptodate) {
2379                                 SetPageUptodate(page);
2380                         } else {
2381                                 ClearPageUptodate(page);
2382                                 SetPageError(page);
2383                         }
2384                         unlock_page(page);
2385                 } else {
2386                         if (uptodate) {
2387                                 check_page_uptodate(tree, page);
2388                         } else {
2389                                 ClearPageUptodate(page);
2390                                 SetPageError(page);
2391                         }
2392                         check_page_locked(tree, page);
2393                 }
2394         } while (bvec <= bvec_end);
2395
2396         bio_put(bio);
2397 }
2398
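/*
 * allocate a bio aimed at the given device and starting sector.  If the
 * allocation fails while we're in the memory allocator itself
 * (PF_MEMALLOC), retry with progressively fewer vecs so the IO can still
 * make forward progress.
 */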
2399 struct bio *
2400 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2401                 gfp_t gfp_flags)
2402 {
2403         struct bio *bio;
2404
2405         bio = bio_alloc(gfp_flags, nr_vecs);
2406
2407         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2408                 while (!bio && (nr_vecs /= 2))
2409                         bio = bio_alloc(gfp_flags, nr_vecs);
2410         }
2411
2412         if (bio) {
2413                 bio->bi_size = 0;
2414                 bio->bi_bdev = bdev;
2415                 bio->bi_sector = first_sector;
2416         }
2417         return bio;
2418 }
2419
2420 /*
2421  * Since writes are async, they will only return -ENOMEM.
2422  * Reads can return the full range of I/O error conditions.
2423  */
2424 static int __must_check submit_one_bio(int rw, struct bio *bio,
2425                                        int mirror_num, unsigned long bio_flags)
2426 {
2427         int ret = 0;
2428         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2429         struct page *page = bvec->bv_page;
2430         struct extent_io_tree *tree = bio->bi_private;
2431         u64 start;
2432
2433         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2434
2435         bio->bi_private = NULL;
2436
2437         bio_get(bio);
2438
2439         if (tree->ops && tree->ops->submit_bio_hook)
2440                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2441                                            mirror_num, bio_flags, start);
2442         else
2443                 btrfsic_submit_bio(rw, bio);
2444
2445         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2446                 ret = -EOPNOTSUPP;
2447         bio_put(bio);
2448         return ret;
2449 }
2450
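/*
 * ask the tree's merge_bio_hook whether this page may be added to the
 * current bio.  A non-zero return tells submit_extent_page to submit the
 * bio as-is and start a new one.
 */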
2451 static int merge_bio(struct extent_io_tree *tree, struct page *page,
2452                      unsigned long offset, size_t size, struct bio *bio,
2453                      unsigned long bio_flags)
2454 {
2455         int ret = 0;
2456         if (tree->ops && tree->ops->merge_bio_hook)
2457                 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2458                                                 bio_flags);
2459         BUG_ON(ret < 0);
2460         return ret;
2461
2462 }
2463
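/*
 * add a page to the bio cached in *bio_ret when it is contiguous and the
 * bio flags match; otherwise the old bio is submitted and a new one is
 * allocated.  The resulting bio is handed back through *bio_ret, or
 * submitted immediately when bio_ret is NULL.
 */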
2464 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2465                               struct page *page, sector_t sector,
2466                               size_t size, unsigned long offset,
2467                               struct block_device *bdev,
2468                               struct bio **bio_ret,
2469                               unsigned long max_pages,
2470                               bio_end_io_t end_io_func,
2471                               int mirror_num,
2472                               unsigned long prev_bio_flags,
2473                               unsigned long bio_flags)
2474 {
2475         int ret = 0;
2476         struct bio *bio;
2477         int nr;
2478         int contig = 0;
2479         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2480         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2481         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2482
2483         if (bio_ret && *bio_ret) {
2484                 bio = *bio_ret;
2485                 if (old_compressed)
2486                         contig = bio->bi_sector == sector;
2487                 else
2488                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
2489                                 sector;
2490
2491                 if (prev_bio_flags != bio_flags || !contig ||
2492                     merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2493                     bio_add_page(bio, page, page_size, offset) < page_size) {
2494                         ret = submit_one_bio(rw, bio, mirror_num,
2495                                              prev_bio_flags);
2496                         if (ret < 0)
2497                                 return ret;
2498                         bio = NULL;
2499                 } else {
2500                         return 0;
2501                 }
2502         }
2503         if (this_compressed)
2504                 nr = BIO_MAX_PAGES;
2505         else
2506                 nr = bio_get_nr_vecs(bdev);
2507
2508         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2509         if (!bio)
2510                 return -ENOMEM;
2511
2512         bio_add_page(bio, page, page_size, offset);
2513         bio->bi_end_io = end_io_func;
2514         bio->bi_private = tree;
2515
2516         if (bio_ret)
2517                 *bio_ret = bio;
2518         else
2519                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2520
2521         return ret;
2522 }
2523
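/*
 * tie an extent buffer to a page via page->private, taking an extra page
 * reference the first time.  A page that already has private data must
 * already belong to this eb.
 */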
2524 void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2525 {
2526         if (!PagePrivate(page)) {
2527                 SetPagePrivate(page);
2528                 page_cache_get(page);
2529                 set_page_private(page, (unsigned long)eb);
2530         } else {
2531                 WARN_ON(page->private != (unsigned long)eb);
2532         }
2533 }
2534
2535 void set_page_extent_mapped(struct page *page)
2536 {
2537         if (!PagePrivate(page)) {
2538                 SetPagePrivate(page);
2539                 page_cache_get(page);
2540                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2541         }
2542 }
2543
2544 /*
2545  * basic readpage implementation.  Locked extent state structs are inserted
2546  * into the tree and are removed when the IO is done (by the end_io
2547  * handlers)
2548  * XXX JDM: This needs looking at to ensure proper page locking
2549  */
2550 static int __extent_read_full_page(struct extent_io_tree *tree,
2551                                    struct page *page,
2552                                    get_extent_t *get_extent,
2553                                    struct bio **bio, int mirror_num,
2554                                    unsigned long *bio_flags)
2555 {
2556         struct inode *inode = page->mapping->host;
2557         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2558         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2559         u64 end;
2560         u64 cur = start;
2561         u64 extent_offset;
2562         u64 last_byte = i_size_read(inode);
2563         u64 block_start;
2564         u64 cur_end;
2565         sector_t sector;
2566         struct extent_map *em;
2567         struct block_device *bdev;
2568         struct btrfs_ordered_extent *ordered;
2569         int ret;
2570         int nr = 0;
2571         size_t pg_offset = 0;
2572         size_t iosize;
2573         size_t disk_io_size;
2574         size_t blocksize = inode->i_sb->s_blocksize;
2575         unsigned long this_bio_flag = 0;
2576
2577         set_page_extent_mapped(page);
2578
2579         if (!PageUptodate(page)) {
2580                 if (cleancache_get_page(page) == 0) {
2581                         BUG_ON(blocksize != PAGE_SIZE);
2582                         goto out;
2583                 }
2584         }
2585
2586         end = page_end;
2587         while (1) {
2588                 lock_extent(tree, start, end);
2589                 ordered = btrfs_lookup_ordered_extent(inode, start);
2590                 if (!ordered)
2591                         break;
2592                 unlock_extent(tree, start, end);
2593                 btrfs_start_ordered_extent(inode, ordered, 1);
2594                 btrfs_put_ordered_extent(ordered);
2595         }
2596
2597         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2598                 char *userpage;
2599                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2600
2601                 if (zero_offset) {
2602                         iosize = PAGE_CACHE_SIZE - zero_offset;
2603                         userpage = kmap_atomic(page);
2604                         memset(userpage + zero_offset, 0, iosize);
2605                         flush_dcache_page(page);
2606                         kunmap_atomic(userpage);
2607                 }
2608         }
2609         while (cur <= end) {
2610                 if (cur >= last_byte) {
2611                         char *userpage;
2612                         struct extent_state *cached = NULL;
2613
2614                         iosize = PAGE_CACHE_SIZE - pg_offset;
2615                         userpage = kmap_atomic(page);
2616                         memset(userpage + pg_offset, 0, iosize);
2617                         flush_dcache_page(page);
2618                         kunmap_atomic(userpage);
2619                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2620                                             &cached, GFP_NOFS);
2621                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2622                                              &cached, GFP_NOFS);
2623                         break;
2624                 }
2625                 em = get_extent(inode, page, pg_offset, cur,
2626                                 end - cur + 1, 0);
2627                 if (IS_ERR_OR_NULL(em)) {
2628                         SetPageError(page);
2629                         unlock_extent(tree, cur, end);
2630                         break;
2631                 }
2632                 extent_offset = cur - em->start;
2633                 BUG_ON(extent_map_end(em) <= cur);
2634                 BUG_ON(end < cur);
2635
2636                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2637                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2638                         extent_set_compress_type(&this_bio_flag,
2639                                                  em->compress_type);
2640                 }
2641
2642                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2643                 cur_end = min(extent_map_end(em) - 1, end);
2644                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2645                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2646                         disk_io_size = em->block_len;
2647                         sector = em->block_start >> 9;
2648                 } else {
2649                         sector = (em->block_start + extent_offset) >> 9;
2650                         disk_io_size = iosize;
2651                 }
2652                 bdev = em->bdev;
2653                 block_start = em->block_start;
2654                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2655                         block_start = EXTENT_MAP_HOLE;
2656                 free_extent_map(em);
2657                 em = NULL;
2658
2659                 /* we've found a hole, just zero and go on */
2660                 if (block_start == EXTENT_MAP_HOLE) {
2661                         char *userpage;
2662                         struct extent_state *cached = NULL;
2663
2664                         userpage = kmap_atomic(page);
2665                         memset(userpage + pg_offset, 0, iosize);
2666                         flush_dcache_page(page);
2667                         kunmap_atomic(userpage);
2668
2669                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2670                                             &cached, GFP_NOFS);
2671                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2672                                              &cached, GFP_NOFS);
2673                         cur = cur + iosize;
2674                         pg_offset += iosize;
2675                         continue;
2676                 }
2677                 /* the get_extent function already copied into the page */
2678                 if (test_range_bit(tree, cur, cur_end,
2679                                    EXTENT_UPTODATE, 1, NULL)) {
2680                         check_page_uptodate(tree, page);
2681                         unlock_extent(tree, cur, cur + iosize - 1);
2682                         cur = cur + iosize;
2683                         pg_offset += iosize;
2684                         continue;
2685                 }
2686                 /* we have an inline extent but it didn't get marked up
2687                  * to date.  Error out
2688                  */
2689                 if (block_start == EXTENT_MAP_INLINE) {
2690                         SetPageError(page);
2691                         unlock_extent(tree, cur, cur + iosize - 1);
2692                         cur = cur + iosize;
2693                         pg_offset += iosize;
2694                         continue;
2695                 }
2696
2697                 ret = 0;
2698                 if (tree->ops && tree->ops->readpage_io_hook) {
2699                         ret = tree->ops->readpage_io_hook(page, cur,
2700                                                           cur + iosize - 1);
2701                 }
2702                 if (!ret) {
2703                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2704                         pnr -= page->index;
2705                         ret = submit_extent_page(READ, tree, page,
2706                                          sector, disk_io_size, pg_offset,
2707                                          bdev, bio, pnr,
2708                                          end_bio_extent_readpage, mirror_num,
2709                                          *bio_flags,
2710                                          this_bio_flag);
2711                         BUG_ON(ret == -ENOMEM);
2712                         nr++;
2713                         *bio_flags = this_bio_flag;
2714                 }
2715                 if (ret)
2716                         SetPageError(page);
2717                 cur = cur + iosize;
2718                 pg_offset += iosize;
2719         }
2720 out:
2721         if (!nr) {
2722                 if (!PageError(page))
2723                         SetPageUptodate(page);
2724                 unlock_page(page);
2725         }
2726         return 0;
2727 }
2728
2729 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2730                             get_extent_t *get_extent, int mirror_num)
2731 {
2732         struct bio *bio = NULL;
2733         unsigned long bio_flags = 0;
2734         int ret;
2735
2736         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2737                                       &bio_flags);
2738         if (bio)
2739                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2740         return ret;
2741 }
2742
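/*
 * charge nr_written pages against wbc->nr_to_write and, for sweeping
 * (cyclic or whole-file) writeback, advance the mapping's writeback_index
 * so the next pass resumes after the pages we just wrote.
 */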
2743 static noinline void update_nr_written(struct page *page,
2744                                       struct writeback_control *wbc,
2745                                       unsigned long nr_written)
2746 {
2747         wbc->nr_to_write -= nr_written;
2748         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2749             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2750                 page->mapping->writeback_index = page->index + nr_written;
2751 }
2752
2753 /*
2754  * the writepage semantics are similar to regular writepage.  extent
2755  * records are inserted to lock ranges in the tree, and as dirty areas
2756  * are found, they are marked writeback.  Then the lock bits are removed
2757  * and the end_io handler clears the writeback ranges
2758  */
2759 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2760                               void *data)
2761 {
2762         struct inode *inode = page->mapping->host;
2763         struct extent_page_data *epd = data;
2764         struct extent_io_tree *tree = epd->tree;
2765         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2766         u64 delalloc_start;
2767         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2768         u64 end;
2769         u64 cur = start;
2770         u64 extent_offset;
2771         u64 last_byte = i_size_read(inode);
2772         u64 block_start;
2773         u64 iosize;
2774         sector_t sector;
2775         struct extent_state *cached_state = NULL;
2776         struct extent_map *em;
2777         struct block_device *bdev;
2778         int ret;
2779         int nr = 0;
2780         size_t pg_offset = 0;
2781         size_t blocksize;
2782         loff_t i_size = i_size_read(inode);
2783         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2784         u64 nr_delalloc;
2785         u64 delalloc_end;
2786         int page_started;
2787         int compressed;
2788         int write_flags;
2789         unsigned long nr_written = 0;
2790         bool fill_delalloc = true;
2791
2792         if (wbc->sync_mode == WB_SYNC_ALL)
2793                 write_flags = WRITE_SYNC;
2794         else
2795                 write_flags = WRITE;
2796
2797         trace___extent_writepage(page, inode, wbc);
2798
2799         WARN_ON(!PageLocked(page));
2800
2801         ClearPageError(page);
2802
2803         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2804         if (page->index > end_index ||
2805            (page->index == end_index && !pg_offset)) {
2806                 page->mapping->a_ops->invalidatepage(page, 0);
2807                 unlock_page(page);
2808                 return 0;
2809         }
2810
2811         if (page->index == end_index) {
2812                 char *userpage;
2813
2814                 userpage = kmap_atomic(page);
2815                 memset(userpage + pg_offset, 0,
2816                        PAGE_CACHE_SIZE - pg_offset);
2817                 kunmap_atomic(userpage);
2818                 flush_dcache_page(page);
2819         }
2820         pg_offset = 0;
2821
2822         set_page_extent_mapped(page);
2823
2824         if (!tree->ops || !tree->ops->fill_delalloc)
2825                 fill_delalloc = false;
2826
2827         delalloc_start = start;
2828         delalloc_end = 0;
2829         page_started = 0;
2830         if (!epd->extent_locked && fill_delalloc) {
2831                 u64 delalloc_to_write = 0;
2832                 /*
2833                  * make sure the wbc mapping index is at least updated
2834                  * to this page.
2835                  */
2836                 update_nr_written(page, wbc, 0);
2837
2838                 while (delalloc_end < page_end) {
2839                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2840                                                        page,
2841                                                        &delalloc_start,
2842                                                        &delalloc_end,
2843                                                        128 * 1024 * 1024);
2844                         if (nr_delalloc == 0) {
2845                                 delalloc_start = delalloc_end + 1;
2846                                 continue;
2847                         }
2848                         ret = tree->ops->fill_delalloc(inode, page,
2849                                                        delalloc_start,
2850                                                        delalloc_end,
2851                                                        &page_started,
2852                                                        &nr_written);
2853                         /* File system has been set read-only */
2854                         if (ret) {
2855                                 SetPageError(page);
2856                                 goto done;
2857                         }
2858                         /*
2859                          * delalloc_end is already one less than the total
2860                          * length, so we don't subtract one from
2861                          * PAGE_CACHE_SIZE
2862                          */
2863                         delalloc_to_write += (delalloc_end - delalloc_start +
2864                                               PAGE_CACHE_SIZE) >>
2865                                               PAGE_CACHE_SHIFT;
2866                         delalloc_start = delalloc_end + 1;
2867                 }
2868                 if (wbc->nr_to_write < delalloc_to_write) {
2869                         int thresh = 8192;
2870
2871                         if (delalloc_to_write < thresh * 2)
2872                                 thresh = delalloc_to_write;
2873                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
2874                                                  thresh);
2875                 }
2876
2877                 /* did the fill delalloc function already unlock and start
2878                  * the IO?
2879                  */
2880                 if (page_started) {
2881                         ret = 0;
2882                         /*
2883                          * we've unlocked the page, so we can't update
2884                          * the mapping's writeback index, just update
2885                          * nr_to_write.
2886                          */
2887                         wbc->nr_to_write -= nr_written;
2888                         goto done_unlocked;
2889                 }
2890         }
2891         if (tree->ops && tree->ops->writepage_start_hook) {
2892                 ret = tree->ops->writepage_start_hook(page, start,
2893                                                       page_end);
2894                 if (ret) {
2895                         /* Fixup worker will requeue */
2896                         if (ret == -EBUSY)
2897                                 wbc->pages_skipped++;
2898                         else
2899                                 redirty_page_for_writepage(wbc, page);
2900                         update_nr_written(page, wbc, nr_written);
2901                         unlock_page(page);
2902                         ret = 0;
2903                         goto done_unlocked;
2904                 }
2905         }
2906
2907         /*
2908          * we don't want to touch the inode after unlocking the page,
2909          * so we update the mapping writeback index now
2910          */
2911         update_nr_written(page, wbc, nr_written + 1);
2912
2913         end = page_end;
2914         if (last_byte <= start) {
2915                 if (tree->ops && tree->ops->writepage_end_io_hook)
2916                         tree->ops->writepage_end_io_hook(page, start,
2917                                                          page_end, NULL, 1);
2918                 goto done;
2919         }
2920
2921         blocksize = inode->i_sb->s_blocksize;
2922
2923         while (cur <= end) {
2924                 if (cur >= last_byte) {
2925                         if (tree->ops && tree->ops->writepage_end_io_hook)
2926                                 tree->ops->writepage_end_io_hook(page, cur,
2927                                                          page_end, NULL, 1);
2928                         break;
2929                 }
2930                 em = epd->get_extent(inode, page, pg_offset, cur,
2931                                      end - cur + 1, 1);
2932                 if (IS_ERR_OR_NULL(em)) {
2933                         SetPageError(page);
2934                         break;
2935                 }
2936
2937                 extent_offset = cur - em->start;
2938                 BUG_ON(extent_map_end(em) <= cur);
2939                 BUG_ON(end < cur);
2940                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2941                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2942                 sector = (em->block_start + extent_offset) >> 9;
2943                 bdev = em->bdev;
2944                 block_start = em->block_start;
2945                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2946                 free_extent_map(em);
2947                 em = NULL;
2948
2949                 /*
2950                  * compressed and inline extents are written through other
2951                  * paths in the FS
2952                  */
2953                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2954                     block_start == EXTENT_MAP_INLINE) {
2955                         /*
2956                          * end_io notification does not happen here for
2957                          * compressed extents
2958                          */
2959                         if (!compressed && tree->ops &&
2960                             tree->ops->writepage_end_io_hook)
2961                                 tree->ops->writepage_end_io_hook(page, cur,
2962                                                          cur + iosize - 1,
2963                                                          NULL, 1);
2964                         else if (compressed) {
2965                                 /* we don't want to end_page_writeback on
2966                                  * a compressed extent.  this happens
2967                                  * elsewhere
2968                                  */
2969                                 nr++;
2970                         }
2971
2972                         cur += iosize;
2973                         pg_offset += iosize;
2974                         continue;
2975                 }
2976                 /* leave this out until we have a page_mkwrite call */
2977                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2978                                    EXTENT_DIRTY, 0, NULL)) {
2979                         cur = cur + iosize;
2980                         pg_offset += iosize;
2981                         continue;
2982                 }
2983
2984                 if (tree->ops && tree->ops->writepage_io_hook) {
2985                         ret = tree->ops->writepage_io_hook(page, cur,
2986                                                 cur + iosize - 1);
2987                 } else {
2988                         ret = 0;
2989                 }
2990                 if (ret) {
2991                         SetPageError(page);
2992                 } else {
2993                         unsigned long max_nr = end_index + 1;
2994
2995                         set_range_writeback(tree, cur, cur + iosize - 1);
2996                         if (!PageWriteback(page)) {
2997                                 printk(KERN_ERR "btrfs warning page %lu not "
2998                                        "writeback, cur %llu end %llu\n",
2999                                        page->index, (unsigned long long)cur,
3000                                        (unsigned long long)end);
3001                         }
3002
3003                         ret = submit_extent_page(write_flags, tree, page,
3004                                                  sector, iosize, pg_offset,
3005                                                  bdev, &epd->bio, max_nr,
3006                                                  end_bio_extent_writepage,
3007                                                  0, 0, 0);
3008                         if (ret)
3009                                 SetPageError(page);
3010                 }
3011                 cur = cur + iosize;
3012                 pg_offset += iosize;
3013                 nr++;
3014         }
3015 done:
3016         if (nr == 0) {
3017                 /* make sure the mapping tag for page dirty gets cleared */
3018                 set_page_writeback(page);
3019                 end_page_writeback(page);
3020         }
3021         unlock_page(page);
3022
3023 done_unlocked:
3024
3025         /* drop our reference on any cached states */
3026         free_extent_state(cached_state);
3027         return 0;
3028 }
3029
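     /*
      * Wait for EXTENT_BUFFER_WRITEBACK to clear.  eb_wait() is the
      * wait_on_bit() action routine: it just io_schedule()s until
      * end_extent_buffer_writeback() clears the bit and wakes us.
      */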
3030 static int eb_wait(void *word)
3031 {
3032         io_schedule();
3033         return 0;
3034 }
3035
3036 static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3037 {
3038         wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3039                     TASK_UNINTERRUPTIBLE);
3040 }
3041
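     /*
      * Get an extent buffer ready for writeback: take the tree write lock
      * (flushing any bio we have built up if we would otherwise block), wait
      * for a previous writeback to finish when this is a WB_SYNC_ALL
      * writeout, and move the buffer from DIRTY to WRITEBACK while adjusting
      * dirty_metadata_bytes.
      *
      * Returns 1 with all of the eb's pages locked if the buffer needs to be
      * written out, 0 if it was clean or someone else is already writing it.
      */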
3042 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3043                                      struct btrfs_fs_info *fs_info,
3044                                      struct extent_page_data *epd)
3045 {
3046         unsigned long i, num_pages;
3047         int flush = 0;
3048         int ret = 0;
3049
3050         if (!btrfs_try_tree_write_lock(eb)) {
3051                 flush = 1;
3052                 flush_write_bio(epd);
3053                 btrfs_tree_lock(eb);
3054         }
3055
3056         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3057                 btrfs_tree_unlock(eb);
3058                 if (!epd->sync_io)
3059                         return 0;
3060                 if (!flush) {
3061                         flush_write_bio(epd);
3062                         flush = 1;
3063                 }
3064                 while (1) {
3065                         wait_on_extent_buffer_writeback(eb);
3066                         btrfs_tree_lock(eb);
3067                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3068                                 break;
3069                         btrfs_tree_unlock(eb);
3070                 }
3071         }
3072
3073         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3074                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3075                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3076                 spin_lock(&fs_info->delalloc_lock);
3077                 if (fs_info->dirty_metadata_bytes >= eb->len)
3078                         fs_info->dirty_metadata_bytes -= eb->len;
3079                 else
3080                         WARN_ON(1);
3081                 spin_unlock(&fs_info->delalloc_lock);
3082                 ret = 1;
3083         }
3084
3085         btrfs_tree_unlock(eb);
3086
3087         if (!ret)
3088                 return ret;
3089
3090         num_pages = num_extent_pages(eb->start, eb->len);
3091         for (i = 0; i < num_pages; i++) {
3092                 struct page *p = extent_buffer_page(eb, i);
3093
3094                 if (!trylock_page(p)) {
3095                         if (!flush) {
3096                                 flush_write_bio(epd);
3097                                 flush = 1;
3098                         }
3099                         lock_page(p);
3100                 }
3101         }
3102
3103         return ret;
3104 }
3105
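     /*
      * Clear EXTENT_BUFFER_WRITEBACK and wake anyone sleeping in
      * wait_on_extent_buffer_writeback().
      */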
3106 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3107 {
3108         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3109         smp_mb__after_clear_bit();
3110         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3111 }
3112
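     /*
      * end_io handler for extent buffer writeback.  Walk the bio_vecs
      * backwards, mark the owning eb with EXTENT_BUFFER_IOERR on any error,
      * end writeback on each page and, once the eb's last in-flight page
      * completes, finish the buffer's writeback so waiters can make
      * progress.
      */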
3113 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3114 {
3115         int uptodate = err == 0;
3116         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3117         struct extent_buffer *eb;
3118         int done;
3119
3120         do {
3121                 struct page *page = bvec->bv_page;
3122
3123                 bvec--;
3124                 eb = (struct extent_buffer *)page->private;
3125                 BUG_ON(!eb);
3126                 done = atomic_dec_and_test(&eb->io_pages);
3127
3128                 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3129                         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3130                         ClearPageUptodate(page);
3131                         SetPageError(page);
3132                 }
3133
3134                 end_page_writeback(page);
3135
3136                 if (!done)
3137                         continue;
3138
3139                 end_extent_buffer_writeback(eb);
3140         } while (bvec >= bio->bi_io_vec);
3141
3142         bio_put(bio);
3143
3144 }
3145
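     /*
      * Submit every page of a single extent buffer for writeback through
      * epd->bio.  If a submission fails we mark the eb IOERR, drop the
      * remaining io_pages count and unlock the pages we never got to before
      * returning -EIO.
      */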
3146 static int write_one_eb(struct extent_buffer *eb,
3147                         struct btrfs_fs_info *fs_info,
3148                         struct writeback_control *wbc,
3149                         struct extent_page_data *epd)
3150 {
3151         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3152         u64 offset = eb->start;
3153         unsigned long i, num_pages;
3154         int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3155         int ret = 0;
3156
3157         clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3158         num_pages = num_extent_pages(eb->start, eb->len);
3159         atomic_set(&eb->io_pages, num_pages);
3160         for (i = 0; i < num_pages; i++) {
3161                 struct page *p = extent_buffer_page(eb, i);
3162
3163                 clear_page_dirty_for_io(p);
3164                 set_page_writeback(p);
3165                 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3166                                          PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3167                                          -1, end_bio_extent_buffer_writepage,
3168                                          0, 0, 0);
3169                 if (ret) {
3170                         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3171                         SetPageError(p);
3172                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3173                                 end_extent_buffer_writeback(eb);
3174                         ret = -EIO;
3175                         break;
3176                 }
3177                 offset += PAGE_CACHE_SIZE;
3178                 update_nr_written(p, wbc, 1);
3179                 unlock_page(p);
3180         }
3181
3182         if (unlikely(ret)) {
3183                 for (; i < num_pages; i++) {
3184                         struct page *p = extent_buffer_page(eb, i);
3185                         unlock_page(p);
3186                 }
3187         }
3188
3189         return ret;
3190 }
3191
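     /*
      * Write out dirty btree pages.  This walks the mapping like
      * write_cache_pages() does, but operates on whole extent buffers: each
      * page's private pointer is the eb, which we pin, lock with
      * lock_extent_buffer_for_io() and submit with write_one_eb().
      */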
3192 int btree_write_cache_pages(struct address_space *mapping,
3193                                    struct writeback_control *wbc)
3194 {
3195         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3196         struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3197         struct extent_buffer *eb, *prev_eb = NULL;
3198         struct extent_page_data epd = {
3199                 .bio = NULL,
3200                 .tree = tree,
3201                 .extent_locked = 0,
3202                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3203         };
3204         int ret = 0;
3205         int done = 0;
3206         int nr_to_write_done = 0;
3207         struct pagevec pvec;
3208         int nr_pages;
3209         pgoff_t index;
3210         pgoff_t end;            /* Inclusive */
3211         int scanned = 0;
3212         int tag;
3213
3214         pagevec_init(&pvec, 0);
3215         if (wbc->range_cyclic) {
3216                 index = mapping->writeback_index; /* Start from prev offset */
3217                 end = -1;
3218         } else {
3219                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3220                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3221                 scanned = 1;
3222         }
3223         if (wbc->sync_mode == WB_SYNC_ALL)
3224                 tag = PAGECACHE_TAG_TOWRITE;
3225         else
3226                 tag = PAGECACHE_TAG_DIRTY;
3227 retry:
3228         if (wbc->sync_mode == WB_SYNC_ALL)
3229                 tag_pages_for_writeback(mapping, index, end);
3230         while (!done && !nr_to_write_done && (index <= end) &&
3231                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3232                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3233                 unsigned i;
3234
3235                 scanned = 1;
3236                 for (i = 0; i < nr_pages; i++) {
3237                         struct page *page = pvec.pages[i];
3238
3239                         if (!PagePrivate(page))
3240                                 continue;
3241
3242                         if (!wbc->range_cyclic && page->index > end) {
3243                                 done = 1;
3244                                 break;
3245                         }
3246
3247                         eb = (struct extent_buffer *)page->private;
3248                         if (!eb) {
3249                                 WARN_ON(1);
3250                                 continue;
3251                         }
3252
3253                         if (eb == prev_eb)
3254                                 continue;
3255
3256                         if (!atomic_inc_not_zero(&eb->refs)) {
3257                                 WARN_ON(1);
3258                                 continue;
3259                         }
3260
3261                         prev_eb = eb;
3262                         ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3263                         if (!ret) {
3264                                 free_extent_buffer(eb);
3265                                 continue;
3266                         }
3267
3268                         ret = write_one_eb(eb, fs_info, wbc, &epd);
3269                         if (ret) {
3270                                 done = 1;
3271                                 free_extent_buffer(eb);
3272                                 break;
3273                         }
3274                         free_extent_buffer(eb);
3275
3276                         /*
3277                          * the filesystem may choose to bump up nr_to_write.
3278                          * We have to make sure to honor the new nr_to_write
3279                          * at any time
3280                          */
3281                         nr_to_write_done = wbc->nr_to_write <= 0;
3282                 }
3283                 pagevec_release(&pvec);
3284                 cond_resched();
3285         }
3286         if (!scanned && !done) {
3287                 /*
3288                  * We hit the last page and there is more work to be done: wrap
3289                  * back to the start of the file
3290                  */
3291                 scanned = 1;
3292                 index = 0;
3293                 goto retry;
3294         }
3295         flush_write_bio(&epd);
3296         return ret;
3297 }
3298
3299 /**
3300  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3301  * @mapping: address space structure to write
3302  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3303  * @writepage: function called for each page
3304  * @data: data passed to writepage function
3305  *
3306  * If a page is already under I/O, extent_write_cache_pages() skips it, even
3307  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3308  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3309  * and msync() need to guarantee that all the data which was dirty at the time
3310  * the call was made get new I/O started against them.  If wbc->sync_mode is
3311  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3312  * existing IO to complete.
3313  */
3314 static int extent_write_cache_pages(struct extent_io_tree *tree,
3315                              struct address_space *mapping,
3316                              struct writeback_control *wbc,
3317                              writepage_t writepage, void *data,
3318                              void (*flush_fn)(void *))
3319 {
3320         int ret = 0;
3321         int done = 0;
3322         int nr_to_write_done = 0;
3323         struct pagevec pvec;
3324         int nr_pages;
3325         pgoff_t index;
3326         pgoff_t end;            /* Inclusive */
3327         int scanned = 0;
3328         int tag;
3329
3330         pagevec_init(&pvec, 0);
3331         if (wbc->range_cyclic) {
3332                 index = mapping->writeback_index; /* Start from prev offset */
3333                 end = -1;
3334         } else {
3335                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3336                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3337                 scanned = 1;
3338         }
3339         if (wbc->sync_mode == WB_SYNC_ALL)
3340                 tag = PAGECACHE_TAG_TOWRITE;
3341         else
3342                 tag = PAGECACHE_TAG_DIRTY;
3343 retry:
3344         if (wbc->sync_mode == WB_SYNC_ALL)
3345                 tag_pages_for_writeback(mapping, index, end);
3346         while (!done && !nr_to_write_done && (index <= end) &&
3347                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3348                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3349                 unsigned i;
3350
3351                 scanned = 1;
3352                 for (i = 0; i < nr_pages; i++) {
3353                         struct page *page = pvec.pages[i];
3354
3355                         /*
3356                          * At this point we hold neither mapping->tree_lock nor
3357                          * lock on the page itself: the page may be truncated or
3358                          * invalidated (changing page->mapping to NULL), or even
3359                          * swizzled back from swapper_space to tmpfs file
3360                          * mapping
3361                          */
3362                         if (tree->ops &&
3363                             tree->ops->write_cache_pages_lock_hook) {
3364                                 tree->ops->write_cache_pages_lock_hook(page,
3365                                                                data, flush_fn);
3366                         } else {
3367                                 if (!trylock_page(page)) {
3368                                         flush_fn(data);
3369                                         lock_page(page);
3370                                 }
3371                         }
3372
3373                         if (unlikely(page->mapping != mapping)) {
3374                                 unlock_page(page);
3375                                 continue;
3376                         }
3377
3378                         if (!wbc->range_cyclic && page->index > end) {
3379                                 done = 1;
3380                                 unlock_page(page);
3381                                 continue;
3382                         }
3383
3384                         if (wbc->sync_mode != WB_SYNC_NONE) {
3385                                 if (PageWriteback(page))
3386                                         flush_fn(data);
3387                                 wait_on_page_writeback(page);
3388                         }
3389
3390                         if (PageWriteback(page) ||
3391                             !clear_page_dirty_for_io(page)) {
3392                                 unlock_page(page);
3393                                 continue;
3394                         }
3395
3396                         ret = (*writepage)(page, wbc, data);
3397
3398                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3399                                 unlock_page(page);
3400                                 ret = 0;
3401                         }
3402                         if (ret)
3403                                 done = 1;
3404
3405                         /*
3406                          * the filesystem may choose to bump up nr_to_write.
3407                          * We have to make sure to honor the new nr_to_write
3408                          * at any time
3409                          */
3410                         nr_to_write_done = wbc->nr_to_write <= 0;
3411                 }
3412                 pagevec_release(&pvec);
3413                 cond_resched();
3414         }
3415         if (!scanned && !done) {
3416                 /*
3417                  * We hit the last page and there is more work to be done: wrap
3418                  * back to the start of the file
3419                  */
3420                 scanned = 1;
3421                 index = 0;
3422                 goto retry;
3423         }
3424         return ret;
3425 }
3426
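     /*
      * Submit whatever bio has been built up in the extent_page_data, using
      * WRITE_SYNC when the caller asked for synchronous IO.
      */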
3427 static void flush_epd_write_bio(struct extent_page_data *epd)
3428 {
3429         if (epd->bio) {
3430                 int rw = WRITE;
3431                 int ret;
3432
3433                 if (epd->sync_io)
3434                         rw = WRITE_SYNC;
3435
3436                 ret = submit_one_bio(rw, epd->bio, 0, 0);
3437                 BUG_ON(ret < 0); /* -ENOMEM */
3438                 epd->bio = NULL;
3439         }
3440 }
3441
3442 static noinline void flush_write_bio(void *data)
3443 {
3444         struct extent_page_data *epd = data;
3445         flush_epd_write_bio(epd);
3446 }
3447
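     /*
      * Write a single dirty page and then flush any bio that
      * __extent_writepage() built up.
      */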
3448 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3449                           get_extent_t *get_extent,
3450                           struct writeback_control *wbc)
3451 {
3452         int ret;
3453         struct extent_page_data epd = {
3454                 .bio = NULL,
3455                 .tree = tree,
3456                 .get_extent = get_extent,
3457                 .extent_locked = 0,
3458                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3459         };
3460
3461         ret = __extent_writepage(page, wbc, &epd);
3462
3463         flush_epd_write_bio(&epd);
3464         return ret;
3465 }
3466
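     /*
      * Write a range of pages whose delalloc extents the caller has already
      * locked; epd.extent_locked makes __extent_writepage() skip the
      * fill_delalloc step.  Pages that are no longer dirty just get their
      * writepage_end_io_hook called and are unlocked.
      */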
3467 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3468                               u64 start, u64 end, get_extent_t *get_extent,
3469                               int mode)
3470 {
3471         int ret = 0;
3472         struct address_space *mapping = inode->i_mapping;
3473         struct page *page;
3474         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3475                 PAGE_CACHE_SHIFT;
3476
3477         struct extent_page_data epd = {
3478                 .bio = NULL,
3479                 .tree = tree,
3480                 .get_extent = get_extent,
3481                 .extent_locked = 1,
3482                 .sync_io = mode == WB_SYNC_ALL,
3483         };
3484         struct writeback_control wbc_writepages = {
3485                 .sync_mode      = mode,
3486                 .nr_to_write    = nr_pages * 2,
3487                 .range_start    = start,
3488                 .range_end      = end + 1,
3489         };
3490
3491         while (start <= end) {
3492                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3493                 if (clear_page_dirty_for_io(page))
3494                         ret = __extent_writepage(page, &wbc_writepages, &epd);
3495                 else {
3496                         if (tree->ops && tree->ops->writepage_end_io_hook)
3497                                 tree->ops->writepage_end_io_hook(page, start,
3498                                                  start + PAGE_CACHE_SIZE - 1,
3499                                                  NULL, 1);
3500                         unlock_page(page);
3501                 }
3502                 page_cache_release(page);
3503                 start += PAGE_CACHE_SIZE;
3504         }
3505
3506         flush_epd_write_bio(&epd);
3507         return ret;
3508 }
3509
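     /*
      * ->writepages helper: run extent_write_cache_pages() over the mapping
      * and submit the final bio when we are done.
      */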
3510 int extent_writepages(struct extent_io_tree *tree,
3511                       struct address_space *mapping,
3512                       get_extent_t *get_extent,
3513                       struct writeback_control *wbc)
3514 {
3515         int ret = 0;
3516         struct extent_page_data epd = {
3517                 .bio = NULL,
3518                 .tree = tree,
3519                 .get_extent = get_extent,
3520                 .extent_locked = 0,
3521                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3522         };
3523
3524         ret = extent_write_cache_pages(tree, mapping, wbc,
3525                                        __extent_writepage, &epd,
3526                                        flush_write_bio);
3527         flush_epd_write_bio(&epd);
3528         return ret;
3529 }
3530
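     /*
      * ->readpages helper: add each page to the page cache and read it via
      * __extent_read_full_page(), merging the reads into a single bio where
      * possible and submitting it at the end.
      */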
3531 int extent_readpages(struct extent_io_tree *tree,
3532                      struct address_space *mapping,
3533                      struct list_head *pages, unsigned nr_pages,
3534                      get_extent_t get_extent)
3535 {
3536         struct bio *bio = NULL;
3537         unsigned page_idx;
3538         unsigned long bio_flags = 0;
3539
3540         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3541                 struct page *page = list_entry(pages->prev, struct page, lru);
3542
3543                 prefetchw(&page->flags);
3544                 list_del(&page->lru);
3545                 if (!add_to_page_cache_lru(page, mapping,
3546                                         page->index, GFP_NOFS)) {
3547                         __extent_read_full_page(tree, page, get_extent,
3548                                                 &bio, 0, &bio_flags);
3549                 }
3550                 page_cache_release(page);
3551         }
3552         BUG_ON(!list_empty(pages));
3553         if (bio)
3554                 return submit_one_bio(READ, bio, 0, bio_flags);
3555         return 0;
3556 }
3557
3558 /*
3559  * basic invalidatepage code, this waits on any locked or writeback
3560  * ranges corresponding to the page, and then deletes any extent state
3561  * records from the tree
3562  */
3563 int extent_invalidatepage(struct extent_io_tree *tree,
3564                           struct page *page, unsigned long offset)
3565 {
3566         struct extent_state *cached_state = NULL;
3567         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3568         u64 end = start + PAGE_CACHE_SIZE - 1;
3569         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3570
3571         start += (offset + blocksize - 1) & ~(blocksize - 1);
3572         if (start > end)
3573                 return 0;
3574
3575         lock_extent_bits(tree, start, end, 0, &cached_state);
3576         wait_on_page_writeback(page);
3577         clear_extent_bit(tree, start, end,
3578                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3579                          EXTENT_DO_ACCOUNTING,
3580                          1, 1, &cached_state, GFP_NOFS);
3581         return 0;
3582 }
3583
3584 /*
3585  * a helper for releasepage, this tests for areas of the page that
3586  * are locked or under IO and drops the related state bits if it is safe
3587  * to drop the page.
3588  */
3589 int try_release_extent_state(struct extent_map_tree *map,
3590                              struct extent_io_tree *tree, struct page *page,
3591                              gfp_t mask)
3592 {
3593         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3594         u64 end = start + PAGE_CACHE_SIZE - 1;
3595         int ret = 1;
3596
3597         if (test_range_bit(tree, start, end,
3598                            EXTENT_IOBITS, 0, NULL))
3599                 ret = 0;
3600         else {
3601                 if ((mask & GFP_NOFS) == GFP_NOFS)
3602                         mask = GFP_NOFS;
3603                 /*
3604                  * at this point we can safely clear everything except the
3605                  * locked bit and the nodatasum bit
3606                  */
3607                 ret = clear_extent_bit(tree, start, end,
3608                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3609                                  0, 0, NULL, mask);
3610
3611                 /* if clear_extent_bit failed for enomem reasons,
3612                  * we can't allow the release to continue.
3613                  */
3614                 if (ret < 0)
3615                         ret = 0;
3616                 else
3617                         ret = 1;
3618         }
3619         return ret;
3620 }
3621
3622 /*
3623  * a helper for releasepage.  As long as there are no locked extents
3624  * in the range corresponding to the page, both state records and extent
3625  * map records are removed
3626  */
3627 int try_release_extent_mapping(struct extent_map_tree *map,
3628                                struct extent_io_tree *tree, struct page *page,
3629                                gfp_t mask)
3630 {
3631         struct extent_map *em;
3632         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3633         u64 end = start + PAGE_CACHE_SIZE - 1;
3634
3635         if ((mask & __GFP_WAIT) &&
3636             page->mapping->host->i_size > 16 * 1024 * 1024) {
3637                 u64 len;
3638                 while (start <= end) {
3639                         len = end - start + 1;
3640                         write_lock(&map->lock);
3641                         em = lookup_extent_mapping(map, start, len);
3642                         if (!em) {
3643                                 write_unlock(&map->lock);
3644                                 break;
3645                         }
3646                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3647                             em->start != start) {
3648                                 write_unlock(&map->lock);
3649                                 free_extent_map(em);
3650                                 break;
3651                         }
3652                         if (!test_range_bit(tree, em->start,
3653                                             extent_map_end(em) - 1,
3654                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
3655                                             0, NULL)) {
3656                                 remove_extent_mapping(map, em);
3657                                 /* once for the rb tree */
3658                                 free_extent_map(em);
3659                         }
3660                         start = extent_map_end(em);
3661                         write_unlock(&map->lock);
3662
3663                         /* once for us */
3664                         free_extent_map(em);
3665                 }
3666         }
3667         return try_release_extent_state(map, tree, page, mask);
3668 }
3669
3670 /*
3671  * helper function for fiemap, which doesn't want to see any holes.
3672  * This maps until we find something past 'last'
3673  */
3674 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3675                                                 u64 offset,
3676                                                 u64 last,
3677                                                 get_extent_t *get_extent)
3678 {
3679         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3680         struct extent_map *em;
3681         u64 len;
3682
3683         if (offset >= last)
3684                 return NULL;
3685
3686         while (1) {
3687                 len = last - offset;
3688                 if (len == 0)
3689                         break;
3690                 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3691                 em = get_extent(inode, NULL, 0, offset, len, 0);
3692                 if (IS_ERR_OR_NULL(em))
3693                         return em;
3694
3695                 /* if this isn't a hole return it */
3696                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3697                     em->block_start != EXTENT_MAP_HOLE) {
3698                         return em;
3699                 }
3700
3701                 /* this is a hole, advance to the next extent */
3702                 offset = extent_map_end(em);
3703                 free_extent_map(em);
3704                 if (offset >= last)
3705                         break;
3706         }
3707         return NULL;
3708 }
3709
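     /*
      * fiemap implementation.  We look up the last file extent item in the
      * btree so preallocated space beyond i_size is still reported, then walk
      * the extent maps with get_extent_skip_holes() and translate each one
      * into a fiemap extent, flagging the final entry with
      * FIEMAP_EXTENT_LAST.
      */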
3710 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3711                 __u64 start, __u64 len, get_extent_t *get_extent)
3712 {
3713         int ret = 0;
3714         u64 off = start;
3715         u64 max = start + len;
3716         u32 flags = 0;
3717         u32 found_type;
3718         u64 last;
3719         u64 last_for_get_extent = 0;
3720         u64 disko = 0;
3721         u64 isize = i_size_read(inode);
3722         struct btrfs_key found_key;
3723         struct extent_map *em = NULL;
3724         struct extent_state *cached_state = NULL;
3725         struct btrfs_path *path;
3726         struct btrfs_file_extent_item *item;
3727         int end = 0;
3728         u64 em_start = 0;
3729         u64 em_len = 0;
3730         u64 em_end = 0;
3731         unsigned long emflags;
3732
3733         if (len == 0)
3734                 return -EINVAL;
3735
3736         path = btrfs_alloc_path();
3737         if (!path)
3738                 return -ENOMEM;
3739         path->leave_spinning = 1;
3740
3741         start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3742         len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3743
3744         /*
3745          * lookup the last file extent.  We're not using i_size here
3746          * because there might be preallocation past i_size
3747          */
3748         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3749                                        path, btrfs_ino(inode), -1, 0);
3750         if (ret < 0) {
3751                 btrfs_free_path(path);
3752                 return ret;
3753         }
3754         WARN_ON(!ret);
3755         path->slots[0]--;
3756         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3757                               struct btrfs_file_extent_item);
3758         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3759         found_type = btrfs_key_type(&found_key);
3760
3761         /* No extents, but there might be delalloc bits */
3762         if (found_key.objectid != btrfs_ino(inode) ||
3763             found_type != BTRFS_EXTENT_DATA_KEY) {
3764                 /* have to trust i_size as the end */
3765                 last = (u64)-1;
3766                 last_for_get_extent = isize;
3767         } else {
3768                 /*
3769                  * remember the start of the last extent.  There are a
3770                  * bunch of different factors that go into the length of the
3771                  * extent, so it's much less complex to remember where it started
3772                  */
3773                 last = found_key.offset;
3774                 last_for_get_extent = last + 1;
3775         }
3776         btrfs_free_path(path);
3777
3778         /*
3779          * we might have some extents allocated but more delalloc past those
3780          * extents.  so, we trust isize unless the start of the last extent is
3781          * beyond isize
3782          */
3783         if (last < isize) {
3784                 last = (u64)-1;
3785                 last_for_get_extent = isize;
3786         }
3787
3788         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3789                          &cached_state);
3790
3791         em = get_extent_skip_holes(inode, start, last_for_get_extent,
3792                                    get_extent);
3793         if (!em)
3794                 goto out;
3795         if (IS_ERR(em)) {
3796                 ret = PTR_ERR(em);
3797                 goto out;
3798         }
3799
3800         while (!end) {
3801                 u64 offset_in_extent;
3802
3803                 /* break if the extent we found is outside the range */
3804                 if (em->start >= max || extent_map_end(em) < off)
3805                         break;
3806
3807                 /*
3808                  * get_extent may return an extent that starts before our
3809                  * requested range.  We have to make sure the ranges
3810                  * we return to fiemap always move forward and don't
3811                  * overlap, so adjust the offsets here
3812                  */
3813                 em_start = max(em->start, off);
3814
3815                 /*
3816                  * record the offset from the start of the extent
3817                  * for adjusting the disk offset below
3818                  */
3819                 offset_in_extent = em_start - em->start;
3820                 em_end = extent_map_end(em);
3821                 em_len = em_end - em_start;
3822                 emflags = em->flags;
3823                 disko = 0;
3824                 flags = 0;
3825
3826                 /*
3827                  * bump off for our next call to get_extent
3828                  */
3829                 off = extent_map_end(em);
3830                 if (off >= max)
3831                         end = 1;
3832
3833                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3834                         end = 1;
3835                         flags |= FIEMAP_EXTENT_LAST;
3836                 } else if (em->block_start == EXTENT_MAP_INLINE) {
3837                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
3838                                   FIEMAP_EXTENT_NOT_ALIGNED);
3839                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3840                         flags |= (FIEMAP_EXTENT_DELALLOC |
3841                                   FIEMAP_EXTENT_UNKNOWN);
3842                 } else {
3843                         disko = em->block_start + offset_in_extent;
3844                 }
3845                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3846                         flags |= FIEMAP_EXTENT_ENCODED;
3847
3848                 free_extent_map(em);
3849                 em = NULL;
3850                 if ((em_start >= last) || em_len == (u64)-1 ||
3851                    (last == (u64)-1 && isize <= em_end)) {
3852                         flags |= FIEMAP_EXTENT_LAST;
3853                         end = 1;
3854                 }
3855
3856                 /* now scan forward to see if this is really the last extent. */
3857                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3858                                            get_extent);
3859                 if (IS_ERR(em)) {
3860                         ret = PTR_ERR(em);
3861                         goto out;
3862                 }
3863                 if (!em) {
3864                         flags |= FIEMAP_EXTENT_LAST;
3865                         end = 1;
3866                 }
3867                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3868                                               em_len, flags);
3869                 if (ret)
3870                         goto out_free;
3871         }
3872 out_free:
3873         free_extent_map(em);
3874 out:
3875         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3876                              &cached_state, GFP_NOFS);
3877         return ret;
3878 }
3879
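     /*
      * extent_buffer_page() returns the i'th page backing an eb;
      * num_extent_pages() is how many PAGE_CACHE_SIZE pages the byte range
      * [start, start + len) covers.
      */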
3880 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3881                                               unsigned long i)
3882 {
3883         return eb->pages[i];
3884 }
3885
3886 inline unsigned long num_extent_pages(u64 start, u64 len)
3887 {
3888         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3889                 (start >> PAGE_CACHE_SHIFT);
3890 }
3891
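     /* free the page array (if it isn't the inline one) and the eb itself */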
3892 static void __free_extent_buffer(struct extent_buffer *eb)
3893 {
3894 #if LEAK_DEBUG
3895         unsigned long flags;
3896         spin_lock_irqsave(&leak_lock, flags);
3897         list_del(&eb->leak_list);
3898         spin_unlock_irqrestore(&leak_lock, flags);
3899 #endif
3900         if (eb->pages && eb->pages != eb->inline_pages)
3901                 kfree(eb->pages);
3902         kmem_cache_free(extent_buffer_cache, eb);
3903 }
3904
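     /*
      * Allocate and initialize a new extent buffer for the given range.
      * Buffers up to MAX_INLINE_EXTENT_BUFFER_SIZE use the inline page
      * array; bigger ones get a separately allocated array of page pointers.
      * The backing pages themselves are attached by the caller.
      */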
3905 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3906                                                    u64 start,
3907                                                    unsigned long len,
3908                                                    gfp_t mask)
3909 {
3910         struct extent_buffer *eb = NULL;
3911 #if LEAK_DEBUG
3912         unsigned long flags;
3913 #endif
3914
3915         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3916         if (eb == NULL)
3917                 return NULL;
3918         eb->start = start;
3919         eb->len = len;
3920         eb->tree = tree;
3921         rwlock_init(&eb->lock);
3922         atomic_set(&eb->write_locks, 0);
3923         atomic_set(&eb->read_locks, 0);
3924         atomic_set(&eb->blocking_readers, 0);
3925         atomic_set(&eb->blocking_writers, 0);
3926         atomic_set(&eb->spinning_readers, 0);
3927         atomic_set(&eb->spinning_writers, 0);
3928         eb->lock_nested = 0;
3929         init_waitqueue_head(&eb->write_lock_wq);
3930         init_waitqueue_head(&eb->read_lock_wq);
3931
3932 #if LEAK_DEBUG
3933         spin_lock_irqsave(&leak_lock, flags);
3934         list_add(&eb->leak_list, &buffers);
3935         spin_unlock_irqrestore(&leak_lock, flags);
3936 #endif
3937         spin_lock_init(&eb->refs_lock);
3938         atomic_set(&eb->refs, 1);
3939         atomic_set(&eb->io_pages, 0);
3940
3941         if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3942                 struct page **pages;
3943                 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3944                         PAGE_CACHE_SHIFT;
3945                 pages = kzalloc(num_pages * sizeof(struct page *), mask);
3946                 if (!pages) {
3947                         __free_extent_buffer(eb);
3948                         return NULL;
3949                 }
3950                 eb->pages = pages;
3951         } else {
3952                 eb->pages = eb->inline_pages;
3953         }
3954
3955         return eb;
3956 }
3957
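     /* true if the eb still has pages under IO or is dirty/in writeback */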
3958 static int extent_buffer_under_io(struct extent_buffer *eb)
3959 {
3960         return (atomic_read(&eb->io_pages) ||
3961                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3962                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3963 }
3964
3965 /*
3966  * Helper for releasing extent buffer page.
3967  */
3968 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3969                                                 unsigned long start_idx)
3970 {
3971         unsigned long index;
3972         unsigned long num_pages;
3973         struct page *page;
3974
3975         BUG_ON(extent_buffer_under_io(eb));
3976
3977         num_pages = num_extent_pages(eb->start, eb->len);
3978         index = start_idx + num_pages;
3979         if (start_idx >= index)
3980                 return;
3981
3982         do {
3983                 index--;
3984                 page = extent_buffer_page(eb, index);
3985                 if (page) {
3986                         spin_lock(&page->mapping->private_lock);
3987                         /*
3988                          * We do this since we'll remove the pages after we've
3989                          * removed the eb from the radix tree, so we could race
3990                          * and have this page now attached to the new eb.  So
3991                          * only clear page_private if it's still connected to
3992                          * this eb.
3993                          */
3994                         if (PagePrivate(page) &&
3995                             page->private == (unsigned long)eb) {
3996                                 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3997                                 BUG_ON(PageDirty(page));
3998                                 BUG_ON(PageWriteback(page));
3999                                 /*
4000                                  * We need to make sure we haven't been attached
4001                                  * to a new eb.
4002                                  */
4003                                 ClearPagePrivate(page);
4004                                 set_page_private(page, 0);
4005                                 /* One for the page private */
4006                                 page_cache_release(page);
4007                         }
4008                         spin_unlock(&page->mapping->private_lock);
4009
4010                         /* One for when we allocated the page */
4011                         page_cache_release(page);
4012                 }
4013         } while (index != start_idx);
4014 }
4015
4016 /*
4017  * Helper for releasing the extent buffer.
4018  */
4019 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4020 {
4021         btrfs_release_extent_buffer_page(eb, 0);
4022         __free_extent_buffer(eb);
4023 }
4024
4025 static void check_buffer_tree_ref(struct extent_buffer *eb)
4026 {
4027         /* the ref bit is tricky.  We have to make sure it is set
4028          * if we have the buffer dirty.   Otherwise the
4029          * code to free a buffer can end up dropping a dirty
4030          * page
4031          *
4032          * Once the ref bit is set, it won't go away while the
4033          * buffer is dirty or in writeback, and it also won't
4034          * go away while we have the reference count on the
4035          * eb bumped.
4036          *
4037          * We can't just set the ref bit without bumping the
4038          * ref on the eb because free_extent_buffer might
4039          * see the ref bit and try to clear it.  If this happens
4040          * free_extent_buffer might end up dropping our original
4041          * ref by mistake and freeing the page before we are able
4042          * to add one more ref.
4043          *
4044          * So bump the ref count first, then set the bit.  If someone
4045          * beat us to it, drop the ref we added.
4046          */
4047         if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4048                 atomic_inc(&eb->refs);
4049                 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4050                         atomic_dec(&eb->refs);
4051         }
4052 }
4053
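     /*
      * Make sure the eb holds its tree reference and tell the VM that all of
      * its pages were recently used so it prefers to keep them in memory.
      */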
4054 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4055 {
4056         unsigned long num_pages, i;
4057
4058         check_buffer_tree_ref(eb);
4059
4060         num_pages = num_extent_pages(eb->start, eb->len);
4061         for (i = 0; i < num_pages; i++) {
4062                 struct page *p = extent_buffer_page(eb, i);
4063                 mark_page_accessed(p);
4064         }
4065 }
4066
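     /*
      * Find or create the extent buffer covering [start, start + len).  We
      * try the radix tree first; on a miss we allocate a new eb, find or
      * create the backing pages (reusing an eb another thread may already
      * have attached to them) and insert the new eb into the radix tree,
      * falling back to the existing buffer if we lose that race.
      */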
4067 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4068                                           u64 start, unsigned long len)
4069 {
4070         unsigned long num_pages = num_extent_pages(start, len);
4071         unsigned long i;
4072         unsigned long index = start >> PAGE_CACHE_SHIFT;
4073         struct extent_buffer *eb;
4074         struct extent_buffer *exists = NULL;
4075         struct page *p;
4076         struct address_space *mapping = tree->mapping;
4077         int uptodate = 1;
4078         int ret;
4079
4080         rcu_read_lock();
4081         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4082         if (eb && atomic_inc_not_zero(&eb->refs)) {
4083                 rcu_read_unlock();
4084                 mark_extent_buffer_accessed(eb);
4085                 return eb;
4086         }
4087         rcu_read_unlock();
4088
4089         eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4090         if (!eb)
4091                 return NULL;
4092
4093         for (i = 0; i < num_pages; i++, index++) {
4094                 p = find_or_create_page(mapping, index, GFP_NOFS);
4095                 if (!p) {
4096                         WARN_ON(1);
4097                         goto free_eb;
4098                 }
4099
4100                 spin_lock(&mapping->private_lock);
4101                 if (PagePrivate(p)) {
4102                         /*
4103                          * We could have already allocated an eb for this page
4104                          * and attached one so lets see if we can get a ref on
4105                          * and attached one so let's see if we can get a ref on
4106                          * we can just return that one, else we know we can just
4107                          * overwrite page->private.
4108                          */
4109                         exists = (struct extent_buffer *)p->private;
4110                         if (atomic_inc_not_zero(&exists->refs)) {
4111                                 spin_unlock(&mapping->private_lock);
4112                                 unlock_page(p);
4113                                 page_cache_release(p);
4114                                 mark_extent_buffer_accessed(exists);
4115                                 goto free_eb;
4116                         }
4117
4118                         /*
4119                          * Do this so attach doesn't complain and we need to
4120                          * drop the ref the old guy had.
4121                          */
4122                         ClearPagePrivate(p);
4123                         WARN_ON(PageDirty(p));
4124                         page_cache_release(p);
4125                 }
4126                 attach_extent_buffer_page(eb, p);
4127                 spin_unlock(&mapping->private_lock);
4128                 WARN_ON(PageDirty(p));
4129                 mark_page_accessed(p);
4130                 eb->pages[i] = p;
4131                 if (!PageUptodate(p))
4132                         uptodate = 0;
4133
4134                 /*
4135                  * see below about how we avoid a nasty race with release page
4136                  * and why we unlock later
4137                  */
4138         }
4139         if (uptodate)
4140                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4141 again:
4142         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4143         if (ret)
4144                 goto free_eb;
4145
4146         spin_lock(&tree->buffer_lock);
4147         ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4148         if (ret == -EEXIST) {
4149                 exists = radix_tree_lookup(&tree->buffer,
4150                                                 start >> PAGE_CACHE_SHIFT);
4151                 if (!atomic_inc_not_zero(&exists->refs)) {
4152                         spin_unlock(&tree->buffer_lock);
4153                         radix_tree_preload_end();
4154                         exists = NULL;
4155                         goto again;
4156                 }
4157                 spin_unlock(&tree->buffer_lock);
4158                 radix_tree_preload_end();
4159                 mark_extent_buffer_accessed(exists);
4160                 goto free_eb;
4161         }
4162         /* add one reference for the tree */
4163         spin_lock(&eb->refs_lock);
4164         check_buffer_tree_ref(eb);
4165         spin_unlock(&eb->refs_lock);
4166         spin_unlock(&tree->buffer_lock);
4167         radix_tree_preload_end();
4168
4169         /*
4170          * there is a race where release page may have
4171          * tried to find this extent buffer in the radix
4172          * but failed.  It will tell the VM it is safe to
4173          * reclaim the page, and it will clear the page private bit.
4174          * We must make sure to set the page private bit properly
4175          * after the extent buffer is in the radix tree so
4176          * it doesn't get lost
4177          */
4178         SetPageChecked(eb->pages[0]);
4179         for (i = 1; i < num_pages; i++) {
4180                 p = extent_buffer_page(eb, i);
4181                 ClearPageChecked(p);
4182                 unlock_page(p);
4183         }
4184         unlock_page(eb->pages[0]);
4185         return eb;
4186
4187 free_eb:
4188         for (i = 0; i < num_pages; i++) {
4189                 if (eb->pages[i])
4190                         unlock_page(eb->pages[i]);
4191         }
4192
4193         WARN_ON(!atomic_dec_and_test(&eb->refs));
4194         btrfs_release_extent_buffer(eb);
4195         return exists;
4196 }
4197
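     /*
      * Look up an existing extent buffer in the radix tree and take a
      * reference on it; returns NULL if it isn't there or is already being
      * freed.
      */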
4198 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4199                                          u64 start, unsigned long len)
4200 {
4201         struct extent_buffer *eb;
4202
4203         rcu_read_lock();
4204         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4205         if (eb && atomic_inc_not_zero(&eb->refs)) {
4206                 rcu_read_unlock();
4207                 mark_extent_buffer_accessed(eb);
4208                 return eb;
4209         }
4210         rcu_read_unlock();
4211
4212         return NULL;
4213 }
4214
4215 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4216 {
4217         struct extent_buffer *eb =
4218                         container_of(head, struct extent_buffer, rcu_head);
4219
4220         __free_extent_buffer(eb);
4221 }
4222
4223 /* Expects to have eb->refs_lock already held */
4224 static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4225 {
4226         WARN_ON(atomic_read(&eb->refs) == 0);
4227         if (atomic_dec_and_test(&eb->refs)) {
4228                 struct extent_io_tree *tree = eb->tree;
4229
4230                 spin_unlock(&eb->refs_lock);
4231
4232                 spin_lock(&tree->buffer_lock);
4233                 radix_tree_delete(&tree->buffer,
4234                                   eb->start >> PAGE_CACHE_SHIFT);
4235                 spin_unlock(&tree->buffer_lock);
4236
4237                 /* Should be safe to release our pages at this point */
4238                 btrfs_release_extent_buffer_page(eb, 0);
4239
4240                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4241                 return;
4242         }
4243         spin_unlock(&eb->refs_lock);
4244 }
4245
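/*
 * free_extent_buffer - drop the caller's reference on an extent buffer.
 *
 * If the buffer is stale, not under IO and only the tree reference remains
 * besides ours, the tree reference is dropped as well so the buffer can
 * actually be freed by release_extent_buffer().
 */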
4246 void free_extent_buffer(struct extent_buffer *eb)
4247 {
4248         if (!eb)
4249                 return;
4250
4251         spin_lock(&eb->refs_lock);
4252         if (atomic_read(&eb->refs) == 2 &&
4253             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4254             !extent_buffer_under_io(eb) &&
4255             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4256                 atomic_dec(&eb->refs);
4257
4258         /*
4259          * I know this is terrible, but it's temporary until we stop tracking
4260          * the uptodate bits and such for the extent buffers.
4261          */
4262         release_extent_buffer(eb, GFP_ATOMIC);
4263 }
4264
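/*
 * free_extent_buffer_stale - mark an eb stale and drop the caller's ref.
 *
 * Like free_extent_buffer(), but sets EXTENT_BUFFER_STALE first so the
 * tree reference on an idle buffer can be dropped right away.
 */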
4265 void free_extent_buffer_stale(struct extent_buffer *eb)
4266 {
4267         if (!eb)
4268                 return;
4269
4270         spin_lock(&eb->refs_lock);
4271         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4272
4273         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4274             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4275                 atomic_dec(&eb->refs);
4276         release_extent_buffer(eb, GFP_NOFS);
4277 }
4278
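/*
 * clear_extent_buffer_dirty - clear the dirty state of an eb's pages.
 *
 * Clears the per-page dirty bits and the matching PAGECACHE_TAG_DIRTY
 * radix tree tags so writeback no longer considers these pages dirty.
 */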
4279 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4280 {
4281         unsigned long i;
4282         unsigned long num_pages;
4283         struct page *page;
4284
4285         num_pages = num_extent_pages(eb->start, eb->len);
4286
4287         for (i = 0; i < num_pages; i++) {
4288                 page = extent_buffer_page(eb, i);
4289                 if (!PageDirty(page))
4290                         continue;
4291
4292                 lock_page(page);
4293                 WARN_ON(!PagePrivate(page));
4294
4295                 clear_page_dirty_for_io(page);
4296                 spin_lock_irq(&page->mapping->tree_lock);
4297                 if (!PageDirty(page)) {
4298                         radix_tree_tag_clear(&page->mapping->page_tree,
4299                                                 page_index(page),
4300                                                 PAGECACHE_TAG_DIRTY);
4301                 }
4302                 spin_unlock_irq(&page->mapping->tree_lock);
4303                 ClearPageError(page);
4304                 unlock_page(page);
4305         }
4306         WARN_ON(atomic_read(&eb->refs) == 0);
4307 }
4308
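/*
 * set_extent_buffer_dirty - mark an extent buffer and its pages dirty.
 *
 * Takes a tree reference if one isn't held yet, sets EXTENT_BUFFER_DIRTY
 * and dirties every backing page.  Returns non-zero if the buffer was
 * already dirty.
 */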
4309 int set_extent_buffer_dirty(struct extent_buffer *eb)
4310 {
4311         unsigned long i;
4312         unsigned long num_pages;
4313         int was_dirty = 0;
4314
4315         check_buffer_tree_ref(eb);
4316
4317         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4318
4319         num_pages = num_extent_pages(eb->start, eb->len);
4320         WARN_ON(atomic_read(&eb->refs) == 0);
4321         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4322
4323         for (i = 0; i < num_pages; i++)
4324                 set_page_dirty(extent_buffer_page(eb, i));
4325         return was_dirty;
4326 }
4327
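/* Return 1 if [start, start + len) is not made up of whole, aligned pages. */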
4328 static int range_straddles_pages(u64 start, u64 len)
4329 {
4330         if (len < PAGE_CACHE_SIZE)
4331                 return 1;
4332         if (start & (PAGE_CACHE_SIZE - 1))
4333                 return 1;
4334         if ((start + len) & (PAGE_CACHE_SIZE - 1))
4335                 return 1;
4336         return 0;
4337 }
4338
4339 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4340 {
4341         unsigned long i;
4342         struct page *page;
4343         unsigned long num_pages;
4344
4345         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4346         num_pages = num_extent_pages(eb->start, eb->len);
4347         for (i = 0; i < num_pages; i++) {
4348                 page = extent_buffer_page(eb, i);
4349                 if (page)
4350                         ClearPageUptodate(page);
4351         }
4352         return 0;
4353 }
4354
4355 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4356 {
4357         unsigned long i;
4358         struct page *page;
4359         unsigned long num_pages;
4360
4361         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4362         num_pages = num_extent_pages(eb->start, eb->len);
4363         for (i = 0; i < num_pages; i++) {
4364                 page = extent_buffer_page(eb, i);
4365                 SetPageUptodate(page);
4366         }
4367         return 0;
4368 }
4369
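/*
 * extent_range_uptodate - check whether [start, end] is fully up to date.
 *
 * For ranges that don't line up with whole pages the EXTENT_UPTODATE bits
 * in the io tree are consulted first; failing that, every cached page in
 * the range must be PageUptodate.  Returns 0 only when a page in the range
 * is present but not up to date.
 */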
4370 int extent_range_uptodate(struct extent_io_tree *tree,
4371                           u64 start, u64 end)
4372 {
4373         struct page *page;
4374         int ret;
4375         int pg_uptodate = 1;
4376         int uptodate;
4377         unsigned long index;
4378
4379         if (range_straddles_pages(start, end - start + 1)) {
4380                 ret = test_range_bit(tree, start, end,
4381                                      EXTENT_UPTODATE, 1, NULL);
4382                 if (ret)
4383                         return 1;
4384         }
4385         while (start <= end) {
4386                 index = start >> PAGE_CACHE_SHIFT;
4387                 page = find_get_page(tree->mapping, index);
4388                 if (!page)
4389                         return 1;
4390                 uptodate = PageUptodate(page);
4391                 page_cache_release(page);
4392                 if (!uptodate) {
4393                         pg_uptodate = 0;
4394                         break;
4395                 }
4396                 start += PAGE_CACHE_SIZE;
4397         }
4398         return pg_uptodate;
4399 }
4400
4401 int extent_buffer_uptodate(struct extent_buffer *eb)
4402 {
4403         return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4404 }
4405
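/*
 * read_extent_buffer_pages - read the backing pages of an extent buffer.
 *
 * Locks the pages from @start onward and submits reads for any that are
 * not up to date.  With WAIT_NONE the function gives up as soon as a page
 * cannot be locked without blocking; with WAIT_COMPLETE it waits for the
 * IO and returns -EIO if a page still isn't up to date afterwards.
 */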
4406 int read_extent_buffer_pages(struct extent_io_tree *tree,
4407                              struct extent_buffer *eb, u64 start, int wait,
4408                              get_extent_t *get_extent, int mirror_num)
4409 {
4410         unsigned long i;
4411         unsigned long start_i;
4412         struct page *page;
4413         int err;
4414         int ret = 0;
4415         int locked_pages = 0;
4416         int all_uptodate = 1;
4417         unsigned long num_pages;
4418         unsigned long num_reads = 0;
4419         struct bio *bio = NULL;
4420         unsigned long bio_flags = 0;
4421
4422         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4423                 return 0;
4424
4425         if (start) {
4426                 WARN_ON(start < eb->start);
4427                 start_i = (start >> PAGE_CACHE_SHIFT) -
4428                         (eb->start >> PAGE_CACHE_SHIFT);
4429         } else {
4430                 start_i = 0;
4431         }
4432
4433         num_pages = num_extent_pages(eb->start, eb->len);
4434         for (i = start_i; i < num_pages; i++) {
4435                 page = extent_buffer_page(eb, i);
4436                 if (wait == WAIT_NONE) {
4437                         if (!trylock_page(page))
4438                                 goto unlock_exit;
4439                 } else {
4440                         lock_page(page);
4441                 }
4442                 locked_pages++;
4443                 if (!PageUptodate(page)) {
4444                         num_reads++;
4445                         all_uptodate = 0;
4446                 }
4447         }
4448         if (all_uptodate) {
4449                 if (start_i == 0)
4450                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4451                 goto unlock_exit;
4452         }
4453
4454         clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4455         eb->read_mirror = 0;
4456         atomic_set(&eb->io_pages, num_reads);
4457         for (i = start_i; i < num_pages; i++) {
4458                 page = extent_buffer_page(eb, i);
4459                 if (!PageUptodate(page)) {
4460                         ClearPageError(page);
4461                         err = __extent_read_full_page(tree, page,
4462                                                       get_extent, &bio,
4463                                                       mirror_num, &bio_flags);
4464                         if (err)
4465                                 ret = err;
4466                 } else {
4467                         unlock_page(page);
4468                 }
4469         }
4470
4471         if (bio) {
4472                 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4473                 if (err)
4474                         return err;
4475         }
4476
4477         if (ret || wait != WAIT_COMPLETE)
4478                 return ret;
4479
4480         for (i = start_i; i < num_pages; i++) {
4481                 page = extent_buffer_page(eb, i);
4482                 wait_on_page_locked(page);
4483                 if (!PageUptodate(page))
4484                         ret = -EIO;
4485         }
4486
4487         return ret;
4488
4489 unlock_exit:
4490         i = start_i;
4491         while (locked_pages > 0) {
4492                 page = extent_buffer_page(eb, i);
4493                 i++;
4494                 unlock_page(page);
4495                 locked_pages--;
4496         }
4497         return ret;
4498 }
4499
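/*
 * read_extent_buffer - copy @len bytes out of an extent buffer into @dstv.
 *
 * Walks the buffer's pages starting at byte offset @start and memcpy()s
 * the data out, handling ranges that cross page boundaries.
 */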
4500 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4501                         unsigned long start,
4502                         unsigned long len)
4503 {
4504         size_t cur;
4505         size_t offset;
4506         struct page *page;
4507         char *kaddr;
4508         char *dst = (char *)dstv;
4509         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4510         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4511
4512         WARN_ON(start > eb->len);
4513         WARN_ON(start + len > eb->start + eb->len);
4514
4515         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4516
4517         while (len > 0) {
4518                 page = extent_buffer_page(eb, i);
4519
4520                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4521                 kaddr = page_address(page);
4522                 memcpy(dst, kaddr + offset, cur);
4523
4524                 dst += cur;
4525                 len -= cur;
4526                 offset = 0;
4527                 i++;
4528         }
4529 }
4530
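/*
 * map_private_extent_buffer - map a chunk of an eb that fits in one page.
 *
 * Returns -EINVAL if [start, start + min_len) crosses a page boundary or
 * runs past the end of the buffer.  On success *map points at the data and
 * *map_start/*map_len describe which part of the buffer the mapping covers.
 */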
4531 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4532                                unsigned long min_len, char **map,
4533                                unsigned long *map_start,
4534                                unsigned long *map_len)
4535 {
4536         size_t offset = start & (PAGE_CACHE_SIZE - 1);
4537         char *kaddr;
4538         struct page *p;
4539         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4540         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4541         unsigned long end_i = (start_offset + start + min_len - 1) >>
4542                 PAGE_CACHE_SHIFT;
4543
4544         if (i != end_i)
4545                 return -EINVAL;
4546
4547         if (i == 0) {
4548                 offset = start_offset;
4549                 *map_start = 0;
4550         } else {
4551                 offset = 0;
4552                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4553         }
4554
4555         if (start + min_len > eb->len) {
4556                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4557                        "wanted %lu %lu\n", (unsigned long long)eb->start,
4558                        eb->len, start, min_len);
4559                 WARN_ON(1);
4560                 return -EINVAL;
4561         }
4562
4563         p = extent_buffer_page(eb, i);
4564         kaddr = page_address(p);
4565         *map = kaddr + offset;
4566         *map_len = PAGE_CACHE_SIZE - offset;
4567         return 0;
4568 }
4569
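/*
 * memcmp_extent_buffer - compare @len bytes of an eb against @ptrv.
 *
 * Behaves like memcmp(): returns 0 when the ranges match, otherwise the
 * memcmp() result of the first page-sized chunk that differs.
 */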
4570 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4571                           unsigned long start,
4572                           unsigned long len)
4573 {
4574         size_t cur;
4575         size_t offset;
4576         struct page *page;
4577         char *kaddr;
4578         char *ptr = (char *)ptrv;
4579         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4580         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4581         int ret = 0;
4582
4583         WARN_ON(start > eb->len);
4584         WARN_ON(start + len > eb->start + eb->len);
4585
4586         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4587
4588         while (len > 0) {
4589                 page = extent_buffer_page(eb, i);
4590
4591                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4592
4593                 kaddr = page_address(page);
4594                 ret = memcmp(ptr, kaddr + offset, cur);
4595                 if (ret)
4596                         break;
4597
4598                 ptr += cur;
4599                 len -= cur;
4600                 offset = 0;
4601                 i++;
4602         }
4603         return ret;
4604 }
4605
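/*
 * write_extent_buffer - copy @len bytes from @srcv into an extent buffer.
 *
 * The mirror of read_extent_buffer(): walks the backing pages starting at
 * byte offset @start and memcpy()s the caller's data in.
 */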
4606 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4607                          unsigned long start, unsigned long len)
4608 {
4609         size_t cur;
4610         size_t offset;
4611         struct page *page;
4612         char *kaddr;
4613         char *src = (char *)srcv;
4614         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4615         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4616
4617         WARN_ON(start > eb->len);
4618         WARN_ON(start + len > eb->start + eb->len);
4619
4620         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4621
4622         while (len > 0) {
4623                 page = extent_buffer_page(eb, i);
4624                 WARN_ON(!PageUptodate(page));
4625
4626                 cur = min(len, PAGE_CACHE_SIZE - offset);
4627                 kaddr = page_address(page);
4628                 memcpy(kaddr + offset, src, cur);
4629
4630                 src += cur;
4631                 len -= cur;
4632                 offset = 0;
4633                 i++;
4634         }
4635 }
4636
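/* Fill @len bytes of the extent buffer, starting at @start, with @c. */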
4637 void memset_extent_buffer(struct extent_buffer *eb, char c,
4638                           unsigned long start, unsigned long len)
4639 {
4640         size_t cur;
4641         size_t offset;
4642         struct page *page;
4643         char *kaddr;
4644         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4645         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4646
4647         WARN_ON(start > eb->len);
4648         WARN_ON(start + len > eb->start + eb->len);
4649
4650         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4651
4652         while (len > 0) {
4653                 page = extent_buffer_page(eb, i);
4654                 WARN_ON(!PageUptodate(page));
4655
4656                 cur = min(len, PAGE_CACHE_SIZE - offset);
4657                 kaddr = page_address(page);
4658                 memset(kaddr + offset, c, cur);
4659
4660                 len -= cur;
4661                 offset = 0;
4662                 i++;
4663         }
4664 }
4665
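/*
 * copy_extent_buffer - copy a range of bytes from @src into @dst.
 *
 * @dst_offset and @src_offset are byte offsets within their respective
 * buffers; the copy proceeds one destination page at a time via
 * read_extent_buffer().
 */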
4666 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4667                         unsigned long dst_offset, unsigned long src_offset,
4668                         unsigned long len)
4669 {
4670         u64 dst_len = dst->len;
4671         size_t cur;
4672         size_t offset;
4673         struct page *page;
4674         char *kaddr;
4675         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4676         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4677
4678         WARN_ON(src->len != dst_len);
4679
4680         offset = (start_offset + dst_offset) &
4681                 ((unsigned long)PAGE_CACHE_SIZE - 1);
4682
4683         while (len > 0) {
4684                 page = extent_buffer_page(dst, i);
4685                 WARN_ON(!PageUptodate(page));
4686
4687                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4688
4689                 kaddr = page_address(page);
4690                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4691
4692                 src_offset += cur;
4693                 len -= cur;
4694                 offset = 0;
4695                 i++;
4696         }
4697 }
4698
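/*
 * move_pages - memmove-style copy between (possibly identical) pages.
 *
 * A plain memmove() is used when source and destination are the same page;
 * otherwise the bytes are copied one at a time, back to front.
 */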
4699 static void move_pages(struct page *dst_page, struct page *src_page,
4700                        unsigned long dst_off, unsigned long src_off,
4701                        unsigned long len)
4702 {
4703         char *dst_kaddr = page_address(dst_page);
4704         if (dst_page == src_page) {
4705                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4706         } else {
4707                 char *src_kaddr = page_address(src_page);
4708                 char *p = dst_kaddr + dst_off + len;
4709                 char *s = src_kaddr + src_off + len;
4710
4711                 while (len--)
4712                         *--p = *--s;
4713         }
4714 }
4715
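/* Return true when the ranges [src, src + len) and [dst, dst + len) overlap. */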
4716 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4717 {
4718         unsigned long distance = (src > dst) ? src - dst : dst - src;
4719         return distance < len;
4720 }
4721
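/*
 * copy_pages - copy a range of bytes between two pages.
 *
 * memmove() is used only when source and destination are the same page and
 * the ranges overlap; otherwise a plain memcpy() is sufficient.
 */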
4722 static void copy_pages(struct page *dst_page, struct page *src_page,
4723                        unsigned long dst_off, unsigned long src_off,
4724                        unsigned long len)
4725 {
4726         char *dst_kaddr = page_address(dst_page);
4727         char *src_kaddr;
4728         int must_memmove = 0;
4729
4730         if (dst_page != src_page) {
4731                 src_kaddr = page_address(src_page);
4732         } else {
4733                 src_kaddr = dst_kaddr;
4734                 if (areas_overlap(src_off, dst_off, len))
4735                         must_memmove = 1;
4736         }
4737
4738         if (must_memmove)
4739                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4740         else
4741                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4742 }
4743
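/*
 * memcpy_extent_buffer - copy a range of bytes within a single eb.
 *
 * Copies @len bytes from @src_offset to @dst_offset inside @dst, splitting
 * the copy at page boundaries.  Both ranges must lie inside the buffer or
 * we BUG.
 */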
4744 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4745                            unsigned long src_offset, unsigned long len)
4746 {
4747         size_t cur;
4748         size_t dst_off_in_page;
4749         size_t src_off_in_page;
4750         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4751         unsigned long dst_i;
4752         unsigned long src_i;
4753
4754         if (src_offset + len > dst->len) {
4755                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4756                        "len %lu dst len %lu\n", src_offset, len, dst->len);
4757                 BUG_ON(1);
4758         }
4759         if (dst_offset + len > dst->len) {
4760                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4761                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
4762                 BUG_ON(1);
4763         }
4764
4765         while (len > 0) {
4766                 dst_off_in_page = (start_offset + dst_offset) &
4767                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4768                 src_off_in_page = (start_offset + src_offset) &
4769                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4770
4771                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4772                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4773
4774                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4775                                                src_off_in_page));
4776                 cur = min_t(unsigned long, cur,
4777                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4778
4779                 copy_pages(extent_buffer_page(dst, dst_i),
4780                            extent_buffer_page(dst, src_i),
4781                            dst_off_in_page, src_off_in_page, cur);
4782
4783                 src_offset += cur;
4784                 dst_offset += cur;
4785                 len -= cur;
4786         }
4787 }
4788
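/*
 * memmove_extent_buffer - overlap-safe copy within a single extent buffer.
 *
 * Falls back to memcpy_extent_buffer() when the destination is below the
 * source; otherwise the copy is done back to front so overlapping ranges
 * are handled correctly.
 */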
4789 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4790                            unsigned long src_offset, unsigned long len)
4791 {
4792         size_t cur;
4793         size_t dst_off_in_page;
4794         size_t src_off_in_page;
4795         unsigned long dst_end = dst_offset + len - 1;
4796         unsigned long src_end = src_offset + len - 1;
4797         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4798         unsigned long dst_i;
4799         unsigned long src_i;
4800
4801         if (src_offset + len > dst->len) {
4802                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4803                        "len %lu dst len %lu\n", src_offset, len, dst->len);
4804                 BUG_ON(1);
4805         }
4806         if (dst_offset + len > dst->len) {
4807                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4808                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
4809                 BUG_ON(1);
4810         }
4811         if (dst_offset < src_offset) {
4812                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4813                 return;
4814         }
4815         while (len > 0) {
4816                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4817                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4818
4819                 dst_off_in_page = (start_offset + dst_end) &
4820                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4821                 src_off_in_page = (start_offset + src_end) &
4822                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4823
4824                 cur = min_t(unsigned long, len, src_off_in_page + 1);
4825                 cur = min(cur, dst_off_in_page + 1);
4826                 move_pages(extent_buffer_page(dst, dst_i),
4827                            extent_buffer_page(dst, src_i),
4828                            dst_off_in_page - cur + 1,
4829                            src_off_in_page - cur + 1, cur);
4830
4831                 dst_end -= cur;
4832                 src_end -= cur;
4833                 len -= cur;
4834         }
4835 }
4836
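/*
 * try_release_extent_buffer - try to free the eb attached to a page.
 *
 * Returns 1 if the page has no extent buffer attached or if the buffer was
 * only held by the tree reference and could be released; returns 0 when
 * the buffer is still referenced or under IO.
 */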
4837 int try_release_extent_buffer(struct page *page, gfp_t mask)
4838 {
4839         struct extent_buffer *eb;
4840
4841         /*
4842          * We need to make sure nobody is attaching this page to an eb right
4843          * now.
4844          */
4845         spin_lock(&page->mapping->private_lock);
4846         if (!PagePrivate(page)) {
4847                 spin_unlock(&page->mapping->private_lock);
4848                 return 1;
4849         }
4850
4851         eb = (struct extent_buffer *)page->private;
4852         BUG_ON(!eb);
4853
4854         /*
4855          * This is a little awful but should be ok; we need to make sure that
4856          * the eb doesn't disappear out from under us while we're looking at
4857          * this page.
4858          */
4859         spin_lock(&eb->refs_lock);
4860         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4861                 spin_unlock(&eb->refs_lock);
4862                 spin_unlock(&page->mapping->private_lock);
4863                 return 0;
4864         }
4865         spin_unlock(&page->mapping->private_lock);
4866
4867         if ((mask & GFP_NOFS) == GFP_NOFS)
4868                 mask = GFP_NOFS;
4869
4870         /*
4871          * If tree ref isn't set then we know the ref on this eb is a real ref,
4872          * so just return; this page will likely be freed soon anyway.
4873          */
4874         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4875                 spin_unlock(&eb->refs_lock);
4876                 return 0;
4877         }
4878         release_extent_buffer(eb, mask);
4879
4880         return 1;
4881 }