Btrfs: clean up for insert_state()
fs/btrfs/extent_io.c (pandora-kernel.git)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20
21 static struct kmem_cache *extent_state_cache;
22 static struct kmem_cache *extent_buffer_cache;
23
24 static LIST_HEAD(buffers);
25 static LIST_HEAD(states);
26
27 #define LEAK_DEBUG 0
28 #if LEAK_DEBUG
29 static DEFINE_SPINLOCK(leak_lock);
30 #endif
31
32 #define BUFFER_LRU_MAX 64
33
34 struct tree_entry {
35         u64 start;
36         u64 end;
37         struct rb_node rb_node;
38 };
39
40 struct extent_page_data {
41         struct bio *bio;
42         struct extent_io_tree *tree;
43         get_extent_t *get_extent;
44
45         /* tells writepage not to lock the state bits for this range
46          * it still does the unlocking
47          */
48         unsigned int extent_locked:1;
49
50         /* tells the submit_bio code to use a WRITE_SYNC */
51         unsigned int sync_io:1;
52 };
53
54 int __init extent_io_init(void)
55 {
56         extent_state_cache = kmem_cache_create("extent_state",
57                         sizeof(struct extent_state), 0,
58                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
59         if (!extent_state_cache)
60                 return -ENOMEM;
61
62         extent_buffer_cache = kmem_cache_create("extent_buffers",
63                         sizeof(struct extent_buffer), 0,
64                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
65         if (!extent_buffer_cache)
66                 goto free_state_cache;
67         return 0;
68
69 free_state_cache:
70         kmem_cache_destroy(extent_state_cache);
71         return -ENOMEM;
72 }
73
74 void extent_io_exit(void)
75 {
76         struct extent_state *state;
77         struct extent_buffer *eb;
78
79         while (!list_empty(&states)) {
80                 state = list_entry(states.next, struct extent_state, leak_list);
81                 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
82                        "state %lu in tree %p refs %d\n",
83                        (unsigned long long)state->start,
84                        (unsigned long long)state->end,
85                        state->state, state->tree, atomic_read(&state->refs));
86                 list_del(&state->leak_list);
87                 kmem_cache_free(extent_state_cache, state);
88
89         }
90
91         while (!list_empty(&buffers)) {
92                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
93                 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
94                        "refs %d\n", (unsigned long long)eb->start,
95                        eb->len, atomic_read(&eb->refs));
96                 list_del(&eb->leak_list);
97                 kmem_cache_free(extent_buffer_cache, eb);
98         }
99         if (extent_state_cache)
100                 kmem_cache_destroy(extent_state_cache);
101         if (extent_buffer_cache)
102                 kmem_cache_destroy(extent_buffer_cache);
103 }
104
105 void extent_io_tree_init(struct extent_io_tree *tree,
106                          struct address_space *mapping)
107 {
108         tree->state = RB_ROOT;
109         INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
110         tree->ops = NULL;
111         tree->dirty_bytes = 0;
112         spin_lock_init(&tree->lock);
113         spin_lock_init(&tree->buffer_lock);
114         tree->mapping = mapping;
115 }
116
117 static struct extent_state *alloc_extent_state(gfp_t mask)
118 {
119         struct extent_state *state;
120 #if LEAK_DEBUG
121         unsigned long flags;
122 #endif
123
124         state = kmem_cache_alloc(extent_state_cache, mask);
125         if (!state)
126                 return state;
127         state->state = 0;
128         state->private = 0;
129         state->tree = NULL;
130 #if LEAK_DEBUG
131         spin_lock_irqsave(&leak_lock, flags);
132         list_add(&state->leak_list, &states);
133         spin_unlock_irqrestore(&leak_lock, flags);
134 #endif
135         atomic_set(&state->refs, 1);
136         init_waitqueue_head(&state->wq);
137         return state;
138 }
139
140 void free_extent_state(struct extent_state *state)
141 {
142         if (!state)
143                 return;
144         if (atomic_dec_and_test(&state->refs)) {
145 #if LEAK_DEBUG
146                 unsigned long flags;
147 #endif
148                 WARN_ON(state->tree);
149 #if LEAK_DEBUG
150                 spin_lock_irqsave(&leak_lock, flags);
151                 list_del(&state->leak_list);
152                 spin_unlock_irqrestore(&leak_lock, flags);
153 #endif
154                 kmem_cache_free(extent_state_cache, state);
155         }
156 }
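
/*
 * Illustrative sketch (not part of the original file): the reference
 * counting contract of an extent_state outside the tree.
 * alloc_extent_state() hands back one reference; each extra user takes
 * another with atomic_inc(), and every reference is dropped through
 * free_extent_state(), which frees the struct once the count hits zero.
 */
static void __maybe_unused example_state_refcount(void)
{
        struct extent_state *state = alloc_extent_state(GFP_NOFS);

        if (!state)
                return;
        atomic_inc(&state->refs);      /* a second user of this state */
        free_extent_state(state);      /* refs 2 -> 1, struct kept */
        free_extent_state(state);      /* refs 1 -> 0, kmem_cache_free() */
}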
157
158 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159                                    struct rb_node *node)
160 {
161         struct rb_node **p = &root->rb_node;
162         struct rb_node *parent = NULL;
163         struct tree_entry *entry;
164
165         while (*p) {
166                 parent = *p;
167                 entry = rb_entry(parent, struct tree_entry, rb_node);
168
169                 if (offset < entry->start)
170                         p = &(*p)->rb_left;
171                 else if (offset > entry->end)
172                         p = &(*p)->rb_right;
173                 else
174                         return parent;
175         }
176
177         entry = rb_entry(node, struct tree_entry, rb_node);
178         rb_link_node(node, parent, p);
179         rb_insert_color(node, root);
180         return NULL;
181 }
182
183 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
184                                      struct rb_node **prev_ret,
185                                      struct rb_node **next_ret)
186 {
187         struct rb_root *root = &tree->state;
188         struct rb_node *n = root->rb_node;
189         struct rb_node *prev = NULL;
190         struct rb_node *orig_prev = NULL;
191         struct tree_entry *entry;
192         struct tree_entry *prev_entry = NULL;
193
194         while (n) {
195                 entry = rb_entry(n, struct tree_entry, rb_node);
196                 prev = n;
197                 prev_entry = entry;
198
199                 if (offset < entry->start)
200                         n = n->rb_left;
201                 else if (offset > entry->end)
202                         n = n->rb_right;
203                 else
204                         return n;
205         }
206
207         if (prev_ret) {
208                 orig_prev = prev;
209                 while (prev && offset > prev_entry->end) {
210                         prev = rb_next(prev);
211                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
212                 }
213                 *prev_ret = prev;
214                 prev = orig_prev;
215         }
216
217         if (next_ret) {
218                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
219                 while (prev && offset < prev_entry->start) {
220                         prev = rb_prev(prev);
221                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
222                 }
223                 *next_ret = prev;
224         }
225         return NULL;
226 }
227
228 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
229                                           u64 offset)
230 {
231         struct rb_node *prev = NULL;
232         struct rb_node *ret;
233
234         ret = __etree_search(tree, offset, &prev, NULL);
235         if (!ret)
236                 return prev;
237         return ret;
238 }
239
240 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
241                      struct extent_state *other)
242 {
243         if (tree->ops && tree->ops->merge_extent_hook)
244                 tree->ops->merge_extent_hook(tree->mapping->host, new,
245                                              other);
246 }
247
248 /*
249  * utility function to look for merge candidates inside a given range.
250  * Any extents with matching state are merged together into a single
251  * extent in the tree.  Extents with EXTENT_IOBITS in their state field
252  * are not merged because the end_io handlers need to be able to do
253  * operations on them without sleeping (or doing allocations/splits).
254  *
255  * This should be called with the tree lock held.
256  */
257 static void merge_state(struct extent_io_tree *tree,
258                         struct extent_state *state)
259 {
260         struct extent_state *other;
261         struct rb_node *other_node;
262
263         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
264                 return;
265
266         other_node = rb_prev(&state->rb_node);
267         if (other_node) {
268                 other = rb_entry(other_node, struct extent_state, rb_node);
269                 if (other->end == state->start - 1 &&
270                     other->state == state->state) {
271                         merge_cb(tree, state, other);
272                         state->start = other->start;
273                         other->tree = NULL;
274                         rb_erase(&other->rb_node, &tree->state);
275                         free_extent_state(other);
276                 }
277         }
278         other_node = rb_next(&state->rb_node);
279         if (other_node) {
280                 other = rb_entry(other_node, struct extent_state, rb_node);
281                 if (other->start == state->end + 1 &&
282                     other->state == state->state) {
283                         merge_cb(tree, state, other);
284                         state->end = other->end;
285                         other->tree = NULL;
286                         rb_erase(&other->rb_node, &tree->state);
287                         free_extent_state(other);
288                 }
289         }
290 }
291
292 static void set_state_cb(struct extent_io_tree *tree,
293                          struct extent_state *state, int *bits)
294 {
295         if (tree->ops && tree->ops->set_bit_hook)
296                 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
297 }
298
299 static void clear_state_cb(struct extent_io_tree *tree,
300                            struct extent_state *state, int *bits)
301 {
302         if (tree->ops && tree->ops->clear_bit_hook)
303                 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
304 }
305
306 static void set_state_bits(struct extent_io_tree *tree,
307                            struct extent_state *state, int *bits);
308
309 /*
310  * insert an extent_state struct into the tree.  'bits' are set on the
311  * struct before it is inserted.
312  *
313  * This may return -EEXIST if the extent is already there, in which case the
314  * state struct is left for the caller to free.
315  *
316  * The tree lock is not taken internally.  This is a utility function and
317  * probably isn't what you want to call (see set/clear_extent_bit).
318  */
319 static int insert_state(struct extent_io_tree *tree,
320                         struct extent_state *state, u64 start, u64 end,
321                         int *bits)
322 {
323         struct rb_node *node;
324
325         if (end < start) {
326                 printk(KERN_ERR "btrfs end < start %llu %llu\n",
327                        (unsigned long long)end,
328                        (unsigned long long)start);
329                 WARN_ON(1);
330         }
331         state->start = start;
332         state->end = end;
333
334         set_state_bits(tree, state, bits);
335
336         node = tree_insert(&tree->state, end, &state->rb_node);
337         if (node) {
338                 struct extent_state *found;
339                 found = rb_entry(node, struct extent_state, rb_node);
340                 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
341                        "%llu %llu\n", (unsigned long long)found->start,
342                        (unsigned long long)found->end,
343                        (unsigned long long)start, (unsigned long long)end);
344                 return -EEXIST;
345         }
346         state->tree = tree;
347         merge_state(tree, state);
348         return 0;
349 }
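
/*
 * Illustrative sketch (not part of the original file): how insert_state()
 * is driven from set_extent_bit() when no state covers 'start'.  The
 * caller allocates the struct, holds tree->lock, and treats -EEXIST as a
 * bug, exactly as the in-file callers do.  EXTENT_DIRTY is only an
 * example bit.
 */
static int __maybe_unused example_insert_state(struct extent_io_tree *tree,
                                               u64 start, u64 end)
{
        struct extent_state *state;
        int bits = EXTENT_DIRTY;
        int err;

        state = alloc_extent_state(GFP_NOFS);
        if (!state)
                return -ENOMEM;

        spin_lock(&tree->lock);
        /* sets the bits, links the node into tree->state, then merges */
        err = insert_state(tree, state, start, end, &bits);
        BUG_ON(err == -EEXIST);
        spin_unlock(&tree->lock);
        return err;
}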
350
351 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
352                      u64 split)
353 {
354         if (tree->ops && tree->ops->split_extent_hook)
355                 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
356 }
357
358 /*
359  * split a given extent state struct in two, inserting the preallocated
360  * struct 'prealloc' as the newly created second half.  'split' indicates an
361  * offset inside 'orig' where it should be split.
362  *
363  * Before calling,
364  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
365  * are two extent state structs in the tree:
366  * prealloc: [orig->start, split - 1]
367  * orig: [ split, orig->end ]
368  *
369  * The tree locks are not taken by this function. They need to be held
370  * by the caller.
371  */
372 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
373                        struct extent_state *prealloc, u64 split)
374 {
375         struct rb_node *node;
376
377         split_cb(tree, orig, split);
378
379         prealloc->start = orig->start;
380         prealloc->end = split - 1;
381         prealloc->state = orig->state;
382         orig->start = split;
383
384         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
385         if (node) {
386                 free_extent_state(prealloc);
387                 return -EEXIST;
388         }
389         prealloc->tree = tree;
390         return 0;
391 }
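
/*
 * Illustrative sketch (not part of the original file): the before/after
 * layout described in the comment above split_state().  With 'orig'
 * covering [0, 8191] in the tree and a preallocated 'prealloc', a split
 * at 4096 leaves prealloc = [0, 4095] and orig = [4096, 8191].  The
 * caller must hold tree->lock.
 */
static void __maybe_unused example_split_state(struct extent_io_tree *tree,
                                               struct extent_state *orig,
                                               struct extent_state *prealloc)
{
        int err;

        err = split_state(tree, orig, prealloc, 4096);
        /* on failure split_state() has already freed 'prealloc' */
        BUG_ON(err == -EEXIST);
}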
392
393 /*
394  * utility function to clear some bits in an extent state struct.
395  * it will optionally wake up anyone waiting on this state (wake == 1).
396  *
397  *
398  * If no bits are set on the state struct after clearing things, the
399  * struct is freed and removed from the tree
400  */
401 static int clear_state_bit(struct extent_io_tree *tree,
402                             struct extent_state *state,
403                             int *bits, int wake)
404 {
405         int bits_to_clear = *bits & ~EXTENT_CTLBITS;
406         int ret = state->state & bits_to_clear;
407
408         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
409                 u64 range = state->end - state->start + 1;
410                 WARN_ON(range > tree->dirty_bytes);
411                 tree->dirty_bytes -= range;
412         }
413         clear_state_cb(tree, state, bits);
414         state->state &= ~bits_to_clear;
415         if (wake)
416                 wake_up(&state->wq);
417         if (state->state == 0) {
418                 if (state->tree) {
419                         rb_erase(&state->rb_node, &tree->state);
420                         state->tree = NULL;
421                         free_extent_state(state);
422                 } else {
423                         WARN_ON(1);
424                 }
425         } else {
426                 merge_state(tree, state);
427         }
428         return ret;
429 }
430
431 static struct extent_state *
432 alloc_extent_state_atomic(struct extent_state *prealloc)
433 {
434         if (!prealloc)
435                 prealloc = alloc_extent_state(GFP_ATOMIC);
436
437         return prealloc;
438 }
439
440 /*
441  * clear some bits on a range in the tree.  This may require splitting
442  * or inserting elements in the tree, so the gfp mask is used to
443  * indicate which allocations or sleeping are allowed.
444  *
445  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
446  * the given range from the tree regardless of state (ie for truncate).
447  *
448  * the range [start, end] is inclusive.
449  *
450  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
451  * bits were already set, or zero if none of the bits were already set.
452  */
453 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
454                      int bits, int wake, int delete,
455                      struct extent_state **cached_state,
456                      gfp_t mask)
457 {
458         struct extent_state *state;
459         struct extent_state *cached;
460         struct extent_state *prealloc = NULL;
461         struct rb_node *next_node;
462         struct rb_node *node;
463         u64 last_end;
464         int err;
465         int set = 0;
466         int clear = 0;
467
468         if (delete)
469                 bits |= ~EXTENT_CTLBITS;
470         bits |= EXTENT_FIRST_DELALLOC;
471
472         if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
473                 clear = 1;
474 again:
475         if (!prealloc && (mask & __GFP_WAIT)) {
476                 prealloc = alloc_extent_state(mask);
477                 if (!prealloc)
478                         return -ENOMEM;
479         }
480
481         spin_lock(&tree->lock);
482         if (cached_state) {
483                 cached = *cached_state;
484
485                 if (clear) {
486                         *cached_state = NULL;
487                         cached_state = NULL;
488                 }
489
490                 if (cached && cached->tree && cached->start <= start &&
491                     cached->end > start) {
492                         if (clear)
493                                 atomic_dec(&cached->refs);
494                         state = cached;
495                         goto hit_next;
496                 }
497                 if (clear)
498                         free_extent_state(cached);
499         }
500         /*
501          * this search will find the extents that end after
502          * our range starts
503          */
504         node = tree_search(tree, start);
505         if (!node)
506                 goto out;
507         state = rb_entry(node, struct extent_state, rb_node);
508 hit_next:
509         if (state->start > end)
510                 goto out;
511         WARN_ON(state->end < start);
512         last_end = state->end;
513
514         /*
515          *     | ---- desired range ---- |
516          *  | state | or
517          *  | ------------- state -------------- |
518          *
519          * We need to split the extent we found, and may flip
520          * bits on second half.
521          *
522          * If the extent we found extends past our range, we
523          * just split and search again.  It'll get split again
524          * the next time though.
525          *
526          * If the extent we found is inside our range, we clear
527          * the desired bit on it.
528          */
529
530         if (state->start < start) {
531                 prealloc = alloc_extent_state_atomic(prealloc);
532                 BUG_ON(!prealloc);
533                 err = split_state(tree, state, prealloc, start);
534                 BUG_ON(err == -EEXIST);
535                 prealloc = NULL;
536                 if (err)
537                         goto out;
538                 if (state->end <= end) {
539                         set |= clear_state_bit(tree, state, &bits, wake);
540                         if (last_end == (u64)-1)
541                                 goto out;
542                         start = last_end + 1;
543                 }
544                 goto search_again;
545         }
546         /*
547          * | ---- desired range ---- |
548          *                        | state |
549          * We need to split the extent, and clear the bit
550          * on the first half
551          */
552         if (state->start <= end && state->end > end) {
553                 prealloc = alloc_extent_state_atomic(prealloc);
554                 BUG_ON(!prealloc);
555                 err = split_state(tree, state, prealloc, end + 1);
556                 BUG_ON(err == -EEXIST);
557                 if (wake)
558                         wake_up(&state->wq);
559
560                 set |= clear_state_bit(tree, prealloc, &bits, wake);
561
562                 prealloc = NULL;
563                 goto out;
564         }
565
566         if (state->end < end && prealloc && !need_resched())
567                 next_node = rb_next(&state->rb_node);
568         else
569                 next_node = NULL;
570
571         set |= clear_state_bit(tree, state, &bits, wake);
572         if (last_end == (u64)-1)
573                 goto out;
574         start = last_end + 1;
575         if (start <= end && next_node) {
576                 state = rb_entry(next_node, struct extent_state,
577                                  rb_node);
578                 if (state->start == start)
579                         goto hit_next;
580         }
581         goto search_again;
582
583 out:
584         spin_unlock(&tree->lock);
585         if (prealloc)
586                 free_extent_state(prealloc);
587
588         return set;
589
590 search_again:
591         if (start > end)
592                 goto out;
593         spin_unlock(&tree->lock);
594         if (mask & __GFP_WAIT)
595                 cond_resched();
596         goto again;
597 }
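
/*
 * Illustrative sketch (not part of the original file): the truncate-style
 * call described above, with wake == 1 and delete == 1 so every state in
 * the range is removed no matter which bits it carries (delete == 1 makes
 * clear_extent_bit() widen 'bits' to everything but the control bits).
 * The bit mask shown here is only an example.
 */
static int __maybe_unused example_drop_range(struct extent_io_tree *tree,
                                             u64 start, u64 end)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC,
                                1, 1, NULL, GFP_NOFS);
}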
598
599 static int wait_on_state(struct extent_io_tree *tree,
600                          struct extent_state *state)
601                 __releases(tree->lock)
602                 __acquires(tree->lock)
603 {
604         DEFINE_WAIT(wait);
605         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
606         spin_unlock(&tree->lock);
607         schedule();
608         spin_lock(&tree->lock);
609         finish_wait(&state->wq, &wait);
610         return 0;
611 }
612
613 /*
614  * waits for one or more bits to clear on a range in the state tree.
615  * The range [start, end] is inclusive.
616  * The tree lock is taken by this function
617  */
618 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
619 {
620         struct extent_state *state;
621         struct rb_node *node;
622
623         spin_lock(&tree->lock);
624 again:
625         while (1) {
626                 /*
627                  * this search will find all the extents that end after
628                  * our range starts
629                  */
630                 node = tree_search(tree, start);
631                 if (!node)
632                         break;
633
634                 state = rb_entry(node, struct extent_state, rb_node);
635
636                 if (state->start > end)
637                         goto out;
638
639                 if (state->state & bits) {
640                         start = state->start;
641                         atomic_inc(&state->refs);
642                         wait_on_state(tree, state);
643                         free_extent_state(state);
644                         goto again;
645                 }
646                 start = state->end + 1;
647
648                 if (start > end)
649                         break;
650
651                 if (need_resched()) {
652                         spin_unlock(&tree->lock);
653                         cond_resched();
654                         spin_lock(&tree->lock);
655                 }
656         }
657 out:
658         spin_unlock(&tree->lock);
659         return 0;
660 }
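
/*
 * Illustrative sketch (not part of the original file): blocking until no
 * state in the range carries EXTENT_LOCKED any more; this is the same
 * wait that lock_extent_bits() performs when set_extent_bit() returns
 * -EEXIST.  EXTENT_LOCKED is only an example bit.
 */
static void __maybe_unused example_wait_unlocked(struct extent_io_tree *tree,
                                                 u64 start, u64 end)
{
        wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}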
661
662 static void set_state_bits(struct extent_io_tree *tree,
663                            struct extent_state *state,
664                            int *bits)
665 {
666         int bits_to_set = *bits & ~EXTENT_CTLBITS;
667
668         set_state_cb(tree, state, bits);
669         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
670                 u64 range = state->end - state->start + 1;
671                 tree->dirty_bytes += range;
672         }
673         state->state |= bits_to_set;
674 }
675
676 static void cache_state(struct extent_state *state,
677                         struct extent_state **cached_ptr)
678 {
679         if (cached_ptr && !(*cached_ptr)) {
680                 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
681                         *cached_ptr = state;
682                         atomic_inc(&state->refs);
683                 }
684         }
685 }
686
687 static void uncache_state(struct extent_state **cached_ptr)
688 {
689         if (cached_ptr && (*cached_ptr)) {
690                 struct extent_state *state = *cached_ptr;
691                 *cached_ptr = NULL;
692                 free_extent_state(state);
693         }
694 }
695
696 /*
697  * set some bits on a range in the tree.  This may require allocations or
698  * sleeping, so the gfp mask is used to indicate what is allowed.
699  *
700  * If any of the exclusive bits are set, this will fail with -EEXIST if some
701  * part of the range already has the desired bits set.  The start of the
702  * existing range is returned in failed_start in this case.
703  *
704  * [start, end] is inclusive.  This takes the tree lock.
705  */
706
707 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
708                    int bits, int exclusive_bits, u64 *failed_start,
709                    struct extent_state **cached_state, gfp_t mask)
710 {
711         struct extent_state *state;
712         struct extent_state *prealloc = NULL;
713         struct rb_node *node;
714         int err = 0;
715         u64 last_start;
716         u64 last_end;
717
718         bits |= EXTENT_FIRST_DELALLOC;
719 again:
720         if (!prealloc && (mask & __GFP_WAIT)) {
721                 prealloc = alloc_extent_state(mask);
722                 BUG_ON(!prealloc);
723         }
724
725         spin_lock(&tree->lock);
726         if (cached_state && *cached_state) {
727                 state = *cached_state;
728                 if (state->start <= start && state->end > start &&
729                     state->tree) {
730                         node = &state->rb_node;
731                         goto hit_next;
732                 }
733         }
734         /*
735          * this search will find all the extents that end after
736          * our range starts.
737          */
738         node = tree_search(tree, start);
739         if (!node) {
740                 prealloc = alloc_extent_state_atomic(prealloc);
741                 BUG_ON(!prealloc);
742                 err = insert_state(tree, prealloc, start, end, &bits);
743                 prealloc = NULL;
744                 BUG_ON(err == -EEXIST);
745                 goto out;
746         }
747         state = rb_entry(node, struct extent_state, rb_node);
748 hit_next:
749         last_start = state->start;
750         last_end = state->end;
751
752         /*
753          * | ---- desired range ---- |
754          * | state |
755          *
756          * Just lock what we found and keep going
757          */
758         if (state->start == start && state->end <= end) {
759                 struct rb_node *next_node;
760                 if (state->state & exclusive_bits) {
761                         *failed_start = state->start;
762                         err = -EEXIST;
763                         goto out;
764                 }
765
766                 set_state_bits(tree, state, &bits);
767
768                 cache_state(state, cached_state);
769                 merge_state(tree, state);
770                 if (last_end == (u64)-1)
771                         goto out;
772
773                 start = last_end + 1;
774                 next_node = rb_next(&state->rb_node);
775                 if (next_node && start < end && prealloc && !need_resched()) {
776                         state = rb_entry(next_node, struct extent_state,
777                                          rb_node);
778                         if (state->start == start)
779                                 goto hit_next;
780                 }
781                 goto search_again;
782         }
783
784         /*
785          *     | ---- desired range ---- |
786          * | state |
787          *   or
788          * | ------------- state -------------- |
789          *
790          * We need to split the extent we found, and may flip bits on
791          * second half.
792          *
793          * If the extent we found extends past our
794          * range, we just split and search again.  It'll get split
795          * again the next time though.
796          *
797          * If the extent we found is inside our range, we set the
798          * desired bit on it.
799          */
800         if (state->start < start) {
801                 if (state->state & exclusive_bits) {
802                         *failed_start = start;
803                         err = -EEXIST;
804                         goto out;
805                 }
806
807                 prealloc = alloc_extent_state_atomic(prealloc);
808                 BUG_ON(!prealloc);
809                 err = split_state(tree, state, prealloc, start);
810                 BUG_ON(err == -EEXIST);
811                 prealloc = NULL;
812                 if (err)
813                         goto out;
814                 if (state->end <= end) {
815                         set_state_bits(tree, state, &bits);
816                         cache_state(state, cached_state);
817                         merge_state(tree, state);
818                         if (last_end == (u64)-1)
819                                 goto out;
820                         start = last_end + 1;
821                 }
822                 goto search_again;
823         }
824         /*
825          * | ---- desired range ---- |
826          *     | state | or               | state |
827          *
828          * There's a hole, we need to insert something in it and
829          * ignore the extent we found.
830          */
831         if (state->start > start) {
832                 u64 this_end;
833                 if (end < last_start)
834                         this_end = end;
835                 else
836                         this_end = last_start - 1;
837
838                 prealloc = alloc_extent_state_atomic(prealloc);
839                 BUG_ON(!prealloc);
840
841                 /*
842                  * Avoid freeing 'prealloc' if it can be merged with
843                  * the later extent.
844                  */
845                 err = insert_state(tree, prealloc, start, this_end,
846                                    &bits);
847                 BUG_ON(err == -EEXIST);
848                 if (err) {
849                         free_extent_state(prealloc);
850                         prealloc = NULL;
851                         goto out;
852                 }
853                 cache_state(prealloc, cached_state);
854                 prealloc = NULL;
855                 start = this_end + 1;
856                 goto search_again;
857         }
858         /*
859          * | ---- desired range ---- |
860          *                        | state |
861          * We need to split the extent, and set the bit
862          * on the first half
863          */
864         if (state->start <= end && state->end > end) {
865                 if (state->state & exclusive_bits) {
866                         *failed_start = start;
867                         err = -EEXIST;
868                         goto out;
869                 }
870
871                 prealloc = alloc_extent_state_atomic(prealloc);
872                 BUG_ON(!prealloc);
873                 err = split_state(tree, state, prealloc, end + 1);
874                 BUG_ON(err == -EEXIST);
875
876                 set_state_bits(tree, prealloc, &bits);
877                 cache_state(prealloc, cached_state);
878                 merge_state(tree, prealloc);
879                 prealloc = NULL;
880                 goto out;
881         }
882
883         goto search_again;
884
885 out:
886         spin_unlock(&tree->lock);
887         if (prealloc)
888                 free_extent_state(prealloc);
889
890         return err;
891
892 search_again:
893         if (start > end)
894                 goto out;
895         spin_unlock(&tree->lock);
896         if (mask & __GFP_WAIT)
897                 cond_resched();
898         goto again;
899 }
900
901 /* wrappers around set/clear extent bit */
902 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
903                      gfp_t mask)
904 {
905         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
906                               NULL, mask);
907 }
908
909 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
910                     int bits, gfp_t mask)
911 {
912         return set_extent_bit(tree, start, end, bits, 0, NULL,
913                               NULL, mask);
914 }
915
916 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
917                       int bits, gfp_t mask)
918 {
919         return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
920 }
921
922 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
923                         struct extent_state **cached_state, gfp_t mask)
924 {
925         return set_extent_bit(tree, start, end,
926                               EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
927                               0, NULL, cached_state, mask);
928 }
929
930 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
931                        gfp_t mask)
932 {
933         return clear_extent_bit(tree, start, end,
934                                 EXTENT_DIRTY | EXTENT_DELALLOC |
935                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
936 }
937
938 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
939                      gfp_t mask)
940 {
941         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
942                               NULL, mask);
943 }
944
945 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
946                         struct extent_state **cached_state, gfp_t mask)
947 {
948         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
949                               NULL, cached_state, mask);
950 }
951
952 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
953                                  u64 end, struct extent_state **cached_state,
954                                  gfp_t mask)
955 {
956         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
957                                 cached_state, mask);
958 }
959
960 /*
961  * either insert or lock the state struct between start and end; use mask to
962  * tell us if waiting is desired.
963  */
964 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
965                      int bits, struct extent_state **cached_state, gfp_t mask)
966 {
967         int err;
968         u64 failed_start;
969         while (1) {
970                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
971                                      EXTENT_LOCKED, &failed_start,
972                                      cached_state, mask);
973                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
974                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
975                         start = failed_start;
976                 } else {
977                         break;
978                 }
979                 WARN_ON(start > end);
980         }
981         return err;
982 }
983
984 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
985 {
986         return lock_extent_bits(tree, start, end, 0, NULL, mask);
987 }
988
989 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
990                     gfp_t mask)
991 {
992         int err;
993         u64 failed_start;
994
995         err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
996                              &failed_start, NULL, mask);
997         if (err == -EEXIST) {
998                 if (failed_start > start)
999                         clear_extent_bit(tree, start, failed_start - 1,
1000                                          EXTENT_LOCKED, 1, 0, NULL, mask);
1001                 return 0;
1002         }
1003         return 1;
1004 }
1005
1006 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1007                          struct extent_state **cached, gfp_t mask)
1008 {
1009         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1010                                 mask);
1011 }
1012
1013 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1014 {
1015         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1016                                 mask);
1017 }
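
/*
 * Illustrative sketch (not part of the original file): the common
 * lock/operate/unlock pattern.  EXTENT_LOCKED is in EXTENT_IOBITS, so the
 * locked state is cached and handed back to unlock_extent_cached(), which
 * drops the extra reference for us.
 */
static int __maybe_unused example_locked_range(struct extent_io_tree *tree,
                                               u64 start, u64 end)
{
        struct extent_state *cached = NULL;
        int err;

        err = lock_extent_bits(tree, start, end, 0, &cached, GFP_NOFS);
        if (err)
                return err;

        /* ... work on [start, end] while it is locked in the io tree ... */

        return unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
}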
1018
1019 /*
1020  * helper function to set both pages and extents in the tree writeback
1021  */
1022 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1023 {
1024         unsigned long index = start >> PAGE_CACHE_SHIFT;
1025         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1026         struct page *page;
1027
1028         while (index <= end_index) {
1029                 page = find_get_page(tree->mapping, index);
1030                 BUG_ON(!page);
1031                 set_page_writeback(page);
1032                 page_cache_release(page);
1033                 index++;
1034         }
1035         return 0;
1036 }
1037
1038 /*
1039  * find the first offset in the io tree with 'bits' set. zero is
1040  * returned if we find something, and *start_ret and *end_ret are
1041  * set to reflect the state struct that was found.
1042  *
1043  * If nothing was found, 1 is returned, < 0 on error
1044  */
1045 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1046                           u64 *start_ret, u64 *end_ret, int bits)
1047 {
1048         struct rb_node *node;
1049         struct extent_state *state;
1050         int ret = 1;
1051
1052         spin_lock(&tree->lock);
1053         /*
1054          * this search will find all the extents that end after
1055          * our range starts.
1056          */
1057         node = tree_search(tree, start);
1058         if (!node)
1059                 goto out;
1060
1061         while (1) {
1062                 state = rb_entry(node, struct extent_state, rb_node);
1063                 if (state->end >= start && (state->state & bits)) {
1064                         *start_ret = state->start;
1065                         *end_ret = state->end;
1066                         ret = 0;
1067                         break;
1068                 }
1069                 node = rb_next(node);
1070                 if (!node)
1071                         break;
1072         }
1073 out:
1074         spin_unlock(&tree->lock);
1075         return ret;
1076 }
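
/*
 * Illustrative sketch (not part of the original file): walking every
 * dirty range from a starting offset with find_first_extent_bit(), which
 * returns 0 while something is found.  EXTENT_DIRTY is only an example
 * bit.
 */
static void __maybe_unused example_walk_dirty(struct extent_io_tree *tree)
{
        u64 start = 0;
        u64 found_start;
        u64 found_end;

        while (!find_first_extent_bit(tree, start, &found_start, &found_end,
                                      EXTENT_DIRTY)) {
                /* [found_start, found_end] has EXTENT_DIRTY set */
                start = found_end + 1;
                if (start == 0) /* wrapped past (u64)-1, nothing follows */
                        break;
        }
}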
1077
1078 /* find the first state struct with 'bits' set after 'start', and
1079  * return it.  tree->lock must be held.  NULL will be returned if
1080  * nothing was found after 'start'
1081  */
1082 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1083                                                  u64 start, int bits)
1084 {
1085         struct rb_node *node;
1086         struct extent_state *state;
1087
1088         /*
1089          * this search will find all the extents that end after
1090          * our range starts.
1091          */
1092         node = tree_search(tree, start);
1093         if (!node)
1094                 goto out;
1095
1096         while (1) {
1097                 state = rb_entry(node, struct extent_state, rb_node);
1098                 if (state->end >= start && (state->state & bits))
1099                         return state;
1100
1101                 node = rb_next(node);
1102                 if (!node)
1103                         break;
1104         }
1105 out:
1106         return NULL;
1107 }
1108
1109 /*
1110  * find a contiguous range of bytes in the file marked as delalloc, not
1111  * more than 'max_bytes'.  start and end are used to return the range.
1112  *
1113  * 1 is returned if we find something, 0 if nothing was in the tree
1114  */
1115 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1116                                         u64 *start, u64 *end, u64 max_bytes,
1117                                         struct extent_state **cached_state)
1118 {
1119         struct rb_node *node;
1120         struct extent_state *state;
1121         u64 cur_start = *start;
1122         u64 found = 0;
1123         u64 total_bytes = 0;
1124
1125         spin_lock(&tree->lock);
1126
1127         /*
1128          * this search will find all the extents that end after
1129          * our range starts.
1130          */
1131         node = tree_search(tree, cur_start);
1132         if (!node) {
1133                 if (!found)
1134                         *end = (u64)-1;
1135                 goto out;
1136         }
1137
1138         while (1) {
1139                 state = rb_entry(node, struct extent_state, rb_node);
1140                 if (found && (state->start != cur_start ||
1141                               (state->state & EXTENT_BOUNDARY))) {
1142                         goto out;
1143                 }
1144                 if (!(state->state & EXTENT_DELALLOC)) {
1145                         if (!found)
1146                                 *end = state->end;
1147                         goto out;
1148                 }
1149                 if (!found) {
1150                         *start = state->start;
1151                         *cached_state = state;
1152                         atomic_inc(&state->refs);
1153                 }
1154                 found++;
1155                 *end = state->end;
1156                 cur_start = state->end + 1;
1157                 node = rb_next(node);
1158                 if (!node)
1159                         break;
1160                 total_bytes += state->end - state->start + 1;
1161                 if (total_bytes >= max_bytes)
1162                         break;
1163         }
1164 out:
1165         spin_unlock(&tree->lock);
1166         return found;
1167 }
1168
1169 static noinline int __unlock_for_delalloc(struct inode *inode,
1170                                           struct page *locked_page,
1171                                           u64 start, u64 end)
1172 {
1173         int ret;
1174         struct page *pages[16];
1175         unsigned long index = start >> PAGE_CACHE_SHIFT;
1176         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1177         unsigned long nr_pages = end_index - index + 1;
1178         int i;
1179
1180         if (index == locked_page->index && end_index == index)
1181                 return 0;
1182
1183         while (nr_pages > 0) {
1184                 ret = find_get_pages_contig(inode->i_mapping, index,
1185                                      min_t(unsigned long, nr_pages,
1186                                      ARRAY_SIZE(pages)), pages);
1187                 for (i = 0; i < ret; i++) {
1188                         if (pages[i] != locked_page)
1189                                 unlock_page(pages[i]);
1190                         page_cache_release(pages[i]);
1191                 }
1192                 nr_pages -= ret;
1193                 index += ret;
1194                 cond_resched();
1195         }
1196         return 0;
1197 }
1198
1199 static noinline int lock_delalloc_pages(struct inode *inode,
1200                                         struct page *locked_page,
1201                                         u64 delalloc_start,
1202                                         u64 delalloc_end)
1203 {
1204         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1205         unsigned long start_index = index;
1206         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1207         unsigned long pages_locked = 0;
1208         struct page *pages[16];
1209         unsigned long nrpages;
1210         int ret;
1211         int i;
1212
1213         /* the caller is responsible for locking the start index */
1214         if (index == locked_page->index && index == end_index)
1215                 return 0;
1216
1217         /* skip the page at the start index */
1218         nrpages = end_index - index + 1;
1219         while (nrpages > 0) {
1220                 ret = find_get_pages_contig(inode->i_mapping, index,
1221                                      min_t(unsigned long,
1222                                      nrpages, ARRAY_SIZE(pages)), pages);
1223                 if (ret == 0) {
1224                         ret = -EAGAIN;
1225                         goto done;
1226                 }
1227                 /* now we have an array of pages, lock them all */
1228                 for (i = 0; i < ret; i++) {
1229                         /*
1230                          * the caller is taking responsibility for
1231                          * locked_page
1232                          */
1233                         if (pages[i] != locked_page) {
1234                                 lock_page(pages[i]);
1235                                 if (!PageDirty(pages[i]) ||
1236                                     pages[i]->mapping != inode->i_mapping) {
1237                                         ret = -EAGAIN;
1238                                         unlock_page(pages[i]);
1239                                         page_cache_release(pages[i]);
1240                                         goto done;
1241                                 }
1242                         }
1243                         page_cache_release(pages[i]);
1244                         pages_locked++;
1245                 }
1246                 nrpages -= ret;
1247                 index += ret;
1248                 cond_resched();
1249         }
1250         ret = 0;
1251 done:
1252         if (ret && pages_locked) {
1253                 __unlock_for_delalloc(inode, locked_page,
1254                               delalloc_start,
1255                               ((u64)(start_index + pages_locked - 1)) <<
1256                               PAGE_CACHE_SHIFT);
1257         }
1258         return ret;
1259 }
1260
1261 /*
1262  * find a contiguous range of bytes in the file marked as delalloc, not
1263  * more than 'max_bytes'.  start and end are used to return the range.
1264  *
1265  * 1 is returned if we find something, 0 if nothing was in the tree
1266  */
1267 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1268                                              struct extent_io_tree *tree,
1269                                              struct page *locked_page,
1270                                              u64 *start, u64 *end,
1271                                              u64 max_bytes)
1272 {
1273         u64 delalloc_start;
1274         u64 delalloc_end;
1275         u64 found;
1276         struct extent_state *cached_state = NULL;
1277         int ret;
1278         int loops = 0;
1279
1280 again:
1281         /* step one, find a bunch of delalloc bytes starting at start */
1282         delalloc_start = *start;
1283         delalloc_end = 0;
1284         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1285                                     max_bytes, &cached_state);
1286         if (!found || delalloc_end <= *start) {
1287                 *start = delalloc_start;
1288                 *end = delalloc_end;
1289                 free_extent_state(cached_state);
1290                 return found;
1291         }
1292
1293         /*
1294          * start comes from the offset of locked_page.  We have to lock
1295          * pages in order, so we can't process delalloc bytes before
1296          * locked_page
1297          */
1298         if (delalloc_start < *start)
1299                 delalloc_start = *start;
1300
1301         /*
1302          * make sure to limit the number of pages we try to lock down
1303          * if we're looping.
1304          */
1305         if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1306                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1307
1308         /* step two, lock all the pages after the page that has start */
1309         ret = lock_delalloc_pages(inode, locked_page,
1310                                   delalloc_start, delalloc_end);
1311         if (ret == -EAGAIN) {
1312                 /* some of the pages are gone, let's avoid looping by
1313                  * shortening the size of the delalloc range we're searching
1314                  */
1315                 free_extent_state(cached_state);
1316                 if (!loops) {
1317                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1318                         max_bytes = PAGE_CACHE_SIZE - offset;
1319                         loops = 1;
1320                         goto again;
1321                 } else {
1322                         found = 0;
1323                         goto out_failed;
1324                 }
1325         }
1326         BUG_ON(ret);
1327
1328         /* step three, lock the state bits for the whole range */
1329         lock_extent_bits(tree, delalloc_start, delalloc_end,
1330                          0, &cached_state, GFP_NOFS);
1331
1332         /* then test to make sure it is all still delalloc */
1333         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1334                              EXTENT_DELALLOC, 1, cached_state);
1335         if (!ret) {
1336                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1337                                      &cached_state, GFP_NOFS);
1338                 __unlock_for_delalloc(inode, locked_page,
1339                               delalloc_start, delalloc_end);
1340                 cond_resched();
1341                 goto again;
1342         }
1343         free_extent_state(cached_state);
1344         *start = delalloc_start;
1345         *end = delalloc_end;
1346 out_failed:
1347         return found;
1348 }
1349
1350 int extent_clear_unlock_delalloc(struct inode *inode,
1351                                 struct extent_io_tree *tree,
1352                                 u64 start, u64 end, struct page *locked_page,
1353                                 unsigned long op)
1354 {
1355         int ret;
1356         struct page *pages[16];
1357         unsigned long index = start >> PAGE_CACHE_SHIFT;
1358         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1359         unsigned long nr_pages = end_index - index + 1;
1360         int i;
1361         int clear_bits = 0;
1362
1363         if (op & EXTENT_CLEAR_UNLOCK)
1364                 clear_bits |= EXTENT_LOCKED;
1365         if (op & EXTENT_CLEAR_DIRTY)
1366                 clear_bits |= EXTENT_DIRTY;
1367
1368         if (op & EXTENT_CLEAR_DELALLOC)
1369                 clear_bits |= EXTENT_DELALLOC;
1370
1371         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1372         if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1373                     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1374                     EXTENT_SET_PRIVATE2)))
1375                 return 0;
1376
1377         while (nr_pages > 0) {
1378                 ret = find_get_pages_contig(inode->i_mapping, index,
1379                                      min_t(unsigned long,
1380                                      nr_pages, ARRAY_SIZE(pages)), pages);
1381                 for (i = 0; i < ret; i++) {
1382
1383                         if (op & EXTENT_SET_PRIVATE2)
1384                                 SetPagePrivate2(pages[i]);
1385
1386                         if (pages[i] == locked_page) {
1387                                 page_cache_release(pages[i]);
1388                                 continue;
1389                         }
1390                         if (op & EXTENT_CLEAR_DIRTY)
1391                                 clear_page_dirty_for_io(pages[i]);
1392                         if (op & EXTENT_SET_WRITEBACK)
1393                                 set_page_writeback(pages[i]);
1394                         if (op & EXTENT_END_WRITEBACK)
1395                                 end_page_writeback(pages[i]);
1396                         if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1397                                 unlock_page(pages[i]);
1398                         page_cache_release(pages[i]);
1399                 }
1400                 nr_pages -= ret;
1401                 index += ret;
1402                 cond_resched();
1403         }
1404         return 0;
1405 }
1406
1407 /*
1408  * count the number of bytes in the tree that have a given bit(s)
1409  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1410  * cached.  The total number found is returned.
1411  */
1412 u64 count_range_bits(struct extent_io_tree *tree,
1413                      u64 *start, u64 search_end, u64 max_bytes,
1414                      unsigned long bits, int contig)
1415 {
1416         struct rb_node *node;
1417         struct extent_state *state;
1418         u64 cur_start = *start;
1419         u64 total_bytes = 0;
1420         u64 last = 0;
1421         int found = 0;
1422
1423         if (search_end <= cur_start) {
1424                 WARN_ON(1);
1425                 return 0;
1426         }
1427
1428         spin_lock(&tree->lock);
1429         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1430                 total_bytes = tree->dirty_bytes;
1431                 goto out;
1432         }
1433         /*
1434          * this search will find all the extents that end after
1435          * our range starts.
1436          */
1437         node = tree_search(tree, cur_start);
1438         if (!node)
1439                 goto out;
1440
1441         while (1) {
1442                 state = rb_entry(node, struct extent_state, rb_node);
1443                 if (state->start > search_end)
1444                         break;
1445                 if (contig && found && state->start > last + 1)
1446                         break;
1447                 if (state->end >= cur_start && (state->state & bits) == bits) {
1448                         total_bytes += min(search_end, state->end) + 1 -
1449                                        max(cur_start, state->start);
1450                         if (total_bytes >= max_bytes)
1451                                 break;
1452                         if (!found) {
1453                                 *start = max(cur_start, state->start);
1454                                 found = 1;
1455                         }
1456                         last = state->end;
1457                 } else if (contig && found) {
1458                         break;
1459                 }
1460                 node = rb_next(node);
1461                 if (!node)
1462                         break;
1463         }
1464 out:
1465         spin_unlock(&tree->lock);
1466         return total_bytes;
1467 }
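
/*
 * Illustrative sketch (not part of the original file): counting delalloc
 * bytes between 'start' and 'end'.  count_range_bits() advances '*start'
 * to the first byte that actually had the bits set; with contig == 1 the
 * count stops at the first gap.  A max_bytes of (u64)-1 means "no cap".
 */
static u64 __maybe_unused example_count_delalloc(struct extent_io_tree *tree,
                                                 u64 start, u64 end)
{
        return count_range_bits(tree, &start, end, (u64)-1,
                                EXTENT_DELALLOC, 1);
}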
1468
1469 /*
1470  * set the private field for a given byte offset in the tree.  If there isn't
1471  * an extent_state there already, this does nothing.
1472  */
1473 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1474 {
1475         struct rb_node *node;
1476         struct extent_state *state;
1477         int ret = 0;
1478
1479         spin_lock(&tree->lock);
1480         /*
1481          * this search will find all the extents that end after
1482          * our range starts.
1483          */
1484         node = tree_search(tree, start);
1485         if (!node) {
1486                 ret = -ENOENT;
1487                 goto out;
1488         }
1489         state = rb_entry(node, struct extent_state, rb_node);
1490         if (state->start != start) {
1491                 ret = -ENOENT;
1492                 goto out;
1493         }
1494         state->private = private;
1495 out:
1496         spin_unlock(&tree->lock);
1497         return ret;
1498 }
1499
1500 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1501 {
1502         struct rb_node *node;
1503         struct extent_state *state;
1504         int ret = 0;
1505
1506         spin_lock(&tree->lock);
1507         /*
1508          * this search will find all the extents that end after
1509          * our range starts.
1510          */
1511         node = tree_search(tree, start);
1512         if (!node) {
1513                 ret = -ENOENT;
1514                 goto out;
1515         }
1516         state = rb_entry(node, struct extent_state, rb_node);
1517         if (state->start != start) {
1518                 ret = -ENOENT;
1519                 goto out;
1520         }
1521         *private = state->private;
1522 out:
1523         spin_unlock(&tree->lock);
1524         return ret;
1525 }
1526
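/*
 * A minimal usage sketch (not taken from this file): set_state_private()
 * and get_state_private() are used as a pair to stash a per-extent value;
 * btrfs stashes read checksums this way.  'io_tree', 'extent_start' and
 * 'csum' are assumed placeholders.
 *
 *	u64 stashed;
 *	int ret;
 *
 *	ret = set_state_private(io_tree, extent_start, csum);
 *	if (!ret)
 *		ret = get_state_private(io_tree, extent_start, &stashed);
 *
 * On success 'stashed' holds the stored value; both calls return -ENOENT
 * when no extent_state begins exactly at 'extent_start'.
 */
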
1527 /*
1528  * searches a range in the state tree for a given mask.
1529  * If 'filled' == 1, this returns 1 only if every extent in the range
1530  * has the bits set.  Otherwise, 1 is returned if any bit in the
1531  * range is found set.
1532  */
1533 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1534                    int bits, int filled, struct extent_state *cached)
1535 {
1536         struct extent_state *state = NULL;
1537         struct rb_node *node;
1538         int bitset = 0;
1539
1540         spin_lock(&tree->lock);
1541         if (cached && cached->tree && cached->start <= start &&
1542             cached->end > start)
1543                 node = &cached->rb_node;
1544         else
1545                 node = tree_search(tree, start);
1546         while (node && start <= end) {
1547                 state = rb_entry(node, struct extent_state, rb_node);
1548
1549                 if (filled && state->start > start) {
1550                         bitset = 0;
1551                         break;
1552                 }
1553
1554                 if (state->start > end)
1555                         break;
1556
1557                 if (state->state & bits) {
1558                         bitset = 1;
1559                         if (!filled)
1560                                 break;
1561                 } else if (filled) {
1562                         bitset = 0;
1563                         break;
1564                 }
1565
1566                 if (state->end == (u64)-1)
1567                         break;
1568
1569                 start = state->end + 1;
1570                 if (start > end)
1571                         break;
1572                 node = rb_next(node);
1573                 if (!node) {
1574                         if (filled)
1575                                 bitset = 0;
1576                         break;
1577                 }
1578         }
1579         spin_unlock(&tree->lock);
1580         return bitset;
1581 }
1582
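/*
 * A minimal usage sketch (not taken from this file): the 'filled' argument
 * selects between "every byte in the range has the bits" and "any byte
 * does".  'tree', 'start' and 'end' are assumed placeholders.
 *
 *	(1 only if [start, end] is entirely EXTENT_UPTODATE)
 *	test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
 *
 *	(1 if any part of [start, end] is still EXTENT_LOCKED)
 *	test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
 *
 * The helpers below use exactly these two patterns.
 */
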
1583 /*
1584  * helper function to set a given page up to date if all the
1585  * extents in the tree for that page are up to date
1586  */
1587 static int check_page_uptodate(struct extent_io_tree *tree,
1588                                struct page *page)
1589 {
1590         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1591         u64 end = start + PAGE_CACHE_SIZE - 1;
1592         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1593                 SetPageUptodate(page);
1594         return 0;
1595 }
1596
1597 /*
1598  * helper function to unlock a page if all the extents in the tree
1599  * for that page are unlocked
1600  */
1601 static int check_page_locked(struct extent_io_tree *tree,
1602                              struct page *page)
1603 {
1604         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1605         u64 end = start + PAGE_CACHE_SIZE - 1;
1606         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1607                 unlock_page(page);
1608         return 0;
1609 }
1610
1611 /*
1612  * helper function to end page writeback if all the extents
1613  * in the tree for that page are done with writeback
1614  */
1615 static int check_page_writeback(struct extent_io_tree *tree,
1616                              struct page *page)
1617 {
1618         end_page_writeback(page);
1619         return 0;
1620 }
1621
1622 /* lots and lots of room for performance fixes in the end_bio funcs */
1623
1624 /*
1625  * after a writepage IO is done, we need to:
1626  * clear the uptodate bits on error
1627  * clear the writeback bits in the extent tree for this IO
1628  * end_page_writeback if the page has no more pending IO
1629  *
1630  * Scheduling is not allowed, so the extent state tree is expected
1631  * to have one and only one object corresponding to this IO.
1632  */
1633 static void end_bio_extent_writepage(struct bio *bio, int err)
1634 {
1635         int uptodate = err == 0;
1636         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1637         struct extent_io_tree *tree;
1638         u64 start;
1639         u64 end;
1640         int whole_page;
1641         int ret;
1642
1643         do {
1644                 struct page *page = bvec->bv_page;
1645                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1646
1647                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1648                          bvec->bv_offset;
1649                 end = start + bvec->bv_len - 1;
1650
1651                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1652                         whole_page = 1;
1653                 else
1654                         whole_page = 0;
1655
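                /*
                 * bvec starts at the last segment and is walked backwards;
                 * prefetch the flags of the page that will be handled on
                 * the next loop iteration
                 */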
1656                 if (--bvec >= bio->bi_io_vec)
1657                         prefetchw(&bvec->bv_page->flags);
1658                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1659                         ret = tree->ops->writepage_end_io_hook(page, start,
1660                                                        end, NULL, uptodate);
1661                         if (ret)
1662                                 uptodate = 0;
1663                 }
1664
1665                 if (!uptodate && tree->ops &&
1666                     tree->ops->writepage_io_failed_hook) {
1667                         ret = tree->ops->writepage_io_failed_hook(bio, page,
1668                                                          start, end, NULL);
1669                         if (ret == 0) {
1670                                 uptodate = (err == 0);
1671                                 continue;
1672                         }
1673                 }
1674
1675                 if (!uptodate) {
1676                         clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1677                         ClearPageUptodate(page);
1678                         SetPageError(page);
1679                 }
1680
1681                 if (whole_page)
1682                         end_page_writeback(page);
1683                 else
1684                         check_page_writeback(tree, page);
1685         } while (bvec >= bio->bi_io_vec);
1686
1687         bio_put(bio);
1688 }
1689
1690 /*
1691  * after a readpage IO is done, we need to:
1692  * clear the uptodate bits on error
1693  * set the uptodate bits if things worked
1694  * set the page up to date if all extents in the tree are uptodate
1695  * clear the lock bit in the extent tree
1696  * unlock the page if there are no other extents locked for it
1697  *
1698  * Scheduling is not allowed, so the extent state tree is expected
1699  * to have one and only one object corresponding to this IO.
1700  */
1701 static void end_bio_extent_readpage(struct bio *bio, int err)
1702 {
1703         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1704         struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1705         struct bio_vec *bvec = bio->bi_io_vec;
1706         struct extent_io_tree *tree;
1707         u64 start;
1708         u64 end;
1709         int whole_page;
1710         int ret;
1711
1712         if (err)
1713                 uptodate = 0;
1714
1715         do {
1716                 struct page *page = bvec->bv_page;
1717                 struct extent_state *cached = NULL;
1718                 struct extent_state *state;
1719
1720                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1721
1722                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1723                         bvec->bv_offset;
1724                 end = start + bvec->bv_len - 1;
1725
1726                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1727                         whole_page = 1;
1728                 else
1729                         whole_page = 0;
1730
1731                 if (++bvec <= bvec_end)
1732                         prefetchw(&bvec->bv_page->flags);
1733
1734                 spin_lock(&tree->lock);
1735                 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1736                 if (state && state->start == start) {
1737                         /*
1738                          * take a reference on the state, unlock will drop
1739                          * the ref
1740                          */
1741                         cache_state(state, &cached);
1742                 }
1743                 spin_unlock(&tree->lock);
1744
1745                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1746                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1747                                                               state);
1748                         if (ret)
1749                                 uptodate = 0;
1750                 }
1751                 if (!uptodate && tree->ops &&
1752                     tree->ops->readpage_io_failed_hook) {
1753                         ret = tree->ops->readpage_io_failed_hook(bio, page,
1754                                                          start, end, NULL);
1755                         if (ret == 0) {
1756                                 uptodate =
1757                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
1758                                 if (err)
1759                                         uptodate = 0;
1760                                 uncache_state(&cached);
1761                                 continue;
1762                         }
1763                 }
1764
1765                 if (uptodate) {
1766                         set_extent_uptodate(tree, start, end, &cached,
1767                                             GFP_ATOMIC);
1768                 }
1769                 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1770
1771                 if (whole_page) {
1772                         if (uptodate) {
1773                                 SetPageUptodate(page);
1774                         } else {
1775                                 ClearPageUptodate(page);
1776                                 SetPageError(page);
1777                         }
1778                         unlock_page(page);
1779                 } else {
1780                         if (uptodate) {
1781                                 check_page_uptodate(tree, page);
1782                         } else {
1783                                 ClearPageUptodate(page);
1784                                 SetPageError(page);
1785                         }
1786                         check_page_locked(tree, page);
1787                 }
1788         } while (bvec <= bvec_end);
1789
1790         bio_put(bio);
1791 }
1792
1793 struct bio *
1794 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1795                 gfp_t gfp_flags)
1796 {
1797         struct bio *bio;
1798
1799         bio = bio_alloc(gfp_flags, nr_vecs);
1800
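        /*
         * when called from the memory reclaim path, retry with
         * progressively fewer vecs so a smaller bio can still be built
         */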
1801         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1802                 while (!bio && (nr_vecs /= 2))
1803                         bio = bio_alloc(gfp_flags, nr_vecs);
1804         }
1805
1806         if (bio) {
1807                 bio->bi_size = 0;
1808                 bio->bi_bdev = bdev;
1809                 bio->bi_sector = first_sector;
1810         }
1811         return bio;
1812 }
1813
1814 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1815                           unsigned long bio_flags)
1816 {
1817         int ret = 0;
1818         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1819         struct page *page = bvec->bv_page;
1820         struct extent_io_tree *tree = bio->bi_private;
1821         u64 start;
1822
1823         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1824
1825         bio->bi_private = NULL;
1826
1827         bio_get(bio);
1828
1829         if (tree->ops && tree->ops->submit_bio_hook)
1830                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1831                                            mirror_num, bio_flags, start);
1832         else
1833                 submit_bio(rw, bio);
1834         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1835                 ret = -EOPNOTSUPP;
1836         bio_put(bio);
1837         return ret;
1838 }
1839
1840 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1841                               struct page *page, sector_t sector,
1842                               size_t size, unsigned long offset,
1843                               struct block_device *bdev,
1844                               struct bio **bio_ret,
1845                               unsigned long max_pages,
1846                               bio_end_io_t end_io_func,
1847                               int mirror_num,
1848                               unsigned long prev_bio_flags,
1849                               unsigned long bio_flags)
1850 {
1851         int ret = 0;
1852         struct bio *bio;
1853         int nr;
1854         int contig = 0;
1855         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1856         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1857         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1858
1859         if (bio_ret && *bio_ret) {
1860                 bio = *bio_ret;
1861                 if (old_compressed)
1862                         contig = bio->bi_sector == sector;
1863                 else
1864                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
1865                                 sector;
1866
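                /*
                 * submit the pending bio if this page cannot be merged into
                 * it: the bio flags changed, the sectors are not contiguous,
                 * the fs merge hook refuses, or the page does not fit
                 */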
1867                 if (prev_bio_flags != bio_flags || !contig ||
1868                     (tree->ops && tree->ops->merge_bio_hook &&
1869                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
1870                                                bio_flags)) ||
1871                     bio_add_page(bio, page, page_size, offset) < page_size) {
1872                         ret = submit_one_bio(rw, bio, mirror_num,
1873                                              prev_bio_flags);
1874                         bio = NULL;
1875                 } else {
1876                         return 0;
1877                 }
1878         }
1879         if (this_compressed)
1880                 nr = BIO_MAX_PAGES;
1881         else
1882                 nr = bio_get_nr_vecs(bdev);
1883
1884         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1885         if (!bio)
1886                 return -ENOMEM;
1887
1888         bio_add_page(bio, page, page_size, offset);
1889         bio->bi_end_io = end_io_func;
1890         bio->bi_private = tree;
1891
1892         if (bio_ret)
1893                 *bio_ret = bio;
1894         else
1895                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1896
1897         return ret;
1898 }
1899
1900 void set_page_extent_mapped(struct page *page)
1901 {
1902         if (!PagePrivate(page)) {
1903                 SetPagePrivate(page);
1904                 page_cache_get(page);
1905                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1906         }
1907 }
1908
1909 static void set_page_extent_head(struct page *page, unsigned long len)
1910 {
1911         WARN_ON(!PagePrivate(page));
1912         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1913 }
1914
1915 /*
1916  * basic readpage implementation.  Locked extent state structs are inserted
1917  * into the tree and are removed when the IO is done (by the end_io
1918  * handlers)
1919  */
1920 static int __extent_read_full_page(struct extent_io_tree *tree,
1921                                    struct page *page,
1922                                    get_extent_t *get_extent,
1923                                    struct bio **bio, int mirror_num,
1924                                    unsigned long *bio_flags)
1925 {
1926         struct inode *inode = page->mapping->host;
1927         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1928         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1929         u64 end;
1930         u64 cur = start;
1931         u64 extent_offset;
1932         u64 last_byte = i_size_read(inode);
1933         u64 block_start;
1934         u64 cur_end;
1935         sector_t sector;
1936         struct extent_map *em;
1937         struct block_device *bdev;
1938         struct btrfs_ordered_extent *ordered;
1939         int ret;
1940         int nr = 0;
1941         size_t pg_offset = 0;
1942         size_t iosize;
1943         size_t disk_io_size;
1944         size_t blocksize = inode->i_sb->s_blocksize;
1945         unsigned long this_bio_flag = 0;
1946
1947         set_page_extent_mapped(page);
1948
1949         if (!PageUptodate(page)) {
1950                 if (cleancache_get_page(page) == 0) {
1951                         BUG_ON(blocksize != PAGE_SIZE);
1952                         goto out;
1953                 }
1954         }
1955
1956         end = page_end;
1957         while (1) {
1958                 lock_extent(tree, start, end, GFP_NOFS);
1959                 ordered = btrfs_lookup_ordered_extent(inode, start);
1960                 if (!ordered)
1961                         break;
1962                 unlock_extent(tree, start, end, GFP_NOFS);
1963                 btrfs_start_ordered_extent(inode, ordered, 1);
1964                 btrfs_put_ordered_extent(ordered);
1965         }
1966
1967         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1968                 char *userpage;
1969                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1970
1971                 if (zero_offset) {
1972                         iosize = PAGE_CACHE_SIZE - zero_offset;
1973                         userpage = kmap_atomic(page, KM_USER0);
1974                         memset(userpage + zero_offset, 0, iosize);
1975                         flush_dcache_page(page);
1976                         kunmap_atomic(userpage, KM_USER0);
1977                 }
1978         }
1979         while (cur <= end) {
1980                 if (cur >= last_byte) {
1981                         char *userpage;
1982                         struct extent_state *cached = NULL;
1983
1984                         iosize = PAGE_CACHE_SIZE - pg_offset;
1985                         userpage = kmap_atomic(page, KM_USER0);
1986                         memset(userpage + pg_offset, 0, iosize);
1987                         flush_dcache_page(page);
1988                         kunmap_atomic(userpage, KM_USER0);
1989                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1990                                             &cached, GFP_NOFS);
1991                         unlock_extent_cached(tree, cur, cur + iosize - 1,
1992                                              &cached, GFP_NOFS);
1993                         break;
1994                 }
1995                 em = get_extent(inode, page, pg_offset, cur,
1996                                 end - cur + 1, 0);
1997                 if (IS_ERR_OR_NULL(em)) {
1998                         SetPageError(page);
1999                         unlock_extent(tree, cur, end, GFP_NOFS);
2000                         break;
2001                 }
2002                 extent_offset = cur - em->start;
2003                 BUG_ON(extent_map_end(em) <= cur);
2004                 BUG_ON(end < cur);
2005
2006                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2007                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2008                         extent_set_compress_type(&this_bio_flag,
2009                                                  em->compress_type);
2010                 }
2011
2012                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2013                 cur_end = min(extent_map_end(em) - 1, end);
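                /* round the IO size up to a multiple of the fs blocksize */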
2014                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2015                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2016                         disk_io_size = em->block_len;
2017                         sector = em->block_start >> 9;
2018                 } else {
2019                         sector = (em->block_start + extent_offset) >> 9;
2020                         disk_io_size = iosize;
2021                 }
2022                 bdev = em->bdev;
2023                 block_start = em->block_start;
2024                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2025                         block_start = EXTENT_MAP_HOLE;
2026                 free_extent_map(em);
2027                 em = NULL;
2028
2029                 /* we've found a hole, just zero and go on */
2030                 if (block_start == EXTENT_MAP_HOLE) {
2031                         char *userpage;
2032                         struct extent_state *cached = NULL;
2033
2034                         userpage = kmap_atomic(page, KM_USER0);
2035                         memset(userpage + pg_offset, 0, iosize);
2036                         flush_dcache_page(page);
2037                         kunmap_atomic(userpage, KM_USER0);
2038
2039                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2040                                             &cached, GFP_NOFS);
2041                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2042                                              &cached, GFP_NOFS);
2043                         cur = cur + iosize;
2044                         pg_offset += iosize;
2045                         continue;
2046                 }
2047                 /* the get_extent function already copied into the page */
2048                 if (test_range_bit(tree, cur, cur_end,
2049                                    EXTENT_UPTODATE, 1, NULL)) {
2050                         check_page_uptodate(tree, page);
2051                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2052                         cur = cur + iosize;
2053                         pg_offset += iosize;
2054                         continue;
2055                 }
2056                 /* we have an inline extent but it didn't get marked up
2057                  * to date.  Error out
2058                  */
2059                 if (block_start == EXTENT_MAP_INLINE) {
2060                         SetPageError(page);
2061                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2062                         cur = cur + iosize;
2063                         pg_offset += iosize;
2064                         continue;
2065                 }
2066
2067                 ret = 0;
2068                 if (tree->ops && tree->ops->readpage_io_hook) {
2069                         ret = tree->ops->readpage_io_hook(page, cur,
2070                                                           cur + iosize - 1);
2071                 }
2072                 if (!ret) {
2073                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2074                         pnr -= page->index;
2075                         ret = submit_extent_page(READ, tree, page,
2076                                          sector, disk_io_size, pg_offset,
2077                                          bdev, bio, pnr,
2078                                          end_bio_extent_readpage, mirror_num,
2079                                          *bio_flags,
2080                                          this_bio_flag);
2081                         nr++;
2082                         *bio_flags = this_bio_flag;
2083                 }
2084                 if (ret)
2085                         SetPageError(page);
2086                 cur = cur + iosize;
2087                 pg_offset += iosize;
2088         }
2089 out:
2090         if (!nr) {
2091                 if (!PageError(page))
2092                         SetPageUptodate(page);
2093                 unlock_page(page);
2094         }
2095         return 0;
2096 }
2097
2098 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2099                             get_extent_t *get_extent)
2100 {
2101         struct bio *bio = NULL;
2102         unsigned long bio_flags = 0;
2103         int ret;
2104
2105         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2106                                       &bio_flags);
2107         if (bio)
2108                 ret = submit_one_bio(READ, bio, 0, bio_flags);
2109         return ret;
2110 }
2111
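/*
 * A minimal sketch of the expected caller (not taken from this file): a
 * filesystem's ->readpage address space operation simply forwards here with
 * its own get_extent callback.  'foo_readpage' is a made-up name; btrfs
 * wires this up from btrfs_readpage() in fs/btrfs/inode.c.
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent);
 *	}
 */
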
2112 static noinline void update_nr_written(struct page *page,
2113                                       struct writeback_control *wbc,
2114                                       unsigned long nr_written)
2115 {
2116         wbc->nr_to_write -= nr_written;
2117         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2118             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2119                 page->mapping->writeback_index = page->index + nr_written;
2120 }
2121
2122 /*
2123  * the writepage semantics are similar to regular writepage.  extent
2124  * records are inserted to lock ranges in the tree, and as dirty areas
2125  * are found, they are marked writeback.  Then the lock bits are removed
2126  * and the end_io handler clears the writeback ranges
2127  */
2128 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2129                               void *data)
2130 {
2131         struct inode *inode = page->mapping->host;
2132         struct extent_page_data *epd = data;
2133         struct extent_io_tree *tree = epd->tree;
2134         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2135         u64 delalloc_start;
2136         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2137         u64 end;
2138         u64 cur = start;
2139         u64 extent_offset;
2140         u64 last_byte = i_size_read(inode);
2141         u64 block_start;
2142         u64 iosize;
2143         sector_t sector;
2144         struct extent_state *cached_state = NULL;
2145         struct extent_map *em;
2146         struct block_device *bdev;
2147         int ret;
2148         int nr = 0;
2149         size_t pg_offset = 0;
2150         size_t blocksize;
2151         loff_t i_size = i_size_read(inode);
2152         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2153         u64 nr_delalloc;
2154         u64 delalloc_end;
2155         int page_started;
2156         int compressed;
2157         int write_flags;
2158         unsigned long nr_written = 0;
2159
2160         if (wbc->sync_mode == WB_SYNC_ALL)
2161                 write_flags = WRITE_SYNC;
2162         else
2163                 write_flags = WRITE;
2164
2165         trace___extent_writepage(page, inode, wbc);
2166
2167         WARN_ON(!PageLocked(page));
2168         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2169         if (page->index > end_index ||
2170            (page->index == end_index && !pg_offset)) {
2171                 page->mapping->a_ops->invalidatepage(page, 0);
2172                 unlock_page(page);
2173                 return 0;
2174         }
2175
2176         if (page->index == end_index) {
2177                 char *userpage;
2178
2179                 userpage = kmap_atomic(page, KM_USER0);
2180                 memset(userpage + pg_offset, 0,
2181                        PAGE_CACHE_SIZE - pg_offset);
2182                 kunmap_atomic(userpage, KM_USER0);
2183                 flush_dcache_page(page);
2184         }
2185         pg_offset = 0;
2186
2187         set_page_extent_mapped(page);
2188
2189         delalloc_start = start;
2190         delalloc_end = 0;
2191         page_started = 0;
2192         if (!epd->extent_locked) {
2193                 u64 delalloc_to_write = 0;
2194                 /*
2195                  * make sure the wbc mapping index is at least updated
2196                  * to this page.
2197                  */
2198                 update_nr_written(page, wbc, 0);
2199
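                /*
                 * find and lock any delalloc ranges that touch this page and
                 * hand them to the fs fill_delalloc callback, which allocates
                 * extents and may start the IO; the 128MB constant caps how
                 * much delalloc is processed per loop iteration
                 */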
2200                 while (delalloc_end < page_end) {
2201                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2202                                                        page,
2203                                                        &delalloc_start,
2204                                                        &delalloc_end,
2205                                                        128 * 1024 * 1024);
2206                         if (nr_delalloc == 0) {
2207                                 delalloc_start = delalloc_end + 1;
2208                                 continue;
2209                         }
2210                         tree->ops->fill_delalloc(inode, page, delalloc_start,
2211                                                  delalloc_end, &page_started,
2212                                                  &nr_written);
2213                         /*
2214                          * delalloc_end is already one less than the total
2215                          * length, so we don't subtract one from
2216                          * PAGE_CACHE_SIZE
2217                          */
2218                         delalloc_to_write += (delalloc_end - delalloc_start +
2219                                               PAGE_CACHE_SIZE) >>
2220                                               PAGE_CACHE_SHIFT;
2221                         delalloc_start = delalloc_end + 1;
2222                 }
2223                 if (wbc->nr_to_write < delalloc_to_write) {
2224                         int thresh = 8192;
2225
2226                         if (delalloc_to_write < thresh * 2)
2227                                 thresh = delalloc_to_write;
2228                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
2229                                                  thresh);
2230                 }
2231
2232                 /* did the fill delalloc function already unlock and start
2233                  * the IO?
2234                  */
2235                 if (page_started) {
2236                         ret = 0;
2237                         /*
2238                          * we've unlocked the page, so we can't update
2239                          * the mapping's writeback index, just update
2240                          * nr_to_write.
2241                          */
2242                         wbc->nr_to_write -= nr_written;
2243                         goto done_unlocked;
2244                 }
2245         }
2246         if (tree->ops && tree->ops->writepage_start_hook) {
2247                 ret = tree->ops->writepage_start_hook(page, start,
2248                                                       page_end);
2249                 if (ret == -EAGAIN) {
2250                         redirty_page_for_writepage(wbc, page);
2251                         update_nr_written(page, wbc, nr_written);
2252                         unlock_page(page);
2253                         ret = 0;
2254                         goto done_unlocked;
2255                 }
2256         }
2257
2258         /*
2259          * we don't want to touch the inode after unlocking the page,
2260          * so we update the mapping writeback index now
2261          */
2262         update_nr_written(page, wbc, nr_written + 1);
2263
2264         end = page_end;
2265         if (last_byte <= start) {
2266                 if (tree->ops && tree->ops->writepage_end_io_hook)
2267                         tree->ops->writepage_end_io_hook(page, start,
2268                                                          page_end, NULL, 1);
2269                 goto done;
2270         }
2271
2272         blocksize = inode->i_sb->s_blocksize;
2273
2274         while (cur <= end) {
2275                 if (cur >= last_byte) {
2276                         if (tree->ops && tree->ops->writepage_end_io_hook)
2277                                 tree->ops->writepage_end_io_hook(page, cur,
2278                                                          page_end, NULL, 1);
2279                         break;
2280                 }
2281                 em = epd->get_extent(inode, page, pg_offset, cur,
2282                                      end - cur + 1, 1);
2283                 if (IS_ERR_OR_NULL(em)) {
2284                         SetPageError(page);
2285                         break;
2286                 }
2287
2288                 extent_offset = cur - em->start;
2289                 BUG_ON(extent_map_end(em) <= cur);
2290                 BUG_ON(end < cur);
2291                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2292                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2293                 sector = (em->block_start + extent_offset) >> 9;
2294                 bdev = em->bdev;
2295                 block_start = em->block_start;
2296                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2297                 free_extent_map(em);
2298                 em = NULL;
2299
2300                 /*
2301                  * compressed and inline extents are written through other
2302                  * paths in the FS
2303                  */
2304                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2305                     block_start == EXTENT_MAP_INLINE) {
2306                         /*
2307                          * end_io notification does not happen here for
2308                          * compressed extents
2309                          */
2310                         if (!compressed && tree->ops &&
2311                             tree->ops->writepage_end_io_hook)
2312                                 tree->ops->writepage_end_io_hook(page, cur,
2313                                                          cur + iosize - 1,
2314                                                          NULL, 1);
2315                         else if (compressed) {
2316                                 /* we don't want to end_page_writeback on
2317                                  * a compressed extent.  this happens
2318                                  * elsewhere
2319                                  */
2320                                 nr++;
2321                         }
2322
2323                         cur += iosize;
2324                         pg_offset += iosize;
2325                         continue;
2326                 }
2327                 /* leave this out until we have a page_mkwrite call */
2328                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2329                                    EXTENT_DIRTY, 0, NULL)) {
2330                         cur = cur + iosize;
2331                         pg_offset += iosize;
2332                         continue;
2333                 }
2334
2335                 if (tree->ops && tree->ops->writepage_io_hook) {
2336                         ret = tree->ops->writepage_io_hook(page, cur,
2337                                                 cur + iosize - 1);
2338                 } else {
2339                         ret = 0;
2340                 }
2341                 if (ret) {
2342                         SetPageError(page);
2343                 } else {
2344                         unsigned long max_nr = end_index + 1;
2345
2346                         set_range_writeback(tree, cur, cur + iosize - 1);
2347                         if (!PageWriteback(page)) {
2348                                 printk(KERN_ERR "btrfs warning page %lu not "
2349                                        "writeback, cur %llu end %llu\n",
2350                                        page->index, (unsigned long long)cur,
2351                                        (unsigned long long)end);
2352                         }
2353
2354                         ret = submit_extent_page(write_flags, tree, page,
2355                                                  sector, iosize, pg_offset,
2356                                                  bdev, &epd->bio, max_nr,
2357                                                  end_bio_extent_writepage,
2358                                                  0, 0, 0);
2359                         if (ret)
2360                                 SetPageError(page);
2361                 }
2362                 cur = cur + iosize;
2363                 pg_offset += iosize;
2364                 nr++;
2365         }
2366 done:
2367         if (nr == 0) {
2368                 /* make sure the mapping tag for page dirty gets cleared */
2369                 set_page_writeback(page);
2370                 end_page_writeback(page);
2371         }
2372         unlock_page(page);
2373
2374 done_unlocked:
2375
2376         /* drop our reference on any cached states */
2377         free_extent_state(cached_state);
2378         return 0;
2379 }
2380
2381 /**
2382  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2383  * @mapping: address space structure to write
2384  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2385  * @writepage: function called for each page
2386  * @data: data passed to writepage function
2387  *
2388  * If a page is already under I/O, write_cache_pages() skips it, even
2389  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2390  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2391  * and msync() need to guarantee that all the data which was dirty at the time
2392  * the call was made get new I/O started against them.  If wbc->sync_mode is
2393  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2394  * existing IO to complete.
2395  */
2396 static int extent_write_cache_pages(struct extent_io_tree *tree,
2397                              struct address_space *mapping,
2398                              struct writeback_control *wbc,
2399                              writepage_t writepage, void *data,
2400                              void (*flush_fn)(void *))
2401 {
2402         int ret = 0;
2403         int done = 0;
2404         int nr_to_write_done = 0;
2405         struct pagevec pvec;
2406         int nr_pages;
2407         pgoff_t index;
2408         pgoff_t end;            /* Inclusive */
2409         int scanned = 0;
2410         int tag;
2411
2412         pagevec_init(&pvec, 0);
2413         if (wbc->range_cyclic) {
2414                 index = mapping->writeback_index; /* Start from prev offset */
2415                 end = -1;
2416         } else {
2417                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2418                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2419                 scanned = 1;
2420         }
2421         if (wbc->sync_mode == WB_SYNC_ALL)
2422                 tag = PAGECACHE_TAG_TOWRITE;
2423         else
2424                 tag = PAGECACHE_TAG_DIRTY;
2425 retry:
2426         if (wbc->sync_mode == WB_SYNC_ALL)
2427                 tag_pages_for_writeback(mapping, index, end);
2428         while (!done && !nr_to_write_done && (index <= end) &&
2429                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2430                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2431                 unsigned i;
2432
2433                 scanned = 1;
2434                 for (i = 0; i < nr_pages; i++) {
2435                         struct page *page = pvec.pages[i];
2436
2437                         /*
2438                          * At this point we hold neither mapping->tree_lock nor
2439                          * lock on the page itself: the page may be truncated or
2440                          * invalidated (changing page->mapping to NULL), or even
2441                          * swizzled back from swapper_space to tmpfs file
2442                          * mapping
2443                          */
2444                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2445                                 tree->ops->write_cache_pages_lock_hook(page);
2446                         else
2447                                 lock_page(page);
2448
2449                         if (unlikely(page->mapping != mapping)) {
2450                                 unlock_page(page);
2451                                 continue;
2452                         }
2453
2454                         if (!wbc->range_cyclic && page->index > end) {
2455                                 done = 1;
2456                                 unlock_page(page);
2457                                 continue;
2458                         }
2459
2460                         if (wbc->sync_mode != WB_SYNC_NONE) {
2461                                 if (PageWriteback(page))
2462                                         flush_fn(data);
2463                                 wait_on_page_writeback(page);
2464                         }
2465
2466                         if (PageWriteback(page) ||
2467                             !clear_page_dirty_for_io(page)) {
2468                                 unlock_page(page);
2469                                 continue;
2470                         }
2471
2472                         ret = (*writepage)(page, wbc, data);
2473
2474                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2475                                 unlock_page(page);
2476                                 ret = 0;
2477                         }
2478                         if (ret)
2479                                 done = 1;
2480
2481                         /*
2482                          * the filesystem may choose to bump up nr_to_write.
2483                          * We have to make sure to honor the new nr_to_write
2484                          * at any time
2485                          */
2486                         nr_to_write_done = wbc->nr_to_write <= 0;
2487                 }
2488                 pagevec_release(&pvec);
2489                 cond_resched();
2490         }
2491         if (!scanned && !done) {
2492                 /*
2493                  * We hit the last page and there is more work to be done: wrap
2494                  * back to the start of the file
2495                  */
2496                 scanned = 1;
2497                 index = 0;
2498                 goto retry;
2499         }
2500         return ret;
2501 }
2502
2503 static void flush_epd_write_bio(struct extent_page_data *epd)
2504 {
2505         if (epd->bio) {
2506                 if (epd->sync_io)
2507                         submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2508                 else
2509                         submit_one_bio(WRITE, epd->bio, 0, 0);
2510                 epd->bio = NULL;
2511         }
2512 }
2513
2514 static noinline void flush_write_bio(void *data)
2515 {
2516         struct extent_page_data *epd = data;
2517         flush_epd_write_bio(epd);
2518 }
2519
2520 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2521                           get_extent_t *get_extent,
2522                           struct writeback_control *wbc)
2523 {
2524         int ret;
2525         struct address_space *mapping = page->mapping;
2526         struct extent_page_data epd = {
2527                 .bio = NULL,
2528                 .tree = tree,
2529                 .get_extent = get_extent,
2530                 .extent_locked = 0,
2531                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2532         };
2533         struct writeback_control wbc_writepages = {
2534                 .sync_mode      = wbc->sync_mode,
2535                 .older_than_this = NULL,
2536                 .nr_to_write    = 64,
2537                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2538                 .range_end      = (loff_t)-1,
2539         };
2540
2541         ret = __extent_writepage(page, wbc, &epd);
2542
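        /*
         * after writing the requested page, opportunistically write some of
         * the dirty pages that follow it (wbc_writepages starts at the next
         * page offset and allows up to 64 more pages)
         */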
2543         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2544                                  __extent_writepage, &epd, flush_write_bio);
2545         flush_epd_write_bio(&epd);
2546         return ret;
2547 }
2548
2549 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2550                               u64 start, u64 end, get_extent_t *get_extent,
2551                               int mode)
2552 {
2553         int ret = 0;
2554         struct address_space *mapping = inode->i_mapping;
2555         struct page *page;
2556         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2557                 PAGE_CACHE_SHIFT;
2558
2559         struct extent_page_data epd = {
2560                 .bio = NULL,
2561                 .tree = tree,
2562                 .get_extent = get_extent,
2563                 .extent_locked = 1,
2564                 .sync_io = mode == WB_SYNC_ALL,
2565         };
2566         struct writeback_control wbc_writepages = {
2567                 .sync_mode      = mode,
2568                 .older_than_this = NULL,
2569                 .nr_to_write    = nr_pages * 2,
2570                 .range_start    = start,
2571                 .range_end      = end + 1,
2572         };
2573
2574         while (start <= end) {
2575                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2576                 if (clear_page_dirty_for_io(page))
2577                         ret = __extent_writepage(page, &wbc_writepages, &epd);
2578                 else {
2579                         if (tree->ops && tree->ops->writepage_end_io_hook)
2580                                 tree->ops->writepage_end_io_hook(page, start,
2581                                                  start + PAGE_CACHE_SIZE - 1,
2582                                                  NULL, 1);
2583                         unlock_page(page);
2584                 }
2585                 page_cache_release(page);
2586                 start += PAGE_CACHE_SIZE;
2587         }
2588
2589         flush_epd_write_bio(&epd);
2590         return ret;
2591 }
2592
2593 int extent_writepages(struct extent_io_tree *tree,
2594                       struct address_space *mapping,
2595                       get_extent_t *get_extent,
2596                       struct writeback_control *wbc)
2597 {
2598         int ret = 0;
2599         struct extent_page_data epd = {
2600                 .bio = NULL,
2601                 .tree = tree,
2602                 .get_extent = get_extent,
2603                 .extent_locked = 0,
2604                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2605         };
2606
2607         ret = extent_write_cache_pages(tree, mapping, wbc,
2608                                        __extent_writepage, &epd,
2609                                        flush_write_bio);
2610         flush_epd_write_bio(&epd);
2611         return ret;
2612 }
2613
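/*
 * A minimal sketch of the expected caller (not taken from this file): a
 * filesystem's ->writepages address space operation hands the whole mapping
 * to extent_writepages() with its get_extent callback.  'foo_writepages' is
 * a made-up name; btrfs_writepages() in fs/btrfs/inode.c is the real caller.
 *
 *	static int foo_writepages(struct address_space *mapping,
 *				  struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
 *	}
 */
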
2614 int extent_readpages(struct extent_io_tree *tree,
2615                      struct address_space *mapping,
2616                      struct list_head *pages, unsigned nr_pages,
2617                      get_extent_t get_extent)
2618 {
2619         struct bio *bio = NULL;
2620         unsigned page_idx;
2621         unsigned long bio_flags = 0;
2622
2623         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2624                 struct page *page = list_entry(pages->prev, struct page, lru);
2625
2626                 prefetchw(&page->flags);
2627                 list_del(&page->lru);
2628                 if (!add_to_page_cache_lru(page, mapping,
2629                                         page->index, GFP_NOFS)) {
2630                         __extent_read_full_page(tree, page, get_extent,
2631                                                 &bio, 0, &bio_flags);
2632                 }
2633                 page_cache_release(page);
2634         }
2635         BUG_ON(!list_empty(pages));
2636         if (bio)
2637                 submit_one_bio(READ, bio, 0, bio_flags);
2638         return 0;
2639 }
2640
2641 /*
2642  * basic invalidatepage code, this waits on any locked or writeback
2643  * ranges corresponding to the page, and then deletes any extent state
2644  * records from the tree
2645  */
2646 int extent_invalidatepage(struct extent_io_tree *tree,
2647                           struct page *page, unsigned long offset)
2648 {
2649         struct extent_state *cached_state = NULL;
2650         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2651         u64 end = start + PAGE_CACHE_SIZE - 1;
2652         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2653
2654         start += (offset + blocksize - 1) & ~(blocksize - 1);
2655         if (start > end)
2656                 return 0;
2657
2658         lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2659         wait_on_page_writeback(page);
2660         clear_extent_bit(tree, start, end,
2661                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2662                          EXTENT_DO_ACCOUNTING,
2663                          1, 1, &cached_state, GFP_NOFS);
2664         return 0;
2665 }
2666
2667 /*
2668  * a helper for releasepage, this tests for areas of the page that
2669  * are locked or under IO and drops the related state bits if it is safe
2670  * to drop the page.
2671  */
2672 int try_release_extent_state(struct extent_map_tree *map,
2673                              struct extent_io_tree *tree, struct page *page,
2674                              gfp_t mask)
2675 {
2676         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2677         u64 end = start + PAGE_CACHE_SIZE - 1;
2678         int ret = 1;
2679
2680         if (test_range_bit(tree, start, end,
2681                            EXTENT_IOBITS, 0, NULL))
2682                 ret = 0;
2683         else {
2684                 if ((mask & GFP_NOFS) == GFP_NOFS)
2685                         mask = GFP_NOFS;
2686                 /*
2687                  * at this point we can safely clear everything except the
2688                  * locked bit and the nodatasum bit
2689                  */
2690                 ret = clear_extent_bit(tree, start, end,
2691                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2692                                  0, 0, NULL, mask);
2693
2694                 /* if clear_extent_bit failed for enomem reasons,
2695                  * we can't allow the release to continue.
2696                  */
2697                 if (ret < 0)
2698                         ret = 0;
2699                 else
2700                         ret = 1;
2701         }
2702         return ret;
2703 }
2704
2705 /*
2706  * a helper for releasepage.  As long as there are no locked extents
2707  * in the range corresponding to the page, both state records and extent
2708  * map records are removed
2709  */
2710 int try_release_extent_mapping(struct extent_map_tree *map,
2711                                struct extent_io_tree *tree, struct page *page,
2712                                gfp_t mask)
2713 {
2714         struct extent_map *em;
2715         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2716         u64 end = start + PAGE_CACHE_SIZE - 1;
2717
2718         if ((mask & __GFP_WAIT) &&
2719             page->mapping->host->i_size > 16 * 1024 * 1024) {
2720                 u64 len;
2721                 while (start <= end) {
2722                         len = end - start + 1;
2723                         write_lock(&map->lock);
2724                         em = lookup_extent_mapping(map, start, len);
2725                         if (IS_ERR_OR_NULL(em)) {
2726                                 write_unlock(&map->lock);
2727                                 break;
2728                         }
2729                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2730                             em->start != start) {
2731                                 write_unlock(&map->lock);
2732                                 free_extent_map(em);
2733                                 break;
2734                         }
2735                         if (!test_range_bit(tree, em->start,
2736                                             extent_map_end(em) - 1,
2737                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
2738                                             0, NULL)) {
2739                                 remove_extent_mapping(map, em);
2740                                 /* once for the rb tree */
2741                                 free_extent_map(em);
2742                         }
2743                         start = extent_map_end(em);
2744                         write_unlock(&map->lock);
2745
2746                         /* once for us */
2747                         free_extent_map(em);
2748                 }
2749         }
2750         return try_release_extent_state(map, tree, page, mask);
2751 }
2752
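/*
 * A minimal sketch of the expected caller (not taken from this file): the
 * ->releasepage address space operation forwards here so clean, unlocked
 * pages can drop their extent state and extent maps.  'foo_releasepage' is
 * a made-up name; btrfs_releasepage() in fs/btrfs/inode.c is the real
 * caller and bails out early on writeback or dirty pages.
 *
 *	static int foo_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		struct extent_io_tree *tree;
 *		struct extent_map_tree *map;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		map = &BTRFS_I(page->mapping->host)->extent_tree;
 *		return try_release_extent_mapping(map, tree, page, gfp_flags);
 *	}
 */
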
2753 /*
2754  * helper function for fiemap, which doesn't want to see any holes.
2755  * This keeps mapping until it finds a non-hole extent or moves past 'last'
2756  */
2757 static struct extent_map *get_extent_skip_holes(struct inode *inode,
2758                                                 u64 offset,
2759                                                 u64 last,
2760                                                 get_extent_t *get_extent)
2761 {
2762         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2763         struct extent_map *em;
2764         u64 len;
2765
2766         if (offset >= last)
2767                 return NULL;
2768
2769         while (1) {
2770                 len = last - offset;
2771                 if (len == 0)
2772                         break;
2773                 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2774                 em = get_extent(inode, NULL, 0, offset, len, 0);
2775                 if (IS_ERR_OR_NULL(em))
2776                         return em;
2777
2778                 /* if this isn't a hole return it */
2779                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2780                     em->block_start != EXTENT_MAP_HOLE) {
2781                         return em;
2782                 }
2783
2784                 /* this is a hole, advance to the next extent */
2785                 offset = extent_map_end(em);
2786                 free_extent_map(em);
2787                 if (offset >= last)
2788                         break;
2789         }
2790         return NULL;
2791 }
2792
2793 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2794                 __u64 start, __u64 len, get_extent_t *get_extent)
2795 {
2796         int ret = 0;
2797         u64 off = start;
2798         u64 max = start + len;
2799         u32 flags = 0;
2800         u32 found_type;
2801         u64 last;
2802         u64 last_for_get_extent = 0;
2803         u64 disko = 0;
2804         u64 isize = i_size_read(inode);
2805         struct btrfs_key found_key;
2806         struct extent_map *em = NULL;
2807         struct extent_state *cached_state = NULL;
2808         struct btrfs_path *path;
2809         struct btrfs_file_extent_item *item;
2810         int end = 0;
2811         u64 em_start = 0;
2812         u64 em_len = 0;
2813         u64 em_end = 0;
2814         unsigned long emflags;
2815
2816         if (len == 0)
2817                 return -EINVAL;
2818
2819         path = btrfs_alloc_path();
2820         if (!path)
2821                 return -ENOMEM;
2822         path->leave_spinning = 1;
2823
2824         /*
2825          * lookup the last file extent.  We're not using i_size here
2826          * because there might be preallocation past i_size
2827          */
2828         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2829                                        path, btrfs_ino(inode), -1, 0);
2830         if (ret < 0) {
2831                 btrfs_free_path(path);
2832                 return ret;
2833         }
2834         WARN_ON(!ret);
2835         path->slots[0]--;
2836         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2837                               struct btrfs_file_extent_item);
2838         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2839         found_type = btrfs_key_type(&found_key);
2840
2841         /* No extents, but there might be delalloc bits */
2842         if (found_key.objectid != btrfs_ino(inode) ||
2843             found_type != BTRFS_EXTENT_DATA_KEY) {
2844                 /* have to trust i_size as the end */
2845                 last = (u64)-1;
2846                 last_for_get_extent = isize;
2847         } else {
2848                 /*
2849                  * remember the start of the last extent.  There are a
2850                  * bunch of different factors that go into the length of the
2851                  * extent, so it's much less complex to remember where it started
2852                  */
2853                 last = found_key.offset;
2854                 last_for_get_extent = last + 1;
2855         }
2856         btrfs_free_path(path);
2857
2858         /*
2859          * we might have some extents allocated but more delalloc past those
2860          * extents.  So, we trust isize unless the start of the last extent is
2861          * beyond isize.
2862          */
2863         if (last < isize) {
2864                 last = (u64)-1;
2865                 last_for_get_extent = isize;
2866         }
2867
2868         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
2869                          &cached_state, GFP_NOFS);
2870
2871         em = get_extent_skip_holes(inode, off, last_for_get_extent,
2872                                    get_extent);
2873         if (!em)
2874                 goto out;
2875         if (IS_ERR(em)) {
2876                 ret = PTR_ERR(em);
2877                 goto out;
2878         }
2879
2880         while (!end) {
2881                 u64 offset_in_extent;
2882
2883                 /* break if the extent we found is outside the range */
2884                 if (em->start >= max || extent_map_end(em) < off)
2885                         break;
2886
2887                 /*
2888                  * get_extent may return an extent that starts before our
2889                  * requested range.  We have to make sure the ranges
2890                  * we return to fiemap always move forward and don't
2891                  * overlap, so adjust the offsets here
2892                  */
2893                 em_start = max(em->start, off);
2894
2895                 /*
2896                  * record the offset from the start of the extent
2897                  * for adjusting the disk offset below
2898                  */
2899                 offset_in_extent = em_start - em->start;
2900                 em_end = extent_map_end(em);
2901                 em_len = em_end - em_start;
2902                 emflags = em->flags;
2903                 disko = 0;
2904                 flags = 0;
2905
2906                 /*
2907                  * bump off for our next call to get_extent
2908                  */
2909                 off = extent_map_end(em);
2910                 if (off >= max)
2911                         end = 1;
2912
2913                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2914                         end = 1;
2915                         flags |= FIEMAP_EXTENT_LAST;
2916                 } else if (em->block_start == EXTENT_MAP_INLINE) {
2917                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
2918                                   FIEMAP_EXTENT_NOT_ALIGNED);
2919                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
2920                         flags |= (FIEMAP_EXTENT_DELALLOC |
2921                                   FIEMAP_EXTENT_UNKNOWN);
2922                 } else {
2923                         disko = em->block_start + offset_in_extent;
2924                 }
2925                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2926                         flags |= FIEMAP_EXTENT_ENCODED;
2927
2928                 free_extent_map(em);
2929                 em = NULL;
2930                 if ((em_start >= last) || em_len == (u64)-1 ||
2931                    (last == (u64)-1 && isize <= em_end)) {
2932                         flags |= FIEMAP_EXTENT_LAST;
2933                         end = 1;
2934                 }
2935
2936                 /* now scan forward to see if this is really the last extent. */
2937                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
2938                                            get_extent);
2939                 if (IS_ERR(em)) {
2940                         ret = PTR_ERR(em);
2941                         goto out;
2942                 }
2943                 if (!em) {
2944                         flags |= FIEMAP_EXTENT_LAST;
2945                         end = 1;
2946                 }
2947                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2948                                               em_len, flags);
2949                 if (ret)
2950                         goto out_free;
2951         }
2952 out_free:
2953         free_extent_map(em);
2954 out:
2955         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
2956                              &cached_state, GFP_NOFS);
2957         return ret;
2958 }
2959
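/*
 * Return the page at index @i inside @eb.  Index 0 is the cached
 * first_page; the others are looked up in the page cache radix tree.
 */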
2960 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2961                                               unsigned long i)
2962 {
2963         struct page *p;
2964         struct address_space *mapping;
2965
2966         if (i == 0)
2967                 return eb->first_page;
2968         i += eb->start >> PAGE_CACHE_SHIFT;
2969         mapping = eb->first_page->mapping;
2970         if (!mapping)
2971                 return NULL;
2972
2973         /*
2974          * extent_buffer_page is only called after pinning the page
2975          * by increasing the reference count.  So we know the page must
2976          * be in the radix tree.
2977          */
2978         rcu_read_lock();
2979         p = radix_tree_lookup(&mapping->page_tree, i);
2980         rcu_read_unlock();
2981
2982         return p;
2983 }
2984
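/*
 * Number of pages spanned by an extent buffer at @start with length @len.
 * For example, with 4K pages a buffer at offset 4096 with length 16384
 * covers exactly four pages, while the same length starting at 6144 would
 * straddle five.
 */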
2985 static inline unsigned long num_extent_pages(u64 start, u64 len)
2986 {
2987         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2988                 (start >> PAGE_CACHE_SHIFT);
2989 }
2990
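/*
 * Allocate a bare extent buffer: the locking counters, wait queues and
 * refcount (initially 1) are set up, but no pages are attached yet.
 * Returns NULL if the allocation fails.
 */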
2991 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2992                                                    u64 start,
2993                                                    unsigned long len,
2994                                                    gfp_t mask)
2995 {
2996         struct extent_buffer *eb = NULL;
2997 #if LEAK_DEBUG
2998         unsigned long flags;
2999 #endif
3000
3001         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3002         if (eb == NULL)
3003                 return NULL;
3004         eb->start = start;
3005         eb->len = len;
3006         rwlock_init(&eb->lock);
3007         atomic_set(&eb->write_locks, 0);
3008         atomic_set(&eb->read_locks, 0);
3009         atomic_set(&eb->blocking_readers, 0);
3010         atomic_set(&eb->blocking_writers, 0);
3011         atomic_set(&eb->spinning_readers, 0);
3012         atomic_set(&eb->spinning_writers, 0);
3013         init_waitqueue_head(&eb->write_lock_wq);
3014         init_waitqueue_head(&eb->read_lock_wq);
3015
3016 #if LEAK_DEBUG
3017         spin_lock_irqsave(&leak_lock, flags);
3018         list_add(&eb->leak_list, &buffers);
3019         spin_unlock_irqrestore(&leak_lock, flags);
3020 #endif
3021         atomic_set(&eb->refs, 1);
3022
3023         return eb;
3024 }
3025
3026 static void __free_extent_buffer(struct extent_buffer *eb)
3027 {
3028 #if LEAK_DEBUG
3029         unsigned long flags;
3030         spin_lock_irqsave(&leak_lock, flags);
3031         list_del(&eb->leak_list);
3032         spin_unlock_irqrestore(&leak_lock, flags);
3033 #endif
3034         kmem_cache_free(extent_buffer_cache, eb);
3035 }
3036
3037 /*
3038  * Helper for releasing an extent buffer's pages, from @start_idx to the end.
3039  */
3040 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3041                                                 unsigned long start_idx)
3042 {
3043         unsigned long index;
3044         struct page *page;
3045
3046         if (!eb->first_page)
3047                 return;
3048
3049         index = num_extent_pages(eb->start, eb->len);
3050         if (start_idx >= index)
3051                 return;
3052
3053         do {
3054                 index--;
3055                 page = extent_buffer_page(eb, index);
3056                 if (page)
3057                         page_cache_release(page);
3058         } while (index != start_idx);
3059 }
3060
3061 /*
3062  * Helper for releasing the extent buffer.
3063  */
3064 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3065 {
3066         btrfs_release_extent_buffer_page(eb, 0);
3067         __free_extent_buffer(eb);
3068 }
3069
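/*
 * Return the extent buffer covering @start, either by finding it in the
 * tree's radix tree or by allocating one and filling it with pages from
 * the page cache.  On the allocation path we take one reference for the
 * caller and one for the tree; if another thread beat us to the radix
 * tree insert, we drop our buffer and return the existing one instead.
 */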
3070 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3071                                           u64 start, unsigned long len,
3072                                           struct page *page0)
3073 {
3074         unsigned long num_pages = num_extent_pages(start, len);
3075         unsigned long i;
3076         unsigned long index = start >> PAGE_CACHE_SHIFT;
3077         struct extent_buffer *eb;
3078         struct extent_buffer *exists = NULL;
3079         struct page *p;
3080         struct address_space *mapping = tree->mapping;
3081         int uptodate = 1;
3082         int ret;
3083
3084         rcu_read_lock();
3085         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3086         if (eb && atomic_inc_not_zero(&eb->refs)) {
3087                 rcu_read_unlock();
3088                 mark_page_accessed(eb->first_page);
3089                 return eb;
3090         }
3091         rcu_read_unlock();
3092
3093         eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3094         if (!eb)
3095                 return NULL;
3096
3097         if (page0) {
3098                 eb->first_page = page0;
3099                 i = 1;
3100                 index++;
3101                 page_cache_get(page0);
3102                 mark_page_accessed(page0);
3103                 set_page_extent_mapped(page0);
3104                 set_page_extent_head(page0, len);
3105                 uptodate = PageUptodate(page0);
3106         } else {
3107                 i = 0;
3108         }
3109         for (; i < num_pages; i++, index++) {
3110                 p = find_or_create_page(mapping, index, GFP_NOFS);
3111                 if (!p) {
3112                         WARN_ON(1);
3113                         goto free_eb;
3114                 }
3115                 set_page_extent_mapped(p);
3116                 mark_page_accessed(p);
3117                 if (i == 0) {
3118                         eb->first_page = p;
3119                         set_page_extent_head(p, len);
3120                 } else {
3121                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3122                 }
3123                 if (!PageUptodate(p))
3124                         uptodate = 0;
3125
3126                 /*
3127                  * see below about how we avoid a nasty race with release page
3128                  * and why we unlock later
3129                  */
3130                 if (i != 0)
3131                         unlock_page(p);
3132         }
3133         if (uptodate)
3134                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3135
3136         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3137         if (ret)
3138                 goto free_eb;
3139
3140         spin_lock(&tree->buffer_lock);
3141         ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3142         if (ret == -EEXIST) {
3143                 exists = radix_tree_lookup(&tree->buffer,
3144                                                 start >> PAGE_CACHE_SHIFT);
3145                 /* add one reference for the caller */
3146                 atomic_inc(&exists->refs);
3147                 spin_unlock(&tree->buffer_lock);
3148                 radix_tree_preload_end();
3149                 goto free_eb;
3150         }
3151         /* add one reference for the tree */
3152         atomic_inc(&eb->refs);
3153         spin_unlock(&tree->buffer_lock);
3154         radix_tree_preload_end();
3155
3156         /*
3157          * there is a race where release page may have
3158          * tried to find this extent buffer in the radix
3159          * but failed.  It will tell the VM it is safe to
3160          * reclaim the page, and it will clear the page private bit.
3161          * We must make sure to set the page private bit properly
3162          * after the extent buffer is in the radix tree so
3163          * it doesn't get lost
3164          */
3165         set_page_extent_mapped(eb->first_page);
3166         set_page_extent_head(eb->first_page, eb->len);
3167         if (!page0)
3168                 unlock_page(eb->first_page);
3169         return eb;
3170
3171 free_eb:
3172         if (eb->first_page && !page0)
3173                 unlock_page(eb->first_page);
3174
3175         if (!atomic_dec_and_test(&eb->refs))
3176                 return exists;
3177         btrfs_release_extent_buffer(eb);
3178         return exists;
3179 }
3180
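/*
 * Look up an already allocated extent buffer covering @start and take a
 * reference on it.  Returns NULL if there is no such buffer.
 */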
3181 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3182                                          u64 start, unsigned long len)
3183 {
3184         struct extent_buffer *eb;
3185
3186         rcu_read_lock();
3187         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3188         if (eb && atomic_inc_not_zero(&eb->refs)) {
3189                 rcu_read_unlock();
3190                 mark_page_accessed(eb->first_page);
3191                 return eb;
3192         }
3193         rcu_read_unlock();
3194
3195         return NULL;
3196 }
3197
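/*
 * Drop the caller's reference on @eb.  The final reference is meant to be
 * dropped from try_release_extent_buffer(), so hitting zero here points at
 * a reference counting bug and only triggers a warning.
 */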
3198 void free_extent_buffer(struct extent_buffer *eb)
3199 {
3200         if (!eb)
3201                 return;
3202
3203         if (!atomic_dec_and_test(&eb->refs))
3204                 return;
3205
3206         WARN_ON(1);
3207 }
3208
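/*
 * Clear the dirty state of every page backing @eb, including the
 * PAGECACHE_TAG_DIRTY radix tree tags, so writeback leaves them alone.
 */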
3209 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3210                               struct extent_buffer *eb)
3211 {
3212         unsigned long i;
3213         unsigned long num_pages;
3214         struct page *page;
3215
3216         num_pages = num_extent_pages(eb->start, eb->len);
3217
3218         for (i = 0; i < num_pages; i++) {
3219                 page = extent_buffer_page(eb, i);
3220                 if (!PageDirty(page))
3221                         continue;
3222
3223                 lock_page(page);
3224                 WARN_ON(!PagePrivate(page));
3225
3226                 set_page_extent_mapped(page);
3227                 if (i == 0)
3228                         set_page_extent_head(page, eb->len);
3229
3230                 clear_page_dirty_for_io(page);
3231                 spin_lock_irq(&page->mapping->tree_lock);
3232                 if (!PageDirty(page)) {
3233                         radix_tree_tag_clear(&page->mapping->page_tree,
3234                                                 page_index(page),
3235                                                 PAGECACHE_TAG_DIRTY);
3236                 }
3237                 spin_unlock_irq(&page->mapping->tree_lock);
3238                 unlock_page(page);
3239         }
3240         return 0;
3241 }
3242
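/*
 * Mark @eb dirty: set EXTENT_BUFFER_DIRTY and dirty all of its pages.
 * Returns the previous value of the dirty bit.
 */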
3243 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3244                              struct extent_buffer *eb)
3245 {
3246         unsigned long i;
3247         unsigned long num_pages;
3248         int was_dirty = 0;
3249
3250         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3251         num_pages = num_extent_pages(eb->start, eb->len);
3252         for (i = 0; i < num_pages; i++)
3253                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3254         return was_dirty;
3255 }
3256
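/*
 * An extent buffer "straddles" pages when it is smaller than a page or not
 * page aligned, i.e. when it may share a page with other data.  Such
 * buffers track EXTENT_UPTODATE in the io tree as well, while page aligned
 * buffers can rely on the page flags alone.
 */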
3257 static int __eb_straddles_pages(u64 start, u64 len)
3258 {
3259         if (len < PAGE_CACHE_SIZE)
3260                 return 1;
3261         if (start & (PAGE_CACHE_SIZE - 1))
3262                 return 1;
3263         if ((start + len) & (PAGE_CACHE_SIZE - 1))
3264                 return 1;
3265         return 0;
3266 }
3267
3268 static int eb_straddles_pages(struct extent_buffer *eb)
3269 {
3270         return __eb_straddles_pages(eb->start, eb->len);
3271 }
3272
3273 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3274                                 struct extent_buffer *eb,
3275                                 struct extent_state **cached_state)
3276 {
3277         unsigned long i;
3278         struct page *page;
3279         unsigned long num_pages;
3280
3281         num_pages = num_extent_pages(eb->start, eb->len);
3282         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3283
3284         if (eb_straddles_pages(eb)) {
3285                 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3286                                       cached_state, GFP_NOFS);
3287         }
3288         for (i = 0; i < num_pages; i++) {
3289                 page = extent_buffer_page(eb, i);
3290                 if (page)
3291                         ClearPageUptodate(page);
3292         }
3293         return 0;
3294 }
3295
3296 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3297                                 struct extent_buffer *eb)
3298 {
3299         unsigned long i;
3300         struct page *page;
3301         unsigned long num_pages;
3302
3303         num_pages = num_extent_pages(eb->start, eb->len);
3304
3305         if (eb_straddles_pages(eb)) {
3306                 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3307                                     NULL, GFP_NOFS);
3308         }
3309         for (i = 0; i < num_pages; i++) {
3310                 page = extent_buffer_page(eb, i);
3311                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3312                     ((i == num_pages - 1) &&
3313                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3314                         check_page_uptodate(tree, page);
3315                         continue;
3316                 }
3317                 SetPageUptodate(page);
3318         }
3319         return 0;
3320 }
3321
3322 int extent_range_uptodate(struct extent_io_tree *tree,
3323                           u64 start, u64 end)
3324 {
3325         struct page *page;
3326         int ret;
3327         int pg_uptodate = 1;
3328         int uptodate;
3329         unsigned long index;
3330
3331         if (__eb_straddles_pages(start, end - start + 1)) {
3332                 ret = test_range_bit(tree, start, end,
3333                                      EXTENT_UPTODATE, 1, NULL);
3334                 if (ret)
3335                         return 1;
3336         }
3337         while (start <= end) {
3338                 index = start >> PAGE_CACHE_SHIFT;
3339                 page = find_get_page(tree->mapping, index);
3340                 uptodate = PageUptodate(page);
3341                 page_cache_release(page);
3342                 if (!uptodate) {
3343                         pg_uptodate = 0;
3344                         break;
3345                 }
3346                 start += PAGE_CACHE_SIZE;
3347         }
3348         return pg_uptodate;
3349 }
3350
3351 int extent_buffer_uptodate(struct extent_io_tree *tree,
3352                            struct extent_buffer *eb,
3353                            struct extent_state *cached_state)
3354 {
3355         int ret = 0;
3356         unsigned long num_pages;
3357         unsigned long i;
3358         struct page *page;
3359         int pg_uptodate = 1;
3360
3361         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3362                 return 1;
3363
3364         if (eb_straddles_pages(eb)) {
3365                 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3366                                    EXTENT_UPTODATE, 1, cached_state);
3367                 if (ret)
3368                         return ret;
3369         }
3370
3371         num_pages = num_extent_pages(eb->start, eb->len);
3372         for (i = 0; i < num_pages; i++) {
3373                 page = extent_buffer_page(eb, i);
3374                 if (!PageUptodate(page)) {
3375                         pg_uptodate = 0;
3376                         break;
3377                 }
3378         }
3379         return pg_uptodate;
3380 }
3381
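/*
 * Read @eb's pages from disk.  Pages that are already uptodate are left
 * alone; the rest are sent through __extent_read_full_page().  With @wait
 * set we block until the IO finishes and return -EIO if any page failed;
 * @start lets the caller begin the read partway into the buffer.
 */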
3382 int read_extent_buffer_pages(struct extent_io_tree *tree,
3383                              struct extent_buffer *eb,
3384                              u64 start, int wait,
3385                              get_extent_t *get_extent, int mirror_num)
3386 {
3387         unsigned long i;
3388         unsigned long start_i;
3389         struct page *page;
3390         int err;
3391         int ret = 0;
3392         int locked_pages = 0;
3393         int all_uptodate = 1;
3394         int inc_all_pages = 0;
3395         unsigned long num_pages;
3396         struct bio *bio = NULL;
3397         unsigned long bio_flags = 0;
3398
3399         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3400                 return 0;
3401
3402         if (eb_straddles_pages(eb)) {
3403                 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3404                                    EXTENT_UPTODATE, 1, NULL)) {
3405                         return 0;
3406                 }
3407         }
3408
3409         if (start) {
3410                 WARN_ON(start < eb->start);
3411                 start_i = (start >> PAGE_CACHE_SHIFT) -
3412                         (eb->start >> PAGE_CACHE_SHIFT);
3413         } else {
3414                 start_i = 0;
3415         }
3416
3417         num_pages = num_extent_pages(eb->start, eb->len);
3418         for (i = start_i; i < num_pages; i++) {
3419                 page = extent_buffer_page(eb, i);
3420                 if (!wait) {
3421                         if (!trylock_page(page))
3422                                 goto unlock_exit;
3423                 } else {
3424                         lock_page(page);
3425                 }
3426                 locked_pages++;
3427                 if (!PageUptodate(page))
3428                         all_uptodate = 0;
3429         }
3430         if (all_uptodate) {
3431                 if (start_i == 0)
3432                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3433                 goto unlock_exit;
3434         }
3435
3436         for (i = start_i; i < num_pages; i++) {
3437                 page = extent_buffer_page(eb, i);
3438
3439                 WARN_ON(!PagePrivate(page));
3440
3441                 set_page_extent_mapped(page);
3442                 if (i == 0)
3443                         set_page_extent_head(page, eb->len);
3444
3445                 if (inc_all_pages)
3446                         page_cache_get(page);
3447                 if (!PageUptodate(page)) {
3448                         if (start_i == 0)
3449                                 inc_all_pages = 1;
3450                         ClearPageError(page);
3451                         err = __extent_read_full_page(tree, page,
3452                                                       get_extent, &bio,
3453                                                       mirror_num, &bio_flags);
3454                         if (err)
3455                                 ret = err;
3456                 } else {
3457                         unlock_page(page);
3458                 }
3459         }
3460
3461         if (bio)
3462                 submit_one_bio(READ, bio, mirror_num, bio_flags);
3463
3464         if (ret || !wait)
3465                 return ret;
3466
3467         for (i = start_i; i < num_pages; i++) {
3468                 page = extent_buffer_page(eb, i);
3469                 wait_on_page_locked(page);
3470                 if (!PageUptodate(page))
3471                         ret = -EIO;
3472         }
3473
3474         if (!ret)
3475                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3476         return ret;
3477
3478 unlock_exit:
3479         i = start_i;
3480         while (locked_pages > 0) {
3481                 page = extent_buffer_page(eb, i);
3482                 i++;
3483                 unlock_page(page);
3484                 locked_pages--;
3485         }
3486         return ret;
3487 }
3488
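/*
 * Copy @len bytes starting at offset @start within @eb into @dstv, one
 * page sized chunk at a time.
 */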
3489 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3490                         unsigned long start,
3491                         unsigned long len)
3492 {
3493         size_t cur;
3494         size_t offset;
3495         struct page *page;
3496         char *kaddr;
3497         char *dst = (char *)dstv;
3498         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3499         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3500
3501         WARN_ON(start > eb->len);
3502         WARN_ON(start + len > eb->start + eb->len);
3503
3504         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3505
3506         while (len > 0) {
3507                 page = extent_buffer_page(eb, i);
3508
3509                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3510                 kaddr = page_address(page);
3511                 memcpy(dst, kaddr + offset, cur);
3512
3513                 dst += cur;
3514                 len -= cur;
3515                 offset = 0;
3516                 i++;
3517         }
3518 }
3519
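/*
 * Map @min_len bytes at offset @start of @eb, but only if the range fits
 * within a single page; otherwise -EINVAL is returned and the caller must
 * fall back to copying.  On success *map points at the data and
 * *map_start/*map_len describe the part of the buffer that page covers.
 */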
3520 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3521                                unsigned long min_len, char **map,
3522                                unsigned long *map_start,
3523                                unsigned long *map_len)
3524 {
3525         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3526         char *kaddr;
3527         struct page *p;
3528         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3529         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3530         unsigned long end_i = (start_offset + start + min_len - 1) >>
3531                 PAGE_CACHE_SHIFT;
3532
3533         if (i != end_i)
3534                 return -EINVAL;
3535
3536         if (i == 0) {
3537                 offset = start_offset;
3538                 *map_start = 0;
3539         } else {
3540                 offset = 0;
3541                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3542         }
3543
3544         if (start + min_len > eb->len) {
3545                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3546                        "wanted %lu %lu\n", (unsigned long long)eb->start,
3547                        eb->len, start, min_len);
3548                 WARN_ON(1);
3549                 return -EINVAL;
3550         }
3551
3552         p = extent_buffer_page(eb, i);
3553         kaddr = page_address(p);
3554         *map = kaddr + offset;
3555         *map_len = PAGE_CACHE_SIZE - offset;
3556         return 0;
3557 }
3558
3559 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3560                           unsigned long start,
3561                           unsigned long len)
3562 {
3563         size_t cur;
3564         size_t offset;
3565         struct page *page;
3566         char *kaddr;
3567         char *ptr = (char *)ptrv;
3568         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3569         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3570         int ret = 0;
3571
3572         WARN_ON(start > eb->len);
3573         WARN_ON(start + len > eb->start + eb->len);
3574
3575         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3576
3577         while (len > 0) {
3578                 page = extent_buffer_page(eb, i);
3579
3580                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3581
3582                 kaddr = page_address(page);
3583                 ret = memcmp(ptr, kaddr + offset, cur);
3584                 if (ret)
3585                         break;
3586
3587                 ptr += cur;
3588                 len -= cur;
3589                 offset = 0;
3590                 i++;
3591         }
3592         return ret;
3593 }
3594
3595 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3596                          unsigned long start, unsigned long len)
3597 {
3598         size_t cur;
3599         size_t offset;
3600         struct page *page;
3601         char *kaddr;
3602         char *src = (char *)srcv;
3603         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3604         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3605
3606         WARN_ON(start > eb->len);
3607         WARN_ON(start + len > eb->start + eb->len);
3608
3609         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3610
3611         while (len > 0) {
3612                 page = extent_buffer_page(eb, i);
3613                 WARN_ON(!PageUptodate(page));
3614
3615                 cur = min(len, PAGE_CACHE_SIZE - offset);
3616                 kaddr = page_address(page);
3617                 memcpy(kaddr + offset, src, cur);
3618
3619                 src += cur;
3620                 len -= cur;
3621                 offset = 0;
3622                 i++;
3623         }
3624 }
3625
3626 void memset_extent_buffer(struct extent_buffer *eb, char c,
3627                           unsigned long start, unsigned long len)
3628 {
3629         size_t cur;
3630         size_t offset;
3631         struct page *page;
3632         char *kaddr;
3633         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3634         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3635
3636         WARN_ON(start > eb->len);
3637         WARN_ON(start + len > eb->start + eb->len);
3638
3639         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3640
3641         while (len > 0) {
3642                 page = extent_buffer_page(eb, i);
3643                 WARN_ON(!PageUptodate(page));
3644
3645                 cur = min(len, PAGE_CACHE_SIZE - offset);
3646                 kaddr = page_address(page);
3647                 memset(kaddr + offset, c, cur);
3648
3649                 len -= cur;
3650                 offset = 0;
3651                 i++;
3652         }
3653 }
3654
3655 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3656                         unsigned long dst_offset, unsigned long src_offset,
3657                         unsigned long len)
3658 {
3659         u64 dst_len = dst->len;
3660         size_t cur;
3661         size_t offset;
3662         struct page *page;
3663         char *kaddr;
3664         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3665         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3666
3667         WARN_ON(src->len != dst_len);
3668
3669         offset = (start_offset + dst_offset) &
3670                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3671
3672         while (len > 0) {
3673                 page = extent_buffer_page(dst, i);
3674                 WARN_ON(!PageUptodate(page));
3675
3676                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3677
3678                 kaddr = page_address(page);
3679                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3680
3681                 src_offset += cur;
3682                 len -= cur;
3683                 offset = 0;
3684                 i++;
3685         }
3686 }
3687
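/*
 * Copy @len bytes from @src_page/@src_off to @dst_page/@dst_off on behalf
 * of memmove_extent_buffer().  Within one page this is a plain memmove();
 * across pages the bytes are copied backwards, matching the tail-first
 * order the caller uses for overlapping ranges.
 */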
3688 static void move_pages(struct page *dst_page, struct page *src_page,
3689                        unsigned long dst_off, unsigned long src_off,
3690                        unsigned long len)
3691 {
3692         char *dst_kaddr = page_address(dst_page);
3693         if (dst_page == src_page) {
3694                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3695         } else {
3696                 char *src_kaddr = page_address(src_page);
3697                 char *p = dst_kaddr + dst_off + len;
3698                 char *s = src_kaddr + src_off + len;
3699
3700                 while (len--)
3701                         *--p = *--s;
3702         }
3703 }
3704
3705 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3706 {
3707         unsigned long distance = (src > dst) ? src - dst : dst - src;
3708         return distance < len;
3709 }
3710
3711 static void copy_pages(struct page *dst_page, struct page *src_page,
3712                        unsigned long dst_off, unsigned long src_off,
3713                        unsigned long len)
3714 {
3715         char *dst_kaddr = page_address(dst_page);
3716         char *src_kaddr;
3717
3718         if (dst_page != src_page) {
3719                 src_kaddr = page_address(src_page);
3720         } else {
3721                 src_kaddr = dst_kaddr;
3722                 BUG_ON(areas_overlap(src_off, dst_off, len));
3723         }
3724
3725         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3726 }
3727
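/*
 * memcpy() within a single extent buffer.  The ranges are expected not to
 * overlap; overlapping copies must go through memmove_extent_buffer().
 */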
3728 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3729                            unsigned long src_offset, unsigned long len)
3730 {
3731         size_t cur;
3732         size_t dst_off_in_page;
3733         size_t src_off_in_page;
3734         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3735         unsigned long dst_i;
3736         unsigned long src_i;
3737
3738         if (src_offset + len > dst->len) {
3739                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3740                        "len %lu dst len %lu\n", src_offset, len, dst->len);
3741                 BUG_ON(1);
3742         }
3743         if (dst_offset + len > dst->len) {
3744                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3745                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
3746                 BUG_ON(1);
3747         }
3748
3749         while (len > 0) {
3750                 dst_off_in_page = (start_offset + dst_offset) &
3751                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3752                 src_off_in_page = (start_offset + src_offset) &
3753                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3754
3755                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3756                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3757
3758                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3759                                                src_off_in_page));
3760                 cur = min_t(unsigned long, cur,
3761                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3762
3763                 copy_pages(extent_buffer_page(dst, dst_i),
3764                            extent_buffer_page(dst, src_i),
3765                            dst_off_in_page, src_off_in_page, cur);
3766
3767                 src_offset += cur;
3768                 dst_offset += cur;
3769                 len -= cur;
3770         }
3771 }
3772
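/*
 * memmove() within a single extent buffer.  Non-overlapping ranges are
 * simply handed to memcpy_extent_buffer(); overlapping ranges are copied
 * from the tail towards the head, one page sized chunk at a time.
 */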
3773 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3774                            unsigned long src_offset, unsigned long len)
3775 {
3776         size_t cur;
3777         size_t dst_off_in_page;
3778         size_t src_off_in_page;
3779         unsigned long dst_end = dst_offset + len - 1;
3780         unsigned long src_end = src_offset + len - 1;
3781         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3782         unsigned long dst_i;
3783         unsigned long src_i;
3784
3785         if (src_offset + len > dst->len) {
3786                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3787                        "len %lu len %lu\n", src_offset, len, dst->len);
3788                 BUG_ON(1);
3789         }
3790         if (dst_offset + len > dst->len) {
3791                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3792                        "len %lu len %lu\n", dst_offset, len, dst->len);
3793                 BUG_ON(1);
3794         }
3795         if (!areas_overlap(src_offset, dst_offset, len)) {
3796                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3797                 return;
3798         }
3799         while (len > 0) {
3800                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3801                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3802
3803                 dst_off_in_page = (start_offset + dst_end) &
3804                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3805                 src_off_in_page = (start_offset + src_end) &
3806                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3807
3808                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3809                 cur = min(cur, dst_off_in_page + 1);
3810                 move_pages(extent_buffer_page(dst, dst_i),
3811                            extent_buffer_page(dst, src_i),
3812                            dst_off_in_page - cur + 1,
3813                            src_off_in_page - cur + 1, cur);
3814
3815                 dst_end -= cur;
3816                 src_end -= cur;
3817                 len -= cur;
3818         }
3819 }
3820
3821 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3822 {
3823         struct extent_buffer *eb =
3824                         container_of(head, struct extent_buffer, rcu_head);
3825
3826         btrfs_release_extent_buffer(eb);
3827 }
3828
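/*
 * Called when the VM wants to reclaim @page: drop the extent buffer that
 * covers it if the buffer is clean and nobody else holds a reference.
 * Returns 1 if the page can be released, 0 if the buffer is still in use.
 */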
3829 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3830 {
3831         u64 start = page_offset(page);
3832         struct extent_buffer *eb;
3833         int ret = 1;
3834
3835         spin_lock(&tree->buffer_lock);
3836         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3837         if (!eb) {
3838                 spin_unlock(&tree->buffer_lock);
3839                 return ret;
3840         }
3841
3842         if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3843                 ret = 0;
3844                 goto out;
3845         }
3846
3847         /*
3848          * set @eb->refs to 0 if it is currently 1, and then release the @eb.
3849          * Otherwise somebody else still holds a reference, so leave it alone.
3850          */
3851         if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
3852                 ret = 0;
3853                 goto out;
3854         }
3855
3856         radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3857 out:
3858         spin_unlock(&tree->buffer_lock);
3859
3860         /* at this point we can safely release the extent buffer */
3861         if (atomic_read(&eb->refs) == 0)
3862                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3863         return ret;
3864 }