/* fs/btrfs/extent_io.c */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;

        /* tells writepage not to lock the state bits for this range;
         * it still does the unlocking
         */
        unsigned int extent_locked:1;

        /* tells the submit_bio code to use a WRITE_SYNC */
        unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
        extent_state_cache = kmem_cache_create("extent_state",
                        sizeof(struct extent_state), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_state_cache)
                return -ENOMEM;

        extent_buffer_cache = kmem_cache_create("extent_buffers",
                        sizeof(struct extent_buffer), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;

free_state_cache:
        kmem_cache_destroy(extent_state_cache);
        return -ENOMEM;
}

void extent_io_exit(void)
{
        struct extent_state *state;
        struct extent_buffer *eb;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
                printk(KERN_ERR "btrfs state leak: start %llu end %llu "
                       "state %lu in tree %p refs %d\n",
                       (unsigned long long)state->start,
                       (unsigned long long)state->end,
                       state->state, state->tree, atomic_read(&state->refs));
                list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);
        }

        while (!list_empty(&buffers)) {
                eb = list_entry(buffers.next, struct extent_buffer, leak_list);
                printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
                       "refs %d\n", (unsigned long long)eb->start,
                       eb->len, atomic_read(&eb->refs));
                list_del(&eb->leak_list);
                kmem_cache_free(extent_buffer_cache, eb);
        }
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}
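
/*
 * Illustrative sketch (not part of the original file): extent_io_init()
 * and extent_io_exit() are meant to bracket the filesystem module's
 * lifetime, along the lines of btrfs's own module init.  The function
 * names below are hypothetical.
 */
static int __init example_module_init(void)
{
        int err;

        err = extent_io_init();         /* create the slab caches */
        if (err)
                return err;
        /* ... register the filesystem, etc ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... unregister the filesystem first ... */
        extent_io_exit();               /* report leaks, destroy caches */
}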

void extent_io_tree_init(struct extent_io_tree *tree,
                         struct address_space *mapping)
{
        tree->state = RB_ROOT;
        INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        spin_lock_init(&tree->buffer_lock);
        tree->mapping = mapping;
}
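
/*
 * Minimal usage sketch (illustrative only): an io tree is usually
 * embedded in a larger structure and initialized once against the
 * address space whose pages it tracks, e.g. an inode's mapping.
 */
static inline void example_tree_setup(struct extent_io_tree *tree,
                                      struct inode *inode)
{
        extent_io_tree_init(tree, inode->i_mapping);
}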

static struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
#if LEAK_DEBUG
        unsigned long flags;
#endif

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
#if LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&state->leak_list, &states);
        spin_unlock_irqrestore(&leak_lock, flags);
#endif
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}

void free_extent_state(struct extent_state *state)
{
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
                unsigned long flags;
#endif
                WARN_ON(state->tree);
#if LEAK_DEBUG
                spin_lock_irqsave(&leak_lock, flags);
                list_del(&state->leak_list);
                spin_unlock_irqrestore(&leak_lock, flags);
#endif
                kmem_cache_free(extent_state_cache, state);
        }
}
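
/*
 * Sketch of the refcount rules (illustrative only): a freshly allocated
 * state starts with refs == 1; every additional holder takes a
 * reference, and free_extent_state() only returns the object to the
 * cache on the final put.
 */
static inline void example_state_lifetime(void)
{
        struct extent_state *state = alloc_extent_state(GFP_NOFS);

        if (!state)
                return;
        atomic_inc(&state->refs);       /* a second holder */
        free_extent_state(state);       /* refs 2 -> 1, still live */
        free_extent_state(state);       /* refs 1 -> 0, freed */
}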

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct tree_entry *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_root *root = &tree->state;
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset > prev_entry->end) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
                                          u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;

        ret = __etree_search(tree, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}
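
/*
 * Sketch of the search contract (illustrative only): with tree->lock
 * held, tree_search() returns the node whose [start, end] contains
 * 'offset', or else the first node that begins after 'offset'; NULL
 * means nothing in the tree ends at or beyond 'offset'.
 */
static inline struct extent_state *example_first_state_from(
                struct extent_io_tree *tree, u64 offset)
{
        struct rb_node *node = tree_search(tree, offset);

        if (!node)
                return NULL;
        return rb_entry(node, struct extent_state, rb_node);
}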

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
                     struct extent_state *other)
{
        if (tree->ops && tree->ops->merge_extent_hook)
                tree->ops->merge_extent_hook(tree->mapping->host, new,
                                             other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
                        struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                return;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        merge_cb(tree, state, other);
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        merge_cb(tree, state, other);
                        state->end = other->end;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
}
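
/*
 * Illustrative sketch: two back-to-back ranges with identical bits are
 * collapsed by merge_state().  set_extent_dirty() is the wrapper
 * defined further down in this file; the byte offsets are arbitrary.
 */
static inline void example_adjacent_merge(struct extent_io_tree *tree)
{
        set_extent_dirty(tree, 0, 4095, GFP_NOFS);
        /* ends at 4095, next starts at 4096 -> merged into [0, 8191] */
        set_extent_dirty(tree, 4096, 8191, GFP_NOFS);
}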

static void set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state, int *bits)
{
        if (tree->ops && tree->ops->set_bit_hook)
                tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state, int *bits)
{
        if (tree->ops && tree->ops->clear_bit_hook)
                tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int *bits)
{
        struct rb_node *node;

        if (end < start) {
                printk(KERN_ERR "btrfs end < start %llu %llu\n",
                       (unsigned long long)end,
                       (unsigned long long)start);
                WARN_ON(1);
        }
        state->start = start;
        state->end = end;

        set_state_bits(tree, state, bits);

        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "btrfs found node %llu %llu on insert of "
                       "%llu %llu\n", (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)start, (unsigned long long)end);
                return -EEXIST;
        }
        state->tree = tree;
        merge_state(tree, state);
        return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
                     u64 split)
{
        if (tree->ops && tree->ops->split_extent_hook)
                tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;

        split_cb(tree, orig, split);

        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                free_extent_state(prealloc);
                return -EEXIST;
        }
        prealloc->tree = tree;
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
                            struct extent_state *state,
                            int *bits, int wake)
{
        int bits_to_clear = *bits & ~EXTENT_CTLBITS;
        int ret = state->state & bits_to_clear;

        if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
        state->state &= ~bits_to_clear;
        if (wake)
                wake_up(&state->wq);
        if (state->state == 0) {
                if (state->tree) {
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
        if (!prealloc)
                prealloc = alloc_extent_state(GFP_ATOMIC);

        return prealloc;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete,
                     struct extent_state **cached_state,
                     gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *cached;
        struct extent_state *prealloc = NULL;
        struct rb_node *next_node;
        struct rb_node *node;
        u64 last_end;
        int err;
        int set = 0;
        int clear = 0;

        if (delete)
                bits |= ~EXTENT_CTLBITS;
        bits |= EXTENT_FIRST_DELALLOC;

        if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                clear = 1;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock(&tree->lock);
        if (cached_state) {
                cached = *cached_state;

                if (clear) {
                        *cached_state = NULL;
                        cached_state = NULL;
                }

                if (cached && cached->tree && cached->start <= start &&
                    cached->end > start) {
                        if (clear)
                                atomic_dec(&cached->refs);
                        state = cached;
                        goto hit_next;
                }
                if (clear)
                        free_extent_state(cached);
        }
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);
        last_end = state->end;

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set |= clear_state_bit(tree, state, &bits, wake);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
                if (wake)
                        wake_up(&state->wq);

                set |= clear_state_bit(tree, prealloc, &bits, wake);

                prealloc = NULL;
                goto out;
        }

        if (state->end < end && prealloc && !need_resched())
                next_node = rb_next(&state->rb_node);
        else
                next_node = NULL;

        set |= clear_state_bit(tree, state, &bits, wake);
        if (last_end == (u64)-1)
                goto out;
        start = last_end + 1;
        if (start <= end && next_node) {
                state = rb_entry(next_node, struct extent_state,
                                 rb_node);
                if (state->start == start)
                        goto hit_next;
        }
        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
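
/*
 * Usage sketch (illustrative only): clear the dirty-tracking bits on a
 * byte range and learn whether any of them had been set.  The trailing
 * 0/0/NULL arguments mean no wakeup, no forced delete and no cached
 * state hint.
 */
static inline int example_clear_dirty(struct extent_io_tree *tree,
                                      u64 start, u64 end)
{
        /* > 0 if any of the bits were set, 0 if none were */
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC,
                                0, 0, NULL, GFP_NOFS);
}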

static int wait_on_state(struct extent_io_tree *tree,
                         struct extent_state *state)
                __releases(tree->lock)
                __acquires(tree->lock)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&tree->lock);
        schedule();
        spin_lock(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        spin_lock(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(tree, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                cond_resched_lock(&tree->lock);
        }
out:
        spin_unlock(&tree->lock);
        return 0;
}
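
/*
 * Usage sketch (illustrative only): block until no state in
 * [start, end] carries EXTENT_LOCKED.  wait_extent_bit() takes and
 * drops tree->lock itself and may sleep, so process context only.
 */
static inline void example_wait_unlocked(struct extent_io_tree *tree,
                                         u64 start, u64 end)
{
        wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}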

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int *bits)
{
        int bits_to_set = *bits & ~EXTENT_CTLBITS;

        set_state_cb(tree, state, bits);
        if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
                        struct extent_state **cached_ptr)
{
        if (cached_ptr && !(*cached_ptr)) {
                if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
                        *cached_ptr = state;
                        atomic_inc(&state->refs);
                }
        }
}

static void uncache_state(struct extent_state **cached_ptr)
{
        if (cached_ptr && (*cached_ptr)) {
                struct extent_state *state = *cached_ptr;
                *cached_ptr = NULL;
                free_extent_state(state);
        }
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   int bits, int exclusive_bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        int err = 0;
        u64 last_start;
        u64 last_end;

        bits |= EXTENT_FIRST_DELALLOC;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                BUG_ON(!prealloc);
        }

        spin_lock(&tree->lock);
        if (cached_state && *cached_state) {
                state = *cached_state;
                if (state->start <= start && state->end > start &&
                    state->tree) {
                        node = &state->rb_node;
                        goto hit_next;
                }
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                struct rb_node *next_node;
                if (state->state & exclusive_bits) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }

                set_state_bits(tree, state, &bits);

                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;

                start = last_end + 1;
                next_node = rb_next(&state->rb_node);
                if (next_node && start < end && prealloc && !need_resched()) {
                        state = rb_entry(next_node, struct extent_state,
                                         rb_node);
                        if (state->start == start)
                                goto hit_next;
                }
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }

                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
                        cache_state(state, cached_state);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;

                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);

                /*
                 * Avoid freeing 'prealloc' if it can be merged with
                 * the later extent.
                 */
                err = insert_state(tree, prealloc, start, this_end,
                                   &bits);
                BUG_ON(err == -EEXIST);
                if (err) {
                        free_extent_state(prealloc);
                        prealloc = NULL;
                        goto out;
                }
                cache_state(prealloc, cached_state);
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }

                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, &bits);
                cache_state(prealloc, cached_state);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
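
/*
 * Usage sketch (illustrative only): set an exclusive bit and handle the
 * -EEXIST collision case, mirroring what lock_extent_bits() below does.
 * 'failed_start' reports where the conflicting range begins.
 */
static inline int example_set_exclusive(struct extent_io_tree *tree,
                                        u64 start, u64 end)
{
        u64 failed_start;
        int err;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
                             &failed_start, NULL, GFP_NOFS);
        if (err == -EEXIST) {
                /* someone already holds a range starting at failed_start */
                return err;
        }
        return 0;
}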

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * another
 * @tree:       the io tree to search
 * @start:      the start offset in bytes
 * @end:        the end offset in bytes (inclusive)
 * @bits:       the bits to set in this range
 * @clear_bits: the bits to clear in this range
 * @mask:       the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       int bits, int clear_bits, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        int err = 0;
        u64 last_start;
        u64 last_end;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                struct rb_node *next_node;

                set_state_bits(tree, state, &bits);
                clear_state_bit(tree, state, &clear_bits, 0);

                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;

                start = last_end + 1;
                next_node = rb_next(&state->rb_node);
                if (next_node && start < end && prealloc && !need_resched()) {
                        state = rb_entry(next_node, struct extent_state,
                                         rb_node);
                        if (state->start == start)
                                goto hit_next;
                }
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
                        clear_state_bit(tree, state, &clear_bits, 0);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;

                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }

                /*
                 * Avoid freeing 'prealloc' if it can be merged with
                 * the later extent.
                 */
                err = insert_state(tree, prealloc, start, this_end,
                                   &bits);
                BUG_ON(err == -EEXIST);
                if (err) {
                        free_extent_state(prealloc);
                        prealloc = NULL;
                        goto out;
                }
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }

                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, &bits);
                clear_state_bit(tree, prealloc, &clear_bits, 0);

                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
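
/*
 * Usage sketch (illustrative only): atomically flip a delalloc range to
 * dirty, the kind of mergeable conversion the comment above has in mind.
 */
static inline int example_delalloc_to_dirty(struct extent_io_tree *tree,
                                            u64 start, u64 end)
{
        return convert_extent_bit(tree, start, end, EXTENT_DIRTY,
                                  EXTENT_DELALLOC, GFP_NOFS);
}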

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_UPTODATE,
                              0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC |
                                EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
                              NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                                 u64 end, struct extent_state **cached_state,
                                 gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                cached_state, mask);
}

/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, struct extent_state **cached_state, gfp_t mask)
{
        int err;
        u64 failed_start;
        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                     EXTENT_LOCKED, &failed_start,
                                     cached_state, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
{
        int err;
        u64 failed_start;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
                             &failed_start, NULL, mask);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
                                         EXTENT_LOCKED, 1, 0, NULL, mask);
                return 0;
        }
        return 1;
}
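
/*
 * Usage sketch (illustrative only): non-blocking lock attempt.  Note
 * the return convention above: 1 means the range is now locked, 0 means
 * it was contended and any partial lock was rolled back.
 */
static inline int example_trylock(struct extent_io_tree *tree,
                                  u64 start, u64 end)
{
        if (!try_lock_extent(tree, start, end, GFP_NOFS))
                return -EAGAIN;         /* contended, try again later */
        /* ... range is locked, do work, then unlock_extent() ... */
        return 0;
}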

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
                                mask);
}
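
/*
 * Illustrative lock/unlock pairing: lock_extent_bits() fills a cached
 * state that unlock_extent_cached() consumes, saving a tree search on
 * the unlock side.  A minimal sketch, assuming process context.
 */
static inline void example_locked_section(struct extent_io_tree *tree,
                                          u64 start, u64 end)
{
        struct extent_state *cached = NULL;

        lock_extent_bits(tree, start, end, 0, &cached, GFP_NOFS);
        /* ... [start, end] is exclusively locked here ... */
        unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
}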

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits)
{
        struct rb_node *node;
        struct extent_state *state;

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits))
                        return state;

                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct extent_state *state;
        int ret = 1;

        spin_lock(&tree->lock);
        state = find_first_extent_bit_state(tree, start, bits);
        if (state) {
                *start_ret = state->start;
                *end_ret = state->end;
                ret = 0;
        }
        spin_unlock(&tree->lock);
        return ret;
}
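
/*
 * Usage sketch (illustrative only): walk every range with a given bit
 * set by restarting the search just past each hit, relying on the 0/1
 * return convention documented above.
 */
static inline void example_for_each_dirty(struct extent_io_tree *tree)
{
        u64 start = 0, found_start, found_end;

        while (!find_first_extent_bit(tree, start, &found_start,
                                      &found_end, EXTENT_DIRTY)) {
                /* ... process [found_start, found_end] ... */
                start = found_end + 1;
        }
}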

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                                        u64 *start, u64 *end, u64 max_bytes,
                                        struct extent_state **cached_state)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        spin_lock(&tree->lock);

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node) {
                if (!found)
                        *end = (u64)-1;
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && (state->start != cur_start ||
                              (state->state & EXTENT_BOUNDARY))) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        if (!found)
                                *end = state->end;
                        goto out;
                }
                if (!found) {
                        *start = state->start;
                        *cached_state = state;
                        atomic_inc(&state->refs);
                }
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        spin_unlock(&tree->lock);
        return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
                                          struct page *locked_page,
                                          u64 start, u64 end)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;

        if (index == locked_page->index && end_index == index)
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long, nr_pages,
                                     ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] != locked_page)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}

static noinline int lock_delalloc_pages(struct inode *inode,
                                        struct page *locked_page,
                                        u64 delalloc_start,
                                        u64 delalloc_end)
{
        unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
        unsigned long start_index = index;
        unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
        unsigned long pages_locked = 0;
        struct page *pages[16];
        unsigned long nrpages;
        int ret;
        int i;

        /* the caller is responsible for locking the start index */
        if (index == locked_page->index && index == end_index)
                return 0;

        /* skip the page at the start index */
        nrpages = end_index - index + 1;
        while (nrpages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nrpages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        ret = -EAGAIN;
                        goto done;
                }
                /* now we have an array of pages, lock them all */
                for (i = 0; i < ret; i++) {
                        /*
                         * the caller is taking responsibility for
                         * locked_page
                         */
                        if (pages[i] != locked_page) {
                                lock_page(pages[i]);
                                if (!PageDirty(pages[i]) ||
                                    pages[i]->mapping != inode->i_mapping) {
                                        ret = -EAGAIN;
                                        unlock_page(pages[i]);
                                        page_cache_release(pages[i]);
                                        goto done;
                                }
                        }
                        page_cache_release(pages[i]);
                        pages_locked++;
                }
                nrpages -= ret;
                index += ret;
                cond_resched();
        }
        ret = 0;
done:
        if (ret && pages_locked) {
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start,
                              ((u64)(start_index + pages_locked - 1)) <<
                              PAGE_CACHE_SHIFT);
        }
        return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
1444 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1445                                              struct extent_io_tree *tree,
1446                                              struct page *locked_page,
1447                                              u64 *start, u64 *end,
1448                                              u64 max_bytes)
1449 {
1450         u64 delalloc_start;
1451         u64 delalloc_end;
1452         u64 found;
1453         struct extent_state *cached_state = NULL;
1454         int ret;
1455         int loops = 0;
1456
1457 again:
1458         /* step one, find a bunch of delalloc bytes starting at start */
1459         delalloc_start = *start;
1460         delalloc_end = 0;
1461         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1462                                     max_bytes, &cached_state);
1463         if (!found || delalloc_end <= *start) {
1464                 *start = delalloc_start;
1465                 *end = delalloc_end;
1466                 free_extent_state(cached_state);
1467                 return found;
1468         }
1469
1470         /*
1471          * start comes from the offset of locked_page.  We have to lock
1472          * pages in order, so we can't process delalloc bytes before
1473          * locked_page
1474          */
1475         if (delalloc_start < *start)
1476                 delalloc_start = *start;
1477
1478         /*
1479          * make sure to limit the number of pages we try to lock down
1480          * if we're looping.
1481          */
1482         if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1483                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1484
1485         /* step two, lock all the pages after the page that has start */
1486         ret = lock_delalloc_pages(inode, locked_page,
1487                                   delalloc_start, delalloc_end);
1488         if (ret == -EAGAIN) {
1489                 /* some of the pages are gone, lets avoid looping by
1490                  * shortening the size of the delalloc range we're searching
1491                  */
1492                 free_extent_state(cached_state);
1493                 if (!loops) {
1494                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1495                         max_bytes = PAGE_CACHE_SIZE - offset;
1496                         loops = 1;
1497                         goto again;
1498                 } else {
1499                         found = 0;
1500                         goto out_failed;
1501                 }
1502         }
1503         BUG_ON(ret);
1504
1505         /* step three, lock the state bits for the whole range */
1506         lock_extent_bits(tree, delalloc_start, delalloc_end,
1507                          0, &cached_state, GFP_NOFS);
1508
1509         /* then test to make sure it is all still delalloc */
1510         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1511                              EXTENT_DELALLOC, 1, cached_state);
1512         if (!ret) {
1513                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1514                                      &cached_state, GFP_NOFS);
1515                 __unlock_for_delalloc(inode, locked_page,
1516                               delalloc_start, delalloc_end);
1517                 cond_resched();
1518                 goto again;
1519         }
1520         free_extent_state(cached_state);
1521         *start = delalloc_start;
1522         *end = delalloc_end;
1523 out_failed:
1524         return found;
1525 }
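
/*
 * Illustrative sketch (not part of this file) of how a writepage path
 * consumes find_lock_delalloc_range(); __extent_writepage() below does
 * essentially this, with hypothetical local names:
 *
 *	u64 dstart = page_start, dend = 0;
 *	u64 found = find_lock_delalloc_range(inode, tree, locked_page,
 *					     &dstart, &dend,
 *					     128 * 1024 * 1024);
 *
 * on success, the pages and extent state in [dstart, dend] are locked and
 * the range is known to still be delalloc, so the caller can start the
 * real allocation.
 */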
1526
1527 int extent_clear_unlock_delalloc(struct inode *inode,
1528                                 struct extent_io_tree *tree,
1529                                 u64 start, u64 end, struct page *locked_page,
1530                                 unsigned long op)
1531 {
1532         int ret;
1533         struct page *pages[16];
1534         unsigned long index = start >> PAGE_CACHE_SHIFT;
1535         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1536         unsigned long nr_pages = end_index - index + 1;
1537         int i;
1538         int clear_bits = 0;
1539
1540         if (op & EXTENT_CLEAR_UNLOCK)
1541                 clear_bits |= EXTENT_LOCKED;
1542         if (op & EXTENT_CLEAR_DIRTY)
1543                 clear_bits |= EXTENT_DIRTY;
1544
1545         if (op & EXTENT_CLEAR_DELALLOC)
1546                 clear_bits |= EXTENT_DELALLOC;
1547
1548         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1549         if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1550                     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1551                     EXTENT_SET_PRIVATE2)))
1552                 return 0;
1553
1554         while (nr_pages > 0) {
1555                 ret = find_get_pages_contig(inode->i_mapping, index,
1556                                      min_t(unsigned long,
1557                                      nr_pages, ARRAY_SIZE(pages)), pages);
1558                 for (i = 0; i < ret; i++) {
1559
1560                         if (op & EXTENT_SET_PRIVATE2)
1561                                 SetPagePrivate2(pages[i]);
1562
1563                         if (pages[i] == locked_page) {
1564                                 page_cache_release(pages[i]);
1565                                 continue;
1566                         }
1567                         if (op & EXTENT_CLEAR_DIRTY)
1568                                 clear_page_dirty_for_io(pages[i]);
1569                         if (op & EXTENT_SET_WRITEBACK)
1570                                 set_page_writeback(pages[i]);
1571                         if (op & EXTENT_END_WRITEBACK)
1572                                 end_page_writeback(pages[i]);
1573                         if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1574                                 unlock_page(pages[i]);
1575                         page_cache_release(pages[i]);
1576                 }
1577                 nr_pages -= ret;
1578                 index += ret;
1579                 cond_resched();
1580         }
1581         return 0;
1582 }
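
/*
 * A hedged example of composing 'op' for extent_clear_unlock_delalloc():
 * a caller that has finished issuing IO for a delalloc range might clear
 * the extent bits and release the pages in one pass (one hypothetical
 * combination, callers pick whatever subset they need):
 *
 *	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
 *				     EXTENT_CLEAR_UNLOCK |
 *				     EXTENT_CLEAR_DELALLOC |
 *				     EXTENT_CLEAR_UNLOCK_PAGE);
 */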
1583
1584 /*
1585  * count the number of bytes in the tree that have the given bit(s)
1586  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1587  * cached.  The total number found is returned.
1588  */
1589 u64 count_range_bits(struct extent_io_tree *tree,
1590                      u64 *start, u64 search_end, u64 max_bytes,
1591                      unsigned long bits, int contig)
1592 {
1593         struct rb_node *node;
1594         struct extent_state *state;
1595         u64 cur_start = *start;
1596         u64 total_bytes = 0;
1597         u64 last = 0;
1598         int found = 0;
1599
1600         if (search_end <= cur_start) {
1601                 WARN_ON(1);
1602                 return 0;
1603         }
1604
1605         spin_lock(&tree->lock);
1606         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1607                 total_bytes = tree->dirty_bytes;
1608                 goto out;
1609         }
1610         /*
1611          * this search will find all the extents that end after
1612          * our range starts.
1613          */
1614         node = tree_search(tree, cur_start);
1615         if (!node)
1616                 goto out;
1617
1618         while (1) {
1619                 state = rb_entry(node, struct extent_state, rb_node);
1620                 if (state->start > search_end)
1621                         break;
1622                 if (contig && found && state->start > last + 1)
1623                         break;
1624                 if (state->end >= cur_start && (state->state & bits) == bits) {
1625                         total_bytes += min(search_end, state->end) + 1 -
1626                                        max(cur_start, state->start);
1627                         if (total_bytes >= max_bytes)
1628                                 break;
1629                         if (!found) {
1630                                 *start = max(cur_start, state->start);
1631                                 found = 1;
1632                         }
1633                         last = state->end;
1634                 } else if (contig && found) {
1635                         break;
1636                 }
1637                 node = rb_next(node);
1638                 if (!node)
1639                         break;
1640         }
1641 out:
1642         spin_unlock(&tree->lock);
1643         return total_bytes;
1644 }
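
/*
 * Usage sketch: asking for the total number of dirty bytes from offset 0
 * hits the cheap tree->dirty_bytes fast path above instead of walking
 * the rbtree:
 *
 *	u64 first = 0;
 *	u64 dirty = count_range_bits(tree, &first, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY, 0);
 */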
1645
1646 /*
1647  * set the private field for a given byte offset in the tree.  If there isn't
1648  * an extent_state starting at that exact offset, -ENOENT is returned.
1649  */
1650 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1651 {
1652         struct rb_node *node;
1653         struct extent_state *state;
1654         int ret = 0;
1655
1656         spin_lock(&tree->lock);
1657         /*
1658          * this search will find all the extents that end after
1659          * our range starts.
1660          */
1661         node = tree_search(tree, start);
1662         if (!node) {
1663                 ret = -ENOENT;
1664                 goto out;
1665         }
1666         state = rb_entry(node, struct extent_state, rb_node);
1667         if (state->start != start) {
1668                 ret = -ENOENT;
1669                 goto out;
1670         }
1671         state->private = private;
1672 out:
1673         spin_unlock(&tree->lock);
1674         return ret;
1675 }
1676
1677 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1678 {
1679         struct rb_node *node;
1680         struct extent_state *state;
1681         int ret = 0;
1682
1683         spin_lock(&tree->lock);
1684         /*
1685          * this search will find all the extents that end after
1686          * our range starts.
1687          */
1688         node = tree_search(tree, start);
1689         if (!node) {
1690                 ret = -ENOENT;
1691                 goto out;
1692         }
1693         state = rb_entry(node, struct extent_state, rb_node);
1694         if (state->start != start) {
1695                 ret = -ENOENT;
1696                 goto out;
1697         }
1698         *private = state->private;
1699 out:
1700         spin_unlock(&tree->lock);
1701         return ret;
1702 }
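
/*
 * set_state_private()/get_state_private() form a tiny key/value store
 * keyed by the extent's start offset.  The IO failure code below uses it
 * to stash a pointer; a sketch of that round trip (error handling
 * omitted):
 *
 *	set_state_private(tree, start, (u64)(unsigned long)failrec);
 *	...
 *	u64 private;
 *	if (!get_state_private(tree, start, &private))
 *		failrec = (struct io_failure_record *)(unsigned long)private;
 */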
1703
1704 /*
1705  * searches a range in the state tree for a given mask.
1706  * If 'filled' == 1, this returns 1 only if every extent in the range
1707  * has the bits set.  Otherwise, 1 is returned if any bit in the
1708  * range is found set.
1709  */
1710 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1711                    int bits, int filled, struct extent_state *cached)
1712 {
1713         struct extent_state *state = NULL;
1714         struct rb_node *node;
1715         int bitset = 0;
1716
1717         spin_lock(&tree->lock);
1718         if (cached && cached->tree && cached->start <= start &&
1719             cached->end > start)
1720                 node = &cached->rb_node;
1721         else
1722                 node = tree_search(tree, start);
1723         while (node && start <= end) {
1724                 state = rb_entry(node, struct extent_state, rb_node);
1725
1726                 if (filled && state->start > start) {
1727                         bitset = 0;
1728                         break;
1729                 }
1730
1731                 if (state->start > end)
1732                         break;
1733
1734                 if (state->state & bits) {
1735                         bitset = 1;
1736                         if (!filled)
1737                                 break;
1738                 } else if (filled) {
1739                         bitset = 0;
1740                         break;
1741                 }
1742
1743                 if (state->end == (u64)-1)
1744                         break;
1745
1746                 start = state->end + 1;
1747                 if (start > end)
1748                         break;
1749                 node = rb_next(node);
1750                 if (!node) {
1751                         if (filled)
1752                                 bitset = 0;
1753                         break;
1754                 }
1755         }
1756         spin_unlock(&tree->lock);
1757         return bitset;
1758 }
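
/*
 * The two flavours of 'filled', as used by the helpers below:
 *
 *	- filled == 1 asks "does the whole range have the bits?", e.g.
 *	  test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)
 *	- filled == 0 asks "does any byte in the range have the bits?", e.g.
 *	  test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)
 */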
1759
1760 /*
1761  * helper function to set a given page up to date if all the
1762  * extents in the tree for that page are up to date
1763  */
1764 static int check_page_uptodate(struct extent_io_tree *tree,
1765                                struct page *page)
1766 {
1767         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1768         u64 end = start + PAGE_CACHE_SIZE - 1;
1769         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1770                 SetPageUptodate(page);
1771         return 0;
1772 }
1773
1774 /*
1775  * helper function to unlock a page if all the extents in the tree
1776  * for that page are unlocked
1777  */
1778 static int check_page_locked(struct extent_io_tree *tree,
1779                              struct page *page)
1780 {
1781         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1782         u64 end = start + PAGE_CACHE_SIZE - 1;
1783         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1784                 unlock_page(page);
1785         return 0;
1786 }
1787
1788 /*
1789  * helper function to end page writeback.  the extent tree no longer
1790  * tracks writeback per range, so this simply ends writeback on the page
1791  */
1792 static int check_page_writeback(struct extent_io_tree *tree,
1793                              struct page *page)
1794 {
1795         end_page_writeback(page);
1796         return 0;
1797 }
1798
1799 /*
1800  * When IO fails, either with EIO or a csum verification failure, we
1801  * try other mirrors that might have a good copy of the data.  This
1802  * io_failure_record is used to record state as we go through all the
1803  * mirrors.  If another mirror has good data, the page is set up to date
1804  * and things continue.  If a good mirror can't be found, the original
1805  * bio end_io callback is called to indicate things have failed.
1806  */
1807 struct io_failure_record {
1808         struct page *page;
1809         u64 start;
1810         u64 len;
1811         u64 logical;
1812         unsigned long bio_flags;
1813         int this_mirror;
1814         int failed_mirror;
1815         int in_validation;
1816 };
1817
1818 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1819                                 int did_repair)
1820 {
1821         int ret;
1822         int err = 0;
1823         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1824
1825         set_state_private(failure_tree, rec->start, 0);
1826         ret = clear_extent_bits(failure_tree, rec->start,
1827                                 rec->start + rec->len - 1,
1828                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1829         if (ret)
1830                 err = ret;
1831
1832         if (did_repair) {
1833                 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1834                                         rec->start + rec->len - 1,
1835                                         EXTENT_DAMAGED, GFP_NOFS);
1836                 if (ret && !err)
1837                         err = ret;
1838         }
1839
1840         kfree(rec);
1841         return err;
1842 }
1843
1844 static void repair_io_failure_callback(struct bio *bio, int err)
1845 {
1846         complete(bio->bi_private);
1847 }
1848
1849 /*
1850  * this bypasses the standard btrfs submit functions deliberately, as
1851  * the standard behavior is to write all copies in a raid setup. here we only
1852  * want to write the one bad copy. so we do the mapping for ourselves and issue
1853  * submit_bio directly.
1854  * to avoid any synchronization issues, wait for the data after writing, which
1855  * actually prevents the read that triggered the error from finishing.
1856  * currently, there can be no more than two copies of every data bit. thus,
1857  * exactly one rewrite is required.
1858  */
1859 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1860                         u64 length, u64 logical, struct page *page,
1861                         int mirror_num)
1862 {
1863         struct bio *bio;
1864         struct btrfs_device *dev;
1865         DECLARE_COMPLETION_ONSTACK(compl);
1866         u64 map_length = 0;
1867         u64 sector;
1868         struct btrfs_bio *bbio = NULL;
1869         int ret;
1870
1871         BUG_ON(!mirror_num);
1872
1873         bio = bio_alloc(GFP_NOFS, 1);
1874         if (!bio)
1875                 return -EIO;
1876         bio->bi_private = &compl;
1877         bio->bi_end_io = repair_io_failure_callback;
1878         bio->bi_size = 0;
1879         map_length = length;
1880
1881         ret = btrfs_map_block(map_tree, WRITE, logical,
1882                               &map_length, &bbio, mirror_num);
1883         if (ret) {
1884                 bio_put(bio);
1885                 return -EIO;
1886         }
1887         BUG_ON(mirror_num != bbio->mirror_num);
1888         sector = bbio->stripes[mirror_num-1].physical >> 9;
1889         bio->bi_sector = sector;
1890         dev = bbio->stripes[mirror_num-1].dev;
1891         kfree(bbio);
1892         if (!dev || !dev->bdev || !dev->writeable) {
1893                 bio_put(bio);
1894                 return -EIO;
1895         }
1896         bio->bi_bdev = dev->bdev;
1897         bio_add_page(bio, page, length, start - page_offset(page));
1898         submit_bio(WRITE_SYNC, bio);
1899         wait_for_completion(&compl);
1900
1901         if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1902                 /* try to remap that extent elsewhere? */
1903                 bio_put(bio);
1904                 return -EIO;
1905         }
1906
1907         printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1908                         "sector %llu)\n", page->mapping->host->i_ino, start,
1909                         dev->name, sector);
1910
1911         bio_put(bio);
1912         return 0;
1913 }
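
/*
 * A sketch of the expected call: clean_io_failure() below picks the bad
 * mirror out of the io_failure_record and asks repair_io_failure() to
 * rewrite just that one copy:
 *
 *	ret = repair_io_failure(map_tree, start, failrec->len,
 *				failrec->logical, page,
 *				failrec->failed_mirror);
 */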
1914
1915 /*
1916  * each time an IO finishes, we do a fast check in the IO failure tree
1917  * to see if we need to process or clean up an io_failure_record
1918  */
1919 static int clean_io_failure(u64 start, struct page *page)
1920 {
1921         u64 private;
1922         u64 private_failure;
1923         struct io_failure_record *failrec;
1924         struct btrfs_mapping_tree *map_tree;
1925         struct extent_state *state;
1926         int num_copies;
1927         int did_repair = 0;
1928         int ret;
1929         struct inode *inode = page->mapping->host;
1930
1931         private = 0;
1932         ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1933                                 (u64)-1, 1, EXTENT_DIRTY, 0);
1934         if (!ret)
1935                 return 0;
1936
1937         ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1938                                 &private_failure);
1939         if (ret)
1940                 return 0;
1941
1942         failrec = (struct io_failure_record *)(unsigned long) private_failure;
1943         BUG_ON(!failrec->this_mirror);
1944
1945         if (failrec->in_validation) {
1946                 /* there was no real error, just free the record */
1947                 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1948                          failrec->start);
1949                 did_repair = 1;
1950                 goto out;
1951         }
1952
1953         spin_lock(&BTRFS_I(inode)->io_tree.lock);
1954         state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1955                                             failrec->start,
1956                                             EXTENT_LOCKED);
1957         spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1958
1959         if (state && state->start == failrec->start) {
1960                 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1961                 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1962                                                 failrec->len);
1963                 if (num_copies > 1)  {
1964                         ret = repair_io_failure(map_tree, start, failrec->len,
1965                                                 failrec->logical, page,
1966                                                 failrec->failed_mirror);
1967                         did_repair = !ret;
1968                 }
1969         }
1970
1971 out:
1972         if (!ret)
1973                 ret = free_io_failure(inode, failrec, did_repair);
1974
1975         return ret;
1976 }
1977
1978 /*
1979  * this is a generic handler for readpage errors (default
1980  * readpage_io_failed_hook). if other copies exist, read those and write back
1981  * good data to the failed position. does not attempt to remap the
1982  * failed extent elsewhere, hoping the device will be smart enough to do this as
1983  * needed
1984  */
1985
1986 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
1987                                 u64 start, u64 end, int failed_mirror,
1988                                 struct extent_state *state)
1989 {
1990         struct io_failure_record *failrec = NULL;
1991         u64 private;
1992         struct extent_map *em;
1993         struct inode *inode = page->mapping->host;
1994         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1995         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1996         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1997         struct bio *bio;
1998         int num_copies;
1999         int ret;
2000         int read_mode;
2001         u64 logical;
2002
2003         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2004
2005         ret = get_state_private(failure_tree, start, &private);
2006         if (ret) {
2007                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2008                 if (!failrec)
2009                         return -ENOMEM;
2010                 failrec->start = start;
2011                 failrec->len = end - start + 1;
2012                 failrec->this_mirror = 0;
2013                 failrec->bio_flags = 0;
2014                 failrec->in_validation = 0;
2015
2016                 read_lock(&em_tree->lock);
2017                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2018                 if (!em) {
2019                         read_unlock(&em_tree->lock);
2020                         kfree(failrec);
2021                         return -EIO;
2022                 }
2023
2024                 if (em->start > start || em->start + em->len < start) {
2025                         free_extent_map(em);
2026                         em = NULL;
2027                 }
2028                 read_unlock(&em_tree->lock);
2029
2030                 if (!em || IS_ERR(em)) {
2031                         kfree(failrec);
2032                         return -EIO;
2033                 }
2034                 logical = start - em->start;
2035                 logical = em->block_start + logical;
2036                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2037                         logical = em->block_start;
2038                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2039                         extent_set_compress_type(&failrec->bio_flags,
2040                                                  em->compress_type);
2041                 }
2042                 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2043                          "len=%llu\n", logical, start, failrec->len);
2044                 failrec->logical = logical;
2045                 free_extent_map(em);
2046
2047                 /* set the bits in the private failure tree */
2048                 ret = set_extent_bits(failure_tree, start, end,
2049                                         EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2050                 if (ret >= 0)
2051                         ret = set_state_private(failure_tree, start,
2052                                                 (u64)(unsigned long)failrec);
2053                 /* set the bits in the inode's tree */
2054                 if (ret >= 0)
2055                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2056                                                 GFP_NOFS);
2057                 if (ret < 0) {
2058                         kfree(failrec);
2059                         return ret;
2060                 }
2061         } else {
2062                 failrec = (struct io_failure_record *)(unsigned long)private;
2063                 pr_debug("bio_readpage_error: (found) logical=%llu, "
2064                          "start=%llu, len=%llu, validation=%d\n",
2065                          failrec->logical, failrec->start, failrec->len,
2066                          failrec->in_validation);
2067                 /*
2068                  * when data can be on disk more than twice, add to failrec here
2069                  * (e.g. with a list for failed_mirror) to make
2070                  * clean_io_failure() clean all those errors at once.
2071                  */
2072         }
2073         num_copies = btrfs_num_copies(
2074                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
2075                               failrec->logical, failrec->len);
2076         if (num_copies == 1) {
2077                 /*
2078                  * we only have a single copy of the data, so don't bother with
2079                  * all the retry and error correction code that follows. no
2080                  * matter what the error is, it is very likely to persist.
2081                  */
2082                 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2083                          "state=%p, num_copies=%d, next_mirror %d, "
2084                          "failed_mirror %d\n", state, num_copies,
2085                          failrec->this_mirror, failed_mirror);
2086                 free_io_failure(inode, failrec, 0);
2087                 return -EIO;
2088         }
2089
2090         if (!state) {
2091                 spin_lock(&tree->lock);
2092                 state = find_first_extent_bit_state(tree, failrec->start,
2093                                                     EXTENT_LOCKED);
2094                 if (state && state->start != failrec->start)
2095                         state = NULL;
2096                 spin_unlock(&tree->lock);
2097         }
2098
2099         /*
2100          * there are two goals:
2101          *      a) deliver good data to the caller
2102          *      b) correct the bad sectors on disk
2103          */
2104         if (failed_bio->bi_vcnt > 1) {
2105                 /*
2106                  * to fulfill b), we need to know the exact failing sectors, as
2107                  * we don't want to rewrite any more than the failed ones. thus,
2108                  * we need separate read requests for the failed bio
2109                  *
2110                  * if the following BUG_ON triggers, our validation request got
2111                  * merged. we need separate requests for our algorithm to work.
2112                  */
2113                 BUG_ON(failrec->in_validation);
2114                 failrec->in_validation = 1;
2115                 failrec->this_mirror = failed_mirror;
2116                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2117         } else {
2118                 /*
2119                  * we're ready to fulfill a) and b) at the same time. get a
2120                  * good copy of the failed sector and if we succeed, we have set up
2121                  * everything for repair_io_failure to do the rest for us.
2122                  */
2123                 if (failrec->in_validation) {
2124                         BUG_ON(failrec->this_mirror != failed_mirror);
2125                         failrec->in_validation = 0;
2126                         failrec->this_mirror = 0;
2127                 }
2128                 failrec->failed_mirror = failed_mirror;
2129                 failrec->this_mirror++;
2130                 if (failrec->this_mirror == failed_mirror)
2131                         failrec->this_mirror++;
2132                 read_mode = READ_SYNC;
2133         }
2134
2135         if (!state || failrec->this_mirror > num_copies) {
2136                 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2137                          "next_mirror %d, failed_mirror %d\n", state,
2138                          num_copies, failrec->this_mirror, failed_mirror);
2139                 free_io_failure(inode, failrec, 0);
2140                 return -EIO;
2141         }
2142
2143         bio = bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                /* out of memory, drop the failure record and give up */
                free_io_failure(inode, failrec, 0);
                return -EIO;
        }
2144         bio->bi_private = state;
2145         bio->bi_end_io = failed_bio->bi_end_io;
2146         bio->bi_sector = failrec->logical >> 9;
2147         bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2148         bio->bi_size = 0;
2149
2150         bio_add_page(bio, page, failrec->len, start - page_offset(page));
2151
2152         pr_debug("bio_readpage_error: submitting new read[%#x] to "
2153                  "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2154                  failrec->this_mirror, num_copies, failrec->in_validation);
2155
2156         tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror,
2157                                         failrec->bio_flags, 0);
2158         return 0;
2159 }
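
/*
 * Note on the retry loop: a bio resubmitted by bio_readpage_error() ends
 * up back in end_bio_extent_readpage() below.  If the csum then verifies,
 * clean_io_failure() rewrites the bad mirror and frees the record;
 * otherwise bio_readpage_error() is entered again with the next mirror
 * until this_mirror exceeds num_copies and -EIO is returned.
 */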
2160
2161 /* lots and lots of room for performance fixes in the end_bio funcs */
2162
2163 /*
2164  * after a writepage IO is done, we need to:
2165  * clear the uptodate bits on error
2166  * clear the writeback bits in the extent tree for this IO
2167  * end_page_writeback if the page has no more pending IO
2168  *
2169  * Scheduling is not allowed, so the extent state tree is expected
2170  * to have one and only one object corresponding to this IO.
2171  */
2172 static void end_bio_extent_writepage(struct bio *bio, int err)
2173 {
2174         int uptodate = err == 0;
2175         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2176         struct extent_io_tree *tree;
2177         u64 start;
2178         u64 end;
2179         int whole_page;
2180         int ret;
2181
2182         do {
2183                 struct page *page = bvec->bv_page;
2184                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2185
2186                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2187                          bvec->bv_offset;
2188                 end = start + bvec->bv_len - 1;
2189
2190                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2191                         whole_page = 1;
2192                 else
2193                         whole_page = 0;
2194
2195                 if (--bvec >= bio->bi_io_vec)
2196                         prefetchw(&bvec->bv_page->flags);
2197                 if (tree->ops && tree->ops->writepage_end_io_hook) {
2198                         ret = tree->ops->writepage_end_io_hook(page, start,
2199                                                        end, NULL, uptodate);
2200                         if (ret)
2201                                 uptodate = 0;
2202                 }
2203
2204                 if (!uptodate && tree->ops &&
2205                     tree->ops->writepage_io_failed_hook) {
2206                         ret = tree->ops->writepage_io_failed_hook(bio, page,
2207                                                          start, end, NULL);
2208                         if (ret == 0) {
2209                                 uptodate = (err == 0);
2210                                 continue;
2211                         }
2212                 }
2213
2214                 if (!uptodate) {
2215                         clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2216                         ClearPageUptodate(page);
2217                         SetPageError(page);
2218                 }
2219
2220                 if (whole_page)
2221                         end_page_writeback(page);
2222                 else
2223                         check_page_writeback(tree, page);
2224         } while (bvec >= bio->bi_io_vec);
2225
2226         bio_put(bio);
2227 }
2228
2229 /*
2230  * after a readpage IO is done, we need to:
2231  * clear the uptodate bits on error
2232  * set the uptodate bits if things worked
2233  * set the page up to date if all extents in the tree are uptodate
2234  * clear the lock bit in the extent tree
2235  * unlock the page if there are no other extents locked for it
2236  *
2237  * Scheduling is not allowed, so the extent state tree is expected
2238  * to have one and only one object corresponding to this IO.
2239  */
2240 static void end_bio_extent_readpage(struct bio *bio, int err)
2241 {
2242         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2243         struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2244         struct bio_vec *bvec = bio->bi_io_vec;
2245         struct extent_io_tree *tree;
2246         u64 start;
2247         u64 end;
2248         int whole_page;
2249         int ret;
2250
2251         if (err)
2252                 uptodate = 0;
2253
2254         do {
2255                 struct page *page = bvec->bv_page;
2256                 struct extent_state *cached = NULL;
2257                 struct extent_state *state;
2258
2259                 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2260                          "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2261                          (long int)bio->bi_bdev);
2262                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2263
2264                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2265                         bvec->bv_offset;
2266                 end = start + bvec->bv_len - 1;
2267
2268                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2269                         whole_page = 1;
2270                 else
2271                         whole_page = 0;
2272
2273                 if (++bvec <= bvec_end)
2274                         prefetchw(&bvec->bv_page->flags);
2275
2276                 spin_lock(&tree->lock);
2277                 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2278                 if (state && state->start == start) {
2279                         /*
2280                          * take a reference on the state, unlock will drop
2281                          * the ref
2282                          */
2283                         cache_state(state, &cached);
2284                 }
2285                 spin_unlock(&tree->lock);
2286
2287                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2288                         ret = tree->ops->readpage_end_io_hook(page, start, end,
2289                                                               state);
2290                         if (ret)
2291                                 uptodate = 0;
2292                         else
2293                                 clean_io_failure(start, page);
2294                 }
2295                 if (!uptodate) {
2296                         int failed_mirror;
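                        /* the btrfs submission path stashes the mirror
                         * number in bi_bdev before calling the original
                         * end_io (behavior of code outside this file,
                         * noted here for clarity); recover it for the
                         * retry logic
                         */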
2297                         failed_mirror = (int)(unsigned long)bio->bi_bdev;
2298                         /*
2299                          * The generic bio_readpage_error handles errors the
2300                          * following way: If possible, new read requests are
2301                          * created and submitted and will end up in
2302                          * end_bio_extent_readpage as well (if we're lucky, not
2303                          * in the !uptodate case). In that case it returns 0 and
2304                          * we just go on with the next page in our bio. If it
2305                          * can't handle the error it will return -EIO and we
2306                          * remain responsible for that page.
2307                          */
2308                         ret = bio_readpage_error(bio, page, start, end,
2309                                                         failed_mirror, NULL);
2310                         if (ret == 0) {
2311 error_handled:
2312                                 uptodate =
2313                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
2314                                 if (err)
2315                                         uptodate = 0;
2316                                 uncache_state(&cached);
2317                                 continue;
2318                         }
2319                         if (tree->ops && tree->ops->readpage_io_failed_hook) {
2320                                 ret = tree->ops->readpage_io_failed_hook(
2321                                                         bio, page, start, end,
2322                                                         failed_mirror, state);
2323                                 if (ret == 0)
2324                                         goto error_handled;
2325                         }
2326                 }
2327
2328                 if (uptodate) {
2329                         set_extent_uptodate(tree, start, end, &cached,
2330                                             GFP_ATOMIC);
2331                 }
2332                 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2333
2334                 if (whole_page) {
2335                         if (uptodate) {
2336                                 SetPageUptodate(page);
2337                         } else {
2338                                 ClearPageUptodate(page);
2339                                 SetPageError(page);
2340                         }
2341                         unlock_page(page);
2342                 } else {
2343                         if (uptodate) {
2344                                 check_page_uptodate(tree, page);
2345                         } else {
2346                                 ClearPageUptodate(page);
2347                                 SetPageError(page);
2348                         }
2349                         check_page_locked(tree, page);
2350                 }
2351         } while (bvec <= bvec_end);
2352
2353         bio_put(bio);
2354 }
2355
2356 struct bio *
2357 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2358                 gfp_t gfp_flags)
2359 {
2360         struct bio *bio;
2361
2362         bio = bio_alloc(gfp_flags, nr_vecs);
2363
2364         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2365                 while (!bio && (nr_vecs /= 2))
2366                         bio = bio_alloc(gfp_flags, nr_vecs);
2367         }
2368
2369         if (bio) {
2370                 bio->bi_size = 0;
2371                 bio->bi_bdev = bdev;
2372                 bio->bi_sector = first_sector;
2373         }
2374         return bio;
2375 }
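
/*
 * The halving loop above is a best-effort fallback: when the caller is in
 * memory reclaim (PF_MEMALLOC), a smaller bio beats no bio at all.  A
 * sketch of the usual call, as submit_extent_page() below makes it:
 *
 *	bio = btrfs_bio_alloc(bdev, sector, bio_get_nr_vecs(bdev),
 *			      GFP_NOFS | __GFP_HIGH);
 *	if (!bio)
 *		return -ENOMEM;
 */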
2376
2377 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
2378                           unsigned long bio_flags)
2379 {
2380         int ret = 0;
2381         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2382         struct page *page = bvec->bv_page;
2383         struct extent_io_tree *tree = bio->bi_private;
2384         u64 start;
2385
2386         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2387
2388         bio->bi_private = NULL;
2389
2390         bio_get(bio);
2391
2392         if (tree->ops && tree->ops->submit_bio_hook)
2393                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2394                                            mirror_num, bio_flags, start);
2395         else
2396                 submit_bio(rw, bio);
2397
2398         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2399                 ret = -EOPNOTSUPP;
2400         bio_put(bio);
2401         return ret;
2402 }
2403
2404 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2405                               struct page *page, sector_t sector,
2406                               size_t size, unsigned long offset,
2407                               struct block_device *bdev,
2408                               struct bio **bio_ret,
2409                               unsigned long max_pages,
2410                               bio_end_io_t end_io_func,
2411                               int mirror_num,
2412                               unsigned long prev_bio_flags,
2413                               unsigned long bio_flags)
2414 {
2415         int ret = 0;
2416         struct bio *bio;
2417         int nr;
2418         int contig = 0;
2419         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2420         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2421         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2422
2423         if (bio_ret && *bio_ret) {
2424                 bio = *bio_ret;
2425                 if (old_compressed)
2426                         contig = bio->bi_sector == sector;
2427                 else
2428                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
2429                                 sector;
2430
2431                 if (prev_bio_flags != bio_flags || !contig ||
2432                     (tree->ops && tree->ops->merge_bio_hook &&
2433                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
2434                                                bio_flags)) ||
2435                     bio_add_page(bio, page, page_size, offset) < page_size) {
2436                         ret = submit_one_bio(rw, bio, mirror_num,
2437                                              prev_bio_flags);
2438                         bio = NULL;
2439                 } else {
2440                         return 0;
2441                 }
2442         }
2443         if (this_compressed)
2444                 nr = BIO_MAX_PAGES;
2445         else
2446                 nr = bio_get_nr_vecs(bdev);
2447
2448         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2449         if (!bio)
2450                 return -ENOMEM;
2451
2452         bio_add_page(bio, page, page_size, offset);
2453         bio->bi_end_io = end_io_func;
2454         bio->bi_private = tree;
2455
2456         if (bio_ret)
2457                 *bio_ret = bio;
2458         else
2459                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2460
2461         return ret;
2462 }
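
/*
 * To summarize the merge logic above: a page is appended to an existing
 * *bio_ret only if the bio flags match, the sector is physically
 * contiguous, the merge_bio_hook (if any) does not object, and
 * bio_add_page() accepts the whole chunk; otherwise the old bio is
 * submitted and a fresh one is started for this page.
 */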
2463
2464 void set_page_extent_mapped(struct page *page)
2465 {
2466         if (!PagePrivate(page)) {
2467                 SetPagePrivate(page);
2468                 page_cache_get(page);
2469                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2470         }
2471 }
2472
2473 static void set_page_extent_head(struct page *page, unsigned long len)
2474 {
2475         WARN_ON(!PagePrivate(page));
2476         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
2477 }
2478
2479 /*
2480  * basic readpage implementation.  Locked extent state structs are inserted
2481  * into the tree and are removed when the IO is done (by the end_io
2482  * handlers)
2483  */
2484 static int __extent_read_full_page(struct extent_io_tree *tree,
2485                                    struct page *page,
2486                                    get_extent_t *get_extent,
2487                                    struct bio **bio, int mirror_num,
2488                                    unsigned long *bio_flags)
2489 {
2490         struct inode *inode = page->mapping->host;
2491         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2492         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2493         u64 end;
2494         u64 cur = start;
2495         u64 extent_offset;
2496         u64 last_byte = i_size_read(inode);
2497         u64 block_start;
2498         u64 cur_end;
2499         sector_t sector;
2500         struct extent_map *em;
2501         struct block_device *bdev;
2502         struct btrfs_ordered_extent *ordered;
2503         int ret;
2504         int nr = 0;
2505         size_t pg_offset = 0;
2506         size_t iosize;
2507         size_t disk_io_size;
2508         size_t blocksize = inode->i_sb->s_blocksize;
2509         unsigned long this_bio_flag = 0;
2510
2511         set_page_extent_mapped(page);
2512
2513         if (!PageUptodate(page)) {
2514                 if (cleancache_get_page(page) == 0) {
2515                         BUG_ON(blocksize != PAGE_SIZE);
2516                         goto out;
2517                 }
2518         }
2519
2520         end = page_end;
2521         while (1) {
2522                 lock_extent(tree, start, end, GFP_NOFS);
2523                 ordered = btrfs_lookup_ordered_extent(inode, start);
2524                 if (!ordered)
2525                         break;
2526                 unlock_extent(tree, start, end, GFP_NOFS);
2527                 btrfs_start_ordered_extent(inode, ordered, 1);
2528                 btrfs_put_ordered_extent(ordered);
2529         }
2530
2531         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2532                 char *userpage;
2533                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2534
2535                 if (zero_offset) {
2536                         iosize = PAGE_CACHE_SIZE - zero_offset;
2537                         userpage = kmap_atomic(page, KM_USER0);
2538                         memset(userpage + zero_offset, 0, iosize);
2539                         flush_dcache_page(page);
2540                         kunmap_atomic(userpage, KM_USER0);
2541                 }
2542         }
2543         while (cur <= end) {
2544                 if (cur >= last_byte) {
2545                         char *userpage;
2546                         struct extent_state *cached = NULL;
2547
2548                         iosize = PAGE_CACHE_SIZE - pg_offset;
2549                         userpage = kmap_atomic(page, KM_USER0);
2550                         memset(userpage + pg_offset, 0, iosize);
2551                         flush_dcache_page(page);
2552                         kunmap_atomic(userpage, KM_USER0);
2553                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2554                                             &cached, GFP_NOFS);
2555                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2556                                              &cached, GFP_NOFS);
2557                         break;
2558                 }
2559                 em = get_extent(inode, page, pg_offset, cur,
2560                                 end - cur + 1, 0);
2561                 if (IS_ERR_OR_NULL(em)) {
2562                         SetPageError(page);
2563                         unlock_extent(tree, cur, end, GFP_NOFS);
2564                         break;
2565                 }
2566                 extent_offset = cur - em->start;
2567                 BUG_ON(extent_map_end(em) <= cur);
2568                 BUG_ON(end < cur);
2569
2570                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2571                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2572                         extent_set_compress_type(&this_bio_flag,
2573                                                  em->compress_type);
2574                 }
2575
2576                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2577                 cur_end = min(extent_map_end(em) - 1, end);
2578                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2579                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2580                         disk_io_size = em->block_len;
2581                         sector = em->block_start >> 9;
2582                 } else {
2583                         sector = (em->block_start + extent_offset) >> 9;
2584                         disk_io_size = iosize;
2585                 }
2586                 bdev = em->bdev;
2587                 block_start = em->block_start;
2588                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2589                         block_start = EXTENT_MAP_HOLE;
2590                 free_extent_map(em);
2591                 em = NULL;
2592
2593                 /* we've found a hole, just zero and go on */
2594                 if (block_start == EXTENT_MAP_HOLE) {
2595                         char *userpage;
2596                         struct extent_state *cached = NULL;
2597
2598                         userpage = kmap_atomic(page, KM_USER0);
2599                         memset(userpage + pg_offset, 0, iosize);
2600                         flush_dcache_page(page);
2601                         kunmap_atomic(userpage, KM_USER0);
2602
2603                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2604                                             &cached, GFP_NOFS);
2605                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2606                                              &cached, GFP_NOFS);
2607                         cur = cur + iosize;
2608                         pg_offset += iosize;
2609                         continue;
2610                 }
2611                 /* the get_extent function already copied into the page */
2612                 if (test_range_bit(tree, cur, cur_end,
2613                                    EXTENT_UPTODATE, 1, NULL)) {
2614                         check_page_uptodate(tree, page);
2615                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2616                         cur = cur + iosize;
2617                         pg_offset += iosize;
2618                         continue;
2619                 }
2620                 /* we have an inline extent but it didn't get marked up
2621                  * to date.  Error out
2622                  */
2623                 if (block_start == EXTENT_MAP_INLINE) {
2624                         SetPageError(page);
2625                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2626                         cur = cur + iosize;
2627                         pg_offset += iosize;
2628                         continue;
2629                 }
2630
2631                 ret = 0;
2632                 if (tree->ops && tree->ops->readpage_io_hook) {
2633                         ret = tree->ops->readpage_io_hook(page, cur,
2634                                                           cur + iosize - 1);
2635                 }
2636                 if (!ret) {
2637                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2638                         pnr -= page->index;
2639                         ret = submit_extent_page(READ, tree, page,
2640                                          sector, disk_io_size, pg_offset,
2641                                          bdev, bio, pnr,
2642                                          end_bio_extent_readpage, mirror_num,
2643                                          *bio_flags,
2644                                          this_bio_flag);
2645                         nr++;
2646                         *bio_flags = this_bio_flag;
2647                 }
2648                 if (ret)
2649                         SetPageError(page);
2650                 cur = cur + iosize;
2651                 pg_offset += iosize;
2652         }
2653 out:
2654         if (!nr) {
2655                 if (!PageError(page))
2656                         SetPageUptodate(page);
2657                 unlock_page(page);
2658         }
2659         return 0;
2660 }
2661
2662 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2663                             get_extent_t *get_extent, int mirror_num)
2664 {
2665         struct bio *bio = NULL;
2666         unsigned long bio_flags = 0;
2667         int ret;
2668
2669         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2670                                       &bio_flags);
2671         if (bio)
2672                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2673         return ret;
2674 }
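
/*
 * __extent_read_full_page() only batches pages into 'bio'; the final
 * submit_one_bio() above flushes whatever was still batched once the
 * whole page has been walked.
 */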
2675
2676 static noinline void update_nr_written(struct page *page,
2677                                       struct writeback_control *wbc,
2678                                       unsigned long nr_written)
2679 {
2680         wbc->nr_to_write -= nr_written;
2681         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2682             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2683                 page->mapping->writeback_index = page->index + nr_written;
2684 }
2685
2686 /*
2687  * the writepage semantics are similar to regular writepage.  extent
2688  * records are inserted to lock ranges in the tree, and as dirty areas
2689  * are found, they are marked writeback.  Then the lock bits are removed
2690  * and the end_io handler clears the writeback ranges
2691  */
2692 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2693                               void *data)
2694 {
2695         struct inode *inode = page->mapping->host;
2696         struct extent_page_data *epd = data;
2697         struct extent_io_tree *tree = epd->tree;
2698         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2699         u64 delalloc_start;
2700         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2701         u64 end;
2702         u64 cur = start;
2703         u64 extent_offset;
2704         u64 last_byte = i_size_read(inode);
2705         u64 block_start;
2706         u64 iosize;
2707         sector_t sector;
2708         struct extent_state *cached_state = NULL;
2709         struct extent_map *em;
2710         struct block_device *bdev;
2711         int ret;
2712         int nr = 0;
2713         size_t pg_offset = 0;
2714         size_t blocksize;
2715         loff_t i_size = i_size_read(inode);
2716         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2717         u64 nr_delalloc;
2718         u64 delalloc_end;
2719         int page_started;
2720         int compressed;
2721         int write_flags;
2722         unsigned long nr_written = 0;
2723         bool fill_delalloc = true;
2724
2725         if (wbc->sync_mode == WB_SYNC_ALL)
2726                 write_flags = WRITE_SYNC;
2727         else
2728                 write_flags = WRITE;
2729
2730         trace___extent_writepage(page, inode, wbc);
2731
2732         WARN_ON(!PageLocked(page));
2733
2734         ClearPageError(page);
2735
2736         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2737         if (page->index > end_index ||
2738            (page->index == end_index && !pg_offset)) {
2739                 page->mapping->a_ops->invalidatepage(page, 0);
2740                 unlock_page(page);
2741                 return 0;
2742         }
2743
2744         if (page->index == end_index) {
2745                 char *userpage;
2746
2747                 userpage = kmap_atomic(page, KM_USER0);
2748                 memset(userpage + pg_offset, 0,
2749                        PAGE_CACHE_SIZE - pg_offset);
2750                 kunmap_atomic(userpage, KM_USER0);
2751                 flush_dcache_page(page);
2752         }
2753         pg_offset = 0;
2754
2755         set_page_extent_mapped(page);
2756
2757         if (!tree->ops || !tree->ops->fill_delalloc)
2758                 fill_delalloc = false;
2759
2760         delalloc_start = start;
2761         delalloc_end = 0;
2762         page_started = 0;
2763         if (!epd->extent_locked && fill_delalloc) {
2764                 u64 delalloc_to_write = 0;
2765                 /*
2766                  * make sure the wbc mapping index is at least updated
2767                  * to this page.
2768                  */
2769                 update_nr_written(page, wbc, 0);
2770
2771                 while (delalloc_end < page_end) {
2772                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2773                                                        page,
2774                                                        &delalloc_start,
2775                                                        &delalloc_end,
2776                                                        128 * 1024 * 1024);
2777                         if (nr_delalloc == 0) {
2778                                 delalloc_start = delalloc_end + 1;
2779                                 continue;
2780                         }
2781                         tree->ops->fill_delalloc(inode, page, delalloc_start,
2782                                                  delalloc_end, &page_started,
2783                                                  &nr_written);
2784                         /*
2785                          * delalloc_end is already one less than the total
2786                          * length, so we don't subtract one from
2787                          * PAGE_CACHE_SIZE
2788                          */
2789                         delalloc_to_write += (delalloc_end - delalloc_start +
2790                                               PAGE_CACHE_SIZE) >>
2791                                               PAGE_CACHE_SHIFT;
2792                         delalloc_start = delalloc_end + 1;
2793                 }
2794                 if (wbc->nr_to_write < delalloc_to_write) {
2795                         int thresh = 8192;
2796
2797                         if (delalloc_to_write < thresh * 2)
2798                                 thresh = delalloc_to_write;
2799                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
2800                                                  thresh);
2801                 }
2802
2803                 /* did the fill delalloc function already unlock and start
2804                  * the IO?
2805                  */
2806                 if (page_started) {
2807                         ret = 0;
2808                         /*
2809                          * we've unlocked the page, so we can't update
2810                          * the mapping's writeback index, just update
2811                          * nr_to_write.
2812                          */
2813                         wbc->nr_to_write -= nr_written;
2814                         goto done_unlocked;
2815                 }
2816         }
2817         if (tree->ops && tree->ops->writepage_start_hook) {
2818                 ret = tree->ops->writepage_start_hook(page, start,
2819                                                       page_end);
2820                 if (ret == -EAGAIN) {
2821                         redirty_page_for_writepage(wbc, page);
2822                         update_nr_written(page, wbc, nr_written);
2823                         unlock_page(page);
2824                         ret = 0;
2825                         goto done_unlocked;
2826                 }
2827         }
2828
2829         /*
2830          * we don't want to touch the inode after unlocking the page,
2831          * so we update the mapping writeback index now
2832          */
2833         update_nr_written(page, wbc, nr_written + 1);
2834
2835         end = page_end;
2836         if (last_byte <= start) {
2837                 if (tree->ops && tree->ops->writepage_end_io_hook)
2838                         tree->ops->writepage_end_io_hook(page, start,
2839                                                          page_end, NULL, 1);
2840                 goto done;
2841         }
2842
2843         blocksize = inode->i_sb->s_blocksize;
2844
2845         while (cur <= end) {
2846                 if (cur >= last_byte) {
2847                         if (tree->ops && tree->ops->writepage_end_io_hook)
2848                                 tree->ops->writepage_end_io_hook(page, cur,
2849                                                          page_end, NULL, 1);
2850                         break;
2851                 }
2852                 em = epd->get_extent(inode, page, pg_offset, cur,
2853                                      end - cur + 1, 1);
2854                 if (IS_ERR_OR_NULL(em)) {
2855                         SetPageError(page);
2856                         break;
2857                 }
2858
2859                 extent_offset = cur - em->start;
2860                 BUG_ON(extent_map_end(em) <= cur);
2861                 BUG_ON(end < cur);
2862                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2863                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2864                 sector = (em->block_start + extent_offset) >> 9;
2865                 bdev = em->bdev;
2866                 block_start = em->block_start;
2867                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2868                 free_extent_map(em);
2869                 em = NULL;
2870
2871                 /*
2872                  * compressed and inline extents are written through other
2873                  * paths in the FS
2874                  */
2875                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2876                     block_start == EXTENT_MAP_INLINE) {
2877                         /*
2878                          * end_io notification does not happen here for
2879                          * compressed extents
2880                          */
2881                         if (!compressed && tree->ops &&
2882                             tree->ops->writepage_end_io_hook)
2883                                 tree->ops->writepage_end_io_hook(page, cur,
2884                                                          cur + iosize - 1,
2885                                                          NULL, 1);
2886                         else if (compressed) {
2887                                 /* we don't want to end_page_writeback on
2888                                  * a compressed extent.  this happens
2889                                  * elsewhere
2890                                  */
2891                                 nr++;
2892                         }
2893
2894                         cur += iosize;
2895                         pg_offset += iosize;
2896                         continue;
2897                 }
2898                 /* leave this out until we have a page_mkwrite call */
2899                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2900                                    EXTENT_DIRTY, 0, NULL)) {
2901                         cur = cur + iosize;
2902                         pg_offset += iosize;
2903                         continue;
2904                 }
2905
2906                 if (tree->ops && tree->ops->writepage_io_hook) {
2907                         ret = tree->ops->writepage_io_hook(page, cur,
2908                                                 cur + iosize - 1);
2909                 } else {
2910                         ret = 0;
2911                 }
2912                 if (ret) {
2913                         SetPageError(page);
2914                 } else {
2915                         unsigned long max_nr = end_index + 1;
2916
2917                         set_range_writeback(tree, cur, cur + iosize - 1);
2918                         if (!PageWriteback(page)) {
2919                                 printk(KERN_ERR "btrfs warning page %lu not "
2920                                        "writeback, cur %llu end %llu\n",
2921                                        page->index, (unsigned long long)cur,
2922                                        (unsigned long long)end);
2923                         }
2924
2925                         ret = submit_extent_page(write_flags, tree, page,
2926                                                  sector, iosize, pg_offset,
2927                                                  bdev, &epd->bio, max_nr,
2928                                                  end_bio_extent_writepage,
2929                                                  0, 0, 0);
2930                         if (ret)
2931                                 SetPageError(page);
2932                 }
2933                 cur = cur + iosize;
2934                 pg_offset += iosize;
2935                 nr++;
2936         }
2937 done:
2938         if (nr == 0) {
2939                 /* make sure the mapping tag for page dirty gets cleared */
2940                 set_page_writeback(page);
2941                 end_page_writeback(page);
2942         }
2943         unlock_page(page);
2944
2945 done_unlocked:
2946
2947         /* drop our reference on any cached states */
2948         free_extent_state(cached_state);
2949         return 0;
2950 }
2951
2952 /**
2953  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2954  * @mapping: address space structure to write
2955  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2956  * @writepage: function called for each page
2957  * @data: data passed to writepage function
2958  *
2959  * If a page is already under I/O, write_cache_pages() skips it, even
2960  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2961  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2962  * and msync() need to guarantee that all the data which was dirty at the time
2963  * the call was made gets new I/O started against it.  If wbc->sync_mode is
2964  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2965  * existing IO to complete.
2966  */
2967 static int extent_write_cache_pages(struct extent_io_tree *tree,
2968                              struct address_space *mapping,
2969                              struct writeback_control *wbc,
2970                              writepage_t writepage, void *data,
2971                              void (*flush_fn)(void *))
2972 {
2973         int ret = 0;
2974         int done = 0;
2975         int nr_to_write_done = 0;
2976         struct pagevec pvec;
2977         int nr_pages;
2978         pgoff_t index;
2979         pgoff_t end;            /* Inclusive */
2980         int scanned = 0;
2981         int tag;
2982
2983         pagevec_init(&pvec, 0);
2984         if (wbc->range_cyclic) {
2985                 index = mapping->writeback_index; /* Start from prev offset */
2986                 end = -1;
2987         } else {
2988                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2989                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2990                 scanned = 1;
2991         }
2992         if (wbc->sync_mode == WB_SYNC_ALL)
2993                 tag = PAGECACHE_TAG_TOWRITE;
2994         else
2995                 tag = PAGECACHE_TAG_DIRTY;
2996 retry:
2997         if (wbc->sync_mode == WB_SYNC_ALL)
2998                 tag_pages_for_writeback(mapping, index, end);
2999         while (!done && !nr_to_write_done && (index <= end) &&
3000                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3001                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3002                 unsigned i;
3003
3004                 scanned = 1;
3005                 for (i = 0; i < nr_pages; i++) {
3006                         struct page *page = pvec.pages[i];
3007
3008                         /*
3009                          * At this point we hold neither mapping->tree_lock nor
3010                          * lock on the page itself: the page may be truncated or
3011                          * invalidated (changing page->mapping to NULL), or even
3012                          * swizzled back from swapper_space to tmpfs file
3013                          * mapping
3014                          */
3015                         if (tree->ops &&
3016                             tree->ops->write_cache_pages_lock_hook) {
3017                                 tree->ops->write_cache_pages_lock_hook(page,
3018                                                                data, flush_fn);
3019                         } else {
3020                                 if (!trylock_page(page)) {
3021                                         flush_fn(data);
3022                                         lock_page(page);
3023                                 }
3024                         }
3025
3026                         if (unlikely(page->mapping != mapping)) {
3027                                 unlock_page(page);
3028                                 continue;
3029                         }
3030
3031                         if (!wbc->range_cyclic && page->index > end) {
3032                                 done = 1;
3033                                 unlock_page(page);
3034                                 continue;
3035                         }
3036
3037                         if (wbc->sync_mode != WB_SYNC_NONE) {
3038                                 if (PageWriteback(page))
3039                                         flush_fn(data);
3040                                 wait_on_page_writeback(page);
3041                         }
3042
3043                         if (PageWriteback(page) ||
3044                             !clear_page_dirty_for_io(page)) {
3045                                 unlock_page(page);
3046                                 continue;
3047                         }
3048
3049                         ret = (*writepage)(page, wbc, data);
3050
3051                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3052                                 unlock_page(page);
3053                                 ret = 0;
3054                         }
3055                         if (ret)
3056                                 done = 1;
3057
3058                         /*
3059                          * the filesystem may choose to bump up nr_to_write.
3060                          * We have to make sure to honor the new nr_to_write
3061                          * at any time
3062                          */
3063                         nr_to_write_done = wbc->nr_to_write <= 0;
3064                 }
3065                 pagevec_release(&pvec);
3066                 cond_resched();
3067         }
3068         if (!scanned && !done) {
3069                 /*
3070                  * We hit the last page and there is more work to be done: wrap
3071                  * back to the start of the file
3072                  */
3073                 scanned = 1;
3074                 index = 0;
3075                 goto retry;
3076         }
3077         return ret;
3078 }
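/*
 * Usage sketch (illustrative only, not part of the original source;
 * extent_writepages() below is the real in-tree caller).  A data-integrity
 * flush of a whole mapping would look roughly like this; the get_extent
 * wiring is an assumption mirroring extent_writepages().
 */
#if 0
static int example_flush_mapping(struct extent_io_tree *tree,
                                 struct address_space *mapping,
                                 get_extent_t *get_extent)
{
        int ret;
        struct extent_page_data epd = {
                .bio = NULL,
                .tree = tree,
                .get_extent = get_extent,
                .extent_locked = 0,
                .sync_io = 1,
        };
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_ALL,  /* tag pages and wait on IO */
                .nr_to_write    = LONG_MAX,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };

        ret = extent_write_cache_pages(tree, mapping, &wbc,
                                       __extent_writepage, &epd,
                                       flush_write_bio);
        flush_epd_write_bio(&epd);
        return ret;
}
#endif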
3079
3080 static void flush_epd_write_bio(struct extent_page_data *epd)
3081 {
3082         if (epd->bio) {
3083                 if (epd->sync_io)
3084                         submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
3085                 else
3086                         submit_one_bio(WRITE, epd->bio, 0, 0);
3087                 epd->bio = NULL;
3088         }
3089 }
3090
3091 static noinline void flush_write_bio(void *data)
3092 {
3093         struct extent_page_data *epd = data;
3094         flush_epd_write_bio(epd);
3095 }
3096
3097 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3098                           get_extent_t *get_extent,
3099                           struct writeback_control *wbc)
3100 {
3101         int ret;
3102         struct extent_page_data epd = {
3103                 .bio = NULL,
3104                 .tree = tree,
3105                 .get_extent = get_extent,
3106                 .extent_locked = 0,
3107                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3108         };
3109
3110         ret = __extent_writepage(page, wbc, &epd);
3111
3112         flush_epd_write_bio(&epd);
3113         return ret;
3114 }
3115
3116 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3117                               u64 start, u64 end, get_extent_t *get_extent,
3118                               int mode)
3119 {
3120         int ret = 0;
3121         struct address_space *mapping = inode->i_mapping;
3122         struct page *page;
3123         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3124                 PAGE_CACHE_SHIFT;
3125
3126         struct extent_page_data epd = {
3127                 .bio = NULL,
3128                 .tree = tree,
3129                 .get_extent = get_extent,
3130                 .extent_locked = 1,
3131                 .sync_io = mode == WB_SYNC_ALL,
3132         };
3133         struct writeback_control wbc_writepages = {
3134                 .sync_mode      = mode,
3135                 .nr_to_write    = nr_pages * 2,
3136                 .range_start    = start,
3137                 .range_end      = end + 1,
3138         };
3139
3140         while (start <= end) {
3141                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3142                 if (clear_page_dirty_for_io(page))
3143                         ret = __extent_writepage(page, &wbc_writepages, &epd);
3144                 else {
3145                         if (tree->ops && tree->ops->writepage_end_io_hook)
3146                                 tree->ops->writepage_end_io_hook(page, start,
3147                                                  start + PAGE_CACHE_SIZE - 1,
3148                                                  NULL, 1);
3149                         unlock_page(page);
3150                 }
3151                 page_cache_release(page);
3152                 start += PAGE_CACHE_SIZE;
3153         }
3154
3155         flush_epd_write_bio(&epd);
3156         return ret;
3157 }
3158
3159 int extent_writepages(struct extent_io_tree *tree,
3160                       struct address_space *mapping,
3161                       get_extent_t *get_extent,
3162                       struct writeback_control *wbc)
3163 {
3164         int ret = 0;
3165         struct extent_page_data epd = {
3166                 .bio = NULL,
3167                 .tree = tree,
3168                 .get_extent = get_extent,
3169                 .extent_locked = 0,
3170                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3171         };
3172
3173         ret = extent_write_cache_pages(tree, mapping, wbc,
3174                                        __extent_writepage, &epd,
3175                                        flush_write_bio);
3176         flush_epd_write_bio(&epd);
3177         return ret;
3178 }
3179
3180 int extent_readpages(struct extent_io_tree *tree,
3181                      struct address_space *mapping,
3182                      struct list_head *pages, unsigned nr_pages,
3183                      get_extent_t get_extent)
3184 {
3185         struct bio *bio = NULL;
3186         unsigned page_idx;
3187         unsigned long bio_flags = 0;
3188
3189         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3190                 struct page *page = list_entry(pages->prev, struct page, lru);
3191
3192                 prefetchw(&page->flags);
3193                 list_del(&page->lru);
3194                 if (!add_to_page_cache_lru(page, mapping,
3195                                         page->index, GFP_NOFS)) {
3196                         __extent_read_full_page(tree, page, get_extent,
3197                                                 &bio, 0, &bio_flags);
3198                 }
3199                 page_cache_release(page);
3200         }
3201         BUG_ON(!list_empty(pages));
3202         if (bio)
3203                 submit_one_bio(READ, bio, 0, bio_flags);
3204         return 0;
3205 }
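/*
 * Annotation (not part of the original source): the readahead code hands
 * extent_readpages() its page list with the lowest index at the tail,
 * which is why the loop above always pulls pages->prev -- the pages are
 * then read in ascending file order.
 */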
3206
3207 /*
3208  * basic invalidatepage code, this waits on any locked or writeback
3209  * ranges corresponding to the page, and then deletes any extent state
3210  * records from the tree
3211  */
3212 int extent_invalidatepage(struct extent_io_tree *tree,
3213                           struct page *page, unsigned long offset)
3214 {
3215         struct extent_state *cached_state = NULL;
3216         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3217         u64 end = start + PAGE_CACHE_SIZE - 1;
3218         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3219
3220         start += (offset + blocksize - 1) & ~(blocksize - 1);
3221         if (start > end)
3222                 return 0;
3223
3224         lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
3225         wait_on_page_writeback(page);
3226         clear_extent_bit(tree, start, end,
3227                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3228                          EXTENT_DO_ACCOUNTING,
3229                          1, 1, &cached_state, GFP_NOFS);
3230         return 0;
3231 }
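/*
 * Worked example (annotation, not part of the original source): with 4K
 * pages and blocksize == 4096, a partial-page invalidate at offset == 1
 * advances start by (1 + 4095) & ~4095 == 4096, pushing it past end so
 * the state bits survive; only offset == 0 clears the whole range.
 */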
3232
3233 /*
3234  * a helper for releasepage, this tests for areas of the page that
3235  * are locked or under IO and drops the related state bits if it is safe
3236  * to drop the page.
3237  */
3238 int try_release_extent_state(struct extent_map_tree *map,
3239                              struct extent_io_tree *tree, struct page *page,
3240                              gfp_t mask)
3241 {
3242         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3243         u64 end = start + PAGE_CACHE_SIZE - 1;
3244         int ret = 1;
3245
3246         if (test_range_bit(tree, start, end,
3247                            EXTENT_IOBITS, 0, NULL))
3248                 ret = 0;
3249         else {
3250                 if ((mask & GFP_NOFS) == GFP_NOFS)
3251                         mask = GFP_NOFS;
3252                 /*
3253                  * at this point we can safely clear everything except the
3254                  * locked bit and the nodatasum bit
3255                  */
3256                 ret = clear_extent_bit(tree, start, end,
3257                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3258                                  0, 0, NULL, mask);
3259
3260                 /* if clear_extent_bit failed for -ENOMEM reasons,
3261                  * we can't allow the release to continue.
3262                  */
3263                 if (ret < 0)
3264                         ret = 0;
3265                 else
3266                         ret = 1;
3267         }
3268         return ret;
3269 }
3270
3271 /*
3272  * a helper for releasepage.  As long as there are no locked extents
3273  * in the range corresponding to the page, both state records and extent
3274  * map records are removed
3275  */
3276 int try_release_extent_mapping(struct extent_map_tree *map,
3277                                struct extent_io_tree *tree, struct page *page,
3278                                gfp_t mask)
3279 {
3280         struct extent_map *em;
3281         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3282         u64 end = start + PAGE_CACHE_SIZE - 1;
3283
3284         if ((mask & __GFP_WAIT) &&
3285             page->mapping->host->i_size > 16 * 1024 * 1024) {
3286                 u64 len;
3287                 while (start <= end) {
3288                         len = end - start + 1;
3289                         write_lock(&map->lock);
3290                         em = lookup_extent_mapping(map, start, len);
3291                         if (IS_ERR_OR_NULL(em)) {
3292                                 write_unlock(&map->lock);
3293                                 break;
3294                         }
3295                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3296                             em->start != start) {
3297                                 write_unlock(&map->lock);
3298                                 free_extent_map(em);
3299                                 break;
3300                         }
3301                         if (!test_range_bit(tree, em->start,
3302                                             extent_map_end(em) - 1,
3303                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
3304                                             0, NULL)) {
3305                                 remove_extent_mapping(map, em);
3306                                 /* once for the rb tree */
3307                                 free_extent_map(em);
3308                         }
3309                         start = extent_map_end(em);
3310                         write_unlock(&map->lock);
3311
3312                         /* once for us */
3313                         free_extent_map(em);
3314                 }
3315         }
3316         return try_release_extent_state(map, tree, page, mask);
3317 }
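/*
 * Annotation (not part of the original source): the __GFP_WAIT and 16MB
 * i_size checks above are heuristics -- only reclaim contexts that are
 * allowed to block bother walking the map tree, and only for files large
 * enough that dropping cached extent maps is worth the effort.
 */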
3318
3319 /*
3320  * helper function for fiemap, which doesn't want to see any holes.
3321  * This maps until we find something past 'last'
3322  */
3323 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3324                                                 u64 offset,
3325                                                 u64 last,
3326                                                 get_extent_t *get_extent)
3327 {
3328         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3329         struct extent_map *em;
3330         u64 len;
3331
3332         if (offset >= last)
3333                 return NULL;
3334
3335         while (1) {
3336                 len = last - offset;
3337                 if (len == 0)
3338                         break;
3339                 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3340                 em = get_extent(inode, NULL, 0, offset, len, 0);
3341                 if (IS_ERR_OR_NULL(em))
3342                         return em;
3343
3344                 /* if this isn't a hole return it */
3345                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3346                     em->block_start != EXTENT_MAP_HOLE) {
3347                         return em;
3348                 }
3349
3350                 /* this is a hole, advance to the next extent */
3351                 offset = extent_map_end(em);
3352                 free_extent_map(em);
3353                 if (offset >= last)
3354                         break;
3355         }
3356         return NULL;
3357 }
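/*
 * Annotation (not part of the original source): the len round-up above
 * keeps get_extent() queries sector aligned, e.g. with sectorsize == 4096
 * and last - offset == 100, len becomes (100 + 4095) & ~4095 == 4096.
 */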
3358
3359 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3360                 __u64 start, __u64 len, get_extent_t *get_extent)
3361 {
3362         int ret = 0;
3363         u64 off = start;
3364         u64 max = start + len;
3365         u32 flags = 0;
3366         u32 found_type;
3367         u64 last;
3368         u64 last_for_get_extent = 0;
3369         u64 disko = 0;
3370         u64 isize = i_size_read(inode);
3371         struct btrfs_key found_key;
3372         struct extent_map *em = NULL;
3373         struct extent_state *cached_state = NULL;
3374         struct btrfs_path *path;
3375         struct btrfs_file_extent_item *item;
3376         int end = 0;
3377         u64 em_start = 0;
3378         u64 em_len = 0;
3379         u64 em_end = 0;
3380         unsigned long emflags;
3381
3382         if (len == 0)
3383                 return -EINVAL;
3384
3385         path = btrfs_alloc_path();
3386         if (!path)
3387                 return -ENOMEM;
3388         path->leave_spinning = 1;
3389
3390         start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3391         len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3392
3393         /*
3394          * lookup the last file extent.  We're not using i_size here
3395          * because there might be preallocation past i_size
3396          */
3397         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3398                                        path, btrfs_ino(inode), -1, 0);
3399         if (ret < 0) {
3400                 btrfs_free_path(path);
3401                 return ret;
3402         }
3403         WARN_ON(!ret);
3404         path->slots[0]--;
3405         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3406                               struct btrfs_file_extent_item);
3407         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3408         found_type = btrfs_key_type(&found_key);
3409
3410         /* No extents, but there might be delalloc bits */
3411         if (found_key.objectid != btrfs_ino(inode) ||
3412             found_type != BTRFS_EXTENT_DATA_KEY) {
3413                 /* have to trust i_size as the end */
3414                 last = (u64)-1;
3415                 last_for_get_extent = isize;
3416         } else {
3417                 /*
3418                  * remember the start of the last extent.  There are a
3419                  * bunch of different factors that go into the length of the
3420                  * extent, so it's much less complex to remember where it started
3421                  */
3422                 last = found_key.offset;
3423                 last_for_get_extent = last + 1;
3424         }
3425         btrfs_free_path(path);
3426
3427         /*
3428          * we might have some extents allocated but more delalloc past those
3429          * extents.  so, we trust isize unless the start of the last extent is
3430          * beyond isize
3431          */
3432         if (last < isize) {
3433                 last = (u64)-1;
3434                 last_for_get_extent = isize;
3435         }
3436
3437         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3438                          &cached_state, GFP_NOFS);
3439
3440         em = get_extent_skip_holes(inode, start, last_for_get_extent,
3441                                    get_extent);
3442         if (!em)
3443                 goto out;
3444         if (IS_ERR(em)) {
3445                 ret = PTR_ERR(em);
3446                 goto out;
3447         }
3448
3449         while (!end) {
3450                 u64 offset_in_extent;
3451
3452                 /* break if the extent we found is outside the range */
3453                 if (em->start >= max || extent_map_end(em) < off)
3454                         break;
3455
3456                 /*
3457                  * get_extent may return an extent that starts before our
3458                  * requested range.  We have to make sure the ranges
3459                  * we return to fiemap always move forward and don't
3460                  * overlap, so adjust the offsets here
3461                  */
3462                 em_start = max(em->start, off);
3463
3464                 /*
3465                  * record the offset from the start of the extent
3466                  * for adjusting the disk offset below
3467                  */
3468                 offset_in_extent = em_start - em->start;
3469                 em_end = extent_map_end(em);
3470                 em_len = em_end - em_start;
3471                 emflags = em->flags;
3472                 disko = 0;
3473                 flags = 0;
3474
3475                 /*
3476                  * bump off for our next call to get_extent
3477                  */
3478                 off = extent_map_end(em);
3479                 if (off >= max)
3480                         end = 1;
3481
3482                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3483                         end = 1;
3484                         flags |= FIEMAP_EXTENT_LAST;
3485                 } else if (em->block_start == EXTENT_MAP_INLINE) {
3486                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
3487                                   FIEMAP_EXTENT_NOT_ALIGNED);
3488                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3489                         flags |= (FIEMAP_EXTENT_DELALLOC |
3490                                   FIEMAP_EXTENT_UNKNOWN);
3491                 } else {
3492                         disko = em->block_start + offset_in_extent;
3493                 }
3494                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3495                         flags |= FIEMAP_EXTENT_ENCODED;
3496
3497                 free_extent_map(em);
3498                 em = NULL;
3499                 if ((em_start >= last) || em_len == (u64)-1 ||
3500                     (last == (u64)-1 && isize <= em_end)) {
3501                         flags |= FIEMAP_EXTENT_LAST;
3502                         end = 1;
3503                 }
3504
3505                 /* now scan forward to see if this is really the last extent. */
3506                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3507                                            get_extent);
3508                 if (IS_ERR(em)) {
3509                         ret = PTR_ERR(em);
3510                         goto out;
3511                 }
3512                 if (!em) {
3513                         flags |= FIEMAP_EXTENT_LAST;
3514                         end = 1;
3515                 }
3516                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3517                                               em_len, flags);
3518                 if (ret)
3519                         goto out_free;
3520         }
3521 out_free:
3522         free_extent_map(em);
3523 out:
3524         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3525                              &cached_state, GFP_NOFS);
3526         return ret;
3527 }
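/*
 * Annotation (not part of the original source): in this tree the FIEMAP
 * ioctl reaches extent_fiemap() through btrfs_fiemap() in inode.c, which
 * passes btrfs_get_extent_fiemap as the get_extent callback.
 */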
3528
3529 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3530                                               unsigned long i)
3531 {
3532         struct page *p;
3533         struct address_space *mapping;
3534
3535         if (i == 0)
3536                 return eb->first_page;
3537         i += eb->start >> PAGE_CACHE_SHIFT;
3538         mapping = eb->first_page->mapping;
3539         if (!mapping)
3540                 return NULL;
3541
3542         /*
3543          * extent_buffer_page is only called after pinning the page
3544          * by increasing the reference count.  So we know the page must
3545          * be in the radix tree.
3546          */
3547         rcu_read_lock();
3548         p = radix_tree_lookup(&mapping->page_tree, i);
3549         rcu_read_unlock();
3550
3551         return p;
3552 }
3553
3554 inline unsigned long num_extent_pages(u64 start, u64 len)
3555 {
3556         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3557                 (start >> PAGE_CACHE_SHIFT);
3558 }
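/*
 * Worked example (annotation, not part of the original source):
 * num_extent_pages() counts every page touched by [start, start + len).
 * With 4K pages, start == 6144 and len == 4096 gives
 * ((6144 + 4096 + 4095) >> 12) - (6144 >> 12) == 3 - 1 == 2 pages,
 * since the buffer straddles a page boundary.
 */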
3559
3560 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3561                                                    u64 start,
3562                                                    unsigned long len,
3563                                                    gfp_t mask)
3564 {
3565         struct extent_buffer *eb = NULL;
3566 #if LEAK_DEBUG
3567         unsigned long flags;
3568 #endif
3569
3570         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3571         if (eb == NULL)
3572                 return NULL;
3573         eb->start = start;
3574         eb->len = len;
3575         rwlock_init(&eb->lock);
3576         atomic_set(&eb->write_locks, 0);
3577         atomic_set(&eb->read_locks, 0);
3578         atomic_set(&eb->blocking_readers, 0);
3579         atomic_set(&eb->blocking_writers, 0);
3580         atomic_set(&eb->spinning_readers, 0);
3581         atomic_set(&eb->spinning_writers, 0);
3582         init_waitqueue_head(&eb->write_lock_wq);
3583         init_waitqueue_head(&eb->read_lock_wq);
3584
3585 #if LEAK_DEBUG
3586         spin_lock_irqsave(&leak_lock, flags);
3587         list_add(&eb->leak_list, &buffers);
3588         spin_unlock_irqrestore(&leak_lock, flags);
3589 #endif
3590         atomic_set(&eb->refs, 1);
3591
3592         return eb;
3593 }
3594
3595 static void __free_extent_buffer(struct extent_buffer *eb)
3596 {
3597 #if LEAK_DEBUG
3598         unsigned long flags;
3599         spin_lock_irqsave(&leak_lock, flags);
3600         list_del(&eb->leak_list);
3601         spin_unlock_irqrestore(&leak_lock, flags);
3602 #endif
3603         kmem_cache_free(extent_buffer_cache, eb);
3604 }
3605
3606 /*
3607  * Helper for releasing extent buffer page.
3608  */
3609 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3610                                                 unsigned long start_idx)
3611 {
3612         unsigned long index;
3613         struct page *page;
3614
3615         if (!eb->first_page)
3616                 return;
3617
3618         index = num_extent_pages(eb->start, eb->len);
3619         if (start_idx >= index)
3620                 return;
3621
3622         do {
3623                 index--;
3624                 page = extent_buffer_page(eb, index);
3625                 if (page)
3626                         page_cache_release(page);
3627         } while (index != start_idx);
3628 }
3629
3630 /*
3631  * Helper for releasing the extent buffer.
3632  */
3633 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3634 {
3635         btrfs_release_extent_buffer_page(eb, 0);
3636         __free_extent_buffer(eb);
3637 }
3638
3639 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3640                                           u64 start, unsigned long len,
3641                                           struct page *page0)
3642 {
3643         unsigned long num_pages = num_extent_pages(start, len);
3644         unsigned long i;
3645         unsigned long index = start >> PAGE_CACHE_SHIFT;
3646         struct extent_buffer *eb;
3647         struct extent_buffer *exists = NULL;
3648         struct page *p;
3649         struct address_space *mapping = tree->mapping;
3650         int uptodate = 1;
3651         int ret;
3652
3653         rcu_read_lock();
3654         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3655         if (eb && atomic_inc_not_zero(&eb->refs)) {
3656                 rcu_read_unlock();
3657                 mark_page_accessed(eb->first_page);
3658                 return eb;
3659         }
3660         rcu_read_unlock();
3661
3662         eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3663         if (!eb)
3664                 return NULL;
3665
3666         if (page0) {
3667                 eb->first_page = page0;
3668                 i = 1;
3669                 index++;
3670                 page_cache_get(page0);
3671                 mark_page_accessed(page0);
3672                 set_page_extent_mapped(page0);
3673                 set_page_extent_head(page0, len);
3674                 uptodate = PageUptodate(page0);
3675         } else {
3676                 i = 0;
3677         }
3678         for (; i < num_pages; i++, index++) {
3679                 p = find_or_create_page(mapping, index, GFP_NOFS);
3680                 if (!p) {
3681                         WARN_ON(1);
3682                         goto free_eb;
3683                 }
3684                 set_page_extent_mapped(p);
3685                 mark_page_accessed(p);
3686                 if (i == 0) {
3687                         eb->first_page = p;
3688                         set_page_extent_head(p, len);
3689                 } else {
3690                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3691                 }
3692                 if (!PageUptodate(p))
3693                         uptodate = 0;
3694
3695                 /*
3696                  * see below about how we avoid a nasty race with release page
3697                  * and why we unlock later
3698                  */
3699                 if (i != 0)
3700                         unlock_page(p);
3701         }
3702         if (uptodate)
3703                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3704
3705         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3706         if (ret)
3707                 goto free_eb;
3708
3709         spin_lock(&tree->buffer_lock);
3710         ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3711         if (ret == -EEXIST) {
3712                 exists = radix_tree_lookup(&tree->buffer,
3713                                                 start >> PAGE_CACHE_SHIFT);
3714                 /* add one reference for the caller */
3715                 atomic_inc(&exists->refs);
3716                 spin_unlock(&tree->buffer_lock);
3717                 radix_tree_preload_end();
3718                 goto free_eb;
3719         }
3720         /* add one reference for the tree */
3721         atomic_inc(&eb->refs);
3722         spin_unlock(&tree->buffer_lock);
3723         radix_tree_preload_end();
3724
3725         /*
3726          * there is a race where release page may have
3727          * tried to find this extent buffer in the radix
3728          * but failed.  It will tell the VM it is safe to
3729          * reclaim the page, and it will clear the page private bit.
3730          * We must make sure to set the page private bit properly
3731          * after the extent buffer is in the radix tree so
3732          * it doesn't get lost
3733          */
3734         set_page_extent_mapped(eb->first_page);
3735         set_page_extent_head(eb->first_page, eb->len);
3736         if (!page0)
3737                 unlock_page(eb->first_page);
3738         return eb;
3739
3740 free_eb:
3741         if (eb->first_page && !page0)
3742                 unlock_page(eb->first_page);
3743
3744         if (!atomic_dec_and_test(&eb->refs))
3745                 return exists;
3746         btrfs_release_extent_buffer(eb);
3747         return exists;
3748 }
3749
3750 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3751                                          u64 start, unsigned long len)
3752 {
3753         struct extent_buffer *eb;
3754
3755         rcu_read_lock();
3756         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3757         if (eb && atomic_inc_not_zero(&eb->refs)) {
3758                 rcu_read_unlock();
3759                 mark_page_accessed(eb->first_page);
3760                 return eb;
3761         }
3762         rcu_read_unlock();
3763
3764         return NULL;
3765 }
3766
3767 void free_extent_buffer(struct extent_buffer *eb)
3768 {
3769         if (!eb)
3770                 return;
3771
3772         if (!atomic_dec_and_test(&eb->refs))
3773                 return;
3774
3775         WARN_ON(1);
3776 }
3777
3778 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3779                               struct extent_buffer *eb)
3780 {
3781         unsigned long i;
3782         unsigned long num_pages;
3783         struct page *page;
3784
3785         num_pages = num_extent_pages(eb->start, eb->len);
3786
3787         for (i = 0; i < num_pages; i++) {
3788                 page = extent_buffer_page(eb, i);
3789                 if (!PageDirty(page))
3790                         continue;
3791
3792                 lock_page(page);
3793                 WARN_ON(!PagePrivate(page));
3794
3795                 set_page_extent_mapped(page);
3796                 if (i == 0)
3797                         set_page_extent_head(page, eb->len);
3798
3799                 clear_page_dirty_for_io(page);
3800                 spin_lock_irq(&page->mapping->tree_lock);
3801                 if (!PageDirty(page)) {
3802                         radix_tree_tag_clear(&page->mapping->page_tree,
3803                                                 page_index(page),
3804                                                 PAGECACHE_TAG_DIRTY);
3805                 }
3806                 spin_unlock_irq(&page->mapping->tree_lock);
3807                 ClearPageError(page);
3808                 unlock_page(page);
3809         }
3810         return 0;
3811 }
3812
3813 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3814                              struct extent_buffer *eb)
3815 {
3816         unsigned long i;
3817         unsigned long num_pages;
3818         int was_dirty = 0;
3819
3820         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3821         num_pages = num_extent_pages(eb->start, eb->len);
3822         for (i = 0; i < num_pages; i++)
3823                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3824         return was_dirty;
3825 }
3826
3827 static int __eb_straddles_pages(u64 start, u64 len)
3828 {
3829         if (len < PAGE_CACHE_SIZE)
3830                 return 1;
3831         if (start & (PAGE_CACHE_SIZE - 1))
3832                 return 1;
3833         if ((start + len) & (PAGE_CACHE_SIZE - 1))
3834                 return 1;
3835         return 0;
3836 }
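/*
 * Annotation (not part of the original source): a buffer "straddles"
 * pages when it is smaller than a page or misaligned at either end, i.e.
 * it shares at least one page with other data.  For those buffers the
 * helpers below also track uptodate state in the io tree, since per-page
 * flags alone cannot describe a sub-page range.
 */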
3837
3838 static int eb_straddles_pages(struct extent_buffer *eb)
3839 {
3840         return __eb_straddles_pages(eb->start, eb->len);
3841 }
3842
3843 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3844                                 struct extent_buffer *eb,
3845                                 struct extent_state **cached_state)
3846 {
3847         unsigned long i;
3848         struct page *page;
3849         unsigned long num_pages;
3850
3851         num_pages = num_extent_pages(eb->start, eb->len);
3852         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3853
3854         if (eb_straddles_pages(eb)) {
3855                 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3856                                       cached_state, GFP_NOFS);
3857         }
3858         for (i = 0; i < num_pages; i++) {
3859                 page = extent_buffer_page(eb, i);
3860                 if (page)
3861                         ClearPageUptodate(page);
3862         }
3863         return 0;
3864 }
3865
3866 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3867                                 struct extent_buffer *eb)
3868 {
3869         unsigned long i;
3870         struct page *page;
3871         unsigned long num_pages;
3872
3873         num_pages = num_extent_pages(eb->start, eb->len);
3874
3875         if (eb_straddles_pages(eb)) {
3876                 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3877                                     NULL, GFP_NOFS);
3878         }
3879         for (i = 0; i < num_pages; i++) {
3880                 page = extent_buffer_page(eb, i);
3881                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3882                     ((i == num_pages - 1) &&
3883                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3884                         check_page_uptodate(tree, page);
3885                         continue;
3886                 }
3887                 SetPageUptodate(page);
3888         }
3889         return 0;
3890 }
3891
3892 int extent_range_uptodate(struct extent_io_tree *tree,
3893                           u64 start, u64 end)
3894 {
3895         struct page *page;
3896         int ret;
3897         int pg_uptodate = 1;
3898         int uptodate;
3899         unsigned long index;
3900
3901         if (__eb_straddles_pages(start, end - start + 1)) {
3902                 ret = test_range_bit(tree, start, end,
3903                                      EXTENT_UPTODATE, 1, NULL);
3904                 if (ret)
3905                         return 1;
3906         }
3907         while (start <= end) {
3908                 index = start >> PAGE_CACHE_SHIFT;
3909                 page = find_get_page(tree->mapping, index);
3910                 uptodate = PageUptodate(page);
3911                 page_cache_release(page);
3912                 if (!uptodate) {
3913                         pg_uptodate = 0;
3914                         break;
3915                 }
3916                 start += PAGE_CACHE_SIZE;
3917         }
3918         return pg_uptodate;
3919 }
3920
3921 int extent_buffer_uptodate(struct extent_io_tree *tree,
3922                            struct extent_buffer *eb,
3923                            struct extent_state *cached_state)
3924 {
3925         int ret = 0;
3926         unsigned long num_pages;
3927         unsigned long i;
3928         struct page *page;
3929         int pg_uptodate = 1;
3930
3931         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3932                 return 1;
3933
3934         if (eb_straddles_pages(eb)) {
3935                 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3936                                    EXTENT_UPTODATE, 1, cached_state);
3937                 if (ret)
3938                         return ret;
3939         }
3940
3941         num_pages = num_extent_pages(eb->start, eb->len);
3942         for (i = 0; i < num_pages; i++) {
3943                 page = extent_buffer_page(eb, i);
3944                 if (!PageUptodate(page)) {
3945                         pg_uptodate = 0;
3946                         break;
3947                 }
3948         }
3949         return pg_uptodate;
3950 }
3951
3952 int read_extent_buffer_pages(struct extent_io_tree *tree,
3953                              struct extent_buffer *eb, u64 start, int wait,
3954                              get_extent_t *get_extent, int mirror_num)
3955 {
3956         unsigned long i;
3957         unsigned long start_i;
3958         struct page *page;
3959         int err;
3960         int ret = 0;
3961         int locked_pages = 0;
3962         int all_uptodate = 1;
3963         int inc_all_pages = 0;
3964         unsigned long num_pages;
3965         struct bio *bio = NULL;
3966         unsigned long bio_flags = 0;
3967
3968         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3969                 return 0;
3970
3971         if (eb_straddles_pages(eb)) {
3972                 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3973                                    EXTENT_UPTODATE, 1, NULL)) {
3974                         return 0;
3975                 }
3976         }
3977
3978         if (start) {
3979                 WARN_ON(start < eb->start);
3980                 start_i = (start >> PAGE_CACHE_SHIFT) -
3981                         (eb->start >> PAGE_CACHE_SHIFT);
3982         } else {
3983                 start_i = 0;
3984         }
3985
3986         num_pages = num_extent_pages(eb->start, eb->len);
3987         for (i = start_i; i < num_pages; i++) {
3988                 page = extent_buffer_page(eb, i);
3989                 if (wait == WAIT_NONE) {
3990                         if (!trylock_page(page))
3991                                 goto unlock_exit;
3992                 } else {
3993                         lock_page(page);
3994                 }
3995                 locked_pages++;
3996                 if (!PageUptodate(page))
3997                         all_uptodate = 0;
3998         }
3999         if (all_uptodate) {
4000                 if (start_i == 0)
4001                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4002                 goto unlock_exit;
4003         }
4004
4005         for (i = start_i; i < num_pages; i++) {
4006                 page = extent_buffer_page(eb, i);
4007
4008                 WARN_ON(!PagePrivate(page));
4009
4010                 set_page_extent_mapped(page);
4011                 if (i == 0)
4012                         set_page_extent_head(page, eb->len);
4013
4014                 if (inc_all_pages)
4015                         page_cache_get(page);
4016                 if (!PageUptodate(page)) {
4017                         if (start_i == 0)
4018                                 inc_all_pages = 1;
4019                         ClearPageError(page);
4020                         err = __extent_read_full_page(tree, page,
4021                                                       get_extent, &bio,
4022                                                       mirror_num, &bio_flags);
4023                         if (err)
4024                                 ret = err;
4025                 } else {
4026                         unlock_page(page);
4027                 }
4028         }
4029
4030         if (bio)
4031                 submit_one_bio(READ, bio, mirror_num, bio_flags);
4032
4033         if (ret || wait != WAIT_COMPLETE)
4034                 return ret;
4035
4036         for (i = start_i; i < num_pages; i++) {
4037                 page = extent_buffer_page(eb, i);
4038                 wait_on_page_locked(page);
4039                 if (!PageUptodate(page))
4040                         ret = -EIO;
4041         }
4042
4043         if (!ret)
4044                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4045         return ret;
4046
4047 unlock_exit:
4048         i = start_i;
4049         while (locked_pages > 0) {
4050                 page = extent_buffer_page(eb, i);
4051                 i++;
4052                 unlock_page(page);
4053                 locked_pages--;
4054         }
4055         return ret;
4056 }
4057
4058 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4059                         unsigned long start,
4060                         unsigned long len)
4061 {
4062         size_t cur;
4063         size_t offset;
4064         struct page *page;
4065         char *kaddr;
4066         char *dst = (char *)dstv;
4067         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4068         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4069
4070         WARN_ON(start > eb->len);
4071         WARN_ON(start + len > eb->start + eb->len);
4072
4073         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4074
4075         while (len > 0) {
4076                 page = extent_buffer_page(eb, i);
4077
4078                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4079                 kaddr = page_address(page);
4080                 memcpy(dst, kaddr + offset, cur);
4081
4082                 dst += cur;
4083                 len -= cur;
4084                 offset = 0;
4085                 i++;
4086         }
4087 }
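/*
 * Worked example (annotation, not part of the original source): for an eb
 * whose start_offset is 2048 on 4K pages, read_extent_buffer(eb, dst,
 * 1500, 3000) copies 548 bytes from page 0 at offset 3548, then the
 * remaining 2452 bytes from the start of page 1 -- resetting offset to 0
 * after the first iteration is what makes the loop page-crossing safe.
 */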
4088
4089 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4090                                unsigned long min_len, char **map,
4091                                unsigned long *map_start,
4092                                unsigned long *map_len)
4093 {
4094         size_t offset = start & (PAGE_CACHE_SIZE - 1);
4095         char *kaddr;
4096         struct page *p;
4097         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4098         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4099         unsigned long end_i = (start_offset + start + min_len - 1) >>
4100                 PAGE_CACHE_SHIFT;
4101
4102         if (i != end_i)
4103                 return -EINVAL;
4104
4105         if (i == 0) {
4106                 offset = start_offset;
4107                 *map_start = 0;
4108         } else {
4109                 offset = 0;
4110                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4111         }
4112
4113         if (start + min_len > eb->len) {
4114                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4115                        "wanted %lu %lu\n", (unsigned long long)eb->start,
4116                        eb->len, start, min_len);
4117                 WARN_ON(1);
4118                 return -EINVAL;
4119         }
4120
4121         p = extent_buffer_page(eb, i);
4122         kaddr = page_address(p);
4123         *map = kaddr + offset;
4124         *map_len = PAGE_CACHE_SIZE - offset;
4125         return 0;
4126 }
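/*
 * Annotation (not part of the original source): map_private_extent_buffer()
 * only succeeds when [start, start + min_len) sits inside one page.  E.g.
 * with start_offset == 0 on 4K pages, start == 4000 and min_len == 200
 * span pages 0 and 1 (i != end_i), so it returns -EINVAL and callers
 * typically fall back to the copying read_extent_buffer() path.
 */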
4127
4128 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4129                           unsigned long start,
4130                           unsigned long len)
4131 {
4132         size_t cur;
4133         size_t offset;
4134         struct page *page;
4135         char *kaddr;
4136         char *ptr = (char *)ptrv;
4137         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4138         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4139         int ret = 0;
4140
4141         WARN_ON(start > eb->len);
4142         WARN_ON(start + len > eb->start + eb->len);
4143
4144         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4145
4146         while (len > 0) {
4147                 page = extent_buffer_page(eb, i);
4148
4149                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4150
4151                 kaddr = page_address(page);
4152                 ret = memcmp(ptr, kaddr + offset, cur);
4153                 if (ret)
4154                         break;
4155
4156                 ptr += cur;
4157                 len -= cur;
4158                 offset = 0;
4159                 i++;
4160         }
4161         return ret;
4162 }
4163
4164 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4165                          unsigned long start, unsigned long len)
4166 {
4167         size_t cur;
4168         size_t offset;
4169         struct page *page;
4170         char *kaddr;
4171         char *src = (char *)srcv;
4172         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4173         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4174
4175         WARN_ON(start > eb->len);
4176         WARN_ON(start + len > eb->start + eb->len);
4177
4178         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4179
4180         while (len > 0) {
4181                 page = extent_buffer_page(eb, i);
4182                 WARN_ON(!PageUptodate(page));
4183
4184                 cur = min(len, PAGE_CACHE_SIZE - offset);
4185                 kaddr = page_address(page);
4186                 memcpy(kaddr + offset, src, cur);
4187
4188                 src += cur;
4189                 len -= cur;
4190                 offset = 0;
4191                 i++;
4192         }
4193 }
4194
4195 void memset_extent_buffer(struct extent_buffer *eb, char c,
4196                           unsigned long start, unsigned long len)
4197 {
4198         size_t cur;
4199         size_t offset;
4200         struct page *page;
4201         char *kaddr;
4202         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4203         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4204
4205         WARN_ON(start > eb->len);
4206         WARN_ON(start + len > eb->start + eb->len);
4207
4208         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4209
4210         while (len > 0) {
4211                 page = extent_buffer_page(eb, i);
4212                 WARN_ON(!PageUptodate(page));
4213
4214                 cur = min(len, PAGE_CACHE_SIZE - offset);
4215                 kaddr = page_address(page);
4216                 memset(kaddr + offset, c, cur);
4217
4218                 len -= cur;
4219                 offset = 0;
4220                 i++;
4221         }
4222 }
4223
4224 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4225                         unsigned long dst_offset, unsigned long src_offset,
4226                         unsigned long len)
4227 {
4228         u64 dst_len = dst->len;
4229         size_t cur;
4230         size_t offset;
4231         struct page *page;
4232         char *kaddr;
4233         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4234         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4235
4236         WARN_ON(src->len != dst_len);
4237
4238         offset = (start_offset + dst_offset) &
4239                 ((unsigned long)PAGE_CACHE_SIZE - 1);
4240
4241         while (len > 0) {
4242                 page = extent_buffer_page(dst, i);
4243                 WARN_ON(!PageUptodate(page));
4244
4245                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4246
4247                 kaddr = page_address(page);
4248                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4249
4250                 src_offset += cur;
4251                 len -= cur;
4252                 offset = 0;
4253                 i++;
4254         }
4255 }
4256
static void move_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = page_address(dst_page);

        if (dst_page == src_page) {
                memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
        } else {
                char *src_kaddr = page_address(src_page);

                memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        }
}

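/*
 * areas_overlap - return true if the ranges [src, src + len) and
 * [dst, dst + len) intersect.  Two equal-length ranges overlap exactly
 * when their starts are less than @len apart: src = 0, dst = 3, len = 4
 * gives a distance of 3 < 4, and the ranges share byte 3.
 */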
static inline bool areas_overlap(unsigned long src, unsigned long dst,
                                 unsigned long len)
{
        unsigned long distance = (src > dst) ? src - dst : dst - src;

        return distance < len;
}

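/*
 * copy_pages - forward-copy helper for one chunk of a buffer copy.  A
 * same-page chunk may overlap when the caller walks the buffer from low
 * to high with the destination below the source, so memmove() covers that
 * case; an overlapping chunk with the destination above the source can
 * only be handled by the backwards walk in memmove_extent_buffer(), hence
 * the BUG_ON.
 */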
static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = page_address(dst_page);
        char *src_kaddr;

        if (dst_page != src_page) {
                src_kaddr = page_address(src_page);
                memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
                return;
        }

        BUG_ON(areas_overlap(src_off, dst_off, len) && dst_off > src_off);
        memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
}

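/*
 * memcpy_extent_buffer - copy @len bytes from @src_offset to @dst_offset
 * inside the same extent buffer, walking from low to high offsets.  Each
 * chunk is clipped so that it fits in a single source page and a single
 * destination page.  This walk is safe when the ranges do not overlap, or
 * when the destination starts below the source; anything else must go
 * through memmove_extent_buffer().
 */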
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
                       "len %lu dst len %lu\n", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
                       "len %lu dst len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }

        while (len > 0) {
                dst_off_in_page = (start_offset + dst_offset) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_offset) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);

                dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
                                               src_off_in_page));
                cur = min_t(unsigned long, cur,
                        (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

                copy_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page, src_off_in_page, cur);

                src_offset += cur;
                dst_offset += cur;
                len -= cur;
        }
}

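/*
 * memmove_extent_buffer - overlap-safe version of memcpy_extent_buffer().
 * A forward walk is fine unless the ranges overlap with the destination
 * above the source; in that case copying low to high would overwrite
 * source bytes before they are read, so the copy runs from the last byte
 * down instead.  E.g. moving [0, 8) to [2, 10) low to high writes byte 0
 * into slot 2 before the original byte 2 has been read.
 */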
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        unsigned long dst_end = dst_offset + len - 1;
        unsigned long src_end = src_offset + len - 1;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
                       "len %lu dst len %lu\n", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
                       "len %lu dst len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
        /*
         * a forward copy is safe when there is no overlap at all and also
         * when the destination starts below the source, since every byte
         * is then read before its slot is overwritten
         */
        if (!areas_overlap(src_offset, dst_offset, len) ||
            dst_offset < src_offset) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
        while (len > 0) {
                dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

                dst_off_in_page = (start_offset + dst_end) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_end) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);

                cur = min_t(unsigned long, len, src_off_in_page + 1);
                cur = min(cur, dst_off_in_page + 1);
                move_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page - cur + 1,
                           src_off_in_page - cur + 1, cur);

                dst_end -= cur;
                src_end -= cur;
                len -= cur;
        }
}

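/*
 * btrfs_release_extent_buffer_rcu - RCU callback that frees an extent
 * buffer once all lockless readers are guaranteed to be done with it.
 */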
static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
        struct extent_buffer *eb =
                        container_of(head, struct extent_buffer, rcu_head);

        btrfs_release_extent_buffer(eb);
}

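/*
 * try_release_extent_buffer - attempt to drop the extent buffer attached
 * to @page.  Returns 1 when no buffer was present or the buffer could be
 * released, and 0 when it is still dirty or in use and must be kept.
 */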
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
        u64 start = page_offset(page);
        struct extent_buffer *eb;
        int ret = 1;

        spin_lock(&tree->buffer_lock);
        eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
        if (!eb) {
                spin_unlock(&tree->buffer_lock);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
                ret = 0;
                goto out;
        }

        /*
         * set @eb->refs to 0 if it is already 1, and then release the @eb.
         * Otherwise someone else still holds a reference and the buffer
         * has to stay.
         */
        if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
                ret = 0;
                goto out;
        }

        radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
out:
        spin_unlock(&tree->buffer_lock);

        /* at this point we can safely release the extent buffer */
        if (atomic_read(&eb->refs) == 0)
                call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
        return ret;
}