#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                       unsigned long extra_flags,
                                       void (*ctor)(void *, struct kmem_cache *,
                                                    unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#ifdef LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;
};

int __init extent_io_init(void)
{
        extent_state_cache = btrfs_cache_create("extent_state",
                                            sizeof(struct extent_state), 0,
                                            NULL);
        if (!extent_state_cache)
                return -ENOMEM;

        extent_buffer_cache = btrfs_cache_create("extent_buffers",
                                            sizeof(struct extent_buffer), 0,
                                            NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;

free_state_cache:
        kmem_cache_destroy(extent_state_cache);
        return -ENOMEM;
}

void extent_io_exit(void)
{
        struct extent_state *state;
        struct extent_buffer *eb;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
                printk(KERN_ERR "btrfs state leak: start %llu end %llu "
                       "state %lu in tree %p refs %d\n",
                       (unsigned long long)state->start,
                       (unsigned long long)state->end,
                       state->state, state->tree, atomic_read(&state->refs));
                list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);
        }

        while (!list_empty(&buffers)) {
                eb = list_entry(buffers.next, struct extent_buffer, leak_list);
                printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
                       "refs %d\n",
                       (unsigned long long)eb->start,
                       eb->len, atomic_read(&eb->refs));
                list_del(&eb->leak_list);
                kmem_cache_free(extent_buffer_cache, eb);
        }
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
                          struct address_space *mapping, gfp_t mask)
{
        tree->state.rb_node = NULL;
        tree->buffer.rb_node = NULL;
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        spin_lock_init(&tree->buffer_lock);
        tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);
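
/*
 * Example (illustrative sketch, not called from this file): the tree is
 * normally embedded in a larger object and initialized once, e.g. when
 * an in-memory btrfs inode is set up:
 *
 *      extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 *                          inode->i_mapping, GFP_NOFS);
 */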

struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
#ifdef LEAK_DEBUG
        unsigned long flags;
#endif

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
#ifdef LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&state->leak_list, &states);
        spin_unlock_irqrestore(&leak_lock, flags);
#endif
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
                unsigned long flags;
#endif
                WARN_ON(state->tree);
#ifdef LEAK_DEBUG
                spin_lock_irqsave(&leak_lock, flags);
                list_del(&state->leak_list);
                spin_unlock_irqrestore(&leak_lock, flags);
#endif
                kmem_cache_free(extent_state_cache, state);
        }
}
EXPORT_SYMBOL(free_extent_state);
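
/*
 * Example (illustrative): extent_state is reference counted.  A caller
 * that wants to keep using a state struct after dropping the tree lock
 * pins it first, exactly as wait_extent_bit() does below:
 *
 *      atomic_inc(&state->refs);
 *      ...
 *      free_extent_state(state);
 */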

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct tree_entry *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_root *root = &tree->state;
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset > prev_entry->end) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
                                          u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;

        ret = __etree_search(tree, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}

static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
                                          u64 offset, struct rb_node *node)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_buffer *eb;

        while (*p) {
                parent = *p;
                eb = rb_entry(parent, struct extent_buffer, rb_node);

                if (offset < eb->start)
                        p = &(*p)->rb_left;
                else if (offset > eb->start)
                        p = &(*p)->rb_right;
                else
                        return eb;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
                                           u64 offset)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node *n = root->rb_node;
        struct extent_buffer *eb;

        while (n) {
                eb = rb_entry(n, struct extent_buffer, rb_node);
                if (offset < eb->start)
                        n = n->rb_left;
                else if (offset > eb->start)
                        n = n->rb_right;
                else
                        return eb;
        }
        return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set (locked or
 * writeback) are not merged because the end_io handlers need to be
 * able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
                       struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & EXTENT_IOBITS)
                return 0;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        other->start = state->start;
                        state->tree = NULL;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        }
        return 0;
}
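
/*
 * Example (illustrative): if the tree holds [0, 4095] and [4096, 8191]
 * and both records carry exactly state == EXTENT_DIRTY, merge_state()
 * on either record collapses the pair into a single [0, 8191]
 * EXTENT_DIRTY record and frees the redundant extent_state.
 */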

static void set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state,
                         unsigned long bits)
{
        if (tree->ops && tree->ops->set_bit_hook) {
                tree->ops->set_bit_hook(tree->mapping->host, state->start,
                                        state->end, state->state, bits);
        }
}

static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state,
                           unsigned long bits)
{
        if (tree->ops && tree->ops->clear_bit_hook) {
                tree->ops->clear_bit_hook(tree->mapping->host, state->start,
                                          state->end, state->state, bits);
        }
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int bits)
{
        struct rb_node *node;

        if (end < start) {
                printk(KERN_ERR "btrfs end < start %llu %llu\n",
                       (unsigned long long)end,
                       (unsigned long long)start);
                WARN_ON(1);
        }
        if (bits & EXTENT_DIRTY)
                tree->dirty_bytes += end - start + 1;
        set_state_cb(tree, state, bits);
        state->state |= bits;
        state->start = start;
        state->end = end;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "btrfs found node %llu %llu on insert of "
                       "%llu %llu\n",
                       (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)start,
                       (unsigned long long)end);
                free_extent_state(state);
                return -EEXIST;
        }
        state->tree = tree;
        merge_state(tree, state);
        return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;

        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "btrfs found node %llu %llu on insert of "
                       "%llu %llu\n",
                       (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)prealloc->start,
                       (unsigned long long)prealloc->end);
                free_extent_state(prealloc);
                return -EEXIST;
        }
        prealloc->tree = tree;
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
                            struct extent_state *state, int bits, int wake,
                            int delete)
{
        int ret = state->state & bits;

        if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
        state->state &= ~bits;
        if (wake)
                wake_up(&state->wq);
        if (delete || state->state == 0) {
                if (state->tree) {
                        clear_state_cb(tree, state, state->state);
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err;
        int set = 0;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        start = state->end + 1;
                        set |= clear_state_bit(tree, state, bits,
                                        wake, delete);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                if (wake)
                        wake_up(&state->wq);
                set |= clear_state_bit(tree, prealloc, bits,
                                       wake, delete);
                prealloc = NULL;
                goto out;
        }

        start = state->end + 1;
        set |= clear_state_bit(tree, state, bits, wake, delete);
        goto search_again;

out:
        spin_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        spin_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
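
/*
 * Example (illustrative): clearing a bit from part of an existing
 * record.  If the tree holds a single [0, 8191] EXTENT_DIRTY record,
 *
 *      clear_extent_bit(tree, 4096, 8191, EXTENT_DIRTY, 0, 0, GFP_NOFS);
 *
 * splits it, leaves [0, 4095] dirty, frees the now bit-less
 * [4096, 8191] record, and returns > 0 because EXTENT_DIRTY was set
 * somewhere in the range.
 */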

static int wait_on_state(struct extent_io_tree *tree,
                         struct extent_state *state)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irq(&tree->lock);
        schedule();
        spin_lock_irq(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        spin_lock_irq(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(tree, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                if (need_resched()) {
                        spin_unlock_irq(&tree->lock);
                        cond_resched();
                        spin_lock_irq(&tree->lock);
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        return 0;
}
EXPORT_SYMBOL(wait_extent_bit);
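
/*
 * Example (illustrative): wait_on_extent_writeback() further down is
 * just this call with a fixed mask, blocking until no extent in the
 * range still has EXTENT_WRITEBACK set:
 *
 *      wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
 */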

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int bits)
{
        if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        set_state_cb(tree, state, bits);
        state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
                   int exclusive, u64 *failed_start, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err = 0;
        int set;
        u64 last_start;
        u64 last_end;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                err = insert_state(tree, prealloc, start, end, bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }

        state = rb_entry(node, struct extent_state, rb_node);
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                set = state->state & bits;
                if (set && exclusive) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }
                set_state_bits(tree, state, bits);
                start = state->end + 1;
                merge_state(tree, state);
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, bits);
                        start = state->end + 1;
                        merge_state(tree, state);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                if (err)
                        goto out;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, bits);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(set_extent_bit);
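
/*
 * Example (illustrative): lock_extent() below uses the exclusive mode.
 * If part of the range already has EXTENT_LOCKED set, the call fails
 * with -EEXIST and reports where the collision starts:
 *
 *      u64 failed_start;
 *      int err;
 *
 *      err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *                           &failed_start, GFP_NOFS);
 *
 * On -EEXIST the caller can wait for EXTENT_LOCKED to clear at
 * failed_start and retry, which is exactly what lock_extent() does.
 */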

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_DIRTY,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                          gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
                           gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        int err;
        u64 failed_start;

        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                                     &failed_start, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                  gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
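
/*
 * Example (illustrative sketch): the usual pattern for working on a
 * byte range of an inode's io_tree is lock, operate, unlock, with an
 * inclusive [start, end]:
 *
 *      lock_extent(tree, start, end, GFP_NOFS);
 *      ... read or modify the extents in [start, end] ...
 *      unlock_extent(tree, start, end, GFP_NOFS);
 */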

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                __set_page_dirty_nobuffers(page);
                page_cache_release(page);
                index++;
        }
        set_extent_dirty(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        set_extent_writeback(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 1;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        *start_ret = state->start;
                        *end_ret = state->end;
                        ret = 0;
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
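
/*
 * Example (illustrative): scanning every dirty extent record from
 * offset 0 onward, one record at a time:
 *
 *      u64 start = 0, found_start, found_end;
 *
 *      while (!find_first_extent_bit(tree, start, &found_start,
 *                                    &found_end, EXTENT_DIRTY)) {
 *              ... process [found_start, found_end] ...
 *              start = found_end + 1;
 *      }
 */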

/*
 * find the first state struct with 'bits' set after 'start'.  Unlike
 * find_first_extent_bit, this takes no locks itself, so the caller is
 * expected to hold tree->lock across the call and while it uses the
 * returned state.
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits)
{
        struct rb_node *node;
        struct extent_state *state;

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits))
                        return state;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

u64 find_lock_delalloc_range(struct extent_io_tree *tree,
                             u64 *start, u64 *end, u64 max_bytes)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
search_again:
        node = tree_search(tree, cur_start);
        if (!node) {
                if (!found)
                        *end = (u64)-1;
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && state->start != cur_start)
                        goto out;
                if (!(state->state & EXTENT_DELALLOC)) {
                        if (!found)
                                *end = state->end;
                        goto out;
                }
                if (!found) {
                        struct extent_state *prev_state;
                        struct rb_node *prev_node = node;

                        while (1) {
                                prev_node = rb_prev(prev_node);
                                if (!prev_node)
                                        break;
                                prev_state = rb_entry(prev_node,
                                                      struct extent_state,
                                                      rb_node);
                                if (!(prev_state->state & EXTENT_DELALLOC))
                                        break;
                                state = prev_state;
                                node = prev_node;
                        }
                }
                if (state->state & EXTENT_LOCKED) {
                        DEFINE_WAIT(wait);
                        atomic_inc(&state->refs);
                        prepare_to_wait(&state->wq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&tree->lock);
                        schedule();
                        spin_lock_irq(&tree->lock);
                        finish_wait(&state->wq, &wait);
                        free_extent_state(state);
                        goto search_again;
                }
                set_state_cb(tree, state, EXTENT_LOCKED);
                state->state |= EXTENT_LOCKED;
                if (!found)
                        *start = state->start;
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return found;
}
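
/*
 * Example (illustrative, assuming max_bytes is large enough): with
 * delalloc records [0, 4095] and [4096, 12287] and *start == 4096 on
 * entry, the backward walk above absorbs [0, 4095] first, so the
 * function returns with *start == 0, *end == 12287, every record in
 * the run marked EXTENT_LOCKED, and a nonzero return value.
 */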

u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
                     unsigned long bits)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
        int found = 0;

        if (search_end <= cur_start) {
                printk(KERN_ERR "btrfs search_end %llu start %llu\n",
                       (unsigned long long)search_end,
                       (unsigned long long)cur_start);
                WARN_ON(1);
                return 0;
        }

        spin_lock_irq(&tree->lock);
        if (cur_start == 0 && bits == EXTENT_DIRTY) {
                total_bytes = tree->dirty_bytes;
                goto out;
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
                if (state->end >= cur_start && (state->state & bits)) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
                                *start = state->start;
                                found = 1;
                        }
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return total_bytes;
}
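
/*
 * Example (illustrative): counting the dirty bytes between 4096 and
 * 1MiB - 1; only the parts of each record inside the range count:
 *
 *      u64 start = 4096;
 *      u64 bytes = count_range_bits(tree, &start, 1024 * 1024 - 1,
 *                                   (u64)-1, EXTENT_DIRTY);
 *
 * On return, start points at the first matching record.  The
 * cur_start == 0 && bits == EXTENT_DIRTY case short-circuits above and
 * returns the tree-wide dirty_bytes counter instead of walking.
 */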

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;
        int err;

        while (index <= end_index) {
                page = grab_cache_page(tree->mapping, index);
                if (!page) {
                        err = -ENOMEM;
                        goto failed;
                }
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto failed;
                }
                index++;
        }
        lock_extent(tree, start, end, GFP_NOFS);
        return 0;

failed:
        /*
         * we failed above in getting the page at 'index', so we undo here
         * up to but not including the page at 'index'
         */
        end_index = index;
        index = start >> PAGE_CACHE_SHIFT;
        while (index < end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        unlock_extent(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        state->private = private;
out:
        spin_unlock_irq(&tree->lock);
        return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        *private = state->private;
out:
        spin_unlock_irq(&tree->lock);
        return ret;
}
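
/*
 * Example (illustrative): the private field lets a caller stash one u64
 * per state record, keyed by the record's exact start offset; btrfs
 * uses it elsewhere to remember a checksum for a read.  Both calls
 * return -ENOENT if no record starts at 'start':
 *
 *      u64 val;
 *
 *      set_state_private(tree, start, val);
 *      ...
 *      get_state_private(tree, start, &val);
 */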

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   int bits, int filled)
{
        struct extent_state *state = NULL;
        struct rb_node *node;
        int bitset = 0;
        unsigned long flags;

        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, start);
        while (node && start <= end) {
                state = rb_entry(node, struct extent_state, rb_node);

                if (filled && state->start > start) {
                        bitset = 0;
                        break;
                }

                if (state->start > end)
                        break;

                if (state->state & bits) {
                        bitset = 1;
                        if (!filled)
                                break;
                } else if (filled) {
                        bitset = 0;
                        break;
                }
                start = state->end + 1;
                if (start > end)
                        break;
                node = rb_next(node);
                if (!node) {
                        if (filled)
                                bitset = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return bitset;
}
EXPORT_SYMBOL(test_range_bit);
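
/*
 * Example (illustrative): check_page_uptodate() below asks whether one
 * whole page is covered by EXTENT_UPTODATE records, using filled == 1:
 *
 *      test_range_bit(tree, start, start + PAGE_CACHE_SIZE - 1,
 *                     EXTENT_UPTODATE, 1);
 *
 * With filled == 0 the same call answers the weaker question of
 * whether any byte in the page is marked uptodate.
 */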

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
                               struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
                SetPageUptodate(page);
        return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
                             struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
                unlock_page(page);
        return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
                             struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
                end_page_writeback(page);
        return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
                                   unsigned int bytes_done, int err)
#endif
{
        int uptodate = err == 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
        int whole_page;
        int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif
        do {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;

                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                         bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                if (tree->ops && tree->ops->writepage_end_io_hook) {
                        ret = tree->ops->writepage_end_io_hook(page, start,
                                                       end, NULL, uptodate);
                        if (ret)
                                uptodate = 0;
                }

                if (!uptodate && tree->ops &&
                    tree->ops->writepage_io_failed_hook) {
                        ret = tree->ops->writepage_io_failed_hook(bio, page,
                                                         start, end, NULL);
                        if (ret == 0) {
                                uptodate = (err == 0);
                                continue;
                        }
                }

                if (!uptodate) {
                        clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
                        ClearPageUptodate(page);
                        SetPageError(page);
                }

                clear_extent_writeback(tree, start, end, GFP_ATOMIC);

                if (whole_page)
                        end_page_writeback(page);
                else
                        check_page_writeback(tree, page);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
                                   unsigned int bytes_done, int err)
#endif
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
        int whole_page;
        int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;

                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              NULL);
                        if (ret)
                                uptodate = 0;
                }
                if (!uptodate && tree->ops &&
                    tree->ops->readpage_io_failed_hook) {
                        ret = tree->ops->readpage_io_failed_hook(bio, page,
                                                         start, end, NULL);
                        if (ret == 0) {
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                continue;
                        }
                }

                if (uptodate)
                        set_extent_uptodate(tree, start, end,
                                            GFP_ATOMIC);
                unlock_extent(tree, start, end, GFP_ATOMIC);

                if (whole_page) {
                        if (uptodate) {
                                SetPageUptodate(page);
                        } else {
                                ClearPageUptodate(page);
                                SetPageError(page);
                        }
                        unlock_page(page);
                } else {
                        if (uptodate) {
                                check_page_uptodate(tree, page);
                        } else {
                                ClearPageUptodate(page);
                                SetPageError(page);
                        }
                        check_page_locked(tree, page);
                }
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}
1563
1564 /*
1565  * IO done from prepare_write is pretty simple, we just unlock
1566  * the structs in the extent tree when done, and set the uptodate bits
1567  * as appropriate.
1568  */
1569 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1570 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1571 #else
1572 static int end_bio_extent_preparewrite(struct bio *bio,
1573                                        unsigned int bytes_done, int err)
1574 #endif
1575 {
1576         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1577         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1578         struct extent_io_tree *tree;
1579         u64 start;
1580         u64 end;
1581
1582 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1583         if (bio->bi_size)
1584                 return 1;
1585 #endif
1586
1587         do {
1588                 struct page *page = bvec->bv_page;
1589                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1590
1591                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1592                         bvec->bv_offset;
1593                 end = start + bvec->bv_len - 1;
1594
1595                 if (--bvec >= bio->bi_io_vec)
1596                         prefetchw(&bvec->bv_page->flags);
1597
1598                 if (uptodate) {
1599                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1600                 } else {
1601                         ClearPageUptodate(page);
1602                         SetPageError(page);
1603                 }
1604
1605                 unlock_extent(tree, start, end, GFP_ATOMIC);
1606
1607         } while (bvec >= bio->bi_io_vec);
1608
1609         bio_put(bio);
1610 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1611         return 0;
1612 #endif
1613 }
1614
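/*
 * bio allocation helper.  If the first allocation fails and the caller
 * is already in memory reclaim (PF_MEMALLOC), retry with the vector
 * count halved until an allocation succeeds or nr_vecs reaches zero,
 * so e.g. a request for 256 vecs may come back as a 128, 64, ... vec
 * bio under memory pressure.
 */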
1615 static struct bio *
1616 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1617                  gfp_t gfp_flags)
1618 {
1619         struct bio *bio;
1620
1621         bio = bio_alloc(gfp_flags, nr_vecs);
1622
1623         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1624                 while (!bio && (nr_vecs /= 2))
1625                         bio = bio_alloc(gfp_flags, nr_vecs);
1626         }
1627
1628         if (bio) {
1629                 bio->bi_size = 0;
1630                 bio->bi_bdev = bdev;
1631                 bio->bi_sector = first_sector;
1632         }
1633         return bio;
1634 }
1635
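/*
 * send a bio down to the device.  The extent state covering the last
 * page in the bio is looked up first as a sanity check that the range
 * is still in the tree (the end_io handlers depend on finding it
 * there), then the bio is handed to the submit_bio_hook if the tree
 * has one, or straight to submit_bio().  The bio_get/bio_put pair
 * keeps the bio alive long enough to test BIO_EOPNOTSUPP afterwards.
 */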
1636 static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
1637 {
1638         int ret = 0;
1639         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1640         struct page *page = bvec->bv_page;
1641         struct extent_io_tree *tree = bio->bi_private;
1642         struct rb_node *node;
1643         struct extent_state *state;
1644         u64 start;
1645         u64 end;
1646
1647         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1648         end = start + bvec->bv_len - 1;
1649
1650         spin_lock_irq(&tree->lock);
1651         node = __etree_search(tree, start, NULL, NULL);
1652         BUG_ON(!node);
1653         state = rb_entry(node, struct extent_state, rb_node);
        while (state->end < end) {
1655                 node = rb_next(node);
1656                 state = rb_entry(node, struct extent_state, rb_node);
1657         }
1658         BUG_ON(state->end != end);
1659         spin_unlock_irq(&tree->lock);
1660
1661         bio->bi_private = NULL;
1662
1663         bio_get(bio);
1664
1665         if (tree->ops && tree->ops->submit_bio_hook)
1666                 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1667                                            mirror_num);
1668         else
1669                 submit_bio(rw, bio);
1670         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1671                 ret = -EOPNOTSUPP;
1672         bio_put(bio);
1673         return ret;
1674 }
1675
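/*
 * try to add a page to an already building bio.  If *bio_ret holds a
 * partially built bio, the page is appended when it is physically
 * contiguous (sector follows the bio's current end), the
 * merge_bio_hook accepts it, and bio_add_page() can fit it; otherwise
 * the old bio is submitted and a fresh one is started for this page.
 * A caller that passes bio_ret must submit the final bio itself,
 * roughly:
 *
 *      struct bio *bio = NULL;
 *      ...
 *      submit_extent_page(READ, tree, page, sector, size, offset,
 *                         bdev, &bio, max_pages, end_io_func, 0);
 *      ...
 *      if (bio)
 *              submit_one_bio(READ, bio, 0);
 *
 * (illustrative sketch only; see extent_read_full_page below for the
 * real pattern)
 */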
1676 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1677                               struct page *page, sector_t sector,
1678                               size_t size, unsigned long offset,
1679                               struct block_device *bdev,
1680                               struct bio **bio_ret,
1681                               unsigned long max_pages,
1682                               bio_end_io_t end_io_func,
1683                               int mirror_num)
1684 {
1685         int ret = 0;
1686         struct bio *bio;
1687         int nr;
1688
1689         if (bio_ret && *bio_ret) {
1690                 bio = *bio_ret;
1691                 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1692                     (tree->ops && tree->ops->merge_bio_hook &&
1693                      tree->ops->merge_bio_hook(page, offset, size, bio)) ||
1694                     bio_add_page(bio, page, size, offset) < size) {
1695                         ret = submit_one_bio(rw, bio, mirror_num);
1696                         bio = NULL;
1697                 } else {
1698                         return 0;
1699                 }
1700         }
1701         nr = bio_get_nr_vecs(bdev);
1702         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
        if (!bio) {
                printk(KERN_ERR "failed to allocate bio nr %d\n", nr);
                return -ENOMEM;
        }

        bio_add_page(bio, page, size, offset);
1709         bio->bi_end_io = end_io_func;
1710         bio->bi_private = tree;
1711
1712         if (bio_ret) {
1713                 *bio_ret = bio;
1714         } else {
1715                 ret = submit_one_bio(rw, bio, mirror_num);
1716         }
1717
1718         return ret;
1719 }
1720
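/*
 * tag a page as belonging to the extent io code by stashing a magic
 * value in page->private.  The extra page reference taken here is
 * dropped again when the private state is torn down (in the
 * releasepage path, not shown here).
 */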
1721 void set_page_extent_mapped(struct page *page)
1722 {
1723         if (!PagePrivate(page)) {
1724                 SetPagePrivate(page);
1725                 page_cache_get(page);
1726                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1727         }
1728 }
1729
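/*
 * tag the first page of an extent buffer.  The buffer length is packed
 * into the upper bits of page->private alongside the FIRST_PAGE flag;
 * shifting len by 2 keeps it clear of the low flag bits.  For example,
 * a 16k buffer stores EXTENT_PAGE_PRIVATE_FIRST_PAGE | (16384 << 2).
 */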
1730 void set_page_extent_head(struct page *page, unsigned long len)
1731 {
        set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | (len << 2));
1733 }
1734
/*
 * basic readpage implementation.  Locked extent state structs are
 * inserted into the tree and then removed when the IO is done (by the
 * end_io handlers)
 */
1740 static int __extent_read_full_page(struct extent_io_tree *tree,
1741                                    struct page *page,
1742                                    get_extent_t *get_extent,
1743                                    struct bio **bio, int mirror_num)
1744 {
1745         struct inode *inode = page->mapping->host;
1746         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1747         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1748         u64 end;
1749         u64 cur = start;
1750         u64 extent_offset;
1751         u64 last_byte = i_size_read(inode);
1752         u64 block_start;
1753         u64 cur_end;
1754         sector_t sector;
1755         struct extent_map *em;
1756         struct block_device *bdev;
1757         int ret;
1758         int nr = 0;
1759         size_t page_offset = 0;
1760         size_t iosize;
1761         size_t blocksize = inode->i_sb->s_blocksize;
1762
1763         set_page_extent_mapped(page);
1764
1765         end = page_end;
1766         lock_extent(tree, start, end, GFP_NOFS);
1767
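        /*
         * walk the range a block at a time: zero past EOF and holes,
         * skip ranges the get_extent callback already filled, and
         * batch everything else into read bios
         */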
1768         while (cur <= end) {
1769                 if (cur >= last_byte) {
1770                         char *userpage;
1771                         iosize = PAGE_CACHE_SIZE - page_offset;
1772                         userpage = kmap_atomic(page, KM_USER0);
1773                         memset(userpage + page_offset, 0, iosize);
1774                         flush_dcache_page(page);
1775                         kunmap_atomic(userpage, KM_USER0);
1776                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1777                                             GFP_NOFS);
1778                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1779                         break;
1780                 }
1781                 em = get_extent(inode, page, page_offset, cur,
1782                                 end - cur + 1, 0);
1783                 if (IS_ERR(em) || !em) {
1784                         SetPageError(page);
1785                         unlock_extent(tree, cur, end, GFP_NOFS);
1786                         break;
1787                 }
1788                 extent_offset = cur - em->start;
                if (extent_map_end(em) <= cur) {
                        printk(KERN_ERR "bad mapping em [%Lu %Lu] cur %Lu\n",
                               em->start, extent_map_end(em), cur);
                }
                BUG_ON(extent_map_end(em) <= cur);
                if (end < cur) {
                        printk(KERN_ERR "2bad mapping end %Lu cur %Lu\n",
                               end, cur);
                }
                BUG_ON(end < cur);
1797
1798                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1799                 cur_end = min(extent_map_end(em) - 1, end);
1800                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1801                 sector = (em->block_start + extent_offset) >> 9;
1802                 bdev = em->bdev;
1803                 block_start = em->block_start;
1804                 free_extent_map(em);
1805                 em = NULL;
1806
1807                 /* we've found a hole, just zero and go on */
1808                 if (block_start == EXTENT_MAP_HOLE) {
1809                         char *userpage;
1810                         userpage = kmap_atomic(page, KM_USER0);
1811                         memset(userpage + page_offset, 0, iosize);
1812                         flush_dcache_page(page);
1813                         kunmap_atomic(userpage, KM_USER0);
1814
1815                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1816                                             GFP_NOFS);
1817                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1818                         cur = cur + iosize;
1819                         page_offset += iosize;
1820                         continue;
1821                 }
1822                 /* the get_extent function already copied into the page */
1823                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1824                         check_page_uptodate(tree, page);
1825                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1826                         cur = cur + iosize;
1827                         page_offset += iosize;
1828                         continue;
1829                 }
1830                 /* we have an inline extent but it didn't get marked up
1831                  * to date.  Error out
1832                  */
1833                 if (block_start == EXTENT_MAP_INLINE) {
1834                         SetPageError(page);
1835                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1836                         cur = cur + iosize;
1837                         page_offset += iosize;
1838                         continue;
1839                 }
1840
1841                 ret = 0;
1842                 if (tree->ops && tree->ops->readpage_io_hook) {
1843                         ret = tree->ops->readpage_io_hook(page, cur,
1844                                                           cur + iosize - 1);
1845                 }
1846                 if (!ret) {
1847                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1848                         pnr -= page->index;
1849                         ret = submit_extent_page(READ, tree, page,
1850                                          sector, iosize, page_offset,
1851                                          bdev, bio, pnr,
1852                                          end_bio_extent_readpage, mirror_num);
1853                         nr++;
1854                 }
1855                 if (ret)
1856                         SetPageError(page);
1857                 cur = cur + iosize;
1858                 page_offset += iosize;
1859         }
1860         if (!nr) {
1861                 if (!PageError(page))
1862                         SetPageUptodate(page);
1863                 unlock_page(page);
1864         }
1865         return 0;
1866 }
1867
1868 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1869                             get_extent_t *get_extent)
1870 {
1871         struct bio *bio = NULL;
1872         int ret;
1873
1874         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
1875         if (bio)
1876                 submit_one_bio(READ, bio, 0);
1877         return ret;
1878 }
1879 EXPORT_SYMBOL(extent_read_full_page);
1880
1881 /*
1882  * the writepage semantics are similar to regular writepage.  extent
1883  * records are inserted to lock ranges in the tree, and as dirty areas
1884  * are found, they are marked writeback.  Then the lock bits are removed
1885  * and the end_io handler clears the writeback ranges
1886  */
1887 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1888                               void *data)
1889 {
1890         struct inode *inode = page->mapping->host;
1891         struct extent_page_data *epd = data;
1892         struct extent_io_tree *tree = epd->tree;
1893         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1894         u64 delalloc_start;
1895         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1896         u64 end;
1897         u64 cur = start;
1898         u64 extent_offset;
1899         u64 last_byte = i_size_read(inode);
1900         u64 block_start;
1901         u64 iosize;
1902         u64 unlock_start;
1903         sector_t sector;
1904         struct extent_map *em;
1905         struct block_device *bdev;
1906         int ret;
1907         int nr = 0;
1908         size_t pg_offset = 0;
1909         size_t blocksize;
1910         loff_t i_size = i_size_read(inode);
1911         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1912         u64 nr_delalloc;
1913         u64 delalloc_end;
1914
1915         WARN_ON(!PageLocked(page));
1916         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
1917         if (page->index > end_index ||
1918            (page->index == end_index && !pg_offset)) {
1919                 page->mapping->a_ops->invalidatepage(page, 0);
1920                 unlock_page(page);
1921                 return 0;
1922         }
1923
1924         if (page->index == end_index) {
1925                 char *userpage;
1926
1927                 userpage = kmap_atomic(page, KM_USER0);
1928                 memset(userpage + pg_offset, 0,
1929                        PAGE_CACHE_SIZE - pg_offset);
1930                 kunmap_atomic(userpage, KM_USER0);
1931                 flush_dcache_page(page);
1932         }
1933         pg_offset = 0;
1934
1935         set_page_extent_mapped(page);
1936
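        /*
         * before anything is written back, delalloc ranges touching
         * this page are handed to fill_delalloc for real allocation,
         * then the delalloc and lock bits are cleared again
         */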
1937         delalloc_start = start;
1938         delalloc_end = 0;
        while (delalloc_end < page_end) {
1940                 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1941                                                        &delalloc_end,
1942                                                        128 * 1024 * 1024);
1943                 if (nr_delalloc == 0) {
1944                         delalloc_start = delalloc_end + 1;
1945                         continue;
1946                 }
1947                 tree->ops->fill_delalloc(inode, delalloc_start,
1948                                          delalloc_end);
1949                 clear_extent_bit(tree, delalloc_start,
1950                                  delalloc_end,
1951                                  EXTENT_LOCKED | EXTENT_DELALLOC,
1952                                  1, 0, GFP_NOFS);
1953                 delalloc_start = delalloc_end + 1;
1954         }
1955         lock_extent(tree, start, page_end, GFP_NOFS);
1956         unlock_start = start;
1957
1958         if (tree->ops && tree->ops->writepage_start_hook) {
1959                 ret = tree->ops->writepage_start_hook(page, start, page_end);
1960                 if (ret == -EAGAIN) {
1961                         unlock_extent(tree, start, page_end, GFP_NOFS);
1962                         redirty_page_for_writepage(wbc, page);
1963                         unlock_page(page);
1964                         return 0;
1965                 }
1966         }
1967
1968         end = page_end;
        if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
                printk(KERN_ERR "found delalloc bits after lock_extent\n");
        }
1972
1973         if (last_byte <= start) {
1974                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1975                 unlock_extent(tree, start, page_end, GFP_NOFS);
1976                 if (tree->ops && tree->ops->writepage_end_io_hook)
1977                         tree->ops->writepage_end_io_hook(page, start,
1978                                                          page_end, NULL, 1);
1979                 unlock_start = page_end + 1;
1980                 goto done;
1981         }
1982
1983         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1984         blocksize = inode->i_sb->s_blocksize;
1985
1986         while (cur <= end) {
1987                 if (cur >= last_byte) {
1988                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1989                         unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1990                         if (tree->ops && tree->ops->writepage_end_io_hook)
1991                                 tree->ops->writepage_end_io_hook(page, cur,
1992                                                          page_end, NULL, 1);
1993                         unlock_start = page_end + 1;
1994                         break;
1995                 }
1996                 em = epd->get_extent(inode, page, pg_offset, cur,
1997                                      end - cur + 1, 1);
1998                 if (IS_ERR(em) || !em) {
1999                         SetPageError(page);
2000                         break;
2001                 }
2002
2003                 extent_offset = cur - em->start;
2004                 BUG_ON(extent_map_end(em) <= cur);
2005                 BUG_ON(end < cur);
2006                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2007                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2008                 sector = (em->block_start + extent_offset) >> 9;
2009                 bdev = em->bdev;
2010                 block_start = em->block_start;
2011                 free_extent_map(em);
2012                 em = NULL;
2013
2014                 if (block_start == EXTENT_MAP_HOLE ||
2015                     block_start == EXTENT_MAP_INLINE) {
2016                         clear_extent_dirty(tree, cur,
2017                                            cur + iosize - 1, GFP_NOFS);
2018
                        unlock_extent(tree, unlock_start, cur + iosize - 1,
                                      GFP_NOFS);
2021
2022                         if (tree->ops && tree->ops->writepage_end_io_hook)
2023                                 tree->ops->writepage_end_io_hook(page, cur,
2024                                                          cur + iosize - 1,
2025                                                          NULL, 1);
2026                         cur = cur + iosize;
2027                         pg_offset += iosize;
2028                         unlock_start = cur;
2029                         continue;
2030                 }
2031
2032                 /* leave this out until we have a page_mkwrite call */
2033                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2034                                    EXTENT_DIRTY, 0)) {
2035                         cur = cur + iosize;
2036                         pg_offset += iosize;
2037                         continue;
2038                 }
2039                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2040                 if (tree->ops && tree->ops->writepage_io_hook) {
2041                         ret = tree->ops->writepage_io_hook(page, cur,
2042                                                 cur + iosize - 1);
2043                 } else {
2044                         ret = 0;
2045                 }
2046                 if (ret) {
2047                         SetPageError(page);
2048                 } else {
2049                         unsigned long max_nr = end_index + 1;
2050
2051                         set_range_writeback(tree, cur, cur + iosize - 1);
                        if (!PageWriteback(page)) {
                                printk(KERN_WARNING "page %lu not writeback, "
                                       "cur %llu end %llu\n", page->index,
                                       (unsigned long long)cur,
                                       (unsigned long long)end);
                        }
2058
2059                         ret = submit_extent_page(WRITE, tree, page, sector,
2060                                                  iosize, pg_offset, bdev,
2061                                                  &epd->bio, max_nr,
2062                                                  end_bio_extent_writepage, 0);
2063                         if (ret)
2064                                 SetPageError(page);
2065                 }
2066                 cur = cur + iosize;
2067                 pg_offset += iosize;
2068                 nr++;
2069         }
2070 done:
2071         if (nr == 0) {
2072                 /* make sure the mapping tag for page dirty gets cleared */
2073                 set_page_writeback(page);
2074                 end_page_writeback(page);
2075         }
2076         if (unlock_start <= page_end)
2077                 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2078         unlock_page(page);
2079         return 0;
2080 }
2081
2082 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
2083 /* Taken directly from 2.6.23 with a mod for a lockpage hook */
2084 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2085                                 void *data);
2086 #endif
2087
2088 /**
2089  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2090  * @mapping: address space structure to write
2091  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2092  * @writepage: function called for each page
2093  * @data: data passed to writepage function
2094  *
2095  * If a page is already under I/O, write_cache_pages() skips it, even
2096  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2097  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2098  * and msync() need to guarantee that all the data which was dirty at the time
2099  * the call was made get new I/O started against them.  If wbc->sync_mode is
2100  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2101  * existing IO to complete.
2102  */
2103 int extent_write_cache_pages(struct extent_io_tree *tree,
2104                              struct address_space *mapping,
2105                              struct writeback_control *wbc,
2106                              writepage_t writepage, void *data)
2107 {
2108         struct backing_dev_info *bdi = mapping->backing_dev_info;
2109         int ret = 0;
2110         int done = 0;
2111         struct pagevec pvec;
2112         int nr_pages;
2113         pgoff_t index;
2114         pgoff_t end;            /* Inclusive */
2115         int scanned = 0;
2116         int range_whole = 0;
2117
2118         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2119                 wbc->encountered_congestion = 1;
2120                 return 0;
2121         }
2122
2123         pagevec_init(&pvec, 0);
2124         if (wbc->range_cyclic) {
2125                 index = mapping->writeback_index; /* Start from prev offset */
2126                 end = -1;
2127         } else {
2128                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2129                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2130                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2131                         range_whole = 1;
2132                 scanned = 1;
2133         }
2134 retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index,
                                                  (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
2139                 unsigned i;
2140
2141                 scanned = 1;
2142                 for (i = 0; i < nr_pages; i++) {
2143                         struct page *page = pvec.pages[i];
2144
2145                         /*
2146                          * At this point we hold neither mapping->tree_lock nor
2147                          * lock on the page itself: the page may be truncated or
2148                          * invalidated (changing page->mapping to NULL), or even
2149                          * swizzled back from swapper_space to tmpfs file
2150                          * mapping
2151                          */
2152                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2153                                 tree->ops->write_cache_pages_lock_hook(page);
2154                         else
2155                                 lock_page(page);
2156
2157                         if (unlikely(page->mapping != mapping)) {
2158                                 unlock_page(page);
2159                                 continue;
2160                         }
2161
2162                         if (!wbc->range_cyclic && page->index > end) {
2163                                 done = 1;
2164                                 unlock_page(page);
2165                                 continue;
2166                         }
2167
2168                         if (wbc->sync_mode != WB_SYNC_NONE)
2169                                 wait_on_page_writeback(page);
2170
2171                         if (PageWriteback(page) ||
2172                             !clear_page_dirty_for_io(page)) {
2173                                 unlock_page(page);
2174                                 continue;
2175                         }
2176
2177                         ret = (*writepage)(page, wbc, data);
2178
2179                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2180                                 unlock_page(page);
2181                                 ret = 0;
2182                         }
2183                         if (ret || (--(wbc->nr_to_write) <= 0))
2184                                 done = 1;
2185                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2186                                 wbc->encountered_congestion = 1;
2187                                 done = 1;
2188                         }
2189                 }
2190                 pagevec_release(&pvec);
2191                 cond_resched();
2192         }
2193         if (!scanned && !done) {
2194                 /*
2195                  * We hit the last page and there is more work to be done: wrap
2196                  * back to the start of the file
2197                  */
2198                 scanned = 1;
2199                 index = 0;
2200                 goto retry;
2201         }
2202         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2203                 mapping->writeback_index = index;
2204 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2205         if (wbc->range_cont)
                wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2207 #endif
2208         return ret;
2209 }
2210 EXPORT_SYMBOL(extent_write_cache_pages);
2211
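/*
 * write out a single page, and opportunistically push some of the
 * dirty pages that follow it.  The private wbc below starts just past
 * this page and is capped at 64 pages, so one writepage call does not
 * turn into a full flush of the file.
 */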
2212 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2213                           get_extent_t *get_extent,
2214                           struct writeback_control *wbc)
2215 {
2216         int ret;
2217         struct address_space *mapping = page->mapping;
2218         struct extent_page_data epd = {
2219                 .bio = NULL,
2220                 .tree = tree,
2221                 .get_extent = get_extent,
2222         };
2223         struct writeback_control wbc_writepages = {
2224                 .bdi            = wbc->bdi,
2225                 .sync_mode      = WB_SYNC_NONE,
2226                 .older_than_this = NULL,
2227                 .nr_to_write    = 64,
2228                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2229                 .range_end      = (loff_t)-1,
2230         };
2231
2233         ret = __extent_writepage(page, wbc, &epd);
2234
2235         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2236                                  __extent_writepage, &epd);
2237         if (epd.bio) {
2238                 submit_one_bio(WRITE, epd.bio, 0);
2239         }
2240         return ret;
2241 }
2242 EXPORT_SYMBOL(extent_write_full_page);
2243
2244
2245 int extent_writepages(struct extent_io_tree *tree,
2246                       struct address_space *mapping,
2247                       get_extent_t *get_extent,
2248                       struct writeback_control *wbc)
2249 {
2250         int ret = 0;
2251         struct extent_page_data epd = {
2252                 .bio = NULL,
2253                 .tree = tree,
2254                 .get_extent = get_extent,
2255         };
2256
2257         ret = extent_write_cache_pages(tree, mapping, wbc,
2258                                        __extent_writepage, &epd);
2259         if (epd.bio) {
2260                 submit_one_bio(WRITE, epd.bio, 0);
2261         }
2262         return ret;
2263 }
2264 EXPORT_SYMBOL(extent_writepages);
2265
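/*
 * readpages implementation: pull pages off the caller supplied list,
 * add them to the page cache and lru by hand (the generic helpers are
 * not exported), and feed each one to __extent_read_full_page so the
 * reads share a single bio where possible
 */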
2266 int extent_readpages(struct extent_io_tree *tree,
2267                      struct address_space *mapping,
2268                      struct list_head *pages, unsigned nr_pages,
2269                      get_extent_t get_extent)
2270 {
2271         struct bio *bio = NULL;
2272         unsigned page_idx;
2273         struct pagevec pvec;
2274
2275         pagevec_init(&pvec, 0);
2276         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2277                 struct page *page = list_entry(pages->prev, struct page, lru);
2278
2279                 prefetchw(&page->flags);
2280                 list_del(&page->lru);
2281                 /*
2282                  * what we want to do here is call add_to_page_cache_lru,
2283                  * but that isn't exported, so we reproduce it here
2284                  */
2285                 if (!add_to_page_cache(page, mapping,
2286                                         page->index, GFP_KERNEL)) {
2287
2288                         /* open coding of lru_cache_add, also not exported */
2289                         page_cache_get(page);
2290                         if (!pagevec_add(&pvec, page))
2291                                 __pagevec_lru_add(&pvec);
2292                         __extent_read_full_page(tree, page, get_extent,
2293                                                 &bio, 0);
2294                 }
2295                 page_cache_release(page);
2296         }
2297         if (pagevec_count(&pvec))
2298                 __pagevec_lru_add(&pvec);
2299         BUG_ON(!list_empty(pages));
2300         if (bio)
2301                 submit_one_bio(READ, bio, 0);
2302         return 0;
2303 }
2304 EXPORT_SYMBOL(extent_readpages);
2305
2306 /*
2307  * basic invalidatepage code, this waits on any locked or writeback
2308  * ranges corresponding to the page, and then deletes any extent state
2309  * records from the tree
2310  */
2311 int extent_invalidatepage(struct extent_io_tree *tree,
2312                           struct page *page, unsigned long offset)
2313 {
2314         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2315         u64 end = start + PAGE_CACHE_SIZE - 1;
2316         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2317
        start += (offset + blocksize - 1) & ~(blocksize - 1);
2319         if (start > end)
2320                 return 0;
2321
2322         lock_extent(tree, start, end, GFP_NOFS);
2323         wait_on_extent_writeback(tree, start, end);
2324         clear_extent_bit(tree, start, end,
2325                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2326                          1, 1, GFP_NOFS);
2327         return 0;
2328 }
2329 EXPORT_SYMBOL(extent_invalidatepage);
2330
/*
 * simple commit_write call, set_page_dirty is used to mark both
 * the page and the extent records as dirty
 */
2335 int extent_commit_write(struct extent_io_tree *tree,
2336                         struct inode *inode, struct page *page,
2337                         unsigned from, unsigned to)
2338 {
2339         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2340
2341         set_page_extent_mapped(page);
2342         set_page_dirty(page);
2343
2344         if (pos > inode->i_size) {
2345                 i_size_write(inode, pos);
2346                 mark_inode_dirty(inode);
2347         }
2348         return 0;
2349 }
2350 EXPORT_SYMBOL(extent_commit_write);
2351
2352 int extent_prepare_write(struct extent_io_tree *tree,
2353                          struct inode *inode, struct page *page,
2354                          unsigned from, unsigned to, get_extent_t *get_extent)
2355 {
2356         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2357         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2358         u64 block_start;
2359         u64 orig_block_start;
2360         u64 block_end;
2361         u64 cur_end;
2362         struct extent_map *em;
2363         unsigned blocksize = 1 << inode->i_blkbits;
2364         size_t page_offset = 0;
2365         size_t block_off_start;
2366         size_t block_off_end;
2367         int err = 0;
2368         int iocount = 0;
2369         int ret = 0;
2370         int isnew;
2371
2372         set_page_extent_mapped(page);
2373
2374         block_start = (page_start + from) & ~((u64)blocksize - 1);
2375         block_end = (page_start + to - 1) | (blocksize - 1);
2376         orig_block_start = block_start;
2377
2378         lock_extent(tree, page_start, page_end, GFP_NOFS);
        while (block_start <= block_end) {
                em = get_extent(inode, page, page_offset, block_start,
                                block_end - block_start + 1, 1);
                if (IS_ERR(em) || !em) {
                        err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
                        goto err;
                }
2385                 cur_end = min(block_end, extent_map_end(em) - 1);
2386                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2387                 block_off_end = block_off_start + blocksize;
2388                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2389
2390                 if (!PageUptodate(page) && isnew &&
2391                     (block_off_end > to || block_off_start < from)) {
2392                         void *kaddr;
2393
2394                         kaddr = kmap_atomic(page, KM_USER0);
2395                         if (block_off_end > to)
2396                                 memset(kaddr + to, 0, block_off_end - to);
2397                         if (block_off_start < from)
2398                                 memset(kaddr + block_off_start, 0,
2399                                        from - block_off_start);
2400                         flush_dcache_page(page);
2401                         kunmap_atomic(kaddr, KM_USER0);
2402                 }
2403                 if ((em->block_start != EXTENT_MAP_HOLE &&
2404                      em->block_start != EXTENT_MAP_INLINE) &&
2405                     !isnew && !PageUptodate(page) &&
2406                     (block_off_end > to || block_off_start < from) &&
2407                     !test_range_bit(tree, block_start, cur_end,
2408                                     EXTENT_UPTODATE, 1)) {
2409                         u64 sector;
2410                         u64 extent_offset = block_start - em->start;
2411                         size_t iosize;
2412                         sector = (em->block_start + extent_offset) >> 9;
2413                         iosize = (cur_end - block_start + blocksize) &
2414                                 ~((u64)blocksize - 1);
2415                         /*
2416                          * we've already got the extent locked, but we
2417                          * need to split the state such that our end_bio
2418                          * handler can clear the lock.
2419                          */
2420                         set_extent_bit(tree, block_start,
2421                                        block_start + iosize - 1,
2422                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2423                         ret = submit_extent_page(READ, tree, page,
2424                                          sector, iosize, page_offset, em->bdev,
2425                                          NULL, 1,
2426                                          end_bio_extent_preparewrite, 0);
2427                         iocount++;
2428                         block_start = block_start + iosize;
2429                 } else {
2430                         set_extent_uptodate(tree, block_start, cur_end,
2431                                             GFP_NOFS);
2432                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2433                         block_start = cur_end + 1;
2434                 }
2435                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2436                 free_extent_map(em);
2437         }
2438         if (iocount) {
2439                 wait_extent_bit(tree, orig_block_start,
2440                                 block_end, EXTENT_LOCKED);
2441         }
2442         check_page_uptodate(tree, page);
2443 err:
2444         /* FIXME, zero out newly allocated blocks on error */
2445         return err;
2446 }
2447 EXPORT_SYMBOL(extent_prepare_write);
2448
2449 /*
2450  * a helper for releasepage, this tests for areas of the page that
2451  * are locked or under IO and drops the related state bits if it is safe
2452  * to drop the page.
2453  */
2454 int try_release_extent_state(struct extent_map_tree *map,
2455                              struct extent_io_tree *tree, struct page *page,
2456                              gfp_t mask)
2457 {
2458         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2459         u64 end = start + PAGE_CACHE_SIZE - 1;
2460         int ret = 1;
2461
2462         if (test_range_bit(tree, start, end,
2463                            EXTENT_IOBITS | EXTENT_ORDERED, 0))
2464                 ret = 0;
2465         else {
2466                 if ((mask & GFP_NOFS) == GFP_NOFS)
2467                         mask = GFP_NOFS;
2468                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2469                                  1, 1, mask);
2470         }
2471         return ret;
2472 }
2473 EXPORT_SYMBOL(try_release_extent_state);
2474
2475 /*
2476  * a helper for releasepage.  As long as there are no locked extents
2477  * in the range corresponding to the page, both state records and extent
2478  * map records are removed
2479  */
2480 int try_release_extent_mapping(struct extent_map_tree *map,
2481                                struct extent_io_tree *tree, struct page *page,
2482                                gfp_t mask)
2483 {
2484         struct extent_map *em;
2485         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2486         u64 end = start + PAGE_CACHE_SIZE - 1;
2487
2488         if ((mask & __GFP_WAIT) &&
2489             page->mapping->host->i_size > 16 * 1024 * 1024) {
2490                 u64 len;
2491                 while (start <= end) {
2492                         len = end - start + 1;
2493                         spin_lock(&map->lock);
2494                         em = lookup_extent_mapping(map, start, len);
2495                         if (!em || IS_ERR(em)) {
2496                                 spin_unlock(&map->lock);
2497                                 break;
2498                         }
2499                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2500                             em->start != start) {
2501                                 spin_unlock(&map->lock);
2502                                 free_extent_map(em);
2503                                 break;
2504                         }
2505                         if (!test_range_bit(tree, em->start,
2506                                             extent_map_end(em) - 1,
2507                                             EXTENT_LOCKED, 0)) {
2508                                 remove_extent_mapping(map, em);
2509                                 /* once for the rb tree */
2510                                 free_extent_map(em);
2511                         }
2512                         start = extent_map_end(em);
2513                         spin_unlock(&map->lock);
2514
2515                         /* once for us */
2516                         free_extent_map(em);
2517                 }
2518         }
2519         return try_release_extent_state(map, tree, page, mask);
2520 }
2521 EXPORT_SYMBOL(try_release_extent_mapping);
2522
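/*
 * bmap: map a logical file block to a device sector using get_extent.
 * Holes and inline extents have no sensible block address, so 0 is
 * returned for them (and for lookup failures).
 */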
2523 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2524                 get_extent_t *get_extent)
2525 {
2526         struct inode *inode = mapping->host;
        u64 start = (u64)iblock << inode->i_blkbits;
2528         sector_t sector = 0;
2529         struct extent_map *em;
2530
2531         em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2532         if (!em || IS_ERR(em))
2533                 return 0;
2534
2535         if (em->block_start == EXTENT_MAP_INLINE ||
2536             em->block_start == EXTENT_MAP_HOLE)
2537                 goto out;
2538
2539         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2540 out:
2541         free_extent_map(em);
2542         return sector;
2543 }
2544
2545 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2546                                               unsigned long i)
2547 {
2548         struct page *p;
2549         struct address_space *mapping;
2550
2551         if (i == 0)
2552                 return eb->first_page;
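        /* buffer pages are indexed relative to the start of the buffer,
         * so translate to an absolute page cache index first
         */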
2553         i += eb->start >> PAGE_CACHE_SHIFT;
2554         mapping = eb->first_page->mapping;
2555         if (!mapping)
2556                 return NULL;
2557
2558         /*
2559          * extent_buffer_page is only called after pinning the page
2560          * by increasing the reference count.  So we know the page must
2561          * be in the radix tree.
2562          */
2563 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2564         rcu_read_lock();
2565 #else
2566         read_lock_irq(&mapping->tree_lock);
2567 #endif
2568         p = radix_tree_lookup(&mapping->page_tree, i);
2569
2570 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2571         rcu_read_unlock();
2572 #else
2573         read_unlock_irq(&mapping->tree_lock);
2574 #endif
2575         return p;
2576 }
2577
2578 static inline unsigned long num_extent_pages(u64 start, u64 len)
2579 {
2580         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2581                 (start >> PAGE_CACHE_SHIFT);
2582 }
2583
2584 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2585                                                    u64 start,
2586                                                    unsigned long len,
2587                                                    gfp_t mask)
2588 {
2589         struct extent_buffer *eb = NULL;
2590 #ifdef LEAK_DEBUG
2591         unsigned long flags;
2592 #endif
2593
        eb = kmem_cache_zalloc(extent_buffer_cache, mask);
        if (!eb)
                return NULL;
        eb->start = start;
        eb->len = len;
2597         mutex_init(&eb->mutex);
2598 #ifdef LEAK_DEBUG
2599         spin_lock_irqsave(&leak_lock, flags);
2600         list_add(&eb->leak_list, &buffers);
2601         spin_unlock_irqrestore(&leak_lock, flags);
2602 #endif
2603         atomic_set(&eb->refs, 1);
2604
2605         return eb;
2606 }
2607
2608 static void __free_extent_buffer(struct extent_buffer *eb)
2609 {
2610 #ifdef LEAK_DEBUG
2611         unsigned long flags;
2612         spin_lock_irqsave(&leak_lock, flags);
2613         list_del(&eb->leak_list);
2614         spin_unlock_irqrestore(&leak_lock, flags);
2615 #endif
2616         kmem_cache_free(extent_buffer_cache, eb);
2617 }
2618
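/*
 * find an extent buffer covering start in the tree, or build a new one
 * from page cache pages.  Allocation happens without the buffer_lock
 * held, so a racing allocator may insert first; in that case the
 * already inserted buffer wins and the local copy is torn down again
 * (the free_eb path).
 */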
2619 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2620                                           u64 start, unsigned long len,
2621                                           struct page *page0,
2622                                           gfp_t mask)
2623 {
2624         unsigned long num_pages = num_extent_pages(start, len);
2625         unsigned long i;
2626         unsigned long index = start >> PAGE_CACHE_SHIFT;
2627         struct extent_buffer *eb;
2628         struct extent_buffer *exists = NULL;
2629         struct page *p;
2630         struct address_space *mapping = tree->mapping;
2631         int uptodate = 1;
2632
2633         spin_lock(&tree->buffer_lock);
2634         eb = buffer_search(tree, start);
2635         if (eb) {
2636                 atomic_inc(&eb->refs);
2637                 spin_unlock(&tree->buffer_lock);
2638                 mark_page_accessed(eb->first_page);
2639                 return eb;
2640         }
2641         spin_unlock(&tree->buffer_lock);
2642
2643         eb = __alloc_extent_buffer(tree, start, len, mask);
2644         if (!eb)
2645                 return NULL;
2646
2647         if (page0) {
2648                 eb->first_page = page0;
2649                 i = 1;
2650                 index++;
2651                 page_cache_get(page0);
2652                 mark_page_accessed(page0);
2653                 set_page_extent_mapped(page0);
2654                 set_page_extent_head(page0, len);
2655                 uptodate = PageUptodate(page0);
2656         } else {
2657                 i = 0;
2658         }
2659         for (; i < num_pages; i++, index++) {
2660                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2661                 if (!p) {
2662                         WARN_ON(1);
2663                         goto free_eb;
2664                 }
2665                 set_page_extent_mapped(p);
2666                 mark_page_accessed(p);
2667                 if (i == 0) {
2668                         eb->first_page = p;
2669                         set_page_extent_head(p, len);
2670                 } else {
2671                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2672                 }
2673                 if (!PageUptodate(p))
2674                         uptodate = 0;
2675                 unlock_page(p);
2676         }
2677         if (uptodate)
2678                 eb->flags |= EXTENT_UPTODATE;
2679         eb->flags |= EXTENT_BUFFER_FILLED;
2680
2681         spin_lock(&tree->buffer_lock);
2682         exists = buffer_tree_insert(tree, start, &eb->rb_node);
2683         if (exists) {
2684                 /* add one reference for the caller */
2685                 atomic_inc(&exists->refs);
2686                 spin_unlock(&tree->buffer_lock);
2687                 goto free_eb;
2688         }
2689         spin_unlock(&tree->buffer_lock);
2690
2691         /* add one reference for the tree */
2692         atomic_inc(&eb->refs);
2693         return eb;
2694
2695 free_eb:
2696         if (!atomic_dec_and_test(&eb->refs))
2697                 return exists;
        for (index = 1; index < i; index++)
                page_cache_release(extent_buffer_page(eb, index));
        if (i > 0)
                page_cache_release(extent_buffer_page(eb, 0));
2701         __free_extent_buffer(eb);
2702         return exists;
2703 }
2704 EXPORT_SYMBOL(alloc_extent_buffer);
2705
2706 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2707                                          u64 start, unsigned long len,
2708                                           gfp_t mask)
2709 {
2710         struct extent_buffer *eb;
2711
2712         spin_lock(&tree->buffer_lock);
2713         eb = buffer_search(tree, start);
2714         if (eb)
2715                 atomic_inc(&eb->refs);
2716         spin_unlock(&tree->buffer_lock);
2717
2718         if (eb)
2719                 mark_page_accessed(eb->first_page);
2720
2721         return eb;
2722 }
2723 EXPORT_SYMBOL(find_extent_buffer);
2724
2725 void free_extent_buffer(struct extent_buffer *eb)
2726 {
2727         if (!eb)
2728                 return;
2729
2730         if (!atomic_dec_and_test(&eb->refs))
2731                 return;
2732
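        /* the tree keeps its own reference until the buffer is released,
         * so the count reaching zero here points at a refcount bug
         */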
2733         WARN_ON(1);
2734 }
2735 EXPORT_SYMBOL(free_extent_buffer);
2736
2737 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2738                               struct extent_buffer *eb)
2739 {
2740         int set;
2741         unsigned long i;
2742         unsigned long num_pages;
2743         struct page *page;
2744
2745         u64 start = eb->start;
2746         u64 end = start + eb->len - 1;
2747
2748         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2749         num_pages = num_extent_pages(eb->start, eb->len);
2750
2751         for (i = 0; i < num_pages; i++) {
2752                 page = extent_buffer_page(eb, i);
2753                 lock_page(page);
2754                 if (i == 0)
2755                         set_page_extent_head(page, eb->len);
2756                 else
2757                         set_page_private(page, EXTENT_PAGE_PRIVATE);
2758
2759                 /*
2760                  * if we're on the last page or the first page and the
2761                  * block isn't aligned on a page boundary, do extra checks
2762                  * to make sure we don't clean page that is partially dirty
2763                  */
2764                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2765                     ((i == num_pages - 1) &&
2766                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2767                         start = (u64)page->index << PAGE_CACHE_SHIFT;
                        end = start + PAGE_CACHE_SIZE - 1;
2769                         if (test_range_bit(tree, start, end,
2770                                            EXTENT_DIRTY, 0)) {
2771                                 unlock_page(page);
2772                                 continue;
2773                         }
2774                 }
2775                 clear_page_dirty_for_io(page);
2776 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2777                 spin_lock_irq(&page->mapping->tree_lock);
2778 #else
2779                 read_lock_irq(&page->mapping->tree_lock);
2780 #endif
2781                 if (!PageDirty(page)) {
2782                         radix_tree_tag_clear(&page->mapping->page_tree,
2783                                                 page_index(page),
2784                                                 PAGECACHE_TAG_DIRTY);
2785                 }
2786 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2787                 spin_unlock_irq(&page->mapping->tree_lock);
2788 #else
2789                 read_unlock_irq(&page->mapping->tree_lock);
2790 #endif
2791                 unlock_page(page);
2792         }
2793         return 0;
2794 }
2795 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2796
2797 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2798                                     struct extent_buffer *eb)
2799 {
2800         return wait_on_extent_writeback(tree, eb->start,
2801                                         eb->start + eb->len - 1);
2802 }
2803 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2804
2805 int set_extent_buffer_dirty(struct extent_io_tree *tree,
2806                              struct extent_buffer *eb)
2807 {
2808         unsigned long i;
2809         unsigned long num_pages;
2810
2811         num_pages = num_extent_pages(eb->start, eb->len);
2812         for (i = 0; i < num_pages; i++) {
2813                 struct page *page = extent_buffer_page(eb, i);
2814                 /* writepage may need to do something special for the
2815                  * first page, we have to make sure page->private is
2816                  * properly set.  releasepage may drop page->private
2817                  * on us if the page isn't already dirty.
2818                  */
2819                 lock_page(page);
2820                 if (i == 0) {
2821                         set_page_extent_head(page, eb->len);
2822                 } else if (PagePrivate(page) &&
2823                            page->private != EXTENT_PAGE_PRIVATE) {
2824                         set_page_extent_mapped(page);
2825                 }
                __set_page_dirty_nobuffers(page);
                set_extent_dirty(tree, page_offset(page),
                                 page_offset(page) + PAGE_CACHE_SIZE - 1,
                                 GFP_NOFS);
2830                 unlock_page(page);
2831         }
2832         return 0;
2833 }
2834 EXPORT_SYMBOL(set_extent_buffer_dirty);
2835
2836 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2837                                 struct extent_buffer *eb)
2838 {
2839         unsigned long i;
2840         struct page *page;
2841         unsigned long num_pages;
2842
2843         num_pages = num_extent_pages(eb->start, eb->len);
2844         eb->flags &= ~EXTENT_UPTODATE;
2845
2846         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2847                               GFP_NOFS);
2848         for (i = 0; i < num_pages; i++) {
2849                 page = extent_buffer_page(eb, i);
2850                 if (page)
2851                         ClearPageUptodate(page);
2852         }
2853         return 0;
2854 }
2855
2856 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2857                                 struct extent_buffer *eb)
2858 {
2859         unsigned long i;
2860         struct page *page;
2861         unsigned long num_pages;
2862
2863         num_pages = num_extent_pages(eb->start, eb->len);
2864
2865         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2866                             GFP_NOFS);
2867         for (i = 0; i < num_pages; i++) {
2868                 page = extent_buffer_page(eb, i);
2869                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2870                     ((i == num_pages - 1) &&
2871                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2872                         check_page_uptodate(tree, page);
2873                         continue;
2874                 }
2875                 SetPageUptodate(page);
2876         }
2877         return 0;
2878 }
2879 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2880
2881 int extent_range_uptodate(struct extent_io_tree *tree,
2882                           u64 start, u64 end)
2883 {
2884         struct page *page;
2885         int ret;
2886         int pg_uptodate = 1;
2887         int uptodate;
2888         unsigned long index;
2889
2890         ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2891         if (ret)
2892                 return 1;
2893         while (start <= end) {
2894                 index = start >> PAGE_CACHE_SHIFT;
2895                 page = find_get_page(tree->mapping, index);
                     /* the page may have been reclaimed from the page cache */
                     if (!page) {
                             pg_uptodate = 0;
                             break;
                     }
2896                 uptodate = PageUptodate(page);
2897                 page_cache_release(page);
2898                 if (!uptodate) {
2899                         pg_uptodate = 0;
2900                         break;
2901                 }
2902                 start += PAGE_CACHE_SIZE;
2903         }
2904         return pg_uptodate;
2905 }
2906
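/*
 * return 1 if the extent buffer is known to be uptodate: the buffer
 * flag is checked first, then the io tree bits, then the individual
 * pages
 */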
2907 int extent_buffer_uptodate(struct extent_io_tree *tree,
2908                            struct extent_buffer *eb)
2909 {
2910         int ret = 0;
2911         unsigned long num_pages;
2912         unsigned long i;
2913         struct page *page;
2914         int pg_uptodate = 1;
2915
2916         if (eb->flags & EXTENT_UPTODATE)
2917                 return 1;
2918
2919         ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2920                            EXTENT_UPTODATE, 1);
2921         if (ret)
2922                 return ret;
2923
2924         num_pages = num_extent_pages(eb->start, eb->len);
2925         for (i = 0; i < num_pages; i++) {
2926                 page = extent_buffer_page(eb, i);
2927                 if (!PageUptodate(page)) {
2928                         pg_uptodate = 0;
2929                         break;
2930                 }
2931         }
2932         return pg_uptodate;
2933 }
2934 EXPORT_SYMBOL(extent_buffer_uptodate);
2935
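/*
 * read the pages backing the extent buffer from disk.  start can be
 * used to begin partway into the buffer.  with wait == 0 the pages
 * are only tried for the lock and the read is not waited on; with
 * wait != 0 the read is submitted and waited for, and -EIO is
 * returned if a page did not come back uptodate
 */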
2936 int read_extent_buffer_pages(struct extent_io_tree *tree,
2937                              struct extent_buffer *eb,
2938                              u64 start, int wait,
2939                              get_extent_t *get_extent, int mirror_num)
2940 {
2941         unsigned long i;
2942         unsigned long start_i;
2943         struct page *page;
2944         int err;
2945         int ret = 0;
2946         int locked_pages = 0;
2947         int all_uptodate = 1;
2948         int inc_all_pages = 0;
2949         unsigned long num_pages;
2950         struct bio *bio = NULL;
2951
2952         if (eb->flags & EXTENT_UPTODATE)
2953                 return 0;
2954
2955         if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2956                            EXTENT_UPTODATE, 1)) {
2957                 return 0;
2958         }
2959
2960         if (start) {
2961                 WARN_ON(start < eb->start);
2962                 start_i = (start >> PAGE_CACHE_SHIFT) -
2963                         (eb->start >> PAGE_CACHE_SHIFT);
2964         } else {
2965                 start_i = 0;
2966         }
2967
2968         num_pages = num_extent_pages(eb->start, eb->len);
2969         for (i = start_i; i < num_pages; i++) {
2970                 page = extent_buffer_page(eb, i);
2971                 if (!wait) {
2972                         if (!trylock_page(page))
2973                                 goto unlock_exit;
2974                 } else {
2975                         lock_page(page);
2976                 }
2977                 locked_pages++;
2978                 if (!PageUptodate(page)) {
2979                         all_uptodate = 0;
2980                 }
2981         }
2982         if (all_uptodate) {
2983                 if (start_i == 0)
2984                         eb->flags |= EXTENT_UPTODATE;
2988                 goto unlock_exit;
2989         }
2990
2991         for (i = start_i; i < num_pages; i++) {
2992                 page = extent_buffer_page(eb, i);
2993                 if (inc_all_pages)
2994                         page_cache_get(page);
2995                 if (!PageUptodate(page)) {
2996                         if (start_i == 0)
2997                                 inc_all_pages = 1;
2998                         ClearPageError(page);
2999                         err = __extent_read_full_page(tree, page,
3000                                                       get_extent, &bio,
3001                                                       mirror_num);
3002                         if (err) {
3003                                 ret = err;
3004                                 printk(KERN_ERR "err %d from __extent_read_full_page\n", ret);
3005                         }
3006                 } else {
3007                         unlock_page(page);
3008                 }
3009         }
3010
3011         if (bio)
3012                 submit_one_bio(READ, bio, mirror_num);
3013
3014         if (ret || !wait) {
3015                 if (ret)
3016                         printk(KERN_ERR "ret %d wait %d returning\n", ret, wait);
3017                 return ret;
3018         }
3019         for (i = start_i; i < num_pages; i++) {
3020                 page = extent_buffer_page(eb, i);
3021                 wait_on_page_locked(page);
3022                 if (!PageUptodate(page)) {
3023                         printk(KERN_ERR "page not uptodate after wait_on_page_locked\n");
3024                         ret = -EIO;
3025                 }
3026         }
3027         if (!ret)
3028                 eb->flags |= EXTENT_UPTODATE;
3029         return ret;
3030
3031 unlock_exit:
3032         i = start_i;
3033         while (locked_pages > 0) {
3034                 page = extent_buffer_page(eb, i);
3035                 i++;
3036                 unlock_page(page);
3037                 locked_pages--;
3038         }
3039         return ret;
3040 }
3041 EXPORT_SYMBOL(read_extent_buffer_pages);
3042
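/*
 * copy len bytes starting at offset start inside the extent buffer
 * into dstv, one page at a time
 */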
3043 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3044                         unsigned long start,
3045                         unsigned long len)
3046 {
3047         size_t cur;
3048         size_t offset;
3049         struct page *page;
3050         char *kaddr;
3051         char *dst = (char *)dstv;
3052         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3053         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3054
3055         WARN_ON(start > eb->len);
3056         WARN_ON(start + len > eb->len);
3057
3058         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3059
3060         while (len > 0) {
3061                 page = extent_buffer_page(eb, i);
3062
3063                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3064                 kaddr = kmap_atomic(page, KM_USER1);
3065                 memcpy(dst, kaddr + offset, cur);
3066                 kunmap_atomic(kaddr, KM_USER1);
3067
3068                 dst += cur;
3069                 len -= cur;
3070                 offset = 0;
3071                 i++;
3072         }
3073 }
3074 EXPORT_SYMBOL(read_extent_buffer);
3075
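/*
 * kmap the single page covering [start, start + min_len) inside the
 * extent buffer and return pointers into the mapping.  -EINVAL is
 * returned when the requested range straddles a page boundary
 */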
3076 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3077                                unsigned long min_len, char **token, char **map,
3078                                unsigned long *map_start,
3079                                unsigned long *map_len, int km)
3080 {
3081         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3082         char *kaddr;
3083         struct page *p;
3084         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3085         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3086         unsigned long end_i = (start_offset + start + min_len - 1) >>
3087                 PAGE_CACHE_SHIFT;
3088
3089         if (i != end_i)
3090                 return -EINVAL;
3091
3092         if (i == 0) {
3093                 offset = start_offset;
3094                 *map_start = 0;
3095         } else {
3096                 offset = 0;
3097                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3098         }
3099         if (start + min_len > eb->len) {
3100                 printk(KERN_ERR "bad mapping eb start %llu len %lu, "
                            "wanted %lu %lu\n", (unsigned long long)eb->start,
                            eb->len, start, min_len);
3101                 WARN_ON(1);
                     return -EINVAL;
3102         }
3103
3104         p = extent_buffer_page(eb, i);
3105         kaddr = kmap_atomic(p, km);
3106         *token = kaddr;
3107         *map = kaddr + offset;
3108         *map_len = PAGE_CACHE_SIZE - offset;
3109         return 0;
3110 }
3111 EXPORT_SYMBOL(map_private_extent_buffer);
3112
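/*
 * same as map_private_extent_buffer, but any mapping already cached
 * in the extent buffer is dropped first and the new mapping is
 * cached in its place
 */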
3113 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3114                       unsigned long min_len,
3115                       char **token, char **map,
3116                       unsigned long *map_start,
3117                       unsigned long *map_len, int km)
3118 {
3119         int err;
3120         int save = 0;
3121         if (eb->map_token) {
3122                 unmap_extent_buffer(eb, eb->map_token, km);
3123                 eb->map_token = NULL;
3124                 save = 1;
3125         }
3126         err = map_private_extent_buffer(eb, start, min_len, token, map,
3127                                        map_start, map_len, km);
3128         if (!err && save) {
3129                 eb->map_token = *token;
3130                 eb->kaddr = *map;
3131                 eb->map_start = *map_start;
3132                 eb->map_len = *map_len;
3133         }
3134         return err;
3135 }
3136 EXPORT_SYMBOL(map_extent_buffer);
3137
3138 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3139 {
3140         kunmap_atomic(token, km);
3141 }
3142 EXPORT_SYMBOL(unmap_extent_buffer);
3143
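/*
 * memcmp ptrv against len bytes at offset start inside the extent
 * buffer, returning the first nonzero comparison result
 */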
3144 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3145                           unsigned long start,
3146                           unsigned long len)
3147 {
3148         size_t cur;
3149         size_t offset;
3150         struct page *page;
3151         char *kaddr;
3152         const char *ptr = ptrv;
3153         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3154         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3155         int ret = 0;
3156
3157         WARN_ON(start > eb->len);
3158         WARN_ON(start + len > eb->len);
3159
3160         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3161
3162         while (len > 0) {
3163                 page = extent_buffer_page(eb, i);
3164
3165                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3166
3167                 kaddr = kmap_atomic(page, KM_USER0);
3168                 ret = memcmp(ptr, kaddr + offset, cur);
3169                 kunmap_atomic(kaddr, KM_USER0);
3170                 if (ret)
3171                         break;
3172
3173                 ptr += cur;
3174                 len -= cur;
3175                 offset = 0;
3176                 i++;
3177         }
3178         return ret;
3179 }
3180 EXPORT_SYMBOL(memcmp_extent_buffer);
3181
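/*
 * copy len bytes from srcv into the extent buffer at offset start,
 * one page at a time
 */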
3182 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3183                          unsigned long start, unsigned long len)
3184 {
3185         size_t cur;
3186         size_t offset;
3187         struct page *page;
3188         char *kaddr;
3189         const char *src = srcv;
3190         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3191         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3192
3193         WARN_ON(start > eb->len);
3194         WARN_ON(start + len > eb->len);
3195
3196         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3197
3198         while (len > 0) {
3199                 page = extent_buffer_page(eb, i);
3200                 WARN_ON(!PageUptodate(page));
3201
3202                 cur = min(len, PAGE_CACHE_SIZE - offset);
3203                 kaddr = kmap_atomic(page, KM_USER1);
3204                 memcpy(kaddr + offset, src, cur);
3205                 kunmap_atomic(kaddr, KM_USER1);
3206
3207                 src += cur;
3208                 len -= cur;
3209                 offset = 0;
3210                 i++;
3211         }
3212 }
3213 EXPORT_SYMBOL(write_extent_buffer);
3214
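/*
 * fill len bytes at offset start inside the extent buffer with the
 * byte c
 */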
3215 void memset_extent_buffer(struct extent_buffer *eb, char c,
3216                           unsigned long start, unsigned long len)
3217 {
3218         size_t cur;
3219         size_t offset;
3220         struct page *page;
3221         char *kaddr;
3222         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3223         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3224
3225         WARN_ON(start > eb->len);
3226         WARN_ON(start + len > eb->len);
3227
3228         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3229
3230         while (len > 0) {
3231                 page = extent_buffer_page(eb, i);
3232                 WARN_ON(!PageUptodate(page));
3233
3234                 cur = min(len, PAGE_CACHE_SIZE - offset);
3235                 kaddr = kmap_atomic(page, KM_USER0);
3236                 memset(kaddr + offset, c, cur);
3237                 kunmap_atomic(kaddr, KM_USER0);
3238
3239                 len -= cur;
3240                 offset = 0;
3241                 i++;
3242         }
3243 }
3244 EXPORT_SYMBOL(memset_extent_buffer);
3245
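/*
 * copy len bytes at src_offset in src into dst at dst_offset.  the
 * destination page is mapped with KM_USER0 here because
 * read_extent_buffer takes its own KM_USER1 mapping of the source
 */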
3246 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3247                         unsigned long dst_offset, unsigned long src_offset,
3248                         unsigned long len)
3249 {
3250         u64 dst_len = dst->len;
3251         size_t cur;
3252         size_t offset;
3253         struct page *page;
3254         char *kaddr;
3255         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3256         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3257
3258         WARN_ON(src->len != dst_len);
3259
3260         offset = (start_offset + dst_offset) &
3261                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3262
3263         while (len > 0) {
3264                 page = extent_buffer_page(dst, i);
3265                 WARN_ON(!PageUptodate(page));
3266
3267                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3268
3269                 kaddr = kmap_atomic(page, KM_USER0);
3270                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3271                 kunmap_atomic(kaddr, KM_USER0);
3272
3273                 src_offset += cur;
3274                 len -= cur;
3275                 offset = 0;
3276                 i++;
3277         }
3278 }
3279 EXPORT_SYMBOL(copy_extent_buffer);
3280
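/*
 * overlap safe copy between two pages.  even when the pages differ
 * the bytes are copied from the end toward the start, which is what
 * lets memmove_extent_buffer walk an overlapping range backwards
 */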
3281 static void move_pages(struct page *dst_page, struct page *src_page,
3282                        unsigned long dst_off, unsigned long src_off,
3283                        unsigned long len)
3284 {
3285         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3286         if (dst_page == src_page) {
3287                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3288         } else {
3289                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3290                 char *p = dst_kaddr + dst_off + len;
3291                 char *s = src_kaddr + src_off + len;
3292
3293                 while (len--)
3294                         *--p = *--s;
3295
3296                 kunmap_atomic(src_kaddr, KM_USER1);
3297         }
3298         kunmap_atomic(dst_kaddr, KM_USER0);
3299 }
3300
3301 static void copy_pages(struct page *dst_page, struct page *src_page,
3302                        unsigned long dst_off, unsigned long src_off,
3303                        unsigned long len)
3304 {
3305         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3306         char *src_kaddr;
3307
3308         if (dst_page != src_page)
3309                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3310         else
3311                 src_kaddr = dst_kaddr;
3312
3313         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3314         kunmap_atomic(dst_kaddr, KM_USER0);
3315         if (dst_page != src_page)
3316                 kunmap_atomic(src_kaddr, KM_USER1);
3317 }
3318
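/*
 * memcpy within a single extent buffer.  the copy runs forward, so
 * callers with a destination that overlaps and sits above the source
 * must use memmove_extent_buffer instead
 */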
3319 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3320                            unsigned long src_offset, unsigned long len)
3321 {
3322         size_t cur;
3323         size_t dst_off_in_page;
3324         size_t src_off_in_page;
3325         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3326         unsigned long dst_i;
3327         unsigned long src_i;
3328
3329         if (src_offset + len > dst->len) {
3330                 printk(KERN_ERR "memcpy bogus src_offset %lu move len %lu len %lu\n",
3331                        src_offset, len, dst->len);
3332                 BUG_ON(1);
3333         }
3334         if (dst_offset + len > dst->len) {
3335                 printk(KERN_ERR "memcpy bogus dst_offset %lu move len %lu len %lu\n",
3336                        dst_offset, len, dst->len);
3337                 BUG_ON(1);
3338         }
3339
3340         while (len > 0) {
3341                 dst_off_in_page = (start_offset + dst_offset) &
3342                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3343                 src_off_in_page = (start_offset + src_offset) &
3344                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3345
3346                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3347                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3348
3349                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3350                                                src_off_in_page));
3351                 cur = min_t(unsigned long, cur,
3352                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3353
3354                 copy_pages(extent_buffer_page(dst, dst_i),
3355                            extent_buffer_page(dst, src_i),
3356                            dst_off_in_page, src_off_in_page, cur);
3357
3358                 src_offset += cur;
3359                 dst_offset += cur;
3360                 len -= cur;
3361         }
3362 }
3363 EXPORT_SYMBOL(memcpy_extent_buffer);
3364
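/*
 * memmove within a single extent buffer.  a non-overlapping or
 * downward move is handed to memcpy_extent_buffer; otherwise the
 * range is copied backwards, page by page, from the end
 */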
3365 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3366                            unsigned long src_offset, unsigned long len)
3367 {
3368         size_t cur;
3369         size_t dst_off_in_page;
3370         size_t src_off_in_page;
3371         unsigned long dst_end = dst_offset + len - 1;
3372         unsigned long src_end = src_offset + len - 1;
3373         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3374         unsigned long dst_i;
3375         unsigned long src_i;
3376
3377         if (src_offset + len > dst->len) {
3378                 printk(KERN_ERR "memmove bogus src_offset %lu move len %lu len %lu\n",
3379                        src_offset, len, dst->len);
3380                 BUG_ON(1);
3381         }
3382         if (dst_offset + len > dst->len) {
3383                 printk(KERN_ERR "memmove bogus dst_offset %lu move len %lu len %lu\n",
3384                        dst_offset, len, dst->len);
3385                 BUG_ON(1);
3386         }
3387         if (dst_offset < src_offset) {
3388                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3389                 return;
3390         }
3391         while (len > 0) {
3392                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3393                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3394
3395                 dst_off_in_page = (start_offset + dst_end) &
3396                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3397                 src_off_in_page = (start_offset + src_end) &
3398                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3399
3400                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3401                 cur = min(cur, dst_off_in_page + 1);
3402                 move_pages(extent_buffer_page(dst, dst_i),
3403                            extent_buffer_page(dst, src_i),
3404                            dst_off_in_page - cur + 1,
3405                            src_off_in_page - cur + 1, cur);
3406
3407                 dst_end -= cur;
3408                 src_end -= cur;
3409                 len -= cur;
3410         }
3411 }
3412 EXPORT_SYMBOL(memmove_extent_buffer);
3413
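/*
 * called via releasepage: free the extent buffer that starts at this
 * page if nothing else holds a reference to it.  returns 1 when the
 * page can be released, 0 when the buffer is still in use
 */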
3414 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3415 {
3416         u64 start = page_offset(page);
3417         struct extent_buffer *eb;
3418         int ret = 1;
3419         unsigned long i;
3420         unsigned long num_pages;
3421
3422         spin_lock(&tree->buffer_lock);
3423         eb = buffer_search(tree, start);
3424         if (!eb)
3425                 goto out;
3426
3427         if (atomic_read(&eb->refs) > 1) {
3428                 ret = 0;
3429                 goto out;
3430         }
3431         /* at this point we can safely release the extent buffer */
3432         num_pages = num_extent_pages(eb->start, eb->len);
3433         for (i = 0; i < num_pages; i++)
3434                 page_cache_release(extent_buffer_page(eb, i));
3435         rb_erase(&eb->rb_node, &tree->buffer);
3436         __free_extent_buffer(eb);
3437 out:
3438         spin_unlock(&tree->buffer_lock);
3439         return ret;
3440 }
3441 EXPORT_SYMBOL(try_release_extent_buffer);