ext4: check for extents that wrap around
fs/ext4/extents.c
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */

static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
                __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
                            handle_t *handle, struct inode *inode,
                            struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * an ELF object's sections out-of-order but in a way
                 * that eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}
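
/*
 * A worked example of the goal heuristic above (hypothetical numbers):
 * if the nearest extent in the path maps logical block 90 to physical
 * block 5000, a request for logical block 100 yields goal
 * 5000 + (100 - 90) = 5010, while a request for logical block 85
 * yields goal 5000 - (90 - 85) = 4995, i.e. the physical block that
 * would keep the file contiguous on disk.
 */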

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}
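
/*
 * A sizing example for the four helpers above (hypothetical, assuming a
 * 4KiB block size): struct ext4_extent, struct ext4_extent_idx and
 * struct ext4_extent_header are all 12 bytes, so a full block holds
 * (4096 - 12) / 12 = 340 extents or indexes, while the 60-byte i_data
 * root in the inode body holds only (60 - 12) / 12 = 4.
 */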

/*
 * Calculate the number of metadata blocks needed
 * to allocate a new block at @lblock
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}
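
/*
 * A worked example for the code above (hypothetical, assuming a 4KiB
 * block size): idxs = (4096 - 12) / 12 = 340.  Within a contiguous run
 * of delayed-allocation blocks, every 340th block charges one new
 * index block, every 340^2-th an additional one, and every 340^3-th
 * yet another; the first block of a non-contiguous run falls back to
 * the worst case of ext_depth(inode) + 1 new metadata blocks.
 */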

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);
        ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

        /*
         * We allow neither:
         *  - zero length
         *  - overflow/wrap-around
         */
        if (lblock + len <= lblock)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
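
/*
 * A worked example of the wrap-around check above (hypothetical
 * values): ext4_lblk_t is 32 bits, so with lblock = 0xfffffffe and
 * len = 4 the sum wraps to 2 and "lblock + len <= lblock" rejects the
 * extent; a zero-length extent is rejected by the same test, since
 * lblock + 0 == lblock.
 */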

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
                ext4_fsblk_t pblock = 0;
                ext4_lblk_t lblock = 0;
                ext4_lblk_t prev = 0;
                int len = 0;
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;

                        /* Check for overlapping extents */
                        lblock = le32_to_cpu(ext->ee_block);
                        len = ext4_ext_get_actual_len(ext);
                        if ((lblock <= prev) && prev) {
                                pblock = ext4_ext_pblock(ext);
                                es->s_last_error_block = cpu_to_le64(pblock);
                                return 0;
                        }
                        ext++;
                        entries--;
                        prev = lblock + len - 1;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                        "bad header/extent: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check(inode, eh, depth)        \
        __ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_uninitialized(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;

        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 && le32_to_cpu(ix->ei_block) <=
                            le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}
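
/*
 * An example of the convention above (hypothetical values): with index
 * entries starting at logical blocks 0, 100 and 200, a search for
 * block 150 leaves p_idx on the entry for block 100, i.e. the last
 * index whose ei_block is <= the requested block.
 */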

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;
        int ret;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                int need_to_validate = 0;

                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_getblk_gfp(inode->i_sb, path[ppos].p_block,
                                   __GFP_MOVABLE | GFP_NOFS);
                if (unlikely(!bh)) {
                        ret = -ENOMEM;
                        goto err;
                }
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
                        ret = bh_submit_read(bh);
                        if (ret < 0) {
                                put_bh(bh);
                                goto err;
                        }
                        /* validate the extent entries */
                        need_to_validate = 1;
                }
                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        ret = -EIO;
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                ret = need_to_validate ? ext4_ext_check(inode, eh, i) : 0;
                if (ret < 0)
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(ret);
}
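
/*
 * An illustration of the path layout built above (hypothetical): for a
 * depth-2 tree the returned array uses three slots: path[0] is the
 * root kept in the inode body, path[1] an intermediate index block,
 * and path[2] the leaf, whose p_ext (if not NULL) is the extent that
 * covers or immediately precedes the requested block.
 */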

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now the decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, then we break processing
         * and mark the filesystem read-only.  The index won't
         * be inserted and the tree will remain in a consistent
         * state.  The next mount will repair the buffers too.
         */

        /*
         * Get an array to track all allocated blocks.
         * We need this to handle errors and free the blocks
         * upon failure.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (!bh) {
                err = -ENOMEM;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -ENOMEM;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}
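
/*
 * A worked example of the block accounting above (hypothetical
 * values): with depth = 2 and a free index slot at level at = 1,
 * depth - at = 1 block is allocated: just the new leaf.  With at = 0
 * (a free slot only in the root), two blocks are allocated: the new
 * leaf plus k = depth - at - 1 = 1 intermediate index block.
 */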

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags,
                                 struct ext4_extent *newext)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, NULL,
                newext, &err, flags);
        if (newblock == 0)
                return err;

        bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (!bh)
                return -ENOMEM;
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* the old root could have indexes or leaves,
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}
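
/*
 * A sketch of the depth-0 case above (illustrative): before the grow,
 * the inode body holds a leaf full of extents; afterwards those
 * extents live in the newly allocated block, the inode body holds a
 * single index entry pointing at that block, and eh_depth has been
 * bumped from 0 to 1.
 */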

/*
 * ext4_ext_create_new_leaf:
 * finds an empty index and adds a new leaf.
 * If no free index is found, it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                    unsigned int flags,
                                    struct ext4_ext_path *path,
                                    struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree and look for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use an already allocated block for the index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found an index with a free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, flags, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, flags, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only the first grow (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search for the closest allocated block to the left of *logical
 * and return it at @logical + its physical address at @phys.
 * If *logical is the smallest allocated block, the function
 * returns 0 at @phys.
 * The return value contains 0 (success) or an error code.
 */
static int ext4_ext_search_left(struct inode *inode,
                                struct ext4_ext_path *path,
                                ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
                                         *logical, le32_to_cpu(ex->ee_block));
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
                                  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
                                  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
                le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
                                  depth);
                                return -EIO;
                        }
                }
                return 0;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext4_ext_pblock(ex) + ee_len - 1;
        return 0;
}

/*
 * search for the closest allocated block to the right of *logical
 * and return it at @logical + its physical address at @phys.
 * If *logical is the largest allocated block, the function
 * returns 0 at @phys.
 * The return value contains 0 (success) or an error code.
 */
static int ext4_ext_search_right(struct inode *inode,
                                 struct ext4_ext_path *path,
                                 ext4_lblk_t *logical, ext4_fsblk_t *phys,
                                 struct ext4_extent **ret_ex)
{
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        ext4_fsblk_t block;
        int depth;      /* Note, NOT eh_depth; depth from top of tree */
        int ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually the extent in the path covers blocks smaller
         * than *logical, but it can be that the extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "first_extent(path[%d].p_hdr) != ex",
                                         depth);
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                                 "ix != EXT_FIRST_INDEX *logical %d!",
                                                 *logical);
                                return -EIO;
                        }
                }
                goto found_extent;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
                /* next allocated block in this leaf */
                ex++;
                goto found_extent;
        }

        /* go up and search for index to the right */
        while (--depth >= 0) {
                ix = path[depth].p_idx;
                if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
                        goto got_index;
        }

        /* we've gone up to the root and found no index to the right */
        return 0;

got_index:
        /* we've found an index to the right, let's
         * follow it and find the closest allocated
         * block to the right */
        ix++;
        block = ext4_idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
                if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
                        put_bh(bh);
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
                block = ext4_idx_pblock(ix);
                put_bh(bh);
        }

        bh = sb_bread(inode->i_sb, block);
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
        if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
                put_bh(bh);
                return -EIO;
        }
        ex = EXT_FIRST_EXTENT(eh);
found_extent:
        *logical = le32_to_cpu(ex->ee_block);
        *phys = ext4_ext_pblock(ex);
        *ret_ex = ex;
        if (bh)
                put_bh(bh);
        return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        if (depth == 0 && path->p_ext == NULL)
                return EXT_MAX_BLOCKS;

        while (depth >= 0) {
                if (depth == path->p_depth) {
                        /* leaf */
                        if (path[depth].p_ext &&
                                path[depth].p_ext !=
                                        EXT_LAST_EXTENT(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_ext[1].ee_block);
                } else {
                        /* index */
                        if (path[depth].p_idx !=
                                        EXT_LAST_INDEX(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_idx[1].ei_block);
                }
                depth--;
        }

        return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        /* zero-depth tree has no leaf blocks at all */
        if (depth == 0)
                return EXT_MAX_BLOCKS;

        /* go to index block */
        depth--;

        while (depth >= 0) {
                if (path[depth].p_idx !=
                                EXT_LAST_INDEX(path[depth].p_hdr))
                        return (ext4_lblk_t)
                                le32_to_cpu(path[depth].p_idx[1].ei_block);
                depth--;
        }

        return EXT_MAX_BLOCKS;
}
1468
1469 /*
1470  * ext4_ext_correct_indexes:
1471  * if the leaf gets modified and the modified extent is first in the leaf,
1472  * then we have to correct all indexes above.
1473  * TODO: do we need to correct the tree in all cases?
1474  */
1475 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1476                                 struct ext4_ext_path *path)
1477 {
1478         struct ext4_extent_header *eh;
1479         int depth = ext_depth(inode);
1480         struct ext4_extent *ex;
1481         __le32 border;
1482         int k, err = 0;
1483
1484         eh = path[depth].p_hdr;
1485         ex = path[depth].p_ext;
1486
1487         if (unlikely(ex == NULL || eh == NULL)) {
1488                 EXT4_ERROR_INODE(inode,
1489                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1490                 return -EIO;
1491         }
1492
1493         if (depth == 0) {
1494                 /* there is no tree at all */
1495                 return 0;
1496         }
1497
1498         if (ex != EXT_FIRST_EXTENT(eh)) {
1499                 /* we correct the tree only if the first extent in the leaf was modified */
1500                 return 0;
1501         }
1502
1503         /*
1504          * TODO: we need correction if border is smaller than current one
1505          */
1506         k = depth - 1;
1507         border = path[depth].p_ext->ee_block;
1508         err = ext4_ext_get_access(handle, inode, path + k);
1509         if (err)
1510                 return err;
1511         path[k].p_idx->ei_block = border;
1512         err = ext4_ext_dirty(handle, inode, path + k);
1513         if (err)
1514                 return err;
1515
1516         while (k--) {
1517                 /* change all left-side indexes */
1518                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1519                         break;
1520                 err = ext4_ext_get_access(handle, inode, path + k);
1521                 if (err)
1522                         break;
1523                 path[k].p_idx->ei_block = border;
1524                 err = ext4_ext_dirty(handle, inode, path + k);
1525                 if (err)
1526                         break;
1527         }
1528
1529         return err;
1530 }
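/*
 * Worked example (hypothetical numbers): a leaf's first extent used to
 * start at logical block 100, so its parent index entry holds
 * ei_block == 100.  If an insert or merge changes the leaf's first
 * extent to start at block 96, the code above rewrites the parent
 * index entry to 96; and if that index entry is itself the first in
 * its block, the same border value is propagated one more level up,
 * and so on until a non-first index entry is reached.
 */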
1531
1532 int
1533 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1534                                 struct ext4_extent *ex2)
1535 {
1536         unsigned short ext1_ee_len, ext2_ee_len, max_len;
1537
1538         /*
1539          * Make sure that either both extents are uninitialized, or
1540          * both are _not_.
1541          */
1542         if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1543                 return 0;
1544
1545         if (ext4_ext_is_uninitialized(ex1))
1546                 max_len = EXT_UNINIT_MAX_LEN;
1547         else
1548                 max_len = EXT_INIT_MAX_LEN;
1549
1550         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1551         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1552
1553         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1554                         le32_to_cpu(ex2->ee_block))
1555                 return 0;
1556
1557         /*
1558          * To allow future support for preallocated extents to be added
1559          * as an RO_COMPAT feature, refuse to merge two extents if
1560          * this can result in the top bit of ee_len being set.
1561          */
1562         if (ext1_ee_len + ext2_ee_len > max_len)
1563                 return 0;
1564 #ifdef AGGRESSIVE_TEST
1565         if (ext1_ee_len >= 4)
1566                 return 0;
1567 #endif
1568
1569         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1570                 return 1;
1571         return 0;
1572 }
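/*
 * Example of the merge test (hypothetical numbers): ex1 covers logical
 * blocks 100..107 at physical blocks 5000..5007, ex2 covers logical
 * blocks 108..111 at physical blocks 5008..5011, and both extents are
 * initialized.  Then 100 + 8 == 108 (logically contiguous),
 * 5000 + 8 == 5008 (physically contiguous), and 8 + 4 = 12 does not
 * exceed EXT_INIT_MAX_LEN, so the function returns 1 and the pair can
 * be collapsed into a single 12-block extent.
 */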
1573
1574 /*
1575  * This function tries to merge the "ex" extent to the next extent in the tree.
1576  * It always tries to merge towards right. If you want to merge towards
1577  * left, pass "ex - 1" as argument instead of "ex".
1578  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1579  * 1 if they got merged.
1580  */
1581 static int ext4_ext_try_to_merge_right(struct inode *inode,
1582                                  struct ext4_ext_path *path,
1583                                  struct ext4_extent *ex)
1584 {
1585         struct ext4_extent_header *eh;
1586         unsigned int depth, len;
1587         int merge_done = 0;
1588         int uninitialized = 0;
1589
1590         depth = ext_depth(inode);
1591         BUG_ON(path[depth].p_hdr == NULL);
1592         eh = path[depth].p_hdr;
1593
1594         while (ex < EXT_LAST_EXTENT(eh)) {
1595                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1596                         break;
1597                 /* merge with next extent! */
1598                 if (ext4_ext_is_uninitialized(ex))
1599                         uninitialized = 1;
1600                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1601                                 + ext4_ext_get_actual_len(ex + 1));
1602                 if (uninitialized)
1603                         ext4_ext_mark_uninitialized(ex);
1604
1605                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1606                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1607                                 * sizeof(struct ext4_extent);
1608                         memmove(ex + 1, ex + 2, len);
1609                 }
1610                 le16_add_cpu(&eh->eh_entries, -1);
1611                 merge_done = 1;
1612                 WARN_ON(eh->eh_entries == 0);
1613                 if (!eh->eh_entries)
1614                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1615         }
1616
1617         return merge_done;
1618 }
1619
1620 /*
1621  * This function tries to merge the @ex extent to its neighbours in the
1622  * tree: left first (via ex - 1), then right; returns 1 iff the right merge happened.
1623  */
1624 static int ext4_ext_try_to_merge(struct inode *inode,
1625                                   struct ext4_ext_path *path,
1626                                   struct ext4_extent *ex) {
1627         struct ext4_extent_header *eh;
1628         unsigned int depth;
1629         int merge_done = 0;
1630         int ret = 0;
1631
1632         depth = ext_depth(inode);
1633         BUG_ON(path[depth].p_hdr == NULL);
1634         eh = path[depth].p_hdr;
1635
1636         if (ex > EXT_FIRST_EXTENT(eh))
1637                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1638
1639         if (!merge_done)
1640                 ret = ext4_ext_try_to_merge_right(inode, path, ex);
1641
1642         return ret;
1643 }
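/*
 * Note on the order above: merging with the left neighbour is attempted
 * first by handing "ex - 1" to the right-merge helper; only if the left
 * side could not be merged do we try to merge @ex with its own right
 * neighbour.  A sketch of a typical call site (hypothetical):
 *
 *	ext4_ext_try_to_merge(inode, path, path[depth].p_ext);
 *
 * after which the caller still marks the path dirty itself.
 */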
1644
1645 /*
1646  * check if a portion of the "newext" extent overlaps with an
1647  * existing extent.
1648  *
1649  * If there is an overlap discovered, it updates the length of the newext
1650  * such that there will be no overlap, and then returns 1.
1651  * If there is no overlap found, it returns 0.
1652  */
1653 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1654                                            struct inode *inode,
1655                                            struct ext4_extent *newext,
1656                                            struct ext4_ext_path *path)
1657 {
1658         ext4_lblk_t b1, b2;
1659         unsigned int depth, len1;
1660         unsigned int ret = 0;
1661
1662         b1 = le32_to_cpu(newext->ee_block);
1663         len1 = ext4_ext_get_actual_len(newext);
1664         depth = ext_depth(inode);
1665         if (!path[depth].p_ext)
1666                 goto out;
1667         b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1668
1669         /*
1670          * get the next allocated block if the extent in the path
1671          * is before the requested block(s)
1672          */
1673         if (b2 < b1) {
1674                 b2 = ext4_ext_next_allocated_block(path);
1675                 if (b2 == EXT_MAX_BLOCKS)
1676                         goto out;
1677                 b2 = EXT4_LBLK_CMASK(sbi, b2);
1678         }
1679
1680         /* check for wrap through zero on extent logical start block */
1681         if (b1 + len1 < b1) {
1682                 len1 = EXT_MAX_BLOCKS - b1;
1683                 newext->ee_len = cpu_to_le16(len1);
1684                 ret = 1;
1685         }
1686
1687         /* check for overlap */
1688         if (b1 + len1 > b2) {
1689                 newext->ee_len = cpu_to_le16(b2 - b1);
1690                 ret = 1;
1691         }
1692 out:
1693         return ret;
1694 }
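/*
 * Example of the trimming above (hypothetical numbers, ignoring bigalloc
 * cluster rounding): newext asks for logical blocks 100..149 (b1 = 100,
 * len1 = 50) while an existing extent already starts at block 120
 * (b2 = 120).  Since 100 + 50 > 120, ee_len is cut down to
 * 120 - 100 = 20 blocks and 1 is returned, so the caller allocates only
 * the non-overlapping range 100..119.  The wrap check right before it
 * plays the same game against EXT_MAX_BLOCKS when b1 + len1 overflows
 * the 32-bit logical block space.
 */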
1695
1696 /*
1697  * ext4_ext_insert_extent:
1698  * tries to merge the requested extent into an existing extent or
1699  * inserts the requested extent as a new one into the tree,
1700  * creating a new leaf in the no-space case.
1701  */
1702 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1703                                 struct ext4_ext_path *path,
1704                                 struct ext4_extent *newext, int flag)
1705 {
1706         struct ext4_extent_header *eh;
1707         struct ext4_extent *ex, *fex;
1708         struct ext4_extent *nearex; /* nearest extent */
1709         struct ext4_ext_path *npath = NULL;
1710         int depth, len, err;
1711         ext4_lblk_t next;
1712         unsigned uninitialized = 0;
1713         int flags = 0;
1714
1715         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1716                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1717                 return -EIO;
1718         }
1719         depth = ext_depth(inode);
1720         ex = path[depth].p_ext;
1721         if (unlikely(path[depth].p_hdr == NULL)) {
1722                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1723                 return -EIO;
1724         }
1725
1726         /* try to insert block into found extent and return */
1727         if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1728                 && ext4_can_extents_be_merged(inode, ex, newext)) {
1729                 ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
1730                           ext4_ext_is_uninitialized(newext),
1731                           ext4_ext_get_actual_len(newext),
1732                           le32_to_cpu(ex->ee_block),
1733                           ext4_ext_is_uninitialized(ex),
1734                           ext4_ext_get_actual_len(ex),
1735                           ext4_ext_pblock(ex));
1736                 err = ext4_ext_get_access(handle, inode, path + depth);
1737                 if (err)
1738                         return err;
1739
1740                 /*
1741                  * ext4_can_extents_be_merged should have checked that either
1742                  * both extents are uninitialized, or both aren't. Thus we
1743                  * need to check only one of them here.
1744                  */
1745                 if (ext4_ext_is_uninitialized(ex))
1746                         uninitialized = 1;
1747                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1748                                         + ext4_ext_get_actual_len(newext));
1749                 if (uninitialized)
1750                         ext4_ext_mark_uninitialized(ex);
1751                 eh = path[depth].p_hdr;
1752                 nearex = ex;
1753                 goto merge;
1754         }
1755
1756         depth = ext_depth(inode);
1757         eh = path[depth].p_hdr;
1758         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1759                 goto has_space;
1760
1761         /* probably next leaf has space for us? */
1762         fex = EXT_LAST_EXTENT(eh);
1763         next = EXT_MAX_BLOCKS;
1764         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
1765                 next = ext4_ext_next_leaf_block(path);
1766         if (next != EXT_MAX_BLOCKS) {
1767                 ext_debug("next leaf block - %u\n", next);
1768                 BUG_ON(npath != NULL);
1769                 npath = ext4_ext_find_extent(inode, next, NULL);
1770                 if (IS_ERR(npath))
1771                         return PTR_ERR(npath);
1772                 BUG_ON(npath->p_depth != path->p_depth);
1773                 eh = npath[depth].p_hdr;
1774                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1775                         ext_debug("next leaf isn't full(%d)\n",
1776                                   le16_to_cpu(eh->eh_entries));
1777                         path = npath;
1778                         goto has_space;
1779                 }
1780                 ext_debug("next leaf has no free space(%d,%d)\n",
1781                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1782         }
1783
1784         /*
1785          * There is no free space in the found leaf.
1786          * We're gonna add a new leaf in the tree.
1787          */
1788         if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1789                 flags = EXT4_MB_USE_ROOT_BLOCKS;
1790         err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1791         if (err)
1792                 goto cleanup;
1793         depth = ext_depth(inode);
1794         eh = path[depth].p_hdr;
1795
1796 has_space:
1797         nearex = path[depth].p_ext;
1798
1799         err = ext4_ext_get_access(handle, inode, path + depth);
1800         if (err)
1801                 goto cleanup;
1802
1803         if (!nearex) {
1804                 /* there is no extent in this leaf, create first one */
1805                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1806                                 le32_to_cpu(newext->ee_block),
1807                                 ext4_ext_pblock(newext),
1808                                 ext4_ext_is_uninitialized(newext),
1809                                 ext4_ext_get_actual_len(newext));
1810                 nearex = EXT_FIRST_EXTENT(eh);
1811         } else {
1812                 if (le32_to_cpu(newext->ee_block)
1813                            > le32_to_cpu(nearex->ee_block)) {
1814                         /* Insert after */
1815                         ext_debug("insert %u:%llu:[%d]%d before: "
1816                                         "nearest %p\n",
1817                                         le32_to_cpu(newext->ee_block),
1818                                         ext4_ext_pblock(newext),
1819                                         ext4_ext_is_uninitialized(newext),
1820                                         ext4_ext_get_actual_len(newext),
1821                                         nearex);
1822                         nearex++;
1823                 } else {
1824                         /* Insert before */
1825                         BUG_ON(newext->ee_block == nearex->ee_block);
1826                         ext_debug("insert %u:%llu:[%d]%d after: "
1827                                         "nearest %p\n",
1828                                         le32_to_cpu(newext->ee_block),
1829                                         ext4_ext_pblock(newext),
1830                                         ext4_ext_is_uninitialized(newext),
1831                                         ext4_ext_get_actual_len(newext),
1832                                         nearex);
1833                 }
1834                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
1835                 if (len > 0) {
1836                         ext_debug("insert %u:%llu:[%d]%d: "
1837                                         "move %d extents from 0x%p to 0x%p\n",
1838                                         le32_to_cpu(newext->ee_block),
1839                                         ext4_ext_pblock(newext),
1840                                         ext4_ext_is_uninitialized(newext),
1841                                         ext4_ext_get_actual_len(newext),
1842                                         len, nearex, nearex + 1);
1843                         memmove(nearex + 1, nearex,
1844                                 len * sizeof(struct ext4_extent));
1845                 }
1846         }
1847
1848         le16_add_cpu(&eh->eh_entries, 1);
1849         path[depth].p_ext = nearex;
1850         nearex->ee_block = newext->ee_block;
1851         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1852         nearex->ee_len = newext->ee_len;
1853
1854 merge:
1855         /* try to merge extents to the right */
1856         if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1857                 ext4_ext_try_to_merge(inode, path, nearex);
1858
1859         /* try to merge extents to the left */
1860
1861         /* time to correct all indexes above */
1862         err = ext4_ext_correct_indexes(handle, inode, path);
1863         if (err)
1864                 goto cleanup;
1865
1866         err = ext4_ext_dirty(handle, inode, path + depth);
1867
1868 cleanup:
1869         if (npath) {
1870                 ext4_ext_drop_refs(npath);
1871                 kfree(npath);
1872         }
1873         ext4_ext_invalidate_cache(inode);
1874         return err;
1875 }
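/*
 * Big picture of the function above, on made-up numbers: inserting
 * 108..111 next to an existing extent 100..107 that is physically
 * contiguous with it takes the fast path and simply grows ee_len from
 * 8 to 12; inserting 300..303 into a leaf with free slots shifts any
 * later extents right with memmove() and fills the gap; only when the
 * leaf (and the next leaf) is full does it fall back to
 * ext4_ext_create_new_leaf() to grow the tree.
 */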
1876
1877 static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1878                                ext4_lblk_t num, ext_prepare_callback func,
1879                                void *cbdata)
1880 {
1881         struct ext4_ext_path *path = NULL;
1882         struct ext4_ext_cache cbex;
1883         struct ext4_extent *ex;
1884         ext4_lblk_t next, start = 0, end = 0;
1885         ext4_lblk_t last = block + num;
1886         int depth, exists, err = 0;
1887
1888         BUG_ON(func == NULL);
1889         BUG_ON(inode == NULL);
1890
1891         while (block < last && block != EXT_MAX_BLOCKS) {
1892                 num = last - block;
1893                 /* find extent for this block */
1894                 down_read(&EXT4_I(inode)->i_data_sem);
1895                 path = ext4_ext_find_extent(inode, block, path);
1896                 up_read(&EXT4_I(inode)->i_data_sem);
1897                 if (IS_ERR(path)) {
1898                         err = PTR_ERR(path);
1899                         path = NULL;
1900                         break;
1901                 }
1902
1903                 depth = ext_depth(inode);
1904                 if (unlikely(path[depth].p_hdr == NULL)) {
1905                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1906                         err = -EIO;
1907                         break;
1908                 }
1909                 ex = path[depth].p_ext;
1910                 next = ext4_ext_next_allocated_block(path);
1911
1912                 exists = 0;
1913                 if (!ex) {
1914                         /* there is no extent yet, so try to allocate
1915                          * all requested space */
1916                         start = block;
1917                         end = block + num;
1918                 } else if (le32_to_cpu(ex->ee_block) > block) {
1919                         /* need to allocate space before found extent */
1920                         start = block;
1921                         end = le32_to_cpu(ex->ee_block);
1922                         if (block + num < end)
1923                                 end = block + num;
1924                 } else if (block >= le32_to_cpu(ex->ee_block)
1925                                         + ext4_ext_get_actual_len(ex)) {
1926                         /* need to allocate space after found extent */
1927                         start = block;
1928                         end = block + num;
1929                         if (end >= next)
1930                                 end = next;
1931                 } else if (block >= le32_to_cpu(ex->ee_block)) {
1932                         /*
1933                          * some part of requested space is covered
1934                          * by found extent
1935                          */
1936                         start = block;
1937                         end = le32_to_cpu(ex->ee_block)
1938                                 + ext4_ext_get_actual_len(ex);
1939                         if (block + num < end)
1940                                 end = block + num;
1941                         exists = 1;
1942                 } else {
1943                         BUG();
1944                 }
1945                 BUG_ON(end <= start);
1946
1947                 if (!exists) {
1948                         cbex.ec_block = start;
1949                         cbex.ec_len = end - start;
1950                         cbex.ec_start = 0;
1951                 } else {
1952                         cbex.ec_block = le32_to_cpu(ex->ee_block);
1953                         cbex.ec_len = ext4_ext_get_actual_len(ex);
1954                         cbex.ec_start = ext4_ext_pblock(ex);
1955                 }
1956
1957                 if (unlikely(cbex.ec_len == 0)) {
1958                         EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1959                         err = -EIO;
1960                         break;
1961                 }
1962                 err = func(inode, next, &cbex, ex, cbdata);
1963                 ext4_ext_drop_refs(path);
1964
1965                 if (err < 0)
1966                         break;
1967
1968                 if (err == EXT_REPEAT)
1969                         continue;
1970                 else if (err == EXT_BREAK) {
1971                         err = 0;
1972                         break;
1973                 }
1974
1975                 if (ext_depth(inode) != depth) {
1976                         /* depth was changed. we have to realloc path */
1977                         kfree(path);
1978                         path = NULL;
1979                 }
1980
1981                 block = cbex.ec_block + cbex.ec_len;
1982         }
1983
1984         if (path) {
1985                 ext4_ext_drop_refs(path);
1986                 kfree(path);
1987         }
1988
1989         return err;
1990 }
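/*
 * Shape of the walk above (illustrative): on every iteration the cursor
 * "block" either lands inside an extent (exists = 1, cbex describes the
 * mapped range) or in a gap before the next allocated block (exists = 0,
 * ec_start = 0).  The callback can answer EXT_REPEAT to retry the same
 * range, EXT_BREAK to stop early, or a negative error; fiemap is the
 * typical consumer of this interface.
 */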
1991
1992 static void
1993 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1994                         __u32 len, ext4_fsblk_t start)
1995 {
1996         struct ext4_ext_cache *cex;
1997         BUG_ON(len == 0);
1998         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1999         trace_ext4_ext_put_in_cache(inode, block, len, start);
2000         cex = &EXT4_I(inode)->i_cached_extent;
2001         cex->ec_block = block;
2002         cex->ec_len = len;
2003         cex->ec_start = start;
2004         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2005 }
2006
2007 /*
2008  * ext4_ext_put_gap_in_cache:
2009  * calculate boundaries of the gap that the requested block fits into
2010  * and cache this gap
2011  */
2012 static void
2013 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2014                                 ext4_lblk_t block)
2015 {
2016         int depth = ext_depth(inode);
2017         unsigned long len;
2018         ext4_lblk_t lblock;
2019         struct ext4_extent *ex;
2020
2021         ex = path[depth].p_ext;
2022         if (ex == NULL) {
2023                 /* there is no extent yet, so gap is [0;-] */
2024                 lblock = 0;
2025                 len = EXT_MAX_BLOCKS;
2026                 ext_debug("cache gap(whole file):");
2027         } else if (block < le32_to_cpu(ex->ee_block)) {
2028                 lblock = block;
2029                 len = le32_to_cpu(ex->ee_block) - block;
2030                 ext_debug("cache gap(before): %u [%u:%u]",
2031                                 block,
2032                                 le32_to_cpu(ex->ee_block),
2033                                  ext4_ext_get_actual_len(ex));
2034         } else if (block >= le32_to_cpu(ex->ee_block)
2035                         + ext4_ext_get_actual_len(ex)) {
2036                 ext4_lblk_t next;
2037                 lblock = le32_to_cpu(ex->ee_block)
2038                         + ext4_ext_get_actual_len(ex);
2039
2040                 next = ext4_ext_next_allocated_block(path);
2041                 ext_debug("cache gap(after): [%u:%u] %u",
2042                                 le32_to_cpu(ex->ee_block),
2043                                 ext4_ext_get_actual_len(ex),
2044                                 block);
2045                 BUG_ON(next == lblock);
2046                 len = next - lblock;
2047         } else {
2048                 lblock = len = 0;
2049                 BUG();
2050         }
2051
2052         ext_debug(" -> %u:%lu\n", lblock, len);
2053         ext4_ext_put_in_cache(inode, lblock, len, 0);
2054 }
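/*
 * Gap-cache example (hypothetical numbers): the file has extents at
 * logical blocks 100..109 and 200..219 and a lookup misses at block
 * 150.  The "after" branch computes lblock = 100 + 10 = 110 and
 * len = 200 - 110 = 90, so the hole [110, 200) is cached and any
 * further lookup in that range can be answered without walking the
 * tree again.
 */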
2055
2056 /*
2057  * ext4_ext_check_cache()
2058  * Checks to see if the given block is in the cache.
2059  * If it is, the cached extent is stored in the given
2060  * cache extent pointer.  If the cached extent is a hole,
2061  * this routine should be used instead of
2062  * ext4_ext_in_cache if the calling function needs to
2063  * know the size of the hole.
2064  *
2065  * @inode: The file's inode
2066  * @block: The block to look for in the cache
2067  * @ex:    Pointer where the cached extent will be stored
2068  *         if it contains block
2069  *
2070  * Return 0 if cache is invalid; 1 if the cache is valid
2071  */
2072 static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2073         struct ext4_ext_cache *ex){
2074         struct ext4_ext_cache *cex;
2075         struct ext4_sb_info *sbi;
2076         int ret = 0;
2077
2078         /*
2079          * We borrow i_block_reservation_lock to protect i_cached_extent
2080          */
2081         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2082         cex = &EXT4_I(inode)->i_cached_extent;
2083         sbi = EXT4_SB(inode->i_sb);
2084
2085         /* does the cache hold valid data? */
2086         if (cex->ec_len == 0)
2087                 goto errout;
2088
2089         if (in_range(block, cex->ec_block, cex->ec_len)) {
2090                 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2091                 ext_debug("%u cached by %u:%u:%llu\n",
2092                                 block,
2093                                 cex->ec_block, cex->ec_len, cex->ec_start);
2094                 ret = 1;
2095         }
2096 errout:
2097         trace_ext4_ext_in_cache(inode, block, ret);
2098         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2099         return ret;
2100 }
2101
2102 /*
2103  * ext4_ext_in_cache()
2104  * Checks to see if the given block is in the cache.
2105  * If it is, the cached extent is stored in the given
2106  * extent pointer.
2107  *
2108  * @inode: The file's inode
2109  * @block: The block to look for in the cache
2110  * @ex:    Pointer where the cached extent will be stored
2111  *         if it contains block
2112  *
2113  * Return 0 if cache is invalid; 1 if the cache is valid
2114  */
2115 static int
2116 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2117                         struct ext4_extent *ex)
2118 {
2119         struct ext4_ext_cache cex;
2120         int ret = 0;
2121
2122         if (ext4_ext_check_cache(inode, block, &cex)) {
2123                 ex->ee_block = cpu_to_le32(cex.ec_block);
2124                 ext4_ext_store_pblock(ex, cex.ec_start);
2125                 ex->ee_len = cpu_to_le16(cex.ec_len);
2126                 ret = 1;
2127         }
2128
2129         return ret;
2130 }
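/*
 * Usage sketch (hypothetical): with the hole [110, 200) cached as in
 * the gap example above, ext4_ext_check_cache(inode, 150, &cex) returns
 * 1 with cex.ec_start == 0, which is how hole-punching callers learn
 * the hole's size; ext4_ext_in_cache() returns the same hit repackaged
 * as an on-disk style struct ext4_extent.
 */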
2131
2132
2133 /*
2134  * ext4_ext_rm_idx:
2135  * removes index from the index block.
2136  */
2137 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2138                         struct ext4_ext_path *path, int depth)
2139 {
2140         int err;
2141         ext4_fsblk_t leaf;
2142
2143         /* free index block */
2144         depth--;
2145         path = path + depth;
2146         leaf = ext4_idx_pblock(path->p_idx);
2147         if (unlikely(path->p_hdr->eh_entries == 0)) {
2148                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2149                 return -EIO;
2150         }
2151         err = ext4_ext_get_access(handle, inode, path);
2152         if (err)
2153                 return err;
2154
2155         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2156                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2157                 len *= sizeof(struct ext4_extent_idx);
2158                 memmove(path->p_idx, path->p_idx + 1, len);
2159         }
2160
2161         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2162         err = ext4_ext_dirty(handle, inode, path);
2163         if (err)
2164                 return err;
2165         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2166         trace_ext4_ext_rm_idx(inode, leaf);
2167
2168         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2169                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2170
2171         while (--depth >= 0) {
2172                 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2173                         break;
2174                 path--;
2175                 err = ext4_ext_get_access(handle, inode, path);
2176                 if (err)
2177                         break;
2178                 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2179                 err = ext4_ext_dirty(handle, inode, path);
2180                 if (err)
2181                         break;
2182         }
2183         return err;
2184 }
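/*
 * Example (hypothetical numbers): an index block holds entries for
 * leaves starting at blocks 0, 100 and 200, and the leaf at 100 has
 * just been emptied.  The code above memmove()s the entry for 200 over
 * the entry for 100, drops eh_entries from 3 to 2 and frees the leaf's
 * physical block; had the removed entry been the first one, the while
 * loop would also push the new first ei_block up to the parent levels.
 */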
2185
2186 /*
2187  * ext4_ext_calc_credits_for_single_extent:
2188  * This routine returns the maximum number of credits needed to insert
2189  * an extent into the extent tree.
2190  * When the actual path is passed, the caller should calculate the
2191  * credits under i_data_sem.
2192  */
2193 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2194                                                 struct ext4_ext_path *path)
2195 {
2196         if (path) {
2197                 int depth = ext_depth(inode);
2198                 int ret = 0;
2199
2200                 /* probably there is space in leaf? */
2201                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2202                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2203
2204                         /*
2205                          *  There is some space in the leaf, so no
2206                          *  need to account for the leaf block credit.
2207                          *
2208                          *  Bitmaps, block group descriptor blocks
2209                          *  and other metadata blocks still need to be
2210                          *  accounted for.
2211                          */
2212                         /* 1 bitmap, 1 block group descriptor */
2213                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2214                         return ret;
2215                 }
2216         }
2217
2218         return ext4_chunk_trans_blocks(inode, nrblocks);
2219 }
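/*
 * Credit arithmetic example (hedged; the exact constants depend on the
 * superblock): if the leaf already has a free slot, the routine charges
 * 2 blocks (one bitmap plus one group descriptor) on top of
 * EXT4_META_TRANS_BLOCKS(sb), because no extent-tree block needs to be
 * allocated.  Without a path, or with a full leaf, it defers to
 * ext4_chunk_trans_blocks(), which sizes the transaction for the
 * general case, including possible tree growth.
 */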
2220
2221 /*
2222  * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
2223  *
2224  * If nrblocks fit in a single extent (chunk flag is 1), then in the
2225  * worst case each tree level's index/leaf needs to be changed.  If the
2226  * tree splits due to inserting a new extent, then the old tree's
2227  * index/leaf blocks need to be updated too.
2228  *
2229  * If the nrblocks are discontiguous, they could cause
2230  * the whole tree to split more than once, but this is really rare.
2231  */
2232 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2233 {
2234         int index;
2235         int depth = ext_depth(inode);
2236
2237         if (chunk)
2238                 index = depth * 2;
2239         else
2240                 index = depth * 3;
2241
2242         return index;
2243 }
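/*
 * Plugging in numbers: for a tree of depth 2, a single contiguous chunk
 * (chunk != 0) is charged 2 * 2 = 4 index/leaf blocks, while a
 * potentially discontiguous request is charged 2 * 3 = 6, the extra
 * factor covering the chance that more than one path through the tree
 * has to be modified.
 */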
2244
2245 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2246                               struct ext4_extent *ex,
2247                               ext4_fsblk_t *partial_cluster,
2248                               ext4_lblk_t from, ext4_lblk_t to)
2249 {
2250         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2251         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2252         ext4_fsblk_t pblk;
2253         int flags = EXT4_FREE_BLOCKS_FORGET;
2254
2255         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2256                 flags |= EXT4_FREE_BLOCKS_METADATA;
2257         /*
2258          * For bigalloc file systems, we never free a partial cluster
2259          * at the beginning of the extent.  Instead, we make a note
2260          * that we tried freeing the cluster, and check to see if we
2261          * need to free it on a subsequent call to ext4_remove_blocks,
2262          * or at the end of the ext4_truncate() operation.
2263          */
2264         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2265
2266         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2267         /*
2268          * If we have a partial cluster, and it's different from the
2269          * cluster of the last block, we need to explicitly free the
2270          * partial cluster here.
2271          */
2272         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2273         if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2274                 ext4_free_blocks(handle, inode, NULL,
2275                                  EXT4_C2B(sbi, *partial_cluster),
2276                                  sbi->s_cluster_ratio, flags);
2277                 *partial_cluster = 0;
2278         }
2279
2280 #ifdef EXTENTS_STATS
2281         {
2282                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2283                 spin_lock(&sbi->s_ext_stats_lock);
2284                 sbi->s_ext_blocks += ee_len;
2285                 sbi->s_ext_extents++;
2286                 if (ee_len < sbi->s_ext_min)
2287                         sbi->s_ext_min = ee_len;
2288                 if (ee_len > sbi->s_ext_max)
2289                         sbi->s_ext_max = ee_len;
2290                 if (ext_depth(inode) > sbi->s_depth_max)
2291                         sbi->s_depth_max = ext_depth(inode);
2292                 spin_unlock(&sbi->s_ext_stats_lock);
2293         }
2294 #endif
2295         if (from >= le32_to_cpu(ex->ee_block)
2296             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2297                 /* tail removal */
2298                 ext4_lblk_t num;
2299
2300                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2301                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2302                 ext_debug("free last %u blocks starting %llu\n", num, pblk);
2303                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2304                 /*
2305                  * If the block range to be freed didn't start at the
2306                  * beginning of a cluster, and we removed the entire
2307                  * extent, save the partial cluster here, since we
2308                  * might need to free it if we determine that the
2309                  * truncate operation has removed all of the blocks in
2310                  * the cluster.
2311                  */
2312                 if (EXT4_PBLK_COFF(sbi, pblk) &&
2313                     (ee_len == num))
2314                         *partial_cluster = EXT4_B2C(sbi, pblk);
2315                 else
2316                         *partial_cluster = 0;
2317         } else if (from == le32_to_cpu(ex->ee_block)
2318                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2319                 /* head removal */
2320                 ext4_lblk_t num;
2321                 ext4_fsblk_t start;
2322
2323                 num = to - from;
2324                 start = ext4_ext_pblock(ex);
2325
2326                 ext_debug("free first %u blocks starting %llu\n", num, start);
2327                 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2328
2329         } else {
2330                 printk(KERN_INFO "strange request: removal(2) "
2331                                 "%u-%u from %u:%u\n",
2332                                 from, to, le32_to_cpu(ex->ee_block), ee_len);
2333         }
2334         return 0;
2335 }
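/*
 * Tail-removal example (hypothetical numbers): ex covers logical blocks
 * 200..209 at physical blocks 8000..8009 and the caller asks to remove
 * from = 205 .. to = 209.  Then num = 200 + 10 - 205 = 5 and
 * pblk = 8000 + 10 - 5 = 8005, so physical blocks 8005..8009 are freed
 * and the extent is later shrunk to 5 blocks by the caller.
 */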
2336
2337
2338 /*
2339  * ext4_ext_rm_leaf(): removes the extents associated with the
2340  * blocks appearing between "start" and "end", and splits the extents
2341  * if "start" and "end" appear in the same extent
2342  *
2343  * @handle: The journal handle
2344  * @inode:  The file's inode
2345  * @path:   The path to the leaf
2346  * @start:  The first block to remove
2347  * @end:    The last block to remove
2348  */
2349 static int
2350 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2351                  struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2352                  ext4_lblk_t start, ext4_lblk_t end)
2353 {
2354         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2355         int err = 0, correct_index = 0;
2356         int depth = ext_depth(inode), credits;
2357         struct ext4_extent_header *eh;
2358         ext4_lblk_t a, b;
2359         unsigned num;
2360         ext4_lblk_t ex_ee_block;
2361         unsigned short ex_ee_len;
2362         unsigned uninitialized = 0;
2363         struct ext4_extent *ex;
2364
2365         /* the header must already have been checked in ext4_ext_remove_space() */
2366         ext_debug("truncate since %u in leaf to %u\n", start, end);
2367         if (!path[depth].p_hdr)
2368                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2369         eh = path[depth].p_hdr;
2370         if (unlikely(path[depth].p_hdr == NULL)) {
2371                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2372                 return -EIO;
2373         }
2374         /* find where to start removing */
2375         ex = EXT_LAST_EXTENT(eh);
2376
2377         ex_ee_block = le32_to_cpu(ex->ee_block);
2378         ex_ee_len = ext4_ext_get_actual_len(ex);
2379
2380         /*
2381          * If we're starting with an extent other than the last one in the
2382          * node, we need to see if it shares a cluster with the extent to
2383          * the right (towards the end of the file). If its leftmost cluster
2384          * is this extent's rightmost cluster and it is not cluster aligned,
2385          * we'll mark it as a partial that is not to be deallocated.
2386          */
2387
2388         if (ex != EXT_LAST_EXTENT(eh)) {
2389                 ext4_fsblk_t current_pblk, right_pblk;
2390                 long long current_cluster, right_cluster;
2391
2392                 current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2393                 current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
2394                 right_pblk = ext4_ext_pblock(ex + 1);
2395                 right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
2396                 if (current_cluster == right_cluster &&
2397                         EXT4_PBLK_COFF(sbi, right_pblk))
2398                         *partial_cluster = -right_cluster;
2399         }
2400
2401         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2402
2403         while (ex >= EXT_FIRST_EXTENT(eh) &&
2404                         ex_ee_block + ex_ee_len > start) {
2405
2406                 if (ext4_ext_is_uninitialized(ex))
2407                         uninitialized = 1;
2408                 else
2409                         uninitialized = 0;
2410
2411                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2412                          uninitialized, ex_ee_len);
2413                 path[depth].p_ext = ex;
2414
2415                 a = ex_ee_block > start ? ex_ee_block : start;
2416                 b = ex_ee_block+ex_ee_len - 1 < end ?
2417                         ex_ee_block+ex_ee_len - 1 : end;
2418
2419                 ext_debug("  border %u:%u\n", a, b);
2420
2421                 /* If this extent is beyond the end of the hole, skip it */
2422                 if (end < ex_ee_block) {
2423                         ex--;
2424                         ex_ee_block = le32_to_cpu(ex->ee_block);
2425                         ex_ee_len = ext4_ext_get_actual_len(ex);
2426                         continue;
2427                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2428                         EXT4_ERROR_INODE(inode,"  bad truncate %u:%u\n",
2429                                          start, end);
2430                         err = -EIO;
2431                         goto out;
2432                 } else if (a != ex_ee_block) {
2433                         /* remove tail of the extent */
2434                         num = a - ex_ee_block;
2435                 } else {
2436                         /* remove whole extent: excellent! */
2437                         num = 0;
2438                 }
2439                 /*
2440                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2441                  * descriptor) for each block group; assume two block
2442                  * groups plus ex_ee_len/blocks_per_block_group for
2443                  * the worst case
2444                  */
2445                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2446                 if (ex == EXT_FIRST_EXTENT(eh)) {
2447                         correct_index = 1;
2448                         credits += (ext_depth(inode)) + 1;
2449                 }
2450                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2451
2452                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2453                 if (err)
2454                         goto out;
2455
2456                 err = ext4_ext_get_access(handle, inode, path + depth);
2457                 if (err)
2458                         goto out;
2459
2460                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2461                                          a, b);
2462                 if (err)
2463                         goto out;
2464
2465                 if (num == 0)
2466                         /* this extent is removed; mark slot entirely unused */
2467                         ext4_ext_store_pblock(ex, 0);
2468
2469                 ex->ee_len = cpu_to_le16(num);
2470                 /*
2471                  * Do not mark uninitialized if all the blocks in the
2472                  * extent have been removed.
2473                  */
2474                 if (uninitialized && num)
2475                         ext4_ext_mark_uninitialized(ex);
2476                 /*
2477                  * If the extent was completely released,
2478                  * we need to remove it from the leaf
2479                  */
2480                 if (num == 0) {
2481                         if (end != EXT_MAX_BLOCKS - 1) {
2482                                 /*
2483                                  * For hole punching, we need to scoot all the
2484                                  * extents up when an extent is removed so that
2485                                  * we don't have blank extents in the middle
2486                                  */
2487                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2488                                         sizeof(struct ext4_extent));
2489
2490                                 /* Now get rid of the one at the end */
2491                                 memset(EXT_LAST_EXTENT(eh), 0,
2492                                         sizeof(struct ext4_extent));
2493                         }
2494                         le16_add_cpu(&eh->eh_entries, -1);
2495                 } else
2496                         *partial_cluster = 0;
2497
2498                 err = ext4_ext_dirty(handle, inode, path + depth);
2499                 if (err)
2500                         goto out;
2501
2502                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2503                                 ext4_ext_pblock(ex));
2504                 ex--;
2505                 ex_ee_block = le32_to_cpu(ex->ee_block);
2506                 ex_ee_len = ext4_ext_get_actual_len(ex);
2507         }
2508
2509         if (correct_index && eh->eh_entries)
2510                 err = ext4_ext_correct_indexes(handle, inode, path);
2511
2512         /*
2513          * If there is still an entry in the leaf node, check to see if
2514          * it references the partial cluster.  This is the only place
2515          * where it could; if it doesn't, we can free the cluster.
2516          */
2517         if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2518             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2519              *partial_cluster)) {
2520                 int flags = EXT4_FREE_BLOCKS_FORGET;
2521
2522                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2523                         flags |= EXT4_FREE_BLOCKS_METADATA;
2524
2525                 ext4_free_blocks(handle, inode, NULL,
2526                                  EXT4_C2B(sbi, *partial_cluster),
2527                                  sbi->s_cluster_ratio, flags);
2528                 *partial_cluster = 0;
2529         }
2530
2531         /* if this leaf is free, then we should
2532          * remove it from index block above */
2533         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2534                 err = ext4_ext_rm_idx(handle, inode, path, depth);
2535
2536 out:
2537         return err;
2538 }
2539
2540 /*
2541  * ext4_ext_more_to_rm:
2542  * returns 1 if the current index has to be freed (even partially)
2543  */
2544 static int
2545 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2546 {
2547         BUG_ON(path->p_idx == NULL);
2548
2549         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2550                 return 0;
2551
2552         /*
2553          * if truncation at a deeper level happened, it wasn't partial,
2554          * so we have to consider the current index for truncation
2555          */
2556         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2557                 return 0;
2558         return 1;
2559 }
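/*
 * The p_block trick above, by example (hypothetical count): before
 * descending into a child block the caller stashes the parent's
 * eh_entries (say 5) in p_block.  When control returns, eh_entries
 * still reading 5 means the child was only partially truncated (the
 * scan hit "start"), so this level is finished and 0 is returned;
 * eh_entries now reading 4 means a whole child was removed, so 1 is
 * returned and the current index still has to be processed.
 */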
2560
2561 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2562                                  ext4_lblk_t end)
2563 {
2564         struct super_block *sb = inode->i_sb;
2565         int depth = ext_depth(inode);
2566         struct ext4_ext_path *path = NULL;
2567         ext4_fsblk_t partial_cluster = 0;
2568         handle_t *handle;
2569         int i = 0, err;
2570
2571         ext_debug("truncate since %u to %u\n", start, end);
2572
2573         /* probably first extent we're gonna free will be last in block */
2574         handle = ext4_journal_start(inode, depth + 1);
2575         if (IS_ERR(handle))
2576                 return PTR_ERR(handle);
2577
2578 again:
2579         ext4_ext_invalidate_cache(inode);
2580
2581         trace_ext4_ext_remove_space(inode, start, depth);
2582
2583         /*
2584          * Check if we are removing extents inside the extent tree. If that
2585          * is the case, we are going to punch a hole inside the extent tree
2586          * so we have to check whether we need to split the extent covering
2587          * the last block to remove so we can easily remove the part of it
2588          * in ext4_ext_rm_leaf().
2589          */
2590         if (end < EXT_MAX_BLOCKS - 1) {
2591                 struct ext4_extent *ex;
2592                 ext4_lblk_t ee_block;
2593
2594                 /* find extent for this block */
2595                 path = ext4_ext_find_extent(inode, end, NULL);
2596                 if (IS_ERR(path)) {
2597                         ext4_journal_stop(handle);
2598                         return PTR_ERR(path);
2599                 }
2600                 depth = ext_depth(inode);
2601                 ex = path[depth].p_ext;
2602                 if (!ex) {
2603                         ext4_ext_drop_refs(path);
2604                         kfree(path);
2605                         path = NULL;
2606                         goto cont;
2607                 }
2608
2609                 ee_block = le32_to_cpu(ex->ee_block);
2610
2611                 /*
2612                  * See if the last block is inside the extent, if so split
2613                  * the extent at 'end' block so we can easily remove the
2614                  * tail of the first part of the split extent in
2615                  * ext4_ext_rm_leaf().
2616                  */
2617                 if (end >= ee_block &&
2618                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2619                         int split_flag = 0;
2620
2621                         if (ext4_ext_is_uninitialized(ex))
2622                                 split_flag = EXT4_EXT_MARK_UNINIT1 |
2623                                              EXT4_EXT_MARK_UNINIT2;
2624
2625                         /*
2626                          * Split the extent in two so that 'end' is the last
2627                          * block in the first new extent
2628                          */
2629                         err = ext4_split_extent_at(handle, inode, path,
2630                                                 end + 1, split_flag,
2631                                                 EXT4_GET_BLOCKS_PRE_IO |
2632                                                 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2633
2634                         if (err < 0)
2635                                 goto out;
2636                 }
2637         }
2638 cont:
2639
2640         /*
2641          * We start scanning from the right side, freeing all the blocks
2642          * after i_size and walking into the tree depth-wise.
2643          */
2644         depth = ext_depth(inode);
2645         if (path) {
2646                 int k = i = depth;
2647                 while (--k > 0)
2648                         path[k].p_block =
2649                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2650         } else {
2651                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2652                                GFP_NOFS);
2653                 if (path == NULL) {
2654                         ext4_journal_stop(handle);
2655                         return -ENOMEM;
2656                 }
2657                 path[0].p_depth = depth;
2658                 path[0].p_hdr = ext_inode_hdr(inode);
2659                 i = 0;
2660
2661                 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2662                         err = -EIO;
2663                         goto out;
2664                 }
2665         }
2666         err = 0;
2667
2668         while (i >= 0 && err == 0) {
2669                 if (i == depth) {
2670                         /* this is leaf block */
2671                         err = ext4_ext_rm_leaf(handle, inode, path,
2672                                                &partial_cluster, start,
2673                                                end);
2674                         /* root level has p_bh == NULL, brelse() eats this */
2675                         brelse(path[i].p_bh);
2676                         path[i].p_bh = NULL;
2677                         i--;
2678                         continue;
2679                 }
2680
2681                 /* this is index block */
2682                 if (!path[i].p_hdr) {
2683                         ext_debug("initialize header\n");
2684                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2685                 }
2686
2687                 if (!path[i].p_idx) {
2688                         /* this level hasn't been touched yet */
2689                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2690                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2691                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2692                                   path[i].p_hdr,
2693                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2694                 } else {
2695                         /* we were already here, see at next index */
2696                         path[i].p_idx--;
2697                 }
2698
2699                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2700                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2701                                 path[i].p_idx);
2702                 if (ext4_ext_more_to_rm(path + i)) {
2703                         struct buffer_head *bh;
2704                         /* go to the next level */
2705                         ext_debug("move to level %d (block %llu)\n",
2706                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2707                         memset(path + i + 1, 0, sizeof(*path));
2708                         bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2709                         if (!bh) {
2710                                 /* should we reset i_size? */
2711                                 err = -EIO;
2712                                 break;
2713                         }
2714                         if (WARN_ON(i + 1 > depth)) {
2715                                 err = -EIO;
2716                                 break;
2717                         }
2718                         if (ext4_ext_check(inode, ext_block_hdr(bh),
2719                                                         depth - i - 1)) {
2720                                 err = -EIO;
2721                                 break;
2722                         }
2723                         path[i + 1].p_bh = bh;
2724
2725                         /* save actual number of indexes since this
2726                          * number is changed at the next iteration */
2727                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2728                         i++;
2729                 } else {
2730                         /* we finished processing this index, go up */
2731                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2732                                 /* index is empty, remove it;
2733                                  * the handle must already be prepared by
2734                                  * ext4_ext_rm_leaf() above */
2735                                 err = ext4_ext_rm_idx(handle, inode, path, i);
2736                         }
2737                         /* root level has p_bh == NULL, brelse() eats this */
2738                         brelse(path[i].p_bh);
2739                         path[i].p_bh = NULL;
2740                         i--;
2741                         ext_debug("return to level %d\n", i);
2742                 }
2743         }
2744
2745         trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2746                         path->p_hdr->eh_entries);
2747
2748         /* If we still have something in the partial cluster and we have removed
2749          * even the first extent, then we should free the blocks in the partial
2750          * cluster as well. */
2751         if (partial_cluster && path->p_hdr->eh_entries == 0) {
2752                 int flags = EXT4_FREE_BLOCKS_FORGET;
2753
2754                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2755                         flags |= EXT4_FREE_BLOCKS_METADATA;
2756
2757                 ext4_free_blocks(handle, inode, NULL,
2758                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
2759                                  EXT4_SB(sb)->s_cluster_ratio, flags);
2760                 partial_cluster = 0;
2761         }
2762
2763         /* TODO: flexible tree reduction should be here */
2764         if (path->p_hdr->eh_entries == 0) {
2765                 /*
2766                  * truncate to zero freed all the tree,
2767                  * so we need to correct eh_depth
2768                  */
2769                 err = ext4_ext_get_access(handle, inode, path);
2770                 if (err == 0) {
2771                         ext_inode_hdr(inode)->eh_depth = 0;
2772                         ext_inode_hdr(inode)->eh_max =
2773                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
2774                         err = ext4_ext_dirty(handle, inode, path);
2775                 }
2776         }
2777 out:
2778         ext4_ext_drop_refs(path);
2779         kfree(path);
2780         if (err == -EAGAIN) {
2781                 path = NULL;
2782                 goto again;
2783         }
2784         ext4_journal_stop(handle);
2785
2786         return err;
2787 }
2788
2789 /*
2790  * called at mount time
2791  */
2792 void ext4_ext_init(struct super_block *sb)
2793 {
2794         /*
2795          * possible initialization would be here
2796          */
2797
2798         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2799 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2800                 printk(KERN_INFO "EXT4-fs: file extents enabled");
2801 #ifdef AGGRESSIVE_TEST
2802                 printk(", aggressive tests");
2803 #endif
2804 #ifdef CHECK_BINSEARCH
2805                 printk(", check binsearch");
2806 #endif
2807 #ifdef EXTENTS_STATS
2808                 printk(", stats");
2809 #endif
2810                 printk("\n");
2811 #endif
2812 #ifdef EXTENTS_STATS
2813                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2814                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2815                 EXT4_SB(sb)->s_ext_max = 0;
2816 #endif
2817         }
2818 }
2819
2820 /*
2821  * called at umount time
2822  */
2823 void ext4_ext_release(struct super_block *sb)
2824 {
2825         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2826                 return;
2827
2828 #ifdef EXTENTS_STATS
2829         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2830                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2831                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2832                         sbi->s_ext_blocks, sbi->s_ext_extents,
2833                         sbi->s_ext_blocks / sbi->s_ext_extents);
2834                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2835                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2836         }
2837 #endif
2838 }
2839
2840 /* FIXME!! we need to try to merge to left or right after zero-out  */
2841 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2842 {
2843         ext4_fsblk_t ee_pblock;
2844         unsigned int ee_len;
2845         int ret;
2846
2847         ee_len    = ext4_ext_get_actual_len(ex);
2848         ee_pblock = ext4_ext_pblock(ex);
2849
2850         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2851         if (ret > 0)
2852                 ret = 0;
2853
2854         return ret;
2855 }
2856
2857 /*
2858  * ext4_split_extent_at() splits an extent at a given block.
2859  *
2860  * @handle: the journal handle
2861  * @inode: the file inode
2862  * @path: the path to the extent
2863  * @split: the logical block where the extent is split.
2864  * @split_flag: indicates whether the extent can be zeroed out if the split
2865  *               fails, and the states (init or uninit) of the new extents.
2866  * @flags: flags used to insert the new extent into the extent tree.
2867  *
2868  *
2869  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
2870  * of which are determined by split_flag.
2871  *
2872  * There are two cases:
2873  *  a> the extent is split into two extents.
2874  *  b> no split is needed, and the extent is just marked.
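      *
      * An illustrative example with assumed numbers: an extent covering
      * logical blocks [100, 120) starting at physical block 5000, split at
      * @split == 108, becomes [100, 108) -> 5000 and [108, 120) -> 5008,
      * since newblock = @split - ee_block + ext4_ext_pblock(ex).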
2875  *
2876  * return 0 on success.
2877  */
2878 static int ext4_split_extent_at(handle_t *handle,
2879                              struct inode *inode,
2880                              struct ext4_ext_path *path,
2881                              ext4_lblk_t split,
2882                              int split_flag,
2883                              int flags)
2884 {
2885         ext4_fsblk_t newblock;
2886         ext4_lblk_t ee_block;
2887         struct ext4_extent *ex, newex, orig_ex;
2888         struct ext4_extent *ex2 = NULL;
2889         unsigned int ee_len, depth;
2890         int err = 0;
2891
2892         BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
2893                (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
2894
2895         ext_debug("ext4_split_extent_at: inode %lu, logical "
2896                 "block %llu\n", inode->i_ino, (unsigned long long)split);
2897
2898         ext4_ext_show_leaf(inode, path);
2899
2900         depth = ext_depth(inode);
2901         ex = path[depth].p_ext;
2902         ee_block = le32_to_cpu(ex->ee_block);
2903         ee_len = ext4_ext_get_actual_len(ex);
2904         newblock = split - ee_block + ext4_ext_pblock(ex);
2905
2906         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2907
2908         err = ext4_ext_get_access(handle, inode, path + depth);
2909         if (err)
2910                 goto out;
2911
2912         if (split == ee_block) {
2913                 /*
2914                  * case b: block @split is the block that the extent begins with,
2915                  * so we just change the state of the extent and no split
2916                  * is needed.
2917                  */
2918                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2919                         ext4_ext_mark_uninitialized(ex);
2920                 else
2921                         ext4_ext_mark_initialized(ex);
2922
2923                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2924                         ext4_ext_try_to_merge(inode, path, ex);
2925
2926                 err = ext4_ext_dirty(handle, inode, path + depth);
2927                 goto out;
2928         }
2929
2930         /* case a */
2931         memcpy(&orig_ex, ex, sizeof(orig_ex));
2932         ex->ee_len = cpu_to_le16(split - ee_block);
2933         if (split_flag & EXT4_EXT_MARK_UNINIT1)
2934                 ext4_ext_mark_uninitialized(ex);
2935
2936         /*
2937          * path may lead to a new leaf, not to the original leaf any more
2938          * after ext4_ext_insert_extent() returns.
2939          */
2940         err = ext4_ext_dirty(handle, inode, path + depth);
2941         if (err)
2942                 goto fix_extent_len;
2943
2944         ex2 = &newex;
2945         ex2->ee_block = cpu_to_le32(split);
2946         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
2947         ext4_ext_store_pblock(ex2, newblock);
2948         if (split_flag & EXT4_EXT_MARK_UNINIT2)
2949                 ext4_ext_mark_uninitialized(ex2);
2950
2951         err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2952         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2953                 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
2954                         if (split_flag & EXT4_EXT_DATA_VALID1)
2955                                 err = ext4_ext_zeroout(inode, ex2);
2956                         else
2957                                 err = ext4_ext_zeroout(inode, ex);
2958                 } else
2959                         err = ext4_ext_zeroout(inode, &orig_ex);
2960
2961                 if (err)
2962                         goto fix_extent_len;
2963                 /* update the extent length and mark as initialized */
2964                 ex->ee_len = cpu_to_le16(ee_len);
2965                 ext4_ext_try_to_merge(inode, path, ex);
2966                 err = ext4_ext_dirty(handle, inode, path + depth);
2967                 goto out;
2968         } else if (err)
2969                 goto fix_extent_len;
2970
2971 out:
2972         ext4_ext_show_leaf(inode, path);
2973         return err;
2974
2975 fix_extent_len:
2976         ex->ee_len = orig_ex.ee_len;
2977         ext4_ext_dirty(handle, inode, path + depth);
2978         return err;
2979 }
2980
2981 /*
2982  * ext4_split_extent() splits an extent and marks the extent which is covered
2983  * by @map as split_flag indicates
2984  *
2985  * It may result in splitting the extent into multiple extents (up to three)
2986  * There are three possibilities:
2987  *   a> There is no split required
2988  *   b> Splits in two extents: Split is happening at either end of the extent
2989  *   c> Splits in three extents: Someone is splitting in the middle of the extent
2990  *
2991  */
2992 static int ext4_split_extent(handle_t *handle,
2993                               struct inode *inode,
2994                               struct ext4_ext_path *path,
2995                               struct ext4_map_blocks *map,
2996                               int split_flag,
2997                               int flags)
2998 {
2999         ext4_lblk_t ee_block;
3000         struct ext4_extent *ex;
3001         unsigned int ee_len, depth;
3002         int err = 0;
3003         int uninitialized;
3004         int split_flag1, flags1;
3005         int allocated = map->m_len;
3006
3007         depth = ext_depth(inode);
3008         ex = path[depth].p_ext;
3009         ee_block = le32_to_cpu(ex->ee_block);
3010         ee_len = ext4_ext_get_actual_len(ex);
3011         uninitialized = ext4_ext_is_uninitialized(ex);
3012
3013         if (map->m_lblk + map->m_len < ee_block + ee_len) {
3014                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3015                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3016                 if (uninitialized)
3017                         split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3018                                        EXT4_EXT_MARK_UNINIT2;
3019                 if (split_flag & EXT4_EXT_DATA_VALID2)
3020                         split_flag1 |= EXT4_EXT_DATA_VALID1;
3021                 err = ext4_split_extent_at(handle, inode, path,
3022                                 map->m_lblk + map->m_len, split_flag1, flags1);
3023                 if (err)
3024                         goto out;
3025         } else {
3026                 allocated = ee_len - (map->m_lblk - ee_block);
3027         }
3028
3029         ext4_ext_drop_refs(path);
3030         path = ext4_ext_find_extent(inode, map->m_lblk, path);
3031         if (IS_ERR(path))
3032                 return PTR_ERR(path);
3033
3034         if (map->m_lblk >= ee_block) {
3035                 split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
3036                                             EXT4_EXT_DATA_VALID2);
3037                 if (uninitialized)
3038                         split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3039                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3040                         split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3041                 err = ext4_split_extent_at(handle, inode, path,
3042                                 map->m_lblk, split_flag1, flags);
3043                 if (err)
3044                         goto out;
3045         }
3046
3047         ext4_ext_show_leaf(inode, path);
3048 out:
3049         return err ? err : allocated;
3050 }
3051
3052 #define EXT4_EXT_ZERO_LEN 7
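     /*
      * Extents of at most 2 * EXT4_EXT_ZERO_LEN blocks get zeroed out
      * wholesale below instead of being split; the threshold presumably
      * reflects that writing a few blocks of zeroes is cheaper than the
      * extent tree manipulation a split would require.
      */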
3053 /*
3054  * This function is called by ext4_ext_map_blocks() if someone tries to write
3055  * to an uninitialized extent. It may result in splitting the uninitialized
3056  * extent into multiple extents (up to three - one initialized and two
3057  * uninitialized).
3058  * There are three possibilities:
3059  *   a> There is no split required: Entire extent should be initialized
3060  *   b> Splits in two extents: Write is happening at either end of the extent
3061  *   c> Splits in three extents: Someone is writing in the middle of the extent
3062  *
3063  * Pre-conditions:
3064  *  - The extent pointed to by 'path' is uninitialized.
3065  *  - The extent pointed to by 'path' contains a superset
3066  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3067  *
3068  * Post-conditions on success:
3069  *  - the returned value is the number of blocks beyond map->m_lblk
3070  *    that are allocated and initialized.
3071  *    It is guaranteed to be >= map->m_len.
3072  */
3073 static int ext4_ext_convert_to_initialized(handle_t *handle,
3074                                            struct inode *inode,
3075                                            struct ext4_map_blocks *map,
3076                                            struct ext4_ext_path *path)
3077 {
3078         struct ext4_extent_header *eh;
3079         struct ext4_map_blocks split_map;
3080         struct ext4_extent zero_ex;
3081         struct ext4_extent *ex;
3082         ext4_lblk_t ee_block, eof_block;
3083         unsigned int ee_len, depth;
3084         int allocated;
3085         int err = 0;
3086         int split_flag = 0;
3087
3088         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3089                 "block %llu, max_blocks %u\n", inode->i_ino,
3090                 (unsigned long long)map->m_lblk, map->m_len);
3091
3092         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3093                 inode->i_sb->s_blocksize_bits;
3094         if (eof_block < map->m_lblk + map->m_len)
3095                 eof_block = map->m_lblk + map->m_len;
3096
3097         depth = ext_depth(inode);
3098         eh = path[depth].p_hdr;
3099         ex = path[depth].p_ext;
3100         ee_block = le32_to_cpu(ex->ee_block);
3101         ee_len = ext4_ext_get_actual_len(ex);
3102         allocated = ee_len - (map->m_lblk - ee_block);
3103
3104         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3105
3106         /* Pre-conditions */
3107         BUG_ON(!ext4_ext_is_uninitialized(ex));
3108         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3109
3110         /*
3111          * Attempt to transfer newly initialized blocks from the currently
3112          * uninitialized extent to its left neighbor. This is much cheaper
3113          * than an insertion followed by a merge as those involve costly
3114          * memmove() calls. This is the common case in steady state for
3115          * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3116          * writes.
3117          *
3118          * Limitations of the current logic:
3119          *  - L1: we only deal with writes at the start of the extent.
3120          *    The approach could be extended to writes at the end
3121          *    of the extent but this scenario was deemed less common.
3122          *  - L2: we do not deal with writes covering the whole extent.
3123          *    This would require removing the extent if the transfer
3124          *    is possible.
3125          *  - L3: we only attempt to merge with an extent stored in the
3126          *    same extent tree node.
3127          */
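             /*
              * An illustrative example with assumed numbers: prev_ex covers
              * [0, 8) at pblk 1000 (initialized) and ex covers [8, 24) at
              * pblk 1008 (uninitialized). A 4-block write at lblk 8 shifts
              * ex to [12, 24) at pblk 1012 and grows prev_ex to [0, 12), so
              * the written blocks become initialized without an insertion
              * followed by a merge.
              */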
3128         if ((map->m_lblk == ee_block) &&        /*L1*/
3129                 (map->m_len < ee_len) &&        /*L2*/
3130                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L3*/
3131                 struct ext4_extent *prev_ex;
3132                 ext4_lblk_t prev_lblk;
3133                 ext4_fsblk_t prev_pblk, ee_pblk;
3134                 unsigned int prev_len, write_len;
3135
3136                 prev_ex = ex - 1;
3137                 prev_lblk = le32_to_cpu(prev_ex->ee_block);
3138                 prev_len = ext4_ext_get_actual_len(prev_ex);
3139                 prev_pblk = ext4_ext_pblock(prev_ex);
3140                 ee_pblk = ext4_ext_pblock(ex);
3141                 write_len = map->m_len;
3142
3143                 /*
3144                  * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3145                  * under the following conditions:
3146                  * - C1: prev_ex is initialized,
3147                  * - C2: prev_ex is logically abutting ex,
3148                  * - C3: prev_ex is physically abutting ex,
3149                  * - C4: prev_ex can receive the additional blocks without
3150                  *   overflowing the (initialized) length limit.
3151                  */
3152                 if ((!ext4_ext_is_uninitialized(prev_ex)) &&            /*C1*/
3153                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3154                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3155                         (prev_len < (EXT_INIT_MAX_LEN - write_len))) {  /*C4*/
3156                         err = ext4_ext_get_access(handle, inode, path + depth);
3157                         if (err)
3158                                 goto out;
3159
3160                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3161                                 map, ex, prev_ex);
3162
3163                         /* Shift the start of ex by 'write_len' blocks */
3164                         ex->ee_block = cpu_to_le32(ee_block + write_len);
3165                         ext4_ext_store_pblock(ex, ee_pblk + write_len);
3166                         ex->ee_len = cpu_to_le16(ee_len - write_len);
3167                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3168
3169                         /* Extend prev_ex by 'write_len' blocks */
3170                         prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3171
3172                         /* Mark the block containing both extents as dirty */
3173                         ext4_ext_dirty(handle, inode, path + depth);
3174
3175                         /* Update path to point to the right extent */
3176                         path[depth].p_ext = prev_ex;
3177
3178                         /* Result: number of initialized blocks past m_lblk */
3179                         allocated = write_len;
3180                         goto out;
3181                 }
3182         }
3183
3184         WARN_ON(map->m_lblk < ee_block);
3185         /*
3186          * It is safe to convert extent to initialized via explicit
3187          * zeroout only if extent is fully inside i_size or new_size.
3188          */
3189         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3190
3191         /* If the extent spans at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
3192         if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
3193             (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3194                 err = ext4_ext_zeroout(inode, ex);
3195                 if (err)
3196                         goto out;
3197
3198                 err = ext4_ext_get_access(handle, inode, path + depth);
3199                 if (err)
3200                         goto out;
3201                 ext4_ext_mark_initialized(ex);
3202                 ext4_ext_try_to_merge(inode, path, ex);
3203                 err = ext4_ext_dirty(handle, inode, path + depth);
3204                 goto out;
3205         }
3206
3207         /*
3208          * four cases:
3209          * 1. split the extent into three extents.
3210          * 2. split the extent into two extents, zeroout the first half.
3211          * 3. split the extent into two extents, zeroout the second half.
3212          * 4. split the extent into two extents without zeroout.
3213          */
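             /*
              * An illustrative example for case 3 (assumed numbers): ex covers
              * [100, 110) uninitialized and the write is [104, 107), so
              * allocated = 6 <= EXT4_EXT_ZERO_LEN. The tail [104, 110) is
              * zeroed out and split_map is widened to cover it, so a single
              * split at lblk 104 suffices instead of two.
              */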
3214         split_map.m_lblk = map->m_lblk;
3215         split_map.m_len = map->m_len;
3216
3217         if (allocated > map->m_len) {
3218                 if (allocated <= EXT4_EXT_ZERO_LEN &&
3219                     (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3220                         /* case 3 */
3221                         zero_ex.ee_block =
3222                                          cpu_to_le32(map->m_lblk);
3223                         zero_ex.ee_len = cpu_to_le16(allocated);
3224                         ext4_ext_store_pblock(&zero_ex,
3225                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3226                         err = ext4_ext_zeroout(inode, &zero_ex);
3227                         if (err)
3228                                 goto out;
3229                         split_map.m_lblk = map->m_lblk;
3230                         split_map.m_len = allocated;
3231                 } else if ((map->m_lblk - ee_block + map->m_len <
3232                            EXT4_EXT_ZERO_LEN) &&
3233                            (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3234                         /* case 2 */
3235                         if (map->m_lblk != ee_block) {
3236                                 zero_ex.ee_block = ex->ee_block;
3237                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3238                                                         ee_block);
3239                                 ext4_ext_store_pblock(&zero_ex,
3240                                                       ext4_ext_pblock(ex));
3241                                 err = ext4_ext_zeroout(inode, &zero_ex);
3242                                 if (err)
3243                                         goto out;
3244                         }
3245
3246                         split_map.m_lblk = ee_block;
3247                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3248                         allocated = map->m_len;
3249                 }
3250         }
3251
3252         allocated = ext4_split_extent(handle, inode, path,
3253                                        &split_map, split_flag, 0);
3254         if (allocated < 0)
3255                 err = allocated;
3256
3257 out:
3258         return err ? err : allocated;
3259 }
3260
3261 /*
3262  * This function is called by ext4_ext_map_blocks() from
3263  * ext4_get_blocks_dio_write() when DIO is used to write
3264  * to an uninitialized extent.
3265  *
3266  * Writing to an uninitialized extent may result in splitting the uninitialized
3267  * extent into multiple initialized/uninitialized extents (up to three).
3268  * There are three possibilities:
3269  *   a> There is no split required: Entire extent should be uninitialized
3270  *   b> Splits in two extents: Write is happening at either end of the extent
3271  *   c> Splits in three extents: Somone is writing in middle of the extent
3272  *
3273  * One or more index blocks may be needed if the extent tree grows after
3274  * the uninitialized extent is split. To prevent an ENOSPC from occurring
3275  * at IO completion time, we split the uninitialized extent before the
3276  * DIO is submitted. The uninitialized extent is split into (at most)
3277  * three uninitialized extents. After the IO completes, the part being
3278  * filled will be converted to initialized by the end_io callback
3279  * via ext4_convert_unwritten_extents().
3280  *
3281  * Returns the size of uninitialized extent to be written on success.
3282  */
3283 static int ext4_split_unwritten_extents(handle_t *handle,
3284                                         struct inode *inode,
3285                                         struct ext4_map_blocks *map,
3286                                         struct ext4_ext_path *path,
3287                                         int flags)
3288 {
3289         ext4_lblk_t eof_block;
3290         ext4_lblk_t ee_block;
3291         struct ext4_extent *ex;
3292         unsigned int ee_len;
3293         int split_flag = 0, depth;
3294
3295         ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3296                 "block %llu, max_blocks %u\n", inode->i_ino,
3297                 (unsigned long long)map->m_lblk, map->m_len);
3298
3299         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3300                 inode->i_sb->s_blocksize_bits;
3301         if (eof_block < map->m_lblk + map->m_len)
3302                 eof_block = map->m_lblk + map->m_len;
3303         /*
3304          * It is safe to convert extent to initialized via explicit
3305          * zeroout only if extent is fully inside i_size or new_size.
3306          */
3307         depth = ext_depth(inode);
3308         ex = path[depth].p_ext;
3309         ee_block = le32_to_cpu(ex->ee_block);
3310         ee_len = ext4_ext_get_actual_len(ex);
3311
3312         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3313         split_flag |= EXT4_EXT_MARK_UNINIT2;
3314         if (flags & EXT4_GET_BLOCKS_CONVERT)
3315                 split_flag |= EXT4_EXT_DATA_VALID2;
3316         flags |= EXT4_GET_BLOCKS_PRE_IO;
3317         return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3318 }
3319
3320 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3321                                                 struct inode *inode,
3322                                                 struct ext4_map_blocks *map,
3323                                                 struct ext4_ext_path *path)
3324 {
3325         struct ext4_extent *ex;
3326         ext4_lblk_t ee_block;
3327         unsigned int ee_len;
3328         int depth;
3329         int err = 0;
3330
3331         depth = ext_depth(inode);
3332         ex = path[depth].p_ext;
3333         ee_block = le32_to_cpu(ex->ee_block);
3334         ee_len = ext4_ext_get_actual_len(ex);
3335
3336         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3337                 "block %llu, max_blocks %u\n", inode->i_ino,
3338                   (unsigned long long)ee_block, ee_len);
3339
3340         /* If the extent doesn't exactly cover the request, a split is required */
3341         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3342                 err = ext4_split_unwritten_extents(handle, inode, map, path,
3343                                                    EXT4_GET_BLOCKS_CONVERT);
3344                 if (err < 0)
3345                         goto out;
3346                 ext4_ext_drop_refs(path);
3347                 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3348                 if (IS_ERR(path)) {
3349                         err = PTR_ERR(path);
3350                         goto out;
3351                 }
3352                 depth = ext_depth(inode);
3353                 ex = path[depth].p_ext;
3354         }
3355
3356         err = ext4_ext_get_access(handle, inode, path + depth);
3357         if (err)
3358                 goto out;
3359         /* first mark the extent as initialized */
3360         ext4_ext_mark_initialized(ex);
3361
3362         /* note: ext4_ext_correct_indexes() isn't needed here because
3363          * borders are not changed
3364          */
3365         ext4_ext_try_to_merge(inode, path, ex);
3366
3367         /* Mark modified extent as dirty */
3368         err = ext4_ext_dirty(handle, inode, path + depth);
3369 out:
3370         ext4_ext_show_leaf(inode, path);
3371         return err;
3372 }
3373
3374 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3375                         sector_t block, int count)
3376 {
3377         int i;
3378         for (i = 0; i < count; i++)
3379                 unmap_underlying_metadata(bdev, block + i);
3380 }
3381
3382 /*
3383  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3384  */
3385 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3386                               ext4_lblk_t lblk,
3387                               struct ext4_ext_path *path,
3388                               unsigned int len)
3389 {
3390         int i, depth;
3391         struct ext4_extent_header *eh;
3392         struct ext4_extent *last_ex;
3393
3394         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3395                 return 0;
3396
3397         depth = ext_depth(inode);
3398         eh = path[depth].p_hdr;
3399
3400         if (unlikely(!eh->eh_entries)) {
3401                 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
3402                                  "EOFBLOCKS_FL set");
3403                 return -EIO;
3404         }
3405         last_ex = EXT_LAST_EXTENT(eh);
3406         /*
3407          * We should clear the EOFBLOCKS_FL flag if we are writing the
3408          * last block in the last extent in the file.  We test this by
3409          * first checking to see whether the caller of
3410          * ext4_ext_get_blocks() was interested in the last block (or
3411          * a block beyond the last block) in the current extent.  If
3412          * this turns out to be false, we can bail out from this
3413          * function immediately.
3414          */
3415         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3416             ext4_ext_get_actual_len(last_ex))
3417                 return 0;
3418         /*
3419          * If the caller does appear to be planning to write at or
3420          * beyond the end of the current extent, we then test to see
3421          * if the current extent is the last extent in the file, by
3422          * checking to make sure it was reached via the rightmost node
3423          * at each level of the tree.
3424          */
3425         for (i = depth-1; i >= 0; i--)
3426                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3427                         return 0;
3428         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3429         return ext4_mark_inode_dirty(handle, inode);
3430 }
3431
3432 /**
3433  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3434  *
3435  * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
3436  * whether there are any buffers marked for delayed allocation. It returns '1'
3437  * on the first delalloc'ed buffer head found. If no buffer head in the given
3438  * range is marked for delalloc, it returns 0.
3439  * lblk_start should always be <= lblk_end.
3440  * search_hint_reverse is to indicate that searching in reverse from lblk_end to
3441  * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
3442  * block sooner). This is useful when blocks are truncated sequentially from
3443  * lblk_start towards lblk_end.
3444  */
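     /*
      * A quick sketch of the page/block index math used below, assuming
      * 4KiB pages (PAGE_CACHE_SHIFT == 12) and 1KiB blocks
      * (i_blkbits == 10): each page then holds four blocks, so logical
      * block i lives in page index i >> 2 and page index p starts at
      * logical block p << 2.
      */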
3445 static int ext4_find_delalloc_range(struct inode *inode,
3446                                     ext4_lblk_t lblk_start,
3447                                     ext4_lblk_t lblk_end,
3448                                     int search_hint_reverse)
3449 {
3450         struct address_space *mapping = inode->i_mapping;
3451         struct buffer_head *head, *bh = NULL;
3452         struct page *page;
3453         ext4_lblk_t i, pg_lblk;
3454         pgoff_t index;
3455
3456         /* reverse search won't work if fs block size is less than page size */
3457         if (inode->i_blkbits < PAGE_CACHE_SHIFT)
3458                 search_hint_reverse = 0;
3459
3460         if (search_hint_reverse)
3461                 i = lblk_end;
3462         else
3463                 i = lblk_start;
3464
3465         index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
3466
3467         while ((i >= lblk_start) && (i <= lblk_end)) {
3468                 page = find_get_page(mapping, index);
3469                 if (!page)
3470                         goto nextpage;
3471
3472                 if (!page_has_buffers(page))
3473                         goto nextpage;
3474
3475                 head = page_buffers(page);
3476                 if (!head)
3477                         goto nextpage;
3478
3479                 bh = head;
3480                 pg_lblk = index << (PAGE_CACHE_SHIFT -
3481                                                 inode->i_blkbits);
3482                 do {
3483                         if (unlikely(pg_lblk < lblk_start)) {
3484                                 /*
3485                                  * This is possible when fs block size is less
3486                                  * than page size and our cluster starts/ends in
3487                                  * than page size and our cluster starts/ends in the
3488                                  * middle of the page. So we need to skip the
3489                                  * initial few blocks until we reach lblk_start
3490                                 pg_lblk++;
3491                                 continue;
3492                         }
3493
3494                         /* Check if the buffer is delayed allocated and that it
3495                          * is not yet mapped. (when da-buffers are mapped during
3496                          * their writeout, their da_mapped bit is set.)
3497                          */
3498                         if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3499                                 page_cache_release(page);
3500                                 trace_ext4_find_delalloc_range(inode,
3501                                                 lblk_start, lblk_end,
3502                                                 search_hint_reverse,
3503                                                 1, i);
3504                                 return 1;
3505                         }
3506                         if (search_hint_reverse)
3507                                 i--;
3508                         else
3509                                 i++;
3510                 } while ((i >= lblk_start) && (i <= lblk_end) &&
3511                                 ((bh = bh->b_this_page) != head));
3512 nextpage:
3513                 if (page)
3514                         page_cache_release(page);
3515                 /*
3516                  * Move to next page. 'i' will be the first lblk in the next
3517                  * page.
3518                  */
3519                 if (search_hint_reverse)
3520                         index--;
3521                 else
3522                         index++;
3523                 i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
3524         }
3525
3526         trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3527                                         search_hint_reverse, 0, 0);
3528         return 0;
3529 }
3530
3531 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
3532                                int search_hint_reverse)
3533 {
3534         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3535         ext4_lblk_t lblk_start, lblk_end;
3536         lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
3537         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3538
3539         return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3540                                         search_hint_reverse);
3541 }
3542
3543 /**
3544  * Determines how many complete clusters (out of those specified by the 'map')
3545  * are under delalloc and had quota reserved for them.
3546  * This function is called when we are writing out the blocks that were
3547  * originally written with their allocation delayed, but then the space was
3548  * allocated using fallocate() before the delayed allocation could be resolved.
3549  * The cases to look for are:
3550  * ('=' indicates delayed allocated blocks
3551  *  '-' indicates non-delayed allocated blocks)
3552  * (a) partial clusters towards beginning and/or end outside of allocated range
3553  *     are not delalloc'ed.
3554  *      Ex:
3555  *      |----c---=|====c====|====c====|===-c----|
3556  *               |++++++ allocated ++++++|
3557  *      ==> 4 complete clusters in above example
3558  *
3559  * (b) partial cluster (outside of allocated range) towards either end is
3560  *     marked for delayed allocation. In this case, we will exclude that
3561  *     cluster.
3562  *      Ex:
3563  *      |----====c========|========c========|
3564  *           |++++++ allocated ++++++|
3565  *      ==> 1 complete cluster in above example
3566  *
3567  *      Ex:
3568  *      |================c================|
3569  *            |++++++ allocated ++++++|
3570  *      ==> 0 complete clusters in above example
3571  *
3572  * ext4_da_update_reserve_space() will be called only if we
3573  * determine here that there were some "entire" clusters that span
3574  * this 'allocated' range.
3575  * In the non-bigalloc case, this function will just end up returning num_blks
3576  * without ever calling ext4_find_delalloc_range.
3577  */
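     /*
      * A worked example with assumed numbers: with s_cluster_ratio == 4,
      * lblk_start == 5 and num_blks == 10 span clusters 1..3, so at most
      * three clusters are counted; a delalloc block at lblk 4 or at lblk 15
      * (the partial-cluster tails) would each exclude one cluster.
      */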
3578 static unsigned int
3579 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3580                            unsigned int num_blks)
3581 {
3582         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3583         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3584         ext4_lblk_t lblk_from, lblk_to, c_offset;
3585         unsigned int allocated_clusters = 0;
3586
3587         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3588         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3589
3590         /* max possible clusters for this allocation */
3591         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3592
3593         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3594
3595         /* Check towards left side */
3596         c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
3597         if (c_offset) {
3598                 lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
3599                 lblk_to = lblk_from + c_offset - 1;
3600
3601                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3602                         allocated_clusters--;
3603         }
3604
3605         /* Now check towards right. */
3606         c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
3607         if (allocated_clusters && c_offset) {
3608                 lblk_from = lblk_start + num_blks;
3609                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3610
3611                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3612                         allocated_clusters--;
3613         }
3614
3615         return allocated_clusters;
3616 }
3617
3618 static int
3619 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3620                         struct ext4_map_blocks *map,
3621                         struct ext4_ext_path *path, int flags,
3622                         unsigned int allocated, ext4_fsblk_t newblock)
3623 {
3624         int ret = 0;
3625         int err = 0;
3626         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3627
3628         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3629                   "block %llu, max_blocks %u, flags %d, allocated %u\n",
3630                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3631                   flags, allocated);
3632         ext4_ext_show_leaf(inode, path);
3633
3634         trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3635                                                     newblock);
3636
3637         /* get_block() is called before submitting the IO; split the extent */
3638         if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3639                 ret = ext4_split_unwritten_extents(handle, inode, map,
3640                                                    path, flags);
3641                 /*
3642                  * Flag the inode (non-AIO case) or end_io struct (AIO case)
3643                  * that this IO needs conversion to written when the IO is
3644                  * completed
3645                  */
3646                 if (io)
3647                         ext4_set_io_unwritten_flag(inode, io);
3648                 else
3649                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3650                 if (ext4_should_dioread_nolock(inode))
3651                         map->m_flags |= EXT4_MAP_UNINIT;
3652                 goto out;
3653         }
3654         /* IO end_io completed; convert the filled extent to written */
3655         if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3656                 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3657                                                         path);
3658                 if (ret >= 0) {
3659                         ext4_update_inode_fsync_trans(handle, inode, 1);
3660                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
3661                                                  path, map->m_len);
3662                 } else
3663                         err = ret;
3664                 goto out2;
3665         }
3666         /* buffered IO case */
3667         /*
3668          * a repeat fallocate creation request:
3669          * we already have an unwritten extent
3670          */
3671         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3672                 goto map_out;
3673
3674         /* buffered READ or buffered write_begin() lookup */
3675         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3676                 /*
3677                  * We have blocks reserved already.  We
3678                  * return allocated blocks so that delalloc
3679                  * won't do block reservation for us.  But
3680                  * the buffer head will be unmapped so that
3681                  * a read from the block returns 0s.
3682                  */
3683                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3684                 goto out1;
3685         }
3686
3687         /* buffered write, writepage time, convert */
3688         ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3689         if (ret >= 0)
3690                 ext4_update_inode_fsync_trans(handle, inode, 1);
3691 out:
3692         if (ret <= 0) {
3693                 err = ret;
3694                 goto out2;
3695         } else
3696                 allocated = ret;
3697         map->m_flags |= EXT4_MAP_NEW;
3698         /*
3699          * if we allocated more blocks than requested
3700          * we need to make sure we unmap the extra blocks
3701          * allocated. The blocks actually needed will get
3702          * unmapped later when we find the buffer_head marked
3703          * new.
3704          */
3705         if (allocated > map->m_len) {
3706                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3707                                         newblock + map->m_len,
3708                                         allocated - map->m_len);
3709                 allocated = map->m_len;
3710         }
3711         map->m_len = allocated;
3712
3713         /*
3714          * If we have done fallocate at an offset that is already
3715          * delayed allocated, we would have block reservation
3716          * and quota reservation done in the delayed write path.
3717          * But fallocate would have already updated the quota and block
3718          * count for this offset. So cancel these reservations.
3719          */
3720         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3721                 unsigned int reserved_clusters;
3722                 reserved_clusters = get_reserved_cluster_alloc(inode,
3723                                 map->m_lblk, map->m_len);
3724                 if (reserved_clusters)
3725                         ext4_da_update_reserve_space(inode,
3726                                                      reserved_clusters,
3727                                                      0);
3728         }
3729
3730 map_out:
3731         map->m_flags |= EXT4_MAP_MAPPED;
3732         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3733                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3734                                          map->m_len);
3735                 if (err < 0)
3736                         goto out2;
3737         }
3738 out1:
3739         if (allocated > map->m_len)
3740                 allocated = map->m_len;
3741         ext4_ext_show_leaf(inode, path);
3742         map->m_pblk = newblock;
3743         map->m_len = allocated;
3744 out2:
3745         if (path) {
3746                 ext4_ext_drop_refs(path);
3747                 kfree(path);
3748         }
3749         return err ? err : allocated;
3750 }
3751
3752 /*
3753  * get_implied_cluster_alloc - check to see if the requested
3754  * allocation (in the map structure) overlaps with a cluster already
3755  * allocated in an extent.
3756  *      @sb     The filesystem superblock structure
3757  *      @map    The requested lblk->pblk mapping
3758  *      @ex     The extent structure which might contain an implied
3759  *                      cluster allocation
3760  *
3761  * This function is called by ext4_ext_map_blocks() after we failed to
3762  * find blocks that were already in the inode's extent tree.  Hence,
3763  * we know that the beginning of the requested region cannot overlap
3764  * the extent from the inode's extent tree.  There are three cases we
3765  * want to catch.  The first is this case:
3766  *
3767  *               |--- cluster # N--|
3768  *    |--- extent ---|  |---- requested region ---|
3769  *                      |==========|
3770  *
3771  * The second case that we need to test for is this one:
3772  *
3773  *   |--------- cluster # N ----------------|
3774  *         |--- requested region --|   |------- extent ----|
3775  *         |=======================|
3776  *
3777  * The third case is when the requested region lies between two extents
3778  * within the same cluster:
3779  *          |------------- cluster # N-------------|
3780  * |----- ex -----|                  |---- ex_right ----|
3781  *                  |------ requested region ------|
3782  *                  |================|
3783  *
3784  * In each of the above cases, we need to set the map->m_pblk and
3785  * map->m_len so they correspond to the extent labelled as
3786  * "|====|" from cluster #N, since it is already in use for data in
3787  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
3788  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3789  * as a new "allocated" block region.  Otherwise, we will return 0 and
3790  * ext4_ext_map_blocks() will then allocate one or more new clusters
3791  * by calling ext4_mb_new_blocks().
3792  */
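     /*
      * A worked example with assumed numbers: s_cluster_ratio == 16, ex
      * maps lblk 10..13 to pblk 1034..1037, and the request starts at
      * lblk 14. The request falls in the same cluster as the extent's
      * tail, so m_pblk = EXT4_PBLK_CMASK(sbi, 1037) + 14 == 1038,
      * continuing the existing mapping, and m_len is clipped to the end
      * of the cluster.
      */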
3793 static int get_implied_cluster_alloc(struct super_block *sb,
3794                                      struct ext4_map_blocks *map,
3795                                      struct ext4_extent *ex,
3796                                      struct ext4_ext_path *path)
3797 {
3798         struct ext4_sb_info *sbi = EXT4_SB(sb);
3799         ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
3800         ext4_lblk_t ex_cluster_start, ex_cluster_end;
3801         ext4_lblk_t rr_cluster_start, rr_cluster_end;
3802         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3803         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3804         unsigned short ee_len = ext4_ext_get_actual_len(ex);
3805
3806         /* The extent passed in that we are trying to match */
3807         ex_cluster_start = EXT4_B2C(sbi, ee_block);
3808         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3809
3810         /* The requested region passed into ext4_map_blocks() */
3811         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3812         rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1);
3813
3814         if ((rr_cluster_start == ex_cluster_end) ||
3815             (rr_cluster_start == ex_cluster_start)) {
3816                 if (rr_cluster_start == ex_cluster_end)
3817                         ee_start += ee_len - 1;
3818                 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
3819                 map->m_len = min(map->m_len,
3820                                  (unsigned) sbi->s_cluster_ratio - c_offset);
3821                 /*
3822                  * Check for and handle this case:
3823                  *
3824                  *   |--------- cluster # N-------------|
3825                  *                     |------- extent ----|
3826                  *         |--- requested region ---|
3827                  *         |===========|
3828                  */
3829
3830                 if (map->m_lblk < ee_block)
3831                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
3832
3833                 /*
3834                  * Check for the case where there is already another allocated
3835                  * block to the right of 'ex' but before the end of the cluster.
3836                  *
3837                  *          |------------- cluster # N-------------|
3838                  * |----- ex -----|                  |---- ex_right ----|
3839                  *                  |------ requested region ------|
3840                  *                  |================|
3841                  */
3842                 if (map->m_lblk > ee_block) {
3843                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3844                         map->m_len = min(map->m_len, next - map->m_lblk);
3845                 }
3846
3847                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3848                 return 1;
3849         }
3850
3851         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3852         return 0;
3853 }
3854
3855
3856 /*
3857  * Block allocation/map/preallocation routine for extent-based files
3858  *
3859  *
3860  * Needs to be called with
3861  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
3862  * (i.e., create is zero); otherwise down_write(&EXT4_I(inode)->i_data_sem)
3863  *
3864  * return > 0, number of blocks already mapped/allocated
3865  *          if create == 0 and these are pre-allocated blocks
3866  *              buffer head is unmapped
3867  *          otherwise blocks are mapped
3868  *
3869  * return = 0, if plain look up failed (blocks have not been allocated)
3870  *          buffer head is unmapped
3871  *
3872  * return < 0, error case.
3873  */
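     /*
      * An illustrative call with assumed values: to map eight blocks at
      * logical block 100, a caller sets map->m_lblk = 100 and
      * map->m_len = 8; a positive return value is the number of blocks
      * actually mapped, with map->m_pblk holding the first physical block.
      */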
3874 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3875                         struct ext4_map_blocks *map, int flags)
3876 {
3877         struct ext4_ext_path *path = NULL;
3878         struct ext4_extent newex, *ex, *ex2;
3879         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3880         ext4_fsblk_t newblock = 0;
3881         int free_on_err = 0, err = 0, depth, ret;
3882         unsigned int allocated = 0, offset = 0;
3883         unsigned int allocated_clusters = 0;
3884         unsigned int punched_out = 0;
3885         unsigned int result = 0;
3886         struct ext4_allocation_request ar;
3887         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3888         ext4_lblk_t cluster_offset;
3889
3890         ext_debug("blocks %u/%u requested for inode %lu\n",
3891                   map->m_lblk, map->m_len, inode->i_ino);
3892         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3893
3894         /* check in cache */
3895         if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
3896                 ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3897                 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3898                         if ((sbi->s_cluster_ratio > 1) &&
3899                             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3900                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3901
3902                         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3903                                 /*
3904                                  * block isn't allocated yet and
3905                                  * user doesn't want to allocate it
3906                                  */
3907                                 goto out2;
3908                         }
3909                         /* we should allocate requested block */
3910                 } else {
3911                         /* block is already allocated */
3912                         if (sbi->s_cluster_ratio > 1)
3913                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3914                         newblock = map->m_lblk
3915                                    - le32_to_cpu(newex.ee_block)
3916                                    + ext4_ext_pblock(&newex);
3917                         /* number of remaining blocks in the extent */
3918                         allocated = ext4_ext_get_actual_len(&newex) -
3919                                 (map->m_lblk - le32_to_cpu(newex.ee_block));
3920                         goto out;
3921                 }
3922         }
3923
3924         /* find extent for this block */
3925         path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3926         if (IS_ERR(path)) {
3927                 err = PTR_ERR(path);
3928                 path = NULL;
3929                 goto out2;
3930         }
3931
3932         depth = ext_depth(inode);
3933
3934         /*
3935          * consistent leaf must not be empty;
3936          * this situation is possible, though, _during_ tree modification;
3937          * this is why the assert can't be put in ext4_ext_find_extent()
3938          */
3939         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3940                 EXT4_ERROR_INODE(inode, "bad extent address "
3941                                  "lblock: %lu, depth: %d pblock %lld",
3942                                  (unsigned long) map->m_lblk, depth,
3943                                  path[depth].p_block);
3944                 err = -EIO;
3945                 goto out2;
3946         }
3947
3948         ex = path[depth].p_ext;
3949         if (ex) {
3950                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3951                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3952                 unsigned short ee_len;
3953
3954                 /*
3955                  * Uninitialized extents are treated as holes, except that
3956                  * we split out initialized portions during a write.
3957                  */
3958                 ee_len = ext4_ext_get_actual_len(ex);
3959
3960                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3961
3962                 /* if found extent covers block, simply return it */
3963                 if (in_range(map->m_lblk, ee_block, ee_len)) {
3964                         struct ext4_map_blocks punch_map;
3965                         ext4_fsblk_t partial_cluster = 0;
3966
3967                         newblock = map->m_lblk - ee_block + ee_start;
3968                         /* number of remaining blocks in the extent */
3969                         allocated = ee_len - (map->m_lblk - ee_block);
3970                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3971                                   ee_block, ee_len, newblock);
3972
3973                         if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
3974                                 /*
3975                                  * Do not put uninitialized extent
3976                                  * in the cache
3977                                  */
3978                                 if (!ext4_ext_is_uninitialized(ex)) {
3979                                         ext4_ext_put_in_cache(inode, ee_block,
3980                                                 ee_len, ee_start);
3981                                         goto out;
3982                                 }
3983                                 ret = ext4_ext_handle_uninitialized_extents(
3984                                         handle, inode, map, path, flags,
3985                                         allocated, newblock);
3986                                 return ret;
3987                         }
3988
3989                         /*
3990                          * Punch out the map length, but only to the
3991                          * end of the extent
3992                          */
3993                         punched_out = allocated < map->m_len ?
3994                                 allocated : map->m_len;
3995
3996                         /*
3997                          * Since extents that need to be converted to
3998                          * uninitialized must fit in an
3999                          * uninitialized extent
4000                          */
4001                         if (punched_out > EXT_UNINIT_MAX_LEN)
4002                                 punched_out = EXT_UNINIT_MAX_LEN;
4003
4004                         punch_map.m_lblk = map->m_lblk;
4005                         punch_map.m_pblk = newblock;
4006                         punch_map.m_len = punched_out;
4007                         punch_map.m_flags = 0;
4008
4009                         /* Check to see if the extent needs to be split */
4010                         if (punch_map.m_len != ee_len ||
4011                                 punch_map.m_lblk != ee_block) {
4012
4013                                 ret = ext4_split_extent(handle, inode,
4014                                 path, &punch_map, 0,
4015                                 EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
4016                                 EXT4_GET_BLOCKS_PRE_IO);
4017
4018                                 if (ret < 0) {
4019                                         err = ret;
4020                                         goto out2;
4021                                 }
4022                                 /*
4023                                  * find extent for the block at
4024                                  * the start of the hole
4025                                  */
4026                                 ext4_ext_drop_refs(path);
4027                                 kfree(path);
4028
4029                                 path = ext4_ext_find_extent(inode,
4030                                 map->m_lblk, NULL);
4031                                 if (IS_ERR(path)) {
4032                                         err = PTR_ERR(path);
4033                                         path = NULL;
4034                                         goto out2;
4035                                 }
4036
4037                                 depth = ext_depth(inode);
4038                                 ex = path[depth].p_ext;
4039                                 ee_len = ext4_ext_get_actual_len(ex);
4040                                 ee_block = le32_to_cpu(ex->ee_block);
4041                                 ee_start = ext4_ext_pblock(ex);
4042
4043                         }
4044
4045                         ext4_ext_mark_uninitialized(ex);
4046
4047                         ext4_ext_invalidate_cache(inode);
4048
4049                         err = ext4_ext_rm_leaf(handle, inode, path,
4050                                                &partial_cluster, map->m_lblk,
4051                                                map->m_lblk + punched_out);
4052
4053                         if (!err && path->p_hdr->eh_entries == 0) {
4054                                 /*
4055                                  * The punch hole freed this whole subtree,
4056                                  * so we need to correct eh_depth
4057                                  */
4058                                 err = ext4_ext_get_access(handle, inode, path);
4059                                 if (err == 0) {
4060                                         ext_inode_hdr(inode)->eh_depth = 0;
4061                                         ext_inode_hdr(inode)->eh_max =
4062                                         cpu_to_le16(ext4_ext_space_root(
4063                                                 inode, 0));
4064
4065                                         err = ext4_ext_dirty(
4066                                                 handle, inode, path);
4067                                 }
4068                         }
4069
4070                         goto out2;
4071                 }
4072         }
4073
4074         if ((sbi->s_cluster_ratio > 1) &&
4075             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
4076                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4077
4078         /*
4079          * the requested block isn't allocated yet;
4080          * we can't create it when the create flag is zero
4081          */
4082         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4083                 /*
4084                  * put just found gap into cache to speed up
4085                  * subsequent requests
4086                  */
4087                 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4088                 goto out2;
4089         }
4090
4091         /*
4092          * Okay, we need to do block allocation.
4093          */
4094         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4095         newex.ee_block = cpu_to_le32(map->m_lblk);
4096         cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4097
4098         /*
4099          * If we are doing bigalloc, check to see if the extent returned
4100          * by ext4_ext_find_extent() implies a cluster we can use.
4101          */
4102         if (cluster_offset && ex &&
4103             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4104                 ar.len = allocated = map->m_len;
4105                 newblock = map->m_pblk;
4106                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4107                 goto got_allocated_blocks;
4108         }
4109
4110         /* find neighbour allocated blocks */
4111         ar.lleft = map->m_lblk;
4112         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4113         if (err)
4114                 goto out2;
4115         ar.lright = map->m_lblk;
4116         ex2 = NULL;
4117         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4118         if (err)
4119                 goto out2;
4120
4121         /* Check if the extent found by searching to the right implies
4122          * a cluster we can use. */
4123         if ((sbi->s_cluster_ratio > 1) && ex2 &&
4124             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4125                 ar.len = allocated = map->m_len;
4126                 newblock = map->m_pblk;
4127                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4128                 goto got_allocated_blocks;
4129         }
4130
4131         /*
4132          * See if request is beyond maximum number of blocks we can have in
4133          * a single extent. For an initialized extent this limit is
4134          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4135          * EXT_UNINIT_MAX_LEN.
4136          */
4137         if (map->m_len > EXT_INIT_MAX_LEN &&
4138             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4139                 map->m_len = EXT_INIT_MAX_LEN;
4140         else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4141                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4142                 map->m_len = EXT_UNINIT_MAX_LEN;
4143
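        /*
         * Editor's note: EXT_INIT_MAX_LEN is 32768 (2^15 blocks) and
         * EXT_UNINIT_MAX_LEN is 32767, because the top bit of the 16-bit
         * ee_len field marks an extent uninitialized, leaving 15 bits
         * for the length of an uninitialized extent.
         */
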
4144         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4145         newex.ee_len = cpu_to_le16(map->m_len);
4146         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4147         if (err)
4148                 allocated = ext4_ext_get_actual_len(&newex);
4149         else
4150                 allocated = map->m_len;
4151
4152         /* allocate new block */
4153         ar.inode = inode;
4154         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4155         ar.logical = map->m_lblk;
4156         /*
4157          * We calculate the offset from the beginning of the cluster
4158          * for the logical block number, since when we allocate a
4159          * physical cluster, the physical block should start at the
4160          * same offset from the beginning of the cluster.  This is
4161          * needed so that future calls to get_implied_cluster_alloc()
4162          * work correctly.
4163          */
4164         offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4165         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4166         ar.goal -= offset;
4167         ar.logical -= offset;
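        /*
         * Worked example (editor's note): with s_cluster_ratio = 16 and
         * map->m_lblk = 35, EXT4_LBLK_COFF() yields offset = 3.  For
         * allocated = 5, EXT4_NUM_B2C() rounds offset + allocated = 8
         * blocks up to ar.len = 1 cluster, and goal/logical are both
         * pulled back by 3 so the physical allocation starts at the
         * same in-cluster offset as the logical block.
         */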
4168         if (S_ISREG(inode->i_mode))
4169                 ar.flags = EXT4_MB_HINT_DATA;
4170         else
4171                 /* disable in-core preallocation for non-regular files */
4172                 ar.flags = 0;
4173         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4174                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4175         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4176         if (!newblock)
4177                 goto out2;
4178         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4179                   ar.goal, newblock, allocated);
4180         free_on_err = 1;
4181         allocated_clusters = ar.len;
4182         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4183         if (ar.len > allocated)
4184                 ar.len = allocated;
4185
4186 got_allocated_blocks:
4187         /* try to insert new extent into found leaf and return */
4188         ext4_ext_store_pblock(&newex, newblock + offset);
4189         newex.ee_len = cpu_to_le16(ar.len);
4190         /* Mark uninitialized */
4191         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4192                 ext4_ext_mark_uninitialized(&newex);
4193                 /*
4194                  * An io_end structure is created for every IO write to an
4195                  * uninitialized extent. To avoid unnecessary conversion,
4196                  * here we flag only the IO that really needs the conversion.
4197                  * For the non-async direct IO case, flag the inode state
4198                  * so that we perform the conversion when IO is done.
4199                  */
4200                 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
4201                         if (io)
4202                                 ext4_set_io_unwritten_flag(inode, io);
4203                         else
4204                                 ext4_set_inode_state(inode,
4205                                                      EXT4_STATE_DIO_UNWRITTEN);
4206                 }
4207                 if (ext4_should_dioread_nolock(inode))
4208                         map->m_flags |= EXT4_MAP_UNINIT;
4209         }
4210
4211         err = 0;
4212         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4213                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4214                                          path, ar.len);
4215         if (!err)
4216                 err = ext4_ext_insert_extent(handle, inode, path,
4217                                              &newex, flags);
4218         if (err && free_on_err) {
4219                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4220                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4221                 /* free data blocks we just allocated */
4222                 /* not a good idea to call discard here directly,
4223                  * but otherwise we'd need to call it every free() */
4224                 ext4_discard_preallocations(inode);
4225                 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4226                                  ext4_ext_get_actual_len(&newex), fb_flags);
4227                 goto out2;
4228         }
4229
4230         /* previous routine could use block we allocated */
4231         newblock = ext4_ext_pblock(&newex);
4232         allocated = ext4_ext_get_actual_len(&newex);
4233         if (allocated > map->m_len)
4234                 allocated = map->m_len;
4235         map->m_flags |= EXT4_MAP_NEW;
4236
4237         /*
4238          * Update reserved blocks/metadata blocks after successful
4239          * block allocation which had been deferred till now.
4240          */
4241         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4242                 unsigned int reserved_clusters;
4243                 /*
4244                  * Check how many clusters we had reserved for this allocated range
4245                  */
4246                 reserved_clusters = get_reserved_cluster_alloc(inode,
4247                                                 map->m_lblk, allocated);
4248                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4249                         if (reserved_clusters) {
4250                                 /*
4251                                  * We have clusters reserved for this range.
4252                                  * But since we are not doing actual allocation
4253                                  * and are simply using blocks from previously
4254                                  * allocated cluster, we should release the
4255                                  * reservation and not claim quota.
4256                                  */
4257                                 ext4_da_update_reserve_space(inode,
4258                                                 reserved_clusters, 0);
4259                         }
4260                 } else {
4261                         BUG_ON(allocated_clusters < reserved_clusters);
4262                         /* We will claim quota for all newly allocated blocks. */
4263                         ext4_da_update_reserve_space(inode, allocated_clusters,
4264                                                         1);
4265                         if (reserved_clusters < allocated_clusters) {
4266                                 struct ext4_inode_info *ei = EXT4_I(inode);
4267                                 int reservation = allocated_clusters -
4268                                                   reserved_clusters;
4269                                 /*
4270                                  * It seems we claimed a few clusters outside
4271                                  * the range of this allocation. We should give
4272                                  * them back to the reservation pool. This can
4273                                  * happen in the following case:
4274                                  *
4275                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4276                                  *   cluster has 4 blocks); thus the clusters
4277                                  *   are [0-3],[4-7],[8-11]...
4278                                  * * First comes a delayed allocation write for
4279                                  *   logical blocks 10 & 11. Since there were no
4280                                  *   previous delayed allocated blocks in the
4281                                  *   range [8-11], we would reserve 1 cluster
4282                                  *   for this write.
4283                                  * * Next comes a write for logical blocks 3 to 8.
4284                                  *   In this case, we will reserve 2 clusters
4285                                  *   (for [0-3] and [4-7]; not for [8-11], as
4286                                  *   that range already has delayed allocated
4287                                  *   blocks). Total reserved clusters is now 3.
4288                                  * * Now, at delayed allocation writeout
4289                                  *   time, we will first write blocks [3-8] and
4290                                  *   allocate 3 clusters for writing these
4291                                  *   blocks; we also claim all three of those
4292                                  *   clusters.
4293                                  * * Now when we come here to write out
4294                                  *   blocks [10-11], we would expect to claim
4295                                  *   the reservation of 1 cluster we had made
4296                                  *   (and we would claim it, since there are no
4297                                  *   more delayed allocated blocks in the range
4298                                  *   [8-11]). But our reserved cluster count has
4299                                  *   already gone to 0.
4300                                  *
4301                                  *   Thus, at the last step above, when we see
4302                                  *   that there are still some unwritten delayed
4303                                  *   allocated blocks outside of our current
4304                                  *   block range, we should increment the
4305                                  *   reserved cluster count so that when the
4306                                  *   remaining blocks finally get written, we
4307                                  *   can claim them.
4308                                  */
4309                                 dquot_reserve_block(inode,
4310                                                 EXT4_C2B(sbi, reservation));
4311                                 spin_lock(&ei->i_block_reservation_lock);
4312                                 ei->i_reserved_data_blocks += reservation;
4313                                 spin_unlock(&ei->i_block_reservation_lock);
4314                         }
4315                 }
4316         }
4317
4318         /*
4319          * Cache the extent and update transaction to commit on fdatasync only
4320          * when it is _not_ an uninitialized extent.
4321          */
4322         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4323                 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4324                 ext4_update_inode_fsync_trans(handle, inode, 1);
4325         } else
4326                 ext4_update_inode_fsync_trans(handle, inode, 0);
4327 out:
4328         if (allocated > map->m_len)
4329                 allocated = map->m_len;
4330         ext4_ext_show_leaf(inode, path);
4331         map->m_flags |= EXT4_MAP_MAPPED;
4332         map->m_pblk = newblock;
4333         map->m_len = allocated;
4334 out2:
4335         if (path) {
4336                 ext4_ext_drop_refs(path);
4337                 kfree(path);
4338         }
4339         result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
4340                         punched_out : allocated;
4341
4342         trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
4343                 newblock, map->m_len, err ? err : result);
4344
4345         return err ? err : result;
4346 }
4347
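/*
 * Illustrative caller sketch (editor's addition, not from the original
 * file): in-kernel callers reach this function via ext4_map_blocks(),
 * treating a positive return value as the number of blocks now mapped:
 *
 *      struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *      int ret = ext4_map_blocks(handle, inode, &map,
 *                                EXT4_GET_BLOCKS_CREATE);
 *      if (ret > 0)
 *              blocks lblk..lblk+ret-1 are now backed by physical
 *              blocks map.m_pblk..map.m_pblk+ret-1
 */
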
4348 void ext4_ext_truncate(struct inode *inode)
4349 {
4350         struct address_space *mapping = inode->i_mapping;
4351         struct super_block *sb = inode->i_sb;
4352         ext4_lblk_t last_block;
4353         handle_t *handle;
4354         loff_t page_len;
4355         int err = 0;
4356
4357         /*
4358          * finish any pending end_io work so we won't run the risk of
4359          * converting any truncated blocks to initialized later
4360          */
4361         ext4_flush_completed_IO(inode);
4362
4363         /*
4364          * probably the first extent we free will be the last one in the block
4365          */
4366         err = ext4_writepage_trans_blocks(inode);
4367         handle = ext4_journal_start(inode, err);
4368         if (IS_ERR(handle))
4369                 return;
4370
4371         if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4372                 page_len = PAGE_CACHE_SIZE -
4373                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4374
4375                 err = ext4_discard_partial_page_buffers(handle,
4376                         mapping, inode->i_size, page_len, 0);
4377
4378                 if (err)
4379                         goto out_stop;
4380         }
4381
4382         if (ext4_orphan_add(handle, inode))
4383                 goto out_stop;
4384
4385         down_write(&EXT4_I(inode)->i_data_sem);
4386         ext4_ext_invalidate_cache(inode);
4387
4388         ext4_discard_preallocations(inode);
4389
4390         /*
4391          * TODO: optimization is possible here.
4392          * Probably we need not scan at all,
4393          * because page truncation is enough.
4394          */
4395
4396         /* we have to know where to truncate from in case of a crash */
4397         EXT4_I(inode)->i_disksize = inode->i_size;
4398         ext4_mark_inode_dirty(handle, inode);
4399
4400         last_block = (inode->i_size + sb->s_blocksize - 1)
4401                         >> EXT4_BLOCK_SIZE_BITS(sb);
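        /*
         * Worked example (editor's note): with a 4K block size and
         * i_size = 10000, last_block = (10000 + 4095) >> 12 = 3, so
         * blocks 0-2, which still hold data, are kept and everything
         * from block 3 onward is removed below.
         */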
4402         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4403
4404         /* In a multi-transaction truncate, we only make the final
4405          * transaction synchronous.
4406          */
4407         if (IS_SYNC(inode))
4408                 ext4_handle_sync(handle);
4409
4410         up_write(&EXT4_I(inode)->i_data_sem);
4411
4412 out_stop:
4413         /*
4414          * If this was a simple ftruncate() and the file will remain alive,
4415          * then we need to clear up the orphan record which we created above.
4416          * However, if this was a real unlink then we were called by
4417          * ext4_delete_inode(), and we allow that function to clean up the
4418          * orphan info for us.
4419          */
4420         if (inode->i_nlink)
4421                 ext4_orphan_del(handle, inode);
4422
4423         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4424         ext4_mark_inode_dirty(handle, inode);
4425         ext4_journal_stop(handle);
4426 }
4427
4428 static void ext4_falloc_update_inode(struct inode *inode,
4429                                 int mode, loff_t new_size, int update_ctime)
4430 {
4431         struct timespec now;
4432
4433         if (update_ctime) {
4434                 now = current_fs_time(inode->i_sb);
4435                 if (!timespec_equal(&inode->i_ctime, &now))
4436                         inode->i_ctime = now;
4437         }
4438         /*
4439          * Update only when preallocation was requested beyond
4440          * the file size.
4441          */
4442         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4443                 if (new_size > i_size_read(inode))
4444                         i_size_write(inode, new_size);
4445                 if (new_size > EXT4_I(inode)->i_disksize)
4446                         ext4_update_i_disksize(inode, new_size);
4447         } else {
4448                 /*
4449                  * Mark that we allocate beyond EOF so the subsequent truncate
4450                  * can proceed even if the new size is the same as i_size.
4451                  */
4452                 if (new_size > i_size_read(inode))
4453                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4454         }
4455
4456 }
4457
4458 /*
4459  * Preallocate space for a file. This implements ext4's fallocate file
4460  * operation, which gets called from the sys_fallocate system call.
4461  * For block-mapped files, posix_fallocate should fall back to the method
4462  * of writing zeroes to the required new blocks (the same behavior that is
4463  * expected of file systems which do not support the fallocate() system call).
4464  */
4465 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4466 {
4467         struct inode *inode = file->f_path.dentry->d_inode;
4468         handle_t *handle;
4469         loff_t new_size;
4470         unsigned int max_blocks;
4471         int ret = 0;
4472         int ret2 = 0;
4473         int retries = 0;
4474         int flags;
4475         struct ext4_map_blocks map;
4476         unsigned int credits, blkbits = inode->i_blkbits;
4477
4478         /* Return error if mode is not supported */
4479         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4480                 return -EOPNOTSUPP;
4481
4482         if (mode & FALLOC_FL_PUNCH_HOLE)
4483                 return ext4_punch_hole(file, offset, len);
4484
4485         trace_ext4_fallocate_enter(inode, offset, len, mode);
4486         map.m_lblk = offset >> blkbits;
4487         /*
4488          * We can't just convert len to max_blocks, because the range
4489          * may straddle a block boundary; see the worked example below.
4490          */
4491         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4492                 - map.m_lblk;
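        /*
         * Worked example (editor's note): blocksize = 4096, offset = 3072,
         * len = 2048.  Then map.m_lblk = 3072 >> 12 = 0, while
         * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, so max_blocks = 2: the
         * 2048-byte request actually touches two blocks.
         */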
4493         /*
4494          * credits to insert 1 extent into extent tree
4495          */
4496         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4497         mutex_lock(&inode->i_mutex);
4498
4499         /*
4500          * We only support preallocation for extent-based files
4501          */
4502         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4503                 ret = -EOPNOTSUPP;
4504                 goto out;
4505         }
4506
4507         ret = inode_newsize_ok(inode, (len + offset));
4508         if (ret) {
4509                 mutex_unlock(&inode->i_mutex);
4510                 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4511                 return ret;
4512         }
4513         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4514         if (mode & FALLOC_FL_KEEP_SIZE)
4515                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4516         /*
4517          * Don't normalize the request if it can fit in one extent so
4518          * that it doesn't get unnecessarily split into multiple
4519          * extents.
4520          */
4521         if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4522                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4523 retry:
4524         while (ret >= 0 && ret < max_blocks) {
4525                 map.m_lblk = map.m_lblk + ret;
4526                 map.m_len = max_blocks = max_blocks - ret;
4527                 handle = ext4_journal_start(inode, credits);
4528                 if (IS_ERR(handle)) {
4529                         ret = PTR_ERR(handle);
4530                         break;
4531                 }
4532                 ret = ext4_map_blocks(handle, inode, &map, flags);
4533                 if (ret <= 0) {
4534 #ifdef EXT4FS_DEBUG
4535                         WARN_ON(ret <= 0);
4536                         printk(KERN_ERR "%s: ext4_ext_map_blocks "
4537                                     "returned error inode#%lu, block=%u, "
4538                                     "max_blocks=%u\n", __func__,
4539                                     inode->i_ino, map.m_lblk, max_blocks);
4540 #endif
4541                         ext4_mark_inode_dirty(handle, inode);
4542                         ret2 = ext4_journal_stop(handle);
4543                         break;
4544                 }
4545                 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4546                                                 blkbits) >> blkbits))
4547                         new_size = offset + len;
4548                 else
4549                         new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4550
4551                 ext4_falloc_update_inode(inode, mode, new_size,
4552                                          (map.m_flags & EXT4_MAP_NEW));
4553                 ext4_mark_inode_dirty(handle, inode);
4554                 ret2 = ext4_journal_stop(handle);
4555                 if (ret2)
4556                         break;
4557         }
4558         if (ret == -ENOSPC &&
4559                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4560                 ret = 0;
4561                 goto retry;
4562         }
4563 out:
4564         mutex_unlock(&inode->i_mutex);
4565         trace_ext4_fallocate_exit(inode, offset, max_blocks,
4566                                 ret > 0 ? ret2 : ret);
4567         return ret > 0 ? ret2 : ret;
4568 }
4569
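/*
 * Illustrative userspace usage (editor's sketch, not part of the
 * original file): the function above is reached via the fallocate(2)
 * system call, e.g.:
 *
 *      int fd = open("/some/file", O_RDWR);
 *
 *      // preallocate 1 MiB at offset 0 without changing i_size
 *      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024 * 1024);
 */
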
4570 /*
4571  * This function converts a range of blocks to written extents.
4572  * The caller passes the start offset and the size;
4573  * all unwritten extents within this range will be converted to
4574  * written extents.
4575  *
4576  * This function is called from the direct IO end_io callback
4577  * function, to convert fallocated extents after IO is completed.
4578  * Returns 0 on success.
4579  */
4580 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4581                                     ssize_t len)
4582 {
4583         handle_t *handle;
4584         unsigned int max_blocks;
4585         int ret = 0;
4586         int ret2 = 0;
4587         struct ext4_map_blocks map;
4588         unsigned int credits, blkbits = inode->i_blkbits;
4589
4590         map.m_lblk = offset >> blkbits;
4591         /*
4592          * We can't just convert len to max_blocks, because the range
4593          * may straddle a block boundary (e.g. blocksize = 4096, offset = 3072, len = 2048).
4594          */
4595         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4596                       map.m_lblk);
4597         /*
4598          * credits to insert 1 extent into extent tree
4599          */
4600         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4601         while (ret >= 0 && ret < max_blocks) {
4602                 map.m_lblk += ret;
4603                 map.m_len = (max_blocks -= ret);
4604                 handle = ext4_journal_start(inode, credits);
4605                 if (IS_ERR(handle)) {
4606                         ret = PTR_ERR(handle);
4607                         break;
4608                 }
4609                 ret = ext4_map_blocks(handle, inode, &map,
4610                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4611                 if (ret <= 0) {
4612                         WARN_ON(ret <= 0);
4613                         printk(KERN_ERR "%s: ext4_ext_map_blocks "
4614                                     "returned error inode#%lu, block=%u, "
4615                                     "max_blocks=%u\n", __func__,
4616                                     inode->i_ino, map.m_lblk, map.m_len);
4617                 }
4618                 ext4_mark_inode_dirty(handle, inode);
4619                 ret2 = ext4_journal_stop(handle);
4620                 if (ret <= 0 || ret2)
4621                         break;
4622         }
4623         return ret > 0 ? ret2 : ret;
4624 }
4625
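/*
 * Illustrative call site (editor's sketch, based on the usual end_io
 * path in this era of the code): the direct IO completion handler
 * typically invokes the conversion as
 *
 *      ext4_convert_unwritten_extents(inode, io_end->offset, io_end->size);
 *
 * once the IO that wrote into the unwritten range has completed.
 */
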
4626 /*
4627  * Callback function called for each extent to gather FIEMAP information.
4628  */
4629 static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
4630                        struct ext4_ext_cache *newex, struct ext4_extent *ex,
4631                        void *data)
4632 {
4633         __u64   logical;
4634         __u64   physical;
4635         __u64   length;
4636         __u32   flags = 0;
4637         int             ret = 0;
4638         struct fiemap_extent_info *fieinfo = data;
4639         unsigned char blksize_bits;
4640
4641         blksize_bits = inode->i_sb->s_blocksize_bits;
4642         logical = (__u64)newex->ec_block << blksize_bits;
4643
4644         if (newex->ec_start == 0) {
4645                 /*
4646                  * No extent in the extent tree contains block @newex->ec_start,
4647                  * so the block may lie in 1) a hole or 2) a delayed extent.
4648                  *
4649                  * Holes or delayed extents are processed as follows.
4650                  * 1. Look up dirty pages in the specified range of the
4651                  *    page cache. If no page is found, there is no delayed
4652                  *    extent; return EXT_CONTINUE.
4653                  * 2. Find the first mapped buffer.
4654                  * 3. Check that the mapped buffer is both in the request range
4655                  *    and a delayed buffer. If not, there is no delayed extent;
4656                  *    return.
4657                  * 4. A delayed extent is found; collect it.
4658                  */
4659                 ext4_lblk_t     end = 0;
4660                 pgoff_t         last_offset;
4661                 pgoff_t         offset;
4662                 pgoff_t         index;
4663                 pgoff_t         start_index = 0;
4664                 struct page     **pages = NULL;
4665                 struct buffer_head *bh = NULL;
4666                 struct buffer_head *head = NULL;
4667                 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
4668
4669                 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
4670                 if (pages == NULL)
4671                         return -ENOMEM;
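                /*
                 * Editor's note: one page worth of page pointers is
                 * batched per lookup; with 4K pages and 8-byte pointers
                 * that is nr_pages = 512 pages per find_get_pages_tag()
                 * call.
                 */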
4672
4673                 offset = logical >> PAGE_SHIFT;
4674 repeat:
4675                 last_offset = offset;
4676                 head = NULL;
4677                 ret = find_get_pages_tag(inode->i_mapping, &offset,
4678                                         PAGECACHE_TAG_DIRTY, nr_pages, pages);
4679
4680                 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4681                         /* First time, try to find a mapped buffer. */
4682                         if (ret == 0) {
4683 out:
4684                                 for (index = 0; index < ret; index++)
4685                                         page_cache_release(pages[index]);
4686                                 /* just a hole. */
4687                                 kfree(pages);
4688                                 return EXT_CONTINUE;
4689                         }
4690                         index = 0;
4691
4692 next_page:
4693                         /* Try to find the 1st mapped buffer. */
4694                         end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
4695                                   blksize_bits;
4696                         if (!page_has_buffers(pages[index]))
4697                                 goto out;
4698                         head = page_buffers(pages[index]);
4699                         if (!head)
4700                                 goto out;
4701
4702                         index++;
4703                         bh = head;
4704                         do {
4705                                 if (end >= newex->ec_block +
4706                                         newex->ec_len)
4707                                         /* The buffer is out of
4708                                          * the request range.
4709                                          */
4710                                         goto out;
4711
4712                                 if (buffer_mapped(bh) &&
4713                                     end >= newex->ec_block) {
4714                                         start_index = index - 1;
4715                                         /* get the 1st mapped buffer. */
4716                                         goto found_mapped_buffer;
4717                                 }
4718
4719                                 bh = bh->b_this_page;
4720                                 end++;
4721                         } while (bh != head);
4722
4723                         /* No mapped buffer in the range was found in this
4724                          * page; we need to look up the next page.
4725                          */
4726                         if (index >= ret) {
4727                                 /* There is no page left, but we need to limit
4728                                  * newex->ec_len.
4729                                  */
4730                                 newex->ec_len = end - newex->ec_block;
4731                                 goto out;
4732                         }
4733                         goto next_page;
4734                 } else {
4735                         /* Find contiguous delayed buffers. */
4736                         if (ret > 0 && pages[0]->index == last_offset)
4737                                 head = page_buffers(pages[0]);
4738                         bh = head;
4739                         index = 1;
4740                         start_index = 0;
4741                 }
4742
4743 found_mapped_buffer:
4744                 if (bh != NULL && buffer_delay(bh)) {
4745                         /* 1st or contiguous delayed buffer found. */
4746                         if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4747                                 /*
4748                                  * 1st delayed buffer found, record
4749                                  * the start of extent.
4750                                  */
4751                                 flags |= FIEMAP_EXTENT_DELALLOC;
4752                                 newex->ec_block = end;
4753                                 logical = (__u64)end << blksize_bits;
4754                         }
4755                         /* Find contiguous delayed buffers. */
4756                         do {
4757                                 if (!buffer_delay(bh))
4758                                         goto found_delayed_extent;
4759                                 bh = bh->b_this_page;
4760                                 end++;
4761                         } while (bh != head);
4762
4763                         for (; index < ret; index++) {
4764                                 if (!page_has_buffers(pages[index])) {
4765                                         bh = NULL;
4766                                         break;
4767                                 }
4768                                 head = page_buffers(pages[index]);
4769                                 if (!head) {
4770                                         bh = NULL;
4771                                         break;
4772                                 }
4773
4774                                 if (pages[index]->index !=
4775                                     pages[start_index]->index + index
4776                                     - start_index) {
4777                                         /* Blocks are not contiguous. */
4778                                         bh = NULL;
4779                                         break;
4780                                 }
4781                                 bh = head;
4782                                 do {
4783                                         if (!buffer_delay(bh))
4784                                                 /* Delayed-extent ends. */
4785                                                 goto found_delayed_extent;
4786                                         bh = bh->b_this_page;
4787                                         end++;
4788                                 } while (bh != head);
4789                         }
4790                 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
4791                         /* a hole found. */
4792                         goto out;
4793
4794 found_delayed_extent:
4795                 newex->ec_len = min(end - newex->ec_block,
4796                                                 (ext4_lblk_t)EXT_INIT_MAX_LEN);
4797                 if (ret == nr_pages && bh != NULL &&
4798                         newex->ec_len < EXT_INIT_MAX_LEN &&
4799                         buffer_delay(bh)) {
4800                         /* The extent is not fully collected yet; continue. */
4801                         for (index = 0; index < ret; index++)
4802                                 page_cache_release(pages[index]);
4803                         goto repeat;
4804                 }
4805
4806                 for (index = 0; index < ret; index++)
4807                         page_cache_release(pages[index]);
4808                 kfree(pages);
4809         }
4810
4811         physical = (__u64)newex->ec_start << blksize_bits;
4812         length =   (__u64)newex->ec_len << blksize_bits;
4813
4814         if (ex && ext4_ext_is_uninitialized(ex))
4815                 flags |= FIEMAP_EXTENT_UNWRITTEN;
4816
4817         if (next == EXT_MAX_BLOCKS)
4818                 flags |= FIEMAP_EXTENT_LAST;
4819
4820         ret = fiemap_fill_next_extent(fieinfo, logical, physical,
4821                                         length, flags);
4822         if (ret < 0)
4823                 return ret;
4824         if (ret == 1)
4825                 return EXT_BREAK;
4826         return EXT_CONTINUE;
4827 }
4828 /* fiemap flags we can handle are specified here */
4829 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4830
4831 static int ext4_xattr_fiemap(struct inode *inode,
4832                                 struct fiemap_extent_info *fieinfo)
4833 {
4834         __u64 physical = 0;
4835         __u64 length;
4836         __u32 flags = FIEMAP_EXTENT_LAST;
4837         int blockbits = inode->i_sb->s_blocksize_bits;
4838         int error = 0;
4839
4840         /* in-inode? */
4841         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4842                 struct ext4_iloc iloc;
4843                 int offset;     /* offset of xattr in inode */
4844
4845                 error = ext4_get_inode_loc(inode, &iloc);
4846                 if (error)
4847                         return error;
4848                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4849                 offset = EXT4_GOOD_OLD_INODE_SIZE +
4850                                 EXT4_I(inode)->i_extra_isize;
4851                 physical += offset;
4852                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4853                 flags |= FIEMAP_EXTENT_DATA_INLINE;
4854                 brelse(iloc.bh);
4855         } else { /* external block */
4856                 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4857                 length = inode->i_sb->s_blocksize;
4858         }
4859
4860         if (physical)
4861                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4862                                                 length, flags);
4863         return (error < 0 ? error : 0);
4864 }
4865
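/*
 * Worked example (editor's note): with 256-byte inodes (s_inode_size =
 * 256) and i_extra_isize = 32, the in-inode branch above computes
 * offset = EXT4_GOOD_OLD_INODE_SIZE + 32 = 160 and reports
 * length = 256 - 160 = 96 bytes of in-inode xattr space.
 */
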
4866 /*
4867  * ext4_ext_punch_hole
4868  *
4869  * Punches a hole of "length" bytes in a file starting
4870  * at byte "offset"
4871  *
4872  * @file:   The file to punch a hole in
4873  * @offset: The starting byte offset of the hole
4874  * @length: The length of the hole
4875  *
4876  * Returns the number of blocks removed or negative on error
4877  */
4878 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4879 {
4880         struct inode *inode = file->f_path.dentry->d_inode;
4881         struct super_block *sb = inode->i_sb;
4882         ext4_lblk_t first_block, stop_block;
4883         struct address_space *mapping = inode->i_mapping;
4884         handle_t *handle;
4885         loff_t first_page, last_page, page_len;
4886         loff_t first_page_offset, last_page_offset;
4887         int credits, err = 0;
4888
4889         /* No need to punch hole beyond i_size */
4890         if (offset >= inode->i_size)
4891                 return 0;
4892
4893         /*
4894          * If the hole extends beyond i_size, set the hole
4895          * to end after the page that contains i_size
4896          */
4897         if (offset + length > inode->i_size) {
4898                 length = inode->i_size +
4899                    PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4900                    offset;
4901         }
4902
4903         first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4904         last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4905
4906         first_page_offset = first_page << PAGE_CACHE_SHIFT;
4907         last_page_offset = last_page << PAGE_CACHE_SHIFT;
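        /*
         * Worked example (editor's note): with 4K pages, offset = 1000
         * and length = 10000 give first_page = 1 and last_page = 2, so
         * the one whole page inside the hole (bytes 4096-8191) is
         * released below, while the partial head (bytes 1000-4095) and
         * tail (bytes 8192-10999) are only zeroed out.
         */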
4908
4909         /*
4910          * Write out all dirty pages to avoid race conditions
4911          * Then release them.
4912          */
4913         if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4914                 err = filemap_write_and_wait_range(mapping,
4915                         offset, offset + length - 1);
4916
4917                 if (err)
4918                         return err;
4919         }
4920
4921         /* Now release the pages */
4922         if (last_page_offset > first_page_offset) {
4923                 truncate_inode_pages_range(mapping, first_page_offset,
4924                                            last_page_offset-1);
4925         }
4926
4927         /* finish any pending end_io work */
4928         ext4_flush_completed_IO(inode);
4929
4930         credits = ext4_writepage_trans_blocks(inode);
4931         handle = ext4_journal_start(inode, credits);
4932         if (IS_ERR(handle))
4933                 return PTR_ERR(handle);
4934
4935         err = ext4_orphan_add(handle, inode);
4936         if (err)
4937                 goto out;
4938
4939         /*
4940          * Now we need to zero out the non-page-aligned data in the
4941          * pages at the start and tail of the hole, and unmap the buffer
4942          * heads for the block aligned regions of the page that were
4943          * completely zeroed.
4944          */
4945         if (first_page > last_page) {
4946                 /*
4947                  * If the file space being punched out is contained within
4948                  * a page, just zero out and unmap the middle of that page
4949                  */
4950                 err = ext4_discard_partial_page_buffers(handle,
4951                         mapping, offset, length, 0);
4952
4953                 if (err)
4954                         goto out;
4955         } else {
4956                 /*
4957                  * zero out and unmap the partial page that contains
4958                  * the start of the hole
4959                  */
4960                 page_len = first_page_offset - offset;
4961                 if (page_len > 0) {
4962                         err = ext4_discard_partial_page_buffers(handle, mapping,
4963                                                    offset, page_len, 0);
4964                         if (err)
4965                                 goto out;
4966                 }
4967
4968                 /*
4969                  * zero out and unmap the partial page that contains
4970                  * the end of the hole
4971                  */
4972                 page_len = offset + length - last_page_offset;
4973                 if (page_len > 0) {
4974                         err = ext4_discard_partial_page_buffers(handle, mapping,
4975                                         last_page_offset, page_len, 0);
4976                         if (err)
4977                                 goto out;
4978                 }
4979         }
4980
4981         /*
4982          * If i_size is contained in the last page, we need to
4983          * unmap and zero the partial page after i_size
4984          */
4985         if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4986            inode->i_size % PAGE_CACHE_SIZE != 0) {
4987
4988                 page_len = PAGE_CACHE_SIZE -
4989                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4990
4991                 if (page_len > 0) {
4992                         err = ext4_discard_partial_page_buffers(handle,
4993                           mapping, inode->i_size, page_len, 0);
4994
4995                         if (err)
4996                                 goto out;
4997                 }
4998         }
4999
5000         first_block = (offset + sb->s_blocksize - 1) >>
5001                 EXT4_BLOCK_SIZE_BITS(sb);
5002         stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
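        /*
         * Worked example (editor's note): continuing the 4K example
         * above (offset = 1000, length = 10000), first_block =
         * (1000 + 4095) >> 12 = 1 and stop_block = 11000 >> 12 = 2, so
         * only block 1, the one block lying wholly inside the hole, is
         * removed; the partially covered blocks 0 and 2 were zeroed but
         * never freed.
         */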
5003
5004         /* If there are no blocks to remove, return now */
5005         if (first_block >= stop_block)
5006                 goto out;
5007
5008         down_write(&EXT4_I(inode)->i_data_sem);
5009         ext4_ext_invalidate_cache(inode);
5010         ext4_discard_preallocations(inode);
5011
5012         err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
5013
5014         ext4_ext_invalidate_cache(inode);
5015         ext4_discard_preallocations(inode);
5016
5017         if (IS_SYNC(inode))
5018                 ext4_handle_sync(handle);
5019
5020         up_write(&EXT4_I(inode)->i_data_sem);
5021
5022 out:
5023         ext4_orphan_del(handle, inode);
5024         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
5025         ext4_mark_inode_dirty(handle, inode);
5026         ext4_journal_stop(handle);
5027         return err;
5028 }
5029 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5030                 __u64 start, __u64 len)
5031 {
5032         ext4_lblk_t start_blk;
5033         int error = 0;
5034
5035         /* fall back to the generic code here if not in extents format */
5036         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5037                 return generic_block_fiemap(inode, fieinfo, start, len,
5038                         ext4_get_block);
5039
5040         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
5041                 return -EBADR;
5042
5043         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5044                 error = ext4_xattr_fiemap(inode, fieinfo);
5045         } else {
5046                 ext4_lblk_t len_blks;
5047                 __u64 last_blk;
5048
5049                 start_blk = start >> inode->i_sb->s_blocksize_bits;
5050                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5051                 if (last_blk >= EXT_MAX_BLOCKS)
5052                         last_blk = EXT_MAX_BLOCKS-1;
5053                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5054
5055                 /*
5056                  * Walk the extent tree gathering extent information.
5057                  * ext4_ext_fiemap_cb will push extents back to user.
5058                  */
5059                 error = ext4_ext_walk_space(inode, start_blk, len_blks,
5060                                           ext4_ext_fiemap_cb, fieinfo);
5061         }
5062
5063         return error;
5064 }
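
/*
 * Illustrative userspace usage (editor's sketch, not part of the
 * original file): this entry point backs the FIEMAP ioctl, e.g.:
 *
 *      struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                 8 * sizeof(struct fiemap_extent));
 *      fm->fm_start = 0;
 *      fm->fm_length = FIEMAP_MAX_OFFSET;
 *      fm->fm_extent_count = 8;
 *      ioctl(fd, FS_IOC_FIEMAP, fm);
 */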