 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 * Extents support for EXT4
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
#include <linux/module.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include <trace/events/ext4.h>
static int ext4_ext_truncate_extend_restart(handle_t *handle,
	if (!ext4_handle_valid(handle))
	if (handle->h_buffer_credits > needed)
	err = ext4_journal_extend(handle, needed);
	err = ext4_truncate_restart_trans(handle, inode, needed);
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
		struct ext4_extent *ex;
		depth = path->p_depth;
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file. However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space. Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		ex = path[depth].p_ext;
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
			if (block > ext_block)
				return ext_pblk + (block - ext_block);
				return ext_pblk - (ext_block - block);
		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files. Regular
		 * files will start at the second block group. This
		 * tends to speed up directory access and improves
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	if (test_opt(inode->i_sb, DELALLOC))
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
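/*
 * Editor's illustration (not part of the original file): the goal
 * arithmetic above, extracted into a standalone sketch. toy_find_goal()
 * and its parameter names are inventions of this sketch; the 16 slices
 * and the boundary check mirror the code above.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>

static uint64_t toy_find_goal(uint64_t bg_start, uint64_t last_block,
			      uint32_t blocks_per_group, int pid,
			      uint32_t block)
{
	uint64_t colour;

	/* carve the group into 16 slices and pick one by PID, so
	 * concurrent writers are spread across the group */
	if (bg_start + blocks_per_group <= last_block)
		colour = (uint64_t)(pid % 16) * (blocks_per_group / 16);
	else
		colour = (uint64_t)(pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
#endif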
 * Allocation for a metadata block
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
	ext4_fsblk_t goal, newblock;
	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
static inline int ext4_ext_space_block(struct inode *inode, int check)
	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
static inline int ext4_ext_space_root(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
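/*
 * Editor's note (illustrative, not from the original file): with 4k
 * blocks, the 12-byte extent header and 12-byte extent/index records
 * give (4096 - 12) / 12 = 340 entries per tree block, and the 60-byte
 * i_data root holds (60 - 12) / 12 = 4 entries. A standalone sketch of
 * the same arithmetic, with the sizes hard-coded:
 */
#if 0	/* illustrative sketch, not built */
enum { TOY_EH_SIZE = 12, TOY_ENTRY_SIZE = 12, TOY_I_DATA_SIZE = 60 };

static int toy_space_block(unsigned int blocksize)
{
	return (blocksize - TOY_EH_SIZE) / TOY_ENTRY_SIZE;	/* 340 for 4k */
}

static int toy_space_root(void)
{
	return (TOY_I_DATA_SIZE - TOY_EH_SIZE) / TOY_ENTRY_SIZE;	/* 4 */
}
#endif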
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
	struct ext4_inode_info *ei = EXT4_I(inode);
	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks. At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			ei->i_da_metadata_calc_len = 0;
		ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
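/*
 * Editor's illustration (not part of the original file): the
 * contiguous-allocation case above, replayed over plain integers.
 * len is the number of contiguous delayed blocks accounted so far;
 * idxs is the per-block entry count (340 for 4k blocks).
 */
#if 0	/* illustrative sketch, not built */
static int toy_meta_for_next_block(unsigned int idxs, unsigned int len)
{
	int num = 0;

	if ((len % idxs) == 0)
		num++;			/* new leaf block */
	if ((len % (idxs * idxs)) == 0)
		num++;			/* new level-1 index block */
	if ((len % (idxs * idxs * idxs)) == 0)
		num++;			/* new level-2 index block */
	return num;
}
#endif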
ext4_ext_max_entries(struct inode *inode, int depth)
	if (depth == ext_depth(inode)) {
			max = ext4_ext_space_root(inode, 1);
			max = ext4_ext_space_root_idx(inode, 1);
			max = ext4_ext_space_block(inode, 1);
			max = ext4_ext_space_block_idx(inode, 1);
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
	entries = le16_to_cpu(eh->eh_entries);
		ext = EXT_FIRST_EXTENT(eh);
			if (!ext4_valid_extent(inode, ext))
		ext_idx = EXT_FIRST_INDEX(eh);
			if (!ext4_valid_extent_idx(inode, ext_idx))
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
	const char *error_msg;
	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
	ext4_error_inode(inode, function, line, 0,
			 "bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 error_msg, le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
int ext4_ext_check_inode(struct inode *inode)
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
	int k, l = path->p_depth;
	for (k = 0; k <= l; k++, path++) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);
	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
void ext4_ext_drop_refs(struct ext4_ext_path *path)
	int depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;
	ext_debug("binsearch for %u(idx): ", block);
	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
		if (block < le32_to_cpu(m->ei_block))
	ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
		  m, le32_to_cpu(m->ei_block),
		  r, le32_to_cpu(r->ei_block));
	ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
		struct ext4_extent_idx *chix, *ix;
		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			BUG_ON(k && le32_to_cpu(ix->ei_block)
				    <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
		BUG_ON(chix != path->p_idx);
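/*
 * Editor's illustration (not part of the original file): the loop
 * above is the classic "rightmost entry whose start is <= block"
 * binary search. Standalone sketch over a plain sorted array in host
 * byte order; nr >= 1 and keys[0] <= block are assumed, as they are
 * for a checked header.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>

static int toy_binsearch(const uint32_t *keys, int nr, uint32_t block)
{
	int l = 1, r = nr - 1;		/* slot 0 is the floor */

	while (l <= r) {
		int m = l + (r - l) / 2;
		if (block < keys[m])
			r = m - 1;
		else
			l = m + 1;
	}
	return l - 1;			/* rightmost keys[i] <= block */
}
#endif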
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;
	if (eh->eh_entries == 0) {
		 * this leaf is empty:
		 * we get such a leaf in split/add case
	ext_debug("binsearch for %u: ", block);
	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);
		if (block < le32_to_cpu(m->ee_block))
	ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
		  m, le32_to_cpu(m->ee_block),
		  r, le32_to_cpu(r->ee_block));
	ext_debug(" -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_uninitialized(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
		struct ext4_extent *chex, *ex;
		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
				    <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
		BUG_ON(chex != path->p_ext);
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
	struct ext4_extent_header *eh;
	eh = ext_inode_hdr(inode);
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
		     struct ext4_ext_path *path)
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;
	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	/* account possible depth increase */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
			return ERR_PTR(-ENOMEM);
	/* walk through the tree */
		int need_to_validate = 0;
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;
		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
			if (bh_submit_read(bh) < 0) {
			/* validate the extent entries */
			need_to_validate = 1;
		eh = ext_block_hdr(bh);
		if (unlikely(ppos > depth)) {
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		if (need_to_validate && ext4_ext_check(inode, eh, i))
	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
	ext4_ext_show_path(inode, path);
	ext4_ext_drop_refs(path);
	return ERR_PTR(-EIO);
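/*
 * Editor's illustration (not part of the original file): the shape of
 * the descent above, over a toy in-memory tree. struct toy_node and
 * toy_descend() are inventions of this sketch; toy_binsearch() is the
 * sketch shown earlier.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>

struct toy_node {
	int		 depth;		/* 0 == leaf */
	int		 nr;		/* entries in use */
	uint32_t	 key[340];	/* first logical block per entry */
	struct toy_node	*child[340];	/* interior nodes only */
};

static struct toy_node *toy_descend(struct toy_node *root, uint32_t block)
{
	struct toy_node *n = root;

	while (n->depth > 0)		/* pick a child at each interior level */
		n = n->child[toy_binsearch(n->key, n->nr, block)];
	return n;			/* leaf: binary-search the extents here */
}
#endif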
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
	struct ext4_extent_idx *ix;
	err = ext4_ext_get_access(handle, inode, curp);
	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		ix = curp->p_idx + 1;
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);
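/*
 * Editor's illustration (not part of the original file): both branches
 * above are the usual sorted-array insertion, shifting the tail right
 * with memmove() before writing the new slot.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>
#include <string.h>

/* Insert key at slot pos of a sorted array holding nr entries
 * (capacity must be at least nr + 1). */
static void toy_insert_at(uint32_t *arr, int nr, int pos, uint32_t key)
{
	memmove(arr + pos + 1, arr + pos, (nr - pos) * sizeof(*arr));
	arr[pos] = key;
}
#endif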
 * inserts new subtree into the path, using free index entry
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	ext4_fsblk_t newblock, oldblock;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */
	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	 * If an error occurs, we break processing
	 * and mark the filesystem read-only. The index won't
	 * be inserted and the tree will be in a consistent
	 * state. The next mount will repair buffers too.
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
		ablocks[a] = newblock;
	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
	bh = sb_getblk(inode->i_sb, newblock);
	err = ext4_journal_get_create_access(handle, bh);
	neh = ext_block_hdr(bh);
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ex = EXT_FIRST_EXTENT(neh);
	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext4_ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
	memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
	le16_add_cpu(&neh->eh_entries, m);
	set_buffer_uptodate(bh);
	err = ext4_handle_dirty_metadata(handle, inode, bh);
	/* correct old leaf */
		err = ext4_ext_get_access(handle, inode, path + depth);
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
	/* create intermediate indexes */
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
	ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		err = ext4_journal_get_create_access(handle, bh);
		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);
		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					ext4_idx_pblock(path[i].p_idx),
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			BUG_ON(neh->eh_entries > neh->eh_max);*/
		memmove(++fidx, path[i].p_idx - m,
			sizeof(struct ext4_extent_idx) * m);
		le16_add_cpu(&neh->eh_entries, m);
		set_buffer_uptodate(bh);
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		/* correct old index */
			err = ext4_ext_get_access(handle, inode, path + i);
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);
	if (buffer_locked(bh))
	/* free all allocated blocks in error case */
	for (i = 0; i < depth; i++) {
		ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
				 EXT4_FREE_BLOCKS_METADATA);
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	bh = sb_getblk(inode->i_sb, newblock);
		ext4_std_error(inode->i_sb, err);
	err = ext4_journal_get_create_access(handle, bh);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	err = ext4_handle_dirty_metadata(handle, inode, bh);
	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);
	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
	struct ext4_ext_path *curp;
	int depth, i, err = 0;
	i = depth = ext_depth(inode);
	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
			err = PTR_ERR(path);
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
			err = PTR_ERR(path);
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
	depth = path->p_depth;
	if (depth == 0 && path->p_ext == NULL)
	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */
	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
	depth = path->p_depth;
	if (depth == 0 && path->p_ext == NULL)
	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */
	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
	/* we've gone up to the root and found no index to the right */
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
	BUG_ON(path == NULL);
	depth = path->p_depth;
	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;
	while (depth >= 0) {
		if (depth == path->p_depth) {
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
	return EXT_MAX_BLOCK;
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					    struct ext4_ext_path *path)
	BUG_ON(path == NULL);
	depth = path->p_depth;
	/* zero-tree has no leaf blocks at all */
		return EXT_MAX_BLOCK;
	/* go to index block */
	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
	return EXT_MAX_BLOCK;
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		/* there is no tree at all */
	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
	 * TODO: we need correction if border is smaller than current one
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
		err = ext4_ext_get_access(handle, inode, path + k);
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
	unsigned short ext1_ee_len, ext2_ee_len, max_len;
	 * Make sure that either both extents are uninitialized, or
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
		max_len = EXT_INIT_MAX_LEN;
	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);
	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	if (ext1_ee_len + ext2_ee_len > max_len)
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
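/*
 * Editor's illustration (not part of the original file): the checks
 * above, restated over toy extents: same init state, logical
 * contiguity, a combined length that keeps the top bit of ee_len
 * clear (EXT_INIT_MAX_LEN is 1 << 15), and physical contiguity.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>

struct toy_extent {
	uint32_t lblock;	/* first logical block */
	uint64_t pblock;	/* first physical block */
	uint16_t len;		/* number of blocks */
	int	 uninit;	/* uninitialized (preallocated)? */
};

static int toy_mergeable(const struct toy_extent *a, const struct toy_extent *b)
{
	unsigned int max_len = a->uninit ? (1 << 15) - 1 : (1 << 15);

	if (a->uninit != b->uninit)
		return 0;
	if (a->lblock + a->len != b->lblock)		/* logically adjacent? */
		return 0;
	if ((unsigned int)a->len + b->len > max_len)	/* keep top bit clear */
		return 0;
	return a->pblock + a->len == b->pblock;		/* physically adjacent */
}
#endif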
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
static int ext4_ext_try_to_merge_right(struct inode *inode,
				       struct ext4_ext_path *path,
				       struct ext4_extent *ex)
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int uninitialized = 0;
	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;
	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
			ext4_ext_mark_uninitialized(ex);
		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		le16_add_cpu(&eh->eh_entries, -1);
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
 * This function tries to merge the @ex extent to neighbours in the tree.
 * Returns 1 if merged towards the left, else 0.
static int ext4_ext_try_to_merge(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;
	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
		ret = ext4_ext_try_to_merge_right(inode, path, ex);
 * check if a portion of the "newext" extent overlaps with an
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
static unsigned int ext4_ext_check_overlap(struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
	unsigned int depth, len1;
	unsigned int ret = 0;
	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
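/*
 * Editor's illustration (not part of the original file): the clamping
 * above, over plain integers. EXT_MAX_BLOCK is 0xffffffff here; first
 * guard against wrap of b1 + len1, then cut the range at b2.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>

static uint32_t toy_clamp_len(uint32_t b1, uint32_t len1, uint32_t b2)
{
	if (b1 + len1 < b1)		/* wrap through zero */
		len1 = 0xffffffffU - b1;
	if (b1 + len1 > b2)		/* would overlap the next extent */
		len1 = b2 - b1;
	return len1;
}
#endif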
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path,
			   struct ext4_extent *newext, int flag)
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	unsigned uninitialized = 0;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		if (ext4_ext_is_uninitialized(ex))
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	nearex = path[depth].p_ext;
	err = ext4_ext_get_access(handle, inode, path + depth);
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
		   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		path[depth].p_ext = nearex + 1;
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;
	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);
	/* try to merge extents to the left */
	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	err = ext4_ext_dirty(handle, inode, path + depth);
	ext4_ext_drop_refs(npath);
	ext4_ext_invalidate_cache(inode);
static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;
	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);
	while (block < last && block != EXT_MAX_BLOCK) {
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);
		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);
			/* there is no extent yet, so try to allocate
			 * all requested space */
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			 * some part of requested space is covered
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
		BUG_ON(end <= start);
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);
		if (err == EXT_REPEAT)
		else if (err == EXT_BREAK) {
		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
		block = cbex.ec_block + cbex.ec_len;
	ext4_ext_drop_refs(path);
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start)
	struct ext4_ext_cache *cex;
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	ex = path[depth].p_ext;
		/* there is no extent yet, so gap is [0;-] */
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
		BUG_ON(next == lblock);
		len = next - lblock;
	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
 * Return 0 if cache is invalid; 1 if the cache is valid
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
		  struct ext4_extent *ex)
	struct ext4_ext_cache *cex;
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	/* has cache valid data? */
	if (cex->ec_len == 0)
	if (in_range(block, cex->ec_block, cex->ec_len)) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				cex->ec_block, cex->ec_len, cex->ec_start);
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
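/*
 * Editor's illustration (not part of the original file):
 * in_range(block, start, len) above is just "start <= block <
 * start + len". A standalone sketch of the single-extent cache probe;
 * struct toy_cache and the pblock mapping are assumptions of this
 * sketch.
 */
#if 0	/* illustrative sketch, not built */
#include <stdint.h>

struct toy_cache {
	uint32_t block;		/* first logical block cached */
	uint32_t len;		/* 0 means the cache is invalid */
	uint64_t start;		/* matching physical block */
};

static int toy_in_cache(const struct toy_cache *c, uint32_t block,
			uint64_t *pblock)
{
	if (c->len == 0)
		return 0;		/* nothing cached */
	if (block >= c->block && block < c->block + c->len) {
		*pblock = c->start + (block - c->block);
		return 1;
	}
	return 0;
}
#endif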
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path)
	/* free index block */
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
	err = ext4_ext_get_access(handle, inode, path);
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * to the extent tree.
 * When passing the actual path, the caller should calculate credits
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
					    struct ext4_ext_path *path)
	int depth = ext_depth(inode);
		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
			 * There is some space in the leaf tree, no
			 * need to account for leaf block credit
			 * bitmaps and block group descriptor blocks
			 * and other metadata blocks still need to be
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
	return ext4_chunk_trans_blocks(inode, nrblocks);
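/*
 * Editor's illustration (not part of the original file): the cheap
 * case above in isolation. When the leaf already has a free slot, the
 * transaction only needs the block bitmap, the group descriptor, and
 * the generic metadata credits; meta_trans_blocks stands in for
 * EXT4_META_TRANS_BLOCKS(sb).
 */
#if 0	/* illustrative sketch, not built */
static int toy_credits_leaf_has_room(int meta_trans_blocks)
{
	return 2 + meta_trans_blocks;	/* 1 bitmap + 1 group descriptor + rest */
}
#endif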
 * How many index/leaf blocks need to change/allocate to modify nrblocks?
 * If nrblocks fit in a single extent (chunk flag is 1), then
 * in the worst case, each tree level index/leaf needs to be changed;
 * if the tree splits due to the insertion of a new extent, then the old
 * index/leaf needs to be updated too
 * If the nrblocks are discontiguous, they could cause
 * the whole tree split more than once, but this is really rare.
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
	int depth = ext_depth(inode);
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      ext4_lblk_t from, ext4_lblk_t to)
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), ee_len);
		printk(KERN_INFO "strange request: removal(2) "
		       "%u-%u from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), ee_len);
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path, ext4_lblk_t start)
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;
	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);
	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);
	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
		if (ext4_ext_is_uninitialized(ex))
		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  uninitialized, ex_ee_len);
		path[depth].p_ext = ex;
		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
		ext_debug("  border %u:%u\n", a, b);
		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			/* there is no "make a hole" API yet */
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			credits += (ext_depth(inode)) + 1;
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		err = ext4_ext_get_access(handle, inode, path + depth);
		err = ext4_remove_blocks(handle, inode, ex, a, b);
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			le16_add_cpu(&eh->eh_entries, -1);
		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext4_ext_pblock(ex));
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);
	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
ext4_ext_more_to_rm(struct ext4_ext_path *path)
	BUG_ON(path->p_idx == NULL);
	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
	 * if truncation on a deeper level happened, it wasn't partial,
	 * so we have to consider the current index for truncation
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2354 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2356 struct super_block *sb = inode->i_sb;
2357 int depth = ext_depth(inode);
2358 struct ext4_ext_path *path;
2362 ext_debug("truncate since %u\n", start);
2364 /* probably first extent we're gonna free will be last in block */
2365 handle = ext4_journal_start(inode, depth + 1);
2367 return PTR_ERR(handle);
2370 ext4_ext_invalidate_cache(inode);
2373 * We start scanning from right side, freeing all the blocks
2374 * after i_size and walking into the tree depth-wise.
2376 depth = ext_depth(inode);
2377 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2379 ext4_journal_stop(handle);
2382 path[0].p_depth = depth;
2383 path[0].p_hdr = ext_inode_hdr(inode);
2384 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2390 while (i >= 0 && err == 0) {
2392 /* this is leaf block */
2393 err = ext4_ext_rm_leaf(handle, inode, path, start);
2394 /* root level has p_bh == NULL, brelse() eats this */
2395 brelse(path[i].p_bh);
2396 path[i].p_bh = NULL;
2401 /* this is index block */
2402 if (!path[i].p_hdr) {
2403 ext_debug("initialize header\n");
2404 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2407 if (!path[i].p_idx) {
2408 /* this level hasn't been touched yet */
2409 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2410 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2411 ext_debug("init index ptr: hdr 0x%p, num %d\n",
2413 le16_to_cpu(path[i].p_hdr->eh_entries));
2415 /* we were already here, see at next index */
2419 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2420 i, EXT_FIRST_INDEX(path[i].p_hdr),
2422 if (ext4_ext_more_to_rm(path + i)) {
2423 struct buffer_head *bh;
2424 /* go to the next level */
2425 ext_debug("move to level %d (block %llu)\n",
2426 i + 1, ext4_idx_pblock(path[i].p_idx));
2427 memset(path + i + 1, 0, sizeof(*path));
2428 bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2430 /* should we reset i_size? */
2434 if (WARN_ON(i + 1 > depth)) {
2438 if (ext4_ext_check(inode, ext_block_hdr(bh),
2443 path[i + 1].p_bh = bh;
2445 /* save actual number of indexes since this
2446 * number is changed at the next iteration */
2447 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2450 /* we finished processing this index, go up */
2451 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2452 /* index is empty, remove it;
2453 * handle must be already prepared by the
2454 * truncatei_leaf() */
2455 err = ext4_ext_rm_idx(handle, inode, path + i);
2457 /* root level has p_bh == NULL, brelse() eats this */
2458 brelse(path[i].p_bh);
2459 path[i].p_bh = NULL;
2461 ext_debug("return to level %d\n", i);
2465 /* TODO: flexible tree reduction should be here */
2466 if (path->p_hdr->eh_entries == 0) {
2468 * truncating to zero freed the whole tree,
2469 * so we need to correct eh_depth
2471 err = ext4_ext_get_access(handle, inode, path);
2473 ext_inode_hdr(inode)->eh_depth = 0;
2474 ext_inode_hdr(inode)->eh_max =
2475 cpu_to_le16(ext4_ext_space_root(inode, 0));
2476 err = ext4_ext_dirty(handle, inode, path);
2480 ext4_ext_drop_refs(path);
2484 ext4_journal_stop(handle);
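/*
 * Illustrative walk (not from the original source): for a tree of
 * depth 1 with three leaf blocks, ext4_ext_remove_space() starts at
 * the root with p_idx = EXT_LAST_INDEX() and descends to the
 * rightmost leaf. ext4_ext_rm_leaf() frees the extents at or after
 * @start; if the leaf becomes empty, its index entry is removed via
 * ext4_ext_rm_idx(), then the walk moves one index to the left and
 * repeats. If everything was freed, eh_depth is reset to 0 above.
 */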
2490 * called at mount time
2492 void ext4_ext_init(struct super_block *sb)
2495 * possible initialization would be here
2498 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2499 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2500 printk(KERN_INFO "EXT4-fs: file extents enabled");
2501 #ifdef AGGRESSIVE_TEST
2502 printk(", aggressive tests");
2504 #ifdef CHECK_BINSEARCH
2505 printk(", check binsearch");
2507 #ifdef EXTENTS_STATS
2512 #ifdef EXTENTS_STATS
2513 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2514 EXT4_SB(sb)->s_ext_min = 1 << 30;
2515 EXT4_SB(sb)->s_ext_max = 0;
2521 * called at umount time
2523 void ext4_ext_release(struct super_block *sb)
2525 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2528 #ifdef EXTENTS_STATS
2529 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2530 struct ext4_sb_info *sbi = EXT4_SB(sb);
2531 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2532 sbi->s_ext_blocks, sbi->s_ext_extents,
2533 sbi->s_ext_blocks / sbi->s_ext_extents);
2534 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2535 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2540 /* FIXME!! we need to try to merge to left or right after zero-out */
2541 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2543 ext4_fsblk_t ee_pblock;
2544 unsigned int ee_len;
2547 ee_len = ext4_ext_get_actual_len(ex);
2548 ee_pblock = ext4_ext_pblock(ex);
2550 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2558 * used by extent splitting.
2560 #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails due to ENOSPC */
2562 #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
2563 #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
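/*
 * Example flag combination (illustrative): a pre-IO split of an
 * uninitialized extent, as done by ext4_split_unwritten_extents()
 * below, would pass
 *
 *	split_flag = EXT4_EXT_MAY_ZEROOUT | EXT4_EXT_MARK_UNINIT2;
 *
 * i.e. the half at/after the split point stays uninitialized, and if
 * inserting the new extent fails with ENOSPC the whole original
 * extent may be zeroed out instead. EXT4_EXT_MAY_ZEROOUT is only set
 * when the extent lies fully inside i_size (see the callers below).
 */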
2566 * ext4_split_extent_at() splits an extent at a given block.
2568 * @handle: the journal handle
2569 * @inode: the file inode
2570 * @path: the path to the extent
2571 * @split: the logical block where the extent is split.
2572 * @split_flag: indicates whether the extent can be zeroed out if the split
2573 * fails, and the states (init or uninit) of the new extents.
2574 * @flags: flags used to insert new extent to extent tree.
2577 * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
2578 * states are determined by @split_flag.
2580 * There are two cases:
2581 * a> the extent is split into two extents.
2582 * b> no split is needed, just change the state of the extent.
2584 * return 0 on success.
2586 static int ext4_split_extent_at(handle_t *handle,
2587 struct inode *inode,
2588 struct ext4_ext_path *path,
2593 ext4_fsblk_t newblock;
2594 ext4_lblk_t ee_block;
2595 struct ext4_extent *ex, newex, orig_ex;
2596 struct ext4_extent *ex2 = NULL;
2597 unsigned int ee_len, depth;
2600 ext_debug("ext4_split_extents_at: inode %lu, logical"
2601 "block %llu\n", inode->i_ino, (unsigned long long)split);
2603 ext4_ext_show_leaf(inode, path);
2605 depth = ext_depth(inode);
2606 ex = path[depth].p_ext;
2607 ee_block = le32_to_cpu(ex->ee_block);
2608 ee_len = ext4_ext_get_actual_len(ex);
2609 newblock = split - ee_block + ext4_ext_pblock(ex);
2611 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2613 err = ext4_ext_get_access(handle, inode, path + depth);
2617 if (split == ee_block) {
2619 * case b: block @split is the block that the extent begins with,
2620 * then we just change the state of the extent; no split is needed.
2623 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2624 ext4_ext_mark_uninitialized(ex);
2626 ext4_ext_mark_initialized(ex);
2628 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2629 ext4_ext_try_to_merge(inode, path, ex);
2631 err = ext4_ext_dirty(handle, inode, path + depth);
2636 memcpy(&orig_ex, ex, sizeof(orig_ex));
2637 ex->ee_len = cpu_to_le16(split - ee_block);
2638 if (split_flag & EXT4_EXT_MARK_UNINIT1)
2639 ext4_ext_mark_uninitialized(ex);
2642 * the path may lead to a new leaf, not to the original leaf any more,
2643 * after ext4_ext_insert_extent() returns.
2645 err = ext4_ext_dirty(handle, inode, path + depth);
2647 goto fix_extent_len;
2650 ex2->ee_block = cpu_to_le32(split);
2651 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
2652 ext4_ext_store_pblock(ex2, newblock);
2653 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2654 ext4_ext_mark_uninitialized(ex2);
2656 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2657 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2658 err = ext4_ext_zeroout(inode, &orig_ex);
2660 goto fix_extent_len;
2661 /* update the extent length and mark as initialized */
2662 ex->ee_len = cpu_to_le16(ee_len);
2663 ext4_ext_try_to_merge(inode, path, ex);
2664 err = ext4_ext_dirty(handle, inode, path + depth);
2667 goto fix_extent_len;
2670 ext4_ext_show_leaf(inode, path);
2674 ex->ee_len = orig_ex.ee_len;
2675 ext4_ext_dirty(handle, inode, path + depth);
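/*
 * Worked example (illustrative numbers): take an extent with
 * ee_block = 100, ee_len = 50, pblock = 1000 and split = 120. Then
 *
 *	newblock = split - ee_block + pblock = 120 - 100 + 1000 = 1020
 *	ex->ee_len    = split - ee_block           = 20 (blocks 100..119)
 *	ex2->ee_block = 120, ex2->ee_len = 50 - 20 = 30 (blocks 120..149)
 *
 * If inserting ex2 fails with ENOSPC and EXT4_EXT_MAY_ZEROOUT is set,
 * all 50 blocks of the original extent are zeroed out and ex is
 * restored to ee_len = 50, marked initialized.
 */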
2680 * ext4_split_extent() splits an extent and marks the extent which is covered
2681 * by @map as @split_flag indicates.
2683 * It may result in splitting the extent into multiple extents (up to three).
2684 * There are three possibilities:
2685 * a> There is no split required
2686 * b> Splits in two extents: Split is happening at either end of the extent
2687 * c> Splits in three extents: Someone is splitting in the middle of the extent
2690 static int ext4_split_extent(handle_t *handle,
2691 struct inode *inode,
2692 struct ext4_ext_path *path,
2693 struct ext4_map_blocks *map,
2697 ext4_lblk_t ee_block;
2698 struct ext4_extent *ex;
2699 unsigned int ee_len, depth;
2702 int split_flag1, flags1;
2704 depth = ext_depth(inode);
2705 ex = path[depth].p_ext;
2706 ee_block = le32_to_cpu(ex->ee_block);
2707 ee_len = ext4_ext_get_actual_len(ex);
2708 uninitialized = ext4_ext_is_uninitialized(ex);
2710 if (map->m_lblk + map->m_len < ee_block + ee_len) {
2711 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2712 EXT4_EXT_MAY_ZEROOUT : 0;
2713 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
2715 split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
2716 EXT4_EXT_MARK_UNINIT2;
2717 err = ext4_split_extent_at(handle, inode, path,
2718 map->m_lblk + map->m_len, split_flag1, flags1);
2721 ext4_ext_drop_refs(path);
2722 path = ext4_ext_find_extent(inode, map->m_lblk, path);
2724 return PTR_ERR(path);
2726 if (map->m_lblk >= ee_block) {
2727 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2728 EXT4_EXT_MAY_ZEROOUT : 0;
2730 split_flag1 |= EXT4_EXT_MARK_UNINIT1;
2731 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2732 split_flag1 |= EXT4_EXT_MARK_UNINIT2;
2733 err = ext4_split_extent_at(handle, inode, path,
2734 map->m_lblk, split_flag1, flags);
2739 ext4_ext_show_leaf(inode, path);
2741 return err ? err : map->m_len;
2744 #define EXT4_EXT_ZERO_LEN 7
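/*
 * Illustrative use of the threshold: a write of 4 blocks into a
 * 12-block uninitialized extent that lies fully inside i_size takes
 * the shortcut below, since 12 <= 2 * EXT4_EXT_ZERO_LEN = 14: the
 * whole extent is zeroed out and marked initialized with no split.
 * A 40-block extent would instead be split around the written range.
 */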
2746 * This function is called by ext4_ext_map_blocks() if someone tries to write
2747 * to an uninitialized extent. It may result in splitting the uninitialized
2748 * extent into multiple extents (up to three - one initialized and two uninitialized).
2750 * There are three possibilities:
2751 * a> There is no split required: Entire extent should be initialized
2752 * b> Splits in two extents: Write is happening at either end of the extent
2753 * c> Splits in three extents: Someone is writing in the middle of the extent
2755 static int ext4_ext_convert_to_initialized(handle_t *handle,
2756 struct inode *inode,
2757 struct ext4_map_blocks *map,
2758 struct ext4_ext_path *path)
2760 struct ext4_map_blocks split_map;
2761 struct ext4_extent zero_ex;
2762 struct ext4_extent *ex;
2763 ext4_lblk_t ee_block, eof_block;
2764 unsigned int allocated, ee_len, depth;
2768 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
2769 "block %llu, max_blocks %u\n", inode->i_ino,
2770 (unsigned long long)map->m_lblk, map->m_len);
2772 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2773 inode->i_sb->s_blocksize_bits;
2774 if (eof_block < map->m_lblk + map->m_len)
2775 eof_block = map->m_lblk + map->m_len;
2777 depth = ext_depth(inode);
2778 ex = path[depth].p_ext;
2779 ee_block = le32_to_cpu(ex->ee_block);
2780 ee_len = ext4_ext_get_actual_len(ex);
2781 allocated = ee_len - (map->m_lblk - ee_block);
2783 WARN_ON(map->m_lblk < ee_block);
2785 * It is safe to convert an extent to initialized via explicit
2786 * zeroout only if the extent is fully inside i_size or new_size.
2788 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
2790 /* If the extent is no longer than 2*EXT4_EXT_ZERO_LEN blocks, zeroout directly */
2791 if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
2792 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2793 err = ext4_ext_zeroout(inode, ex);
2797 err = ext4_ext_get_access(handle, inode, path + depth);
2800 ext4_ext_mark_initialized(ex);
2801 ext4_ext_try_to_merge(inode, path, ex);
2802 err = ext4_ext_dirty(handle, inode, path + depth);
2808 * 1. split the extent into three extents.
2809 * 2. split the extent into two extents, zeroout the first half.
2810 * 3. split the extent into two extents, zeroout the second half.
2811 * 4. split the extent into two extents without zeroout.
2813 split_map.m_lblk = map->m_lblk;
2814 split_map.m_len = map->m_len;
2816 if (allocated > map->m_len) {
2817 if (allocated <= EXT4_EXT_ZERO_LEN &&
2818 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2821 cpu_to_le32(map->m_lblk);
2822 zero_ex.ee_len = cpu_to_le16(allocated);
2823 ext4_ext_store_pblock(&zero_ex,
2824 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
2825 err = ext4_ext_zeroout(inode, &zero_ex);
2828 split_map.m_lblk = map->m_lblk;
2829 split_map.m_len = allocated;
2830 } else if ((map->m_lblk - ee_block + map->m_len <
2831 EXT4_EXT_ZERO_LEN) &&
2832 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2834 if (map->m_lblk != ee_block) {
2835 zero_ex.ee_block = ex->ee_block;
2836 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
2838 ext4_ext_store_pblock(&zero_ex,
2839 ext4_ext_pblock(ex));
2840 err = ext4_ext_zeroout(inode, &zero_ex);
2845 split_map.m_lblk = ee_block;
2846 split_map.m_len = map->m_lblk - ee_block + map->m_len;
2847 allocated = map->m_len;
2851 allocated = ext4_split_extent(handle, inode, path,
2852 &split_map, split_flag, 0);
2857 return err ? err : allocated;
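/*
 * Worked example of case c> (illustrative numbers): an uninitialized
 * extent covering blocks [100, 199] and a write with map->m_lblk = 130,
 * map->m_len = 10. Then allocated = 100 - 30 = 70, and neither zeroout
 * shortcut applies (70 > EXT4_EXT_ZERO_LEN and 30 + 10 >=
 * EXT4_EXT_ZERO_LEN), so split_map = {130, 10} and ext4_split_extent()
 * produces [100, 129] uninitialized, [130, 139] initialized and
 * [140, 199] uninitialized.
 */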
2861 * This function is called by ext4_ext_map_blocks() from
2862 * ext4_get_blocks_dio_write() when DIO writes
2863 * to an uninitialized extent.
2865 * Writing to an uninitialized extent may result in splitting the uninitialized
2866 * extent into multiple uninitialized extents (up to three).
2867 * There are three possibilities:
2868 * a> There is no split required: Entire extent should be uninitialized
2869 * b> Splits in two extents: Write is happening at either end of the extent
2870 * c> Splits in three extents: Someone is writing in the middle of the extent
2872 * One or more index blocks may be needed if the extent tree grows after
2873 * the uninitialized extent is split. To prevent ENOSPC from occurring when
2874 * the IO completes, we need to split the uninitialized extent before DIO
2875 * submits the IO. The uninitialized extent will be split
2876 * into (at most) three uninitialized extents. After the IO completes, the part
2877 * being filled will be converted to initialized by the end_io callback function
2878 * via ext4_convert_unwritten_extents().
2880 * Returns the size of the uninitialized extent to be written on success.
2882 static int ext4_split_unwritten_extents(handle_t *handle,
2883 struct inode *inode,
2884 struct ext4_map_blocks *map,
2885 struct ext4_ext_path *path,
2888 ext4_lblk_t eof_block;
2889 ext4_lblk_t ee_block;
2890 struct ext4_extent *ex;
2891 unsigned int ee_len;
2892 int split_flag = 0, depth;
2894 ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
2895 "block %llu, max_blocks %u\n", inode->i_ino,
2896 (unsigned long long)map->m_lblk, map->m_len);
2898 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2899 inode->i_sb->s_blocksize_bits;
2900 if (eof_block < map->m_lblk + map->m_len)
2901 eof_block = map->m_lblk + map->m_len;
2903 * It is safe to convert an extent to initialized via explicit
2904 * zeroout only if the extent is fully inside i_size or new_size.
2906 depth = ext_depth(inode);
2907 ex = path[depth].p_ext;
2908 ee_block = le32_to_cpu(ex->ee_block);
2909 ee_len = ext4_ext_get_actual_len(ex);
2911 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
2912 split_flag |= EXT4_EXT_MARK_UNINIT2;
2914 flags |= EXT4_GET_BLOCKS_PRE_IO;
2915 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
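/*
 * Example of the eof_block computation above (illustrative numbers):
 * with a 4096-byte block size (blkbits = 12) and i_size = 10000,
 * eof_block = (10000 + 4095) >> 12 = 3, so an extent whose
 * ee_block + ee_len <= 3 lies fully inside i_size and may be zeroed
 * out on ENOSPC. If the write itself maps blocks [2, 5], eof_block is
 * raised to 6 so the extent being written is still considered safe.
 */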
2918 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
2919 struct inode *inode,
2920 struct ext4_ext_path *path)
2922 struct ext4_extent *ex;
2923 struct ext4_extent_header *eh;
2927 depth = ext_depth(inode);
2928 eh = path[depth].p_hdr;
2929 ex = path[depth].p_ext;
2931 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
2932 "block %llu, max_blocks %u\n", inode->i_ino,
2933 (unsigned long long)le32_to_cpu(ex->ee_block),
2934 ext4_ext_get_actual_len(ex));
2936 err = ext4_ext_get_access(handle, inode, path + depth);
2939 /* first mark the extent as initialized */
2940 ext4_ext_mark_initialized(ex);
2942 /* note: ext4_ext_correct_indexes() isn't needed here because
2943 * borders are not changed
2945 ext4_ext_try_to_merge(inode, path, ex);
2947 /* Mark modified extent as dirty */
2948 err = ext4_ext_dirty(handle, inode, path + depth);
2950 ext4_ext_show_leaf(inode, path);
2954 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
2955 sector_t block, int count)
2958 for (i = 0; i < count; i++)
2959 unmap_underlying_metadata(bdev, block + i);
2963 * Handle EOFBLOCKS_FL flag, clearing it if necessary
2965 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
2967 struct ext4_ext_path *path,
2971 struct ext4_extent_header *eh;
2972 struct ext4_extent *last_ex;
2974 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
2977 depth = ext_depth(inode);
2978 eh = path[depth].p_hdr;
2980 if (unlikely(!eh->eh_entries)) {
2981 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
2982 "EOFBLOCKS_FL set");
2985 last_ex = EXT_LAST_EXTENT(eh);
2987 * We should clear the EOFBLOCKS_FL flag if we are writing the
2988 * last block in the last extent in the file. We test this by
2989 * first checking to see if the caller of
2990 * ext4_ext_get_blocks() was interested in the last block (or
2991 * a block beyond the last block) in the current extent. If
2992 * this turns out to be false, we can bail out from this
2993 * function immediately.
2995 if (lblk + len < le32_to_cpu(last_ex->ee_block) +
2996 ext4_ext_get_actual_len(last_ex))
2999 * If the caller does appear to be planning to write at or
3000 * beyond the end of the current extent, we then test to see
3001 * if the current extent is the last extent in the file, by
3002 * checking to make sure it was reached via the rightmost node
3003 * at each level of the tree.
3005 for (i = depth-1; i >= 0; i--)
3006 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3008 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3009 return ext4_mark_inode_dirty(handle, inode);
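/*
 * Example (illustrative): for a tree of depth 1, the extent just
 * written is the last one in the file only if path[0].p_idx ==
 * EXT_LAST_INDEX(path[0].p_hdr) and the extent is the last entry of
 * that rightmost leaf. Writing the final block of the last extent in
 * a non-rightmost leaf leaves EOFBLOCKS_FL set.
 */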
3013 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3014 struct ext4_map_blocks *map,
3015 struct ext4_ext_path *path, int flags,
3016 unsigned int allocated, ext4_fsblk_t newblock)
3020 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3022 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
3023 "block %llu, max_blocks %u, flags %d, allocated %u",
3024 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3026 ext4_ext_show_leaf(inode, path);
3028 /* get_block() called before submitting the IO: split the extent */
3029 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3030 ret = ext4_split_unwritten_extents(handle, inode, map,
3033 * Flag the inode (non-AIO case) or the end_io struct (AIO case)
3034 * that this IO needs conversion to written when the IO is completed.
3037 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
3038 io->flag = EXT4_IO_END_UNWRITTEN;
3039 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
3041 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3042 if (ext4_should_dioread_nolock(inode))
3043 map->m_flags |= EXT4_MAP_UNINIT;
3046 /* end_io completed: convert the filled extent to written */
3047 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3048 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3051 ext4_update_inode_fsync_trans(handle, inode, 1);
3052 err = check_eofblocks_fl(handle, inode, map->m_lblk,
3058 /* buffered IO case */
3060 * repeated fallocate creation request:
3061 * we already have an unwritten extent
3063 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3066 /* buffered READ or buffered write_begin() lookup */
3067 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3069 * We have blocks reserved already. We
3070 * return allocated blocks so that delalloc
3071 * won't do block reservation for us. But
3072 * the buffer head will be unmapped so that
3073 * a read from the block returns 0s.
3075 map->m_flags |= EXT4_MAP_UNWRITTEN;
3079 /* buffered write, writepage time, convert */
3080 ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3082 ext4_update_inode_fsync_trans(handle, inode, 1);
3083 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3095 map->m_flags |= EXT4_MAP_NEW;
3097 * if we allocated more blocks than requested
3098 * we need to make sure we unmap the extra blocks
3099 * allocated. The actually needed blocks will get
3100 * unmapped later when we find the buffer_head marked new.
3103 if (allocated > map->m_len) {
3104 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3105 newblock + map->m_len,
3106 allocated - map->m_len);
3107 allocated = map->m_len;
3111 * If we have done fallocate with an offset that is already
3112 * delayed allocated, we would have block reservation
3113 * and quota reservation done in the delayed write path.
3114 * But fallocate would have already updated the quota and block
3115 * count for this offset. So cancel these reservations.
3117 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3118 ext4_da_update_reserve_space(inode, allocated, 0);
3121 map->m_flags |= EXT4_MAP_MAPPED;
3123 if (allocated > map->m_len)
3124 allocated = map->m_len;
3125 ext4_ext_show_leaf(inode, path);
3126 map->m_pblk = newblock;
3127 map->m_len = allocated;
3130 ext4_ext_drop_refs(path);
3133 return err ? err : allocated;
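/*
 * Summary of the dispatch above (illustrative):
 *	EXT4_GET_BLOCKS_PRE_IO     -> split around the write, keep the
 *				      pieces unwritten, tag io_end/inode
 *				      for conversion at end_io time;
 *	EXT4_GET_BLOCKS_CONVERT    -> end_io path, mark the extent written;
 *	EXT4_GET_BLOCKS_UNINIT_EXT -> repeated fallocate, just map;
 *	no EXT4_GET_BLOCKS_CREATE  -> lookup, return the blocks as
 *				      EXT4_MAP_UNWRITTEN;
 *	otherwise (buffered writepage) -> convert to initialized.
 */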
3137 * Block allocation/map/preallocation routine for extent-based files
3140 * Need to be called with
3141 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
3142 * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3144 * return > 0, number of blocks already mapped/allocated
3145 * if create == 0 and these are pre-allocated blocks
3146 * buffer head is unmapped
3147 * otherwise blocks are mapped
3149 * return = 0, if plain lookup failed (blocks have not been allocated)
3150 * buffer head is unmapped
3152 * return < 0, error case.
3154 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3155 struct ext4_map_blocks *map, int flags)
3157 struct ext4_ext_path *path = NULL;
3158 struct ext4_extent newex, *ex;
3159 ext4_fsblk_t newblock = 0;
3160 int err = 0, depth, ret;
3161 unsigned int allocated = 0;
3162 struct ext4_allocation_request ar;
3163 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3165 ext_debug("blocks %u/%u requested for inode %lu\n",
3166 map->m_lblk, map->m_len, inode->i_ino);
3167 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3169 /* check in cache */
3170 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3171 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3172 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3174 * block isn't allocated yet and
3175 * user doesn't want to allocate it
3179 /* we should allocate requested block */
3181 /* block is already allocated */
3182 newblock = map->m_lblk
3183 - le32_to_cpu(newex.ee_block)
3184 + ext4_ext_pblock(&newex);
3185 /* number of remaining blocks in the extent */
3186 allocated = ext4_ext_get_actual_len(&newex) -
3187 (map->m_lblk - le32_to_cpu(newex.ee_block));
3192 /* find extent for this block */
3193 path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3195 err = PTR_ERR(path);
3200 depth = ext_depth(inode);
3203 * a consistent leaf must not be empty;
3204 * this situation is possible, though, _during_ tree modification;
3205 * this is why the assert can't be put in ext4_ext_find_extent()
3207 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3208 EXT4_ERROR_INODE(inode, "bad extent address "
3209 "lblock: %lu, depth: %d pblock %lld",
3210 (unsigned long) map->m_lblk, depth,
3211 path[depth].p_block);
3216 ex = path[depth].p_ext;
3218 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3219 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3220 unsigned short ee_len;
3223 * Uninitialized extents are treated as holes, except that
3224 * we split out initialized portions during a write.
3226 ee_len = ext4_ext_get_actual_len(ex);
3227 /* if found extent covers block, simply return it */
3228 if (in_range(map->m_lblk, ee_block, ee_len)) {
3229 newblock = map->m_lblk - ee_block + ee_start;
3230 /* number of remaining blocks in the extent */
3231 allocated = ee_len - (map->m_lblk - ee_block);
3232 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3233 ee_block, ee_len, newblock);
3235 /* Do not put uninitialized extent in the cache */
3236 if (!ext4_ext_is_uninitialized(ex)) {
3237 ext4_ext_put_in_cache(inode, ee_block,
3241 ret = ext4_ext_handle_uninitialized_extents(handle,
3242 inode, map, path, flags, allocated,
3249 * the requested block isn't allocated yet;
3250 * we can't try to create blocks if the create flag is zero
3252 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3254 * put the just-found gap into the cache to speed up
3255 * subsequent requests
3257 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3261 * Okay, we need to do block allocation.
3264 /* find neighbour allocated blocks */
3265 ar.lleft = map->m_lblk;
3266 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3269 ar.lright = map->m_lblk;
3270 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
3275 * See if request is beyond maximum number of blocks we can have in
3276 * a single extent. For an initialized extent this limit is
3277 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3278 * EXT_UNINIT_MAX_LEN.
3280 if (map->m_len > EXT_INIT_MAX_LEN &&
3281 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3282 map->m_len = EXT_INIT_MAX_LEN;
3283 else if (map->m_len > EXT_UNINIT_MAX_LEN &&
3284 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3285 map->m_len = EXT_UNINIT_MAX_LEN;
3287 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
3288 newex.ee_block = cpu_to_le32(map->m_lblk);
3289 newex.ee_len = cpu_to_le16(map->m_len);
3290 err = ext4_ext_check_overlap(inode, &newex, path);
3292 allocated = ext4_ext_get_actual_len(&newex);
3294 allocated = map->m_len;
3296 /* allocate new block */
3298 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
3299 ar.logical = map->m_lblk;
3301 if (S_ISREG(inode->i_mode))
3302 ar.flags = EXT4_MB_HINT_DATA;
3304 /* disable in-core preallocation for non-regular files */
3306 newblock = ext4_mb_new_blocks(handle, &ar, &err);
3309 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
3310 ar.goal, newblock, allocated);
3312 /* try to insert new extent into found leaf and return */
3313 ext4_ext_store_pblock(&newex, newblock);
3314 newex.ee_len = cpu_to_le16(ar.len);
3315 /* Mark uninitialized */
3316 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3317 ext4_ext_mark_uninitialized(&newex);
3319 * an io_end structure was created for every IO write to an
3320 * uninitialized extent. To avoid unnecessary conversion,
3321 * here we flag the IO that really needs the conversion.
3322 * For the non-async direct IO case, flag the inode state
3323 * that we need to perform the conversion when the IO is done.
3325 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3326 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
3327 io->flag = EXT4_IO_END_UNWRITTEN;
3328 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
3330 ext4_set_inode_state(inode,
3331 EXT4_STATE_DIO_UNWRITTEN);
3333 if (ext4_should_dioread_nolock(inode))
3334 map->m_flags |= EXT4_MAP_UNINIT;
3337 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
3341 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3343 /* free data blocks we just allocated */
3344 /* not a good idea to call discard here directly,
3345 * but otherwise we'd need to call it on every free() */
3346 ext4_discard_preallocations(inode);
3347 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
3348 ext4_ext_get_actual_len(&newex), 0);
3352 /* the previous routine could have used the block we allocated */
3353 newblock = ext4_ext_pblock(&newex);
3354 allocated = ext4_ext_get_actual_len(&newex);
3355 if (allocated > map->m_len)
3356 allocated = map->m_len;
3357 map->m_flags |= EXT4_MAP_NEW;
3360 * Update reserved blocks/metadata blocks after successful
3361 * block allocation which had been deferred till now.
3363 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3364 ext4_da_update_reserve_space(inode, allocated, 1);
3367 * Cache the extent and update transaction to commit on fdatasync only
3368 * when it is _not_ an uninitialized extent.
3370 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
3371 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
3372 ext4_update_inode_fsync_trans(handle, inode, 1);
3374 ext4_update_inode_fsync_trans(handle, inode, 0);
3376 if (allocated > map->m_len)
3377 allocated = map->m_len;
3378 ext4_ext_show_leaf(inode, path);
3379 map->m_flags |= EXT4_MAP_MAPPED;
3380 map->m_pblk = newblock;
3381 map->m_len = allocated;
3384 ext4_ext_drop_refs(path);
3387 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
3388 newblock, map->m_len, err ? err : allocated);
3389 return err ? err : allocated;
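/*
 * Caller sketch (illustrative only; real callers normally go through
 * ext4_map_blocks(), which takes i_data_sem as described above):
 *
 *	struct ext4_map_blocks map = {
 *		.m_lblk = lblk,		// first logical block wanted
 *		.m_len	= len,		// number of blocks wanted
 *	};
 *	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *	// ret > 0: map.m_pblk .. map.m_pblk + ret - 1 are mapped
 *	// ret == 0: hole, nothing allocated (create was not set)
 *	// ret < 0: error
 */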
3392 void ext4_ext_truncate(struct inode *inode)
3394 struct address_space *mapping = inode->i_mapping;
3395 struct super_block *sb = inode->i_sb;
3396 ext4_lblk_t last_block;
3401 * finish any pending end_io work so we won't run the risk of
3402 * converting any truncated blocks to initialized later
3404 ext4_flush_completed_IO(inode);
3407 * probably the first extent we free will be the last one in the block
3409 err = ext4_writepage_trans_blocks(inode);
3410 handle = ext4_journal_start(inode, err);
3414 if (inode->i_size & (sb->s_blocksize - 1))
3415 ext4_block_truncate_page(handle, mapping, inode->i_size);
3417 if (ext4_orphan_add(handle, inode))
3420 down_write(&EXT4_I(inode)->i_data_sem);
3421 ext4_ext_invalidate_cache(inode);
3423 ext4_discard_preallocations(inode);
3426 * TODO: optimization is possible here.
3427 * Probably we need not scan at all,
3428 * because page truncation is enough.
3431 /* we have to know where to truncate from in the crash case */
3432 EXT4_I(inode)->i_disksize = inode->i_size;
3433 ext4_mark_inode_dirty(handle, inode);
3435 last_block = (inode->i_size + sb->s_blocksize - 1)
3436 >> EXT4_BLOCK_SIZE_BITS(sb);
3437 err = ext4_ext_remove_space(inode, last_block);
3439 /* In a multi-transaction truncate, we only make the final
3440 * transaction synchronous.
3443 ext4_handle_sync(handle);
3446 up_write(&EXT4_I(inode)->i_data_sem);
3448 * If this was a simple ftruncate() and the file will remain alive,
3449 * then we need to clear up the orphan record which we created above.
3450 * However, if this was a real unlink then we were called by
3451 * ext4_delete_inode(), and we allow that function to clean up the
3452 * orphan info for us.
3455 ext4_orphan_del(handle, inode);
3457 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3458 ext4_mark_inode_dirty(handle, inode);
3459 ext4_journal_stop(handle);
3462 static void ext4_falloc_update_inode(struct inode *inode,
3463 int mode, loff_t new_size, int update_ctime)
3465 struct timespec now;
3468 now = current_fs_time(inode->i_sb);
3469 if (!timespec_equal(&inode->i_ctime, &now))
3470 inode->i_ctime = now;
3473 * Update only when preallocation was requested beyond the file size.
3476 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3477 if (new_size > i_size_read(inode))
3478 i_size_write(inode, new_size);
3479 if (new_size > EXT4_I(inode)->i_disksize)
3480 ext4_update_i_disksize(inode, new_size);
3483 * Mark that we allocate beyond EOF so the subsequent truncate
3484 * can proceed even if the new size is the same as i_size.
3486 if (new_size > i_size_read(inode))
3487 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3493 * preallocate space for a file. This implements ext4's fallocate file
3494 * operation, which gets called from the sys_fallocate system call.
3495 * For block-mapped files, posix_fallocate should fall back to the method
3496 * of writing zeroes to the required new blocks (the same behavior which is
3497 * expected for file systems which do not support the fallocate() system call).
3499 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3501 struct inode *inode = file->f_path.dentry->d_inode;
3504 unsigned int max_blocks;
3508 struct ext4_map_blocks map;
3509 unsigned int credits, blkbits = inode->i_blkbits;
3511 /* We only support the FALLOC_FL_KEEP_SIZE mode */
3512 if (mode & ~FALLOC_FL_KEEP_SIZE)
3516 * currently supporting (pre)allocate mode for extent-based files _only_
3519 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3522 trace_ext4_fallocate_enter(inode, offset, len, mode);
3523 map.m_lblk = offset >> blkbits;
3525 * We can't just convert len to max_blocks because the byte range may
3526 * straddle a block boundary: if blocksize = 4096, offset = 3072 and len = 2048, two blocks are covered.
3528 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3531 * credits to insert 1 extent into extent tree
3533 credits = ext4_chunk_trans_blocks(inode, max_blocks);
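	/*
	 * Worked example of the max_blocks computation (the numbers from
	 * the comment above): blkbits = 12, offset = 3072, len = 2048.
	 * Then map.m_lblk = 3072 >> 12 = 0 and
	 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 8192 >> 12 = 2, so
	 * max_blocks = 2 - 0 = 2, even though len >> blkbits would be 0:
	 * the byte range touches two blocks.
	 */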
3534 mutex_lock(&inode->i_mutex);
3535 ret = inode_newsize_ok(inode, (len + offset));
3537 mutex_unlock(&inode->i_mutex);
3538 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
3542 while (ret >= 0 && ret < max_blocks) {
3543 map.m_lblk = map.m_lblk + ret;
3544 map.m_len = max_blocks = max_blocks - ret;
3545 handle = ext4_journal_start(inode, credits);
3546 if (IS_ERR(handle)) {
3547 ret = PTR_ERR(handle);
3550 ret = ext4_map_blocks(handle, inode, &map,
3551 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
3555 printk(KERN_ERR "%s: ext4_ext_map_blocks "
3556 "returned error: inode #%lu, block=%u, "
3557 "max_blocks=%u\n", __func__,
3558 inode->i_ino, map.m_lblk, max_blocks);
3560 ext4_mark_inode_dirty(handle, inode);
3561 ret2 = ext4_journal_stop(handle);
3564 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3565 blkbits) >> blkbits))
3566 new_size = offset + len;
3568 new_size = (map.m_lblk + ret) << blkbits;
3570 ext4_falloc_update_inode(inode, mode, new_size,
3571 (map.m_flags & EXT4_MAP_NEW));
3572 ext4_mark_inode_dirty(handle, inode);
3573 ret2 = ext4_journal_stop(handle);
3577 if (ret == -ENOSPC &&
3578 ext4_should_retry_alloc(inode->i_sb, &retries)) {
3582 mutex_unlock(&inode->i_mutex);
3583 trace_ext4_fallocate_exit(inode, offset, max_blocks,
3584 ret > 0 ? ret2 : ret);
3585 return ret > 0 ? ret2 : ret;
3589 * This function converts a range of blocks to written extents.
3590 * The caller of this function will pass the start offset and the size;
3591 * all unwritten extents within this range will be converted to written extents.
3594 * This function is called from the direct IO end_io callback
3595 * function, to convert the fallocated extents after the IO is completed.
3596 * Returns 0 on success.
3598 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3602 unsigned int max_blocks;
3605 struct ext4_map_blocks map;
3606 unsigned int credits, blkbits = inode->i_blkbits;
3608 map.m_lblk = offset >> blkbits;
3610 * We can't just convert len to max_blocks because the byte range may
3611 * straddle a block boundary: if blocksize = 4096, offset = 3072 and len = 2048, two blocks are covered.
3613 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
3616 * credits to insert 1 extent into extent tree
3618 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3619 while (ret >= 0 && ret < max_blocks) {
3621 map.m_len = (max_blocks -= ret);
3622 handle = ext4_journal_start(inode, credits);
3623 if (IS_ERR(handle)) {
3624 ret = PTR_ERR(handle);
3627 ret = ext4_map_blocks(handle, inode, &map,
3628 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
3631 printk(KERN_ERR "%s: ext4_ext_map_blocks "
3632 "returned error: inode #%lu, block=%u, "
3633 "max_blocks=%u\n", __func__,
3634 inode->i_ino, map.m_lblk, map.m_len);
3636 ext4_mark_inode_dirty(handle, inode);
3637 ret2 = ext4_journal_stop(handle);
3638 if (ret <= 0 || ret2)
3641 return ret > 0 ? ret2 : ret;
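/*
 * Illustrative flow: an AIO DIO write of 1 MiB at offset 0 into
 * fallocated (unwritten) space completes; the end_io callback then
 * calls ext4_convert_unwritten_extents(inode, 0, 1048576). Each loop
 * iteration above starts a transaction with enough credits to insert
 * one extent and converts up to max_blocks blocks via
 * EXT4_GET_BLOCKS_IO_CONVERT_EXT until the whole range is written.
 */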
3645 * Callback function called for each extent to gather FIEMAP information.
3647 static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3648 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3657 struct fiemap_extent_info *fieinfo = data;
3658 unsigned char blksize_bits;
3660 blksize_bits = inode->i_sb->s_blocksize_bits;
3661 logical = (__u64)newex->ec_block << blksize_bits;
3663 if (newex->ec_start == 0) {
3665 * No extent in the extent tree contains the queried block;
3666 * the block may lie in 1) a hole or 2) a delayed extent.
3668 * Holes and delayed extents are processed as follows:
3669 * 1. look up dirty pages in the specified range in the pagecache.
3670 * If no page is found, then there is no delayed extent;
3671 * return with EXT_CONTINUE.
3672 * 2. find the 1st mapped buffer,
3673 * 3. check that the mapped buffer is both in the request range
3674 * and a delayed buffer. If not, there is no delayed extent; return.
3676 * 4. a delayed extent is found; the extent will be collected.
3678 ext4_lblk_t end = 0;
3679 pgoff_t last_offset;
3682 struct page **pages = NULL;
3683 struct buffer_head *bh = NULL;
3684 struct buffer_head *head = NULL;
3685 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
3687 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
3691 offset = logical >> PAGE_SHIFT;
3693 last_offset = offset;
3695 ret = find_get_pages_tag(inode->i_mapping, &offset,
3696 PAGECACHE_TAG_DIRTY, nr_pages, pages);
3698 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
3699 /* First time, try to find a mapped buffer. */
3702 for (index = 0; index < ret; index++)
3703 page_cache_release(pages[index]);
3706 return EXT_CONTINUE;
3709 /* Try to find the 1st mapped buffer. */
3710 end = ((__u64)pages[0]->index << PAGE_SHIFT) >>
3712 if (!page_has_buffers(pages[0]))
3714 head = page_buffers(pages[0]);
3720 if (buffer_mapped(bh)) {
3721 /* get the 1st mapped buffer. */
3722 if (end > newex->ec_block +
3724 /* The buffer is out of
3725 * the request range. */
3728 goto found_mapped_buffer;
3730 bh = bh->b_this_page;
3732 } while (bh != head);
3734 /* No mapped buffer found. */
3737 /* Find contiguous delayed buffers. */
3738 if (ret > 0 && pages[0]->index == last_offset)
3739 head = page_buffers(pages[0]);
3743 found_mapped_buffer:
3744 if (bh != NULL && buffer_delay(bh)) {
3745 /* 1st or contiguous delayed buffer found. */
3746 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
3748 * 1st delayed buffer found, record
3749 * the start of the extent.
3751 flags |= FIEMAP_EXTENT_DELALLOC;
3752 newex->ec_block = end;
3753 logical = (__u64)end << blksize_bits;
3755 /* Find contiguous delayed buffers. */
3757 if (!buffer_delay(bh))
3758 goto found_delayed_extent;
3759 bh = bh->b_this_page;
3761 } while (bh != head);
3763 for (index = 1; index < ret; index++) {
3764 if (!page_has_buffers(pages[index])) {
3768 head = page_buffers(pages[index]);
3773 if (pages[index]->index !=
3774 pages[0]->index + index) {
3775 /* Blocks are not contiguous. */
3781 if (!buffer_delay(bh))
3782 /* Delayed-extent ends. */
3783 goto found_delayed_extent;
3784 bh = bh->b_this_page;
3786 } while (bh != head);
3788 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
3792 found_delayed_extent:
3793 newex->ec_len = min(end - newex->ec_block,
3794 (ext4_lblk_t)EXT_INIT_MAX_LEN);
3795 if (ret == nr_pages && bh != NULL &&
3796 newex->ec_len < EXT_INIT_MAX_LEN &&
3798 /* Haven't collected an extent yet; continue. */
3799 for (index = 0; index < ret; index++)
3800 page_cache_release(pages[index]);
3804 for (index = 0; index < ret; index++)
3805 page_cache_release(pages[index]);
3809 physical = (__u64)newex->ec_start << blksize_bits;
3810 length = (__u64)newex->ec_len << blksize_bits;
3812 if (ex && ext4_ext_is_uninitialized(ex))
3813 flags |= FIEMAP_EXTENT_UNWRITTEN;
3815 size = i_size_read(inode);
3816 if (logical + length >= size)
3817 flags |= FIEMAP_EXTENT_LAST;
3819 ret = fiemap_fill_next_extent(fieinfo, logical, physical,
3825 return EXT_CONTINUE;
3828 /* fiemap flags we can handle are specified here */
3829 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3831 static int ext4_xattr_fiemap(struct inode *inode,
3832 struct fiemap_extent_info *fieinfo)
3836 __u32 flags = FIEMAP_EXTENT_LAST;
3837 int blockbits = inode->i_sb->s_blocksize_bits;
3841 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
3842 struct ext4_iloc iloc;
3843 int offset; /* offset of xattr in inode */
3845 error = ext4_get_inode_loc(inode, &iloc);
3848 physical = iloc.bh->b_blocknr << blockbits;
3849 offset = EXT4_GOOD_OLD_INODE_SIZE +
3850 EXT4_I(inode)->i_extra_isize;
3852 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
3853 flags |= FIEMAP_EXTENT_DATA_INLINE;
3855 } else { /* external block */
3856 physical = EXT4_I(inode)->i_file_acl << blockbits;
3857 length = inode->i_sb->s_blocksize;
3861 error = fiemap_fill_next_extent(fieinfo, 0, physical,
3863 return (error < 0 ? error : 0);
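/*
 * Example (illustrative numbers): with a 256-byte on-disk inode,
 * EXT4_GOOD_OLD_INODE_SIZE = 128 and i_extra_isize = 32, the in-inode
 * xattr area starts at offset = 160 and length = 256 - 160 = 96
 * bytes; it is reported as a single FIEMAP extent flagged
 * FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST.
 */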
3866 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3867 __u64 start, __u64 len)
3869 ext4_lblk_t start_blk;
3872 /* fall back to generic here if not in extents format */
3873 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3874 return generic_block_fiemap(inode, fieinfo, start, len,
3877 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3880 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3881 error = ext4_xattr_fiemap(inode, fieinfo);
3883 ext4_lblk_t len_blks;
3886 start_blk = start >> inode->i_sb->s_blocksize_bits;
3887 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
3888 if (last_blk >= EXT_MAX_BLOCK)
3889 last_blk = EXT_MAX_BLOCK-1;
3890 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
3893 * Walk the extent tree gathering extent information.
3894 * ext4_ext_fiemap_cb will push extents back to user.
3896 error = ext4_ext_walk_space(inode, start_blk, len_blks,
3897 ext4_ext_fiemap_cb, fieinfo);