/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/module.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
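
/*
 * Illustrative note (not from the original source): a fast symlink
 * stores its target string directly in the inode's i_data area rather
 * than in a data block, so i_blocks stays at zero except for the
 * s_blocksize >> 9 sectors of an optional external xattr block, which
 * the ea_blocks term above subtracts before comparing against zero.
 */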
/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
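
/*
 * Worked example (illustrative, not from the original source): with a
 * 4KB block size, s_blocksize_bits - 9 == 3, so an inode whose
 * i_blocks reports 800 512-byte sectors yields needed = 800 >> 3 = 100
 * filesystem blocks; that value is clamped to EXT4_MAX_TRANS_DATA
 * before the fixed EXT4_DATA_TRANS_BLOCKS() overhead is added on top.
 */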
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}
/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
/*
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into a path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 *
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * really have to worry about this.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
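
/*
 * Worked example (illustrative, not from the original source): with
 * 4KB blocks, ptrs = 1024, ptrs_bits = 10 and EXT4_NDIR_BLOCKS = 12.
 * For i_block = 5000, the block lies past the 12 direct and 1024
 * indirect slots, so after the two subtractions i_block = 3964 falls
 * in the double-indirect range; offsets[] becomes
 * { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 } and the
 * returned path depth is 3.
 */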
static int __ext4_check_blockref(const char *function, unsigned int line,
				 struct inode *inode,
				 __le32 *p, unsigned int max)
{
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			es->s_last_error_block = cpu_to_le64(blk);
			ext4_error_inode(inode, function, line, blk,
					 "invalid block");
			return -EIO;
		}
	}
	return 0;
}

#define ext4_check_indirect_blockref(inode, bh)				\
	__ext4_check_blockref(__func__, __LINE__, inode,		\
			      (__le32 *)(bh)->b_data,			\
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)				\
	__ext4_check_blockref(__func__, __LINE__, inode,		\
			      EXT4_I(inode)->i_data,			\
			      EXT4_NDIR_BLOCKS)
/*
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
/*
 * ext4_find_near - find a place for allocation with sufficient locality
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when heuristic for sequential allocation fails.
 * Rules are:
 *	+ if there is a block to the left of our position - allocate near it.
 *	+ if pointer will live in indirect block - allocate near that block.
 *	+ if pointer will live in inode - allocate in the same
 *	  cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
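
/*
 * Worked example (illustrative, not from the original source): with
 * the common 32768 blocks per group, a process with pid 1234 gets
 * colour = (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096, so its
 * allocations start 4096 blocks into the group; concurrent writers
 * from different processes therefore land in separate sixteenths of
 * the group.
 */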
/*
 * ext4_find_goal - find a preferred place for allocation.
 * @block:  block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation,
 * returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
/*
 * ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly no blocks on that path have been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
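
/*
 * Worked example (illustrative, not from the original source): when
 * the lookup stopped at a missing [td]indirect block (k > 0), a
 * request for blks = 8 data blocks with blocks_to_boundary = 3
 * returns min(8, 3 + 1) = 4, i.e. the allocation is cut at the
 * indirect-block boundary; only when k == 0 does the loop above scan
 * the existing indirect block and count the run of still-zero
 * pointers instead.
 */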
/*
 * ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 * @handle: handle for this transaction
 * @inode: inode which needs allocated blocks
 * @iblock: the logical block to start allocating at
 * @goal: preferred physical block of allocation
 * @indirect_blks: the number of blocks needed to allocate for indirect
 *			blocks
 * @blks: number of desired blocks
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 * @err: on return it will store the error code
 *
 * This function will return the number of blocks allocated as
 * requested by the passed-in parameters.
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks needed to allocate (required)
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
			       "requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}
	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * anything, free everything we allocated and bail out
		 */
		goto failed_out;
	}
	if (target == blks) {
		/*
		 * save the new block number
		 * for the first direct block
		 */
		new_blocks[index] = current_block;
	}
	blk_allocated += ar.len;
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
	return ret;
}
/*
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

	return err;
}
/*
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			       struct ext4_map_blocks *map,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks while they are contiguous */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	return err;
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif
/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a new block at @lblock, for a non-extent file.
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode,
					      ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
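
/*
 * Worked example (illustrative, not from the original source): with
 * 4KB blocks EXT4_ADDR_PER_BLOCK_BITS is 10, so a small lblock just
 * past the direct range has order_base_2(lblock) < 10 and reserves
 * 0/10 + 1 = 1 indirect block, while lblock around two million gives
 * order_base_2() = 21 and 21/10 + 1 = 3, covering the indirect,
 * double-indirect and triple-indirect blocks that may need to be
 * allocated.
 */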
/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_indirect_calc_metadata_amount(inode, lblock);
}
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyblocks_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, used);
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, used);
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If file type is extents based, it will call ext4_ext_map_blocks(),
 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
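
/*
 * Illustrative usage sketch (not from the original source):
 * ext4_get_block() has the standard get_block_t signature, so generic
 * helpers such as __block_write_begin() can drive the mapping, e.g.:
 *
 *	struct buffer_head bh = { .b_size = inode->i_sb->s_blocksize };
 *	int err = ext4_get_block(inode, iblock, &bh, 1);
 *
 * On success bh.b_blocknr holds the physical block, and for a fresh
 * allocation the EXT4_MAP_NEW bit surfaces as buffer_new(&bh).
 */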
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
/*
 * Truncate blocks that were not used by write. We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext4_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext4_truncate(inode);
}
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * fewer, we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * fewer, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * fewer, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve a single block located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long md_needed;
	int ret;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks
	 * worst case is one extent per block
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_needed = ext4_calc_metadata_amount(inode, lblock);
	trace_ext4_da_reserve_space(inode, md_needed);
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, 1);
	if (ret)
		return ret;
	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
		dquot_release_reservation_block(inode, 1);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
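
/*
 * Illustrative note (not from the original source): each delayed
 * write reserves one data block plus the md_needed metadata estimate
 * above, charged against the superblock's dirty-block accounting via
 * ext4_claim_free_blocks(); the estimate is trued up later by
 * ext4_da_update_reserve_space() once real blocks are allocated at
 * writeback time.
 */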
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks\n", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, to_free);
}
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);
	ext4_da_release_space(page->mapping->host, to_release);
}
/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with a writepage() callback
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated. This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, redirty_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * redirty the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
							noalloc_get_block_write)) {
				redirty_page:
					redirty_page_for_writepage(mpd->wbc,
								   page);
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto redirty_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/* redirty page if block allocation undone */
				if (buffer_delay(bh) || buffer_unwritten(bh))
					redirty_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (redirty_page)
				goto redirty_page;
2130 if (redirty_page)
2131 goto redirty_page;
2133 if (commit_write)
2135 /* mark the buffer_heads as dirty & uptodate */
2136 block_commit_write(page, 0, len);
2138 /*
2139 * Delalloc doesn't support data journalling,
2140 * but eventually maybe we'll lift this
2141 * restriction.
2142 */
2143 if (unlikely(journal_data && PageChecked(page)))
2144 err = __ext4_journalled_writepage(page, len);
2145 else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
2146 err = ext4_bio_write_page(&io_submit, page,
2147 len, mpd->wbc);
2148 else
2149 err = block_write_full_page(page,
2150 noalloc_get_block_write, mpd->wbc);
2152 if (!err)
2153 mpd->pages_written++;
2154 /*
2155 * In error case, we have to continue because
2156 * remaining pages are still locked
2157 */
2158 if (ret == 0)
2159 ret = err;
2160 }
2161 pagevec_release(&pvec);
2162 }
2163 ext4_io_submit(&io_submit);
2164 return ret;
2165 }
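/*
 * Worked example (added for illustration): with 4K pages
 * (PAGE_CACHE_SHIFT == 12), a file with i_size = 10000 bytes ends in page
 * index 2 (10000 >> 12), so for that page the len calculation above
 * submits only len = 10000 & ~PAGE_CACHE_MASK = 1808 bytes; every earlier
 * page is written with the full PAGE_CACHE_SIZE.
 */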
2167 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2168 sector_t logical, long blk_cnt)
2169 {
2170 int i, nr_pages;
2171 pgoff_t index, end;
2172 struct pagevec pvec;
2173 struct inode *inode = mpd->inode;
2174 struct address_space *mapping = inode->i_mapping;
2176 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2177 end = (logical + blk_cnt - 1) >>
2178 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2179 while (index <= end) {
2180 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2181 if (nr_pages == 0)
2182 break;
2183 for (i = 0; i < nr_pages; i++) {
2184 struct page *page = pvec.pages[i];
2185 if (page->index > end)
2186 break;
2187 BUG_ON(!PageLocked(page));
2188 BUG_ON(PageWriteback(page));
2189 block_invalidatepage(page, 0);
2190 ClearPageUptodate(page);
2191 unlock_page(page);
2192 }
2193 index = pvec.pages[nr_pages - 1]->index + 1;
2194 pagevec_release(&pvec);
2195 }
2196 return;
2197 }
2199 static void ext4_print_free_blocks(struct inode *inode)
2200 {
2201 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2202 printk(KERN_CRIT "Total free blocks count %lld\n",
2203 ext4_count_free_blocks(inode->i_sb));
2204 printk(KERN_CRIT "Free/Dirty block details\n");
2205 printk(KERN_CRIT "free_blocks=%lld\n",
2206 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2207 printk(KERN_CRIT "dirty_blocks=%lld\n",
2208 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2209 printk(KERN_CRIT "Block reservation details\n");
2210 printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2211 EXT4_I(inode)->i_reserved_data_blocks);
2212 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2213 EXT4_I(inode)->i_reserved_meta_blocks);
2214 return;
2215 }
2217 /*
2218 * mpage_da_map_and_submit - go through the given space, map it
2219 * if necessary, and then submit it for I/O
2220 *
2221 * @mpd - bh describing space
2222 *
2223 * The function skips space we know is already mapped to disk blocks.
2224 */
2226 static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
2227 {
2228 int err, blks, get_blocks_flags;
2229 struct ext4_map_blocks map, *mapp = NULL;
2230 sector_t next = mpd->b_blocknr;
2231 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2232 loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2233 handle_t *handle = NULL;
2235 /*
2236 * If the blocks are mapped already, or we couldn't accumulate
2237 * any blocks, then proceed immediately to the submission stage.
2238 */
2239 if ((mpd->b_size == 0) ||
2240 ((mpd->b_state & (1 << BH_Mapped)) &&
2241 !(mpd->b_state & (1 << BH_Delay)) &&
2242 !(mpd->b_state & (1 << BH_Unwritten))))
2243 goto submit_io;
2245 handle = ext4_journal_current_handle();
2246 BUG_ON(!handle);
2248 /*
2249 * Call ext4_map_blocks() to allocate any delayed allocation
2250 * blocks, or to convert an uninitialized extent to be
2251 * initialized (in the case where we have written into
2252 * one or more preallocated blocks).
2253 *
2254 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2255 * indicate that we are on the delayed allocation path. This
2256 * affects functions in many different parts of the allocation
2257 * call path. This flag exists primarily because we don't
2258 * want to change *many* call functions, so ext4_map_blocks()
2259 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
2260 * inode's allocation semaphore is taken.
2261 *
2262 * If the blocks in question were delalloc blocks, set
2263 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2264 * variables are updated after the blocks have been allocated.
2265 */
2266 map.m_lblk = next;
2267 map.m_len = max_blocks;
2268 get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
2269 if (ext4_should_dioread_nolock(mpd->inode))
2270 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2271 if (mpd->b_state & (1 << BH_Delay))
2272 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2274 blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
2275 if (blks < 0) {
2276 struct super_block *sb = mpd->inode->i_sb;
2278 err = blks;
2279 /*
2280 * If get block returns EAGAIN or ENOSPC and there
2281 * appears to be free blocks we will call
2282 * ext4_writepage() for all of the pages which will
2283 * just redirty the pages.
2284 */
2285 if (err == -EAGAIN)
2286 goto submit_io;
2288 if (err == -ENOSPC &&
2289 ext4_count_free_blocks(sb)) {
2290 mpd->retval = err;
2291 goto submit_io;
2292 }
2294 /*
2295 * get block failure will cause us to loop in
2296 * writepages, because a_ops->writepage won't be able
2297 * to make progress. The page will be redirtied by
2298 * writepage and writepages will again try to write
2299 * the same.
2300 */
2301 if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2302 ext4_msg(sb, KERN_CRIT,
2303 "delayed block allocation failed for inode %lu "
2304 "at logical offset %llu with max blocks %zd "
2305 "with error %d", mpd->inode->i_ino,
2306 (unsigned long long) next,
2307 mpd->b_size >> mpd->inode->i_blkbits, err);
2308 ext4_msg(sb, KERN_CRIT,
2309 "This should not happen!! Data will be lost\n");
2311 ext4_print_free_blocks(mpd->inode);
2313 /* invalidate all the pages */
2314 ext4_da_block_invalidatepages(mpd, next,
2315 mpd->b_size >> mpd->inode->i_blkbits);
2321 if (map.m_flags & EXT4_MAP_NEW) {
2322 struct block_device *bdev = mpd->inode->i_sb->s_bdev;
2323 int i;
2325 for (i = 0; i < map.m_len; i++)
2326 unmap_underlying_metadata(bdev, map.m_pblk + i);
2327 }
2329 if (ext4_should_order_data(mpd->inode)) {
2330 err = ext4_jbd2_file_inode(handle, mpd->inode);
2331 if (err)
2332 /* This only happens if the journal is aborted */
2333 return;
2334 }
2336 /*
2337 * Update on-disk size along with block allocation.
2338 */
2339 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2340 if (disksize > i_size_read(mpd->inode))
2341 disksize = i_size_read(mpd->inode);
2342 if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2343 ext4_update_i_disksize(mpd->inode, disksize);
2344 err = ext4_mark_inode_dirty(handle, mpd->inode);
2345 if (err)
2346 ext4_error(mpd->inode->i_sb,
2347 "Failed to mark inode %lu dirty",
2348 mpd->inode->i_ino);
2349 }
2351 submit_io:
2352 mpage_da_submit_io(mpd, mapp);
2353 mpd->io_done = 1;
2354 }
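/*
 * Worked example (added for illustration): with 4K blocks
 * (i_blkbits == 12), mapping blks = 8 blocks at logical block next = 100
 * gives disksize = (100 + 8) << 12 = 442368 bytes in the code above;
 * i_disksize is clamped to i_size and is only ever moved forward.
 */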
2356 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2357 (1 << BH_Delay) | (1 << BH_Unwritten))
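/*
 * Illustrative sketch (added for this document, not part of the original
 * source): BH_FLAGS is the subset of buffer state bits that
 * mpage_add_bh_to_extent() below compares when deciding whether a block
 * may join the current extent; other bits such as BH_Dirty are ignored.
 */
#if 0
	unsigned long b_state = (1 << BH_Dirty) | (1 << BH_Uptodate) |
				(1 << BH_Delay);

	/* only the BH_FLAGS bits survive the mask */
	BUG_ON((b_state & BH_FLAGS) !=
	       ((1 << BH_Uptodate) | (1 << BH_Delay)));
#endif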
2359 /*
2360 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2361 *
2362 * @mpd->lbh - extent of blocks
2363 * @logical - logical number of the block in the file
2364 * @bh - bh of the block (used to access block's state)
2365 *
2366 * The function is used to collect contiguous blocks in the same state.
2367 */
2368 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2369 sector_t logical, size_t b_size,
2370 unsigned long b_state)
2371 {
2372 sector_t next;
2373 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2375 /*
2376 * XXX Don't go larger than mballoc is willing to allocate
2377 * This is a stopgap solution. We eventually need to fold
2378 * mpage_da_submit_io() into this function and then call
2379 * ext4_map_blocks() multiple times in a loop
2380 */
2381 if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
2382 goto flush_it;
2384 /* check if the reserved journal credits might overflow */
2385 if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
2386 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2387 /*
2388 * With non-extent format we are limited by the journal
2389 * credit available. Total credit needed to insert
2390 * nrblocks contiguous blocks is dependent on the
2391 * nrblocks. So limit nrblocks.
2392 */
2393 goto flush_it;
2394 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2395 EXT4_MAX_TRANS_DATA) {
2396 /*
2397 * Adding the new buffer_head would make it cross the
2398 * allowed limit for which we have journal credit
2399 * reserved. So limit the new bh->b_size
2400 */
2401 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2402 mpd->inode->i_blkbits;
2403 /* we will do mpage_da_submit_io in the next loop */
2404 }
2405 }
2406 /*
2407 * First block in the extent
2408 */
2409 if (mpd->b_size == 0) {
2410 mpd->b_blocknr = logical;
2411 mpd->b_size = b_size;
2412 mpd->b_state = b_state & BH_FLAGS;
2413 return;
2414 }
2416 next = mpd->b_blocknr + nrblocks;
2417 /*
2418 * Can we merge the block to our big extent?
2419 */
2420 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2421 mpd->b_size += b_size;
2422 return;
2423 }
2425 flush_it:
2426 /*
2427 * We couldn't merge the block to our extent, so we
2428 * need to flush the current extent and start a new one
2429 */
2430 mpage_da_map_and_submit(mpd);
2431 return;
2432 }
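/*
 * Illustrative sketch (added for this document, not part of the original
 * source): with a 4K block size, two dirty delayed buffers at consecutive
 * logical blocks coalesce into one extent descriptor, so they can later
 * be mapped with a single ext4_map_blocks() call. The mpd state shown is
 * hypothetical.
 */
#if 0
	/* empty mpd: the first call starts an extent at logical block 100 */
	mpage_add_bh_to_extent(mpd, 100, bh->b_size, bh->b_state);
	/* logical block 101 in the same state: merged, mpd->b_size grows */
	mpage_add_bh_to_extent(mpd, 101, bh->b_size, bh->b_state);
	/* a non-contiguous or differently-flagged block would instead
	 * flush the current extent via mpage_da_map_and_submit() */
#endif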
2434 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2435 {
2436 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2437 }
2439 /*
2440 * __mpage_da_writepage - finds extent of pages and blocks
2441 *
2442 * @page: page to consider
2443 * @wbc: not used, we just follow rules
2444 *
2446 * The function finds extents of pages and scans them for all blocks.
2447 */
2448 static int __mpage_da_writepage(struct page *page,
2449 struct writeback_control *wbc,
2450 struct mpage_da_data *mpd)
2451 {
2452 struct inode *inode = mpd->inode;
2453 struct buffer_head *bh, *head;
2454 sector_t logical;
2456 /*
2457 * Can we merge this page to current extent?
2458 */
2459 if (mpd->next_page != page->index) {
2460 /*
2461 * Nope, we can't. So, we map non-allocated blocks
2462 * and start IO on them
2463 */
2464 if (mpd->next_page != mpd->first_page) {
2465 mpage_da_map_and_submit(mpd);
2466 /*
2467 * skip rest of the page in the page_vec
2468 */
2469 redirty_page_for_writepage(wbc, page);
2470 unlock_page(page);
2471 return MPAGE_DA_EXTENT_TAIL;
2472 }
2474 /*
2475 * Start next extent of pages ...
2476 */
2477 mpd->first_page = page->index;
2479 /*
2480 * ... and blocks
2481 */
2482 mpd->b_size = 0;
2483 mpd->b_state = 0;
2484 mpd->b_blocknr = 0;
2485 }
2487 mpd->next_page = page->index + 1;
2488 logical = (sector_t) page->index <<
2489 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2491 if (!page_has_buffers(page)) {
2492 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2493 (1 << BH_Dirty) | (1 << BH_Uptodate));
2494 if (mpd->io_done)
2495 return MPAGE_DA_EXTENT_TAIL;
2496 } else {
2497 /*
2498 * Page with regular buffer heads, just add all dirty ones
2499 */
2500 head = page_buffers(page);
2501 bh = head;
2502 do {
2503 BUG_ON(buffer_locked(bh));
2504 /*
2505 * We need to try to allocate
2506 * unmapped blocks in the same page.
2507 * Otherwise we won't make progress
2508 * with the page in ext4_writepage
2509 */
2510 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2511 mpage_add_bh_to_extent(mpd, logical,
2512 bh->b_size,
2513 bh->b_state);
2514 if (mpd->io_done)
2515 return MPAGE_DA_EXTENT_TAIL;
2516 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2517 /*
2518 * mapped dirty buffer. We need to update
2519 * the b_state because we look at
2520 * b_state in mpage_da_map_blocks. We don't
2521 * update b_size because if we find an
2522 * unmapped buffer_head later we need to
2523 * use the b_state flag of that buffer_head.
2524 */
2525 if (mpd->b_size == 0)
2526 mpd->b_state = bh->b_state & BH_FLAGS;
2527 }
2528 logical++;
2529 } while ((bh = bh->b_this_page) != head);
2530 }
2532 return 0;
2533 }
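/*
 * Worked example (added for illustration): with 4K pages and a 1K block
 * size (i_blkbits == 10), page index 3 starts at logical block
 * 3 << (PAGE_CACHE_SHIFT - i_blkbits) = 3 << 2 = 12, so the buffer walk
 * above covers logical blocks 12..15 of the file.
 */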
2535 /*
2536 * This is a special get_block_t callback which is used by
2537 * ext4_da_write_begin(). It will either return a mapped block or
2538 * reserve space for a single block.
2539 *
2540 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2541 * We also have b_blocknr = -1 and b_bdev initialized properly.
2542 *
2543 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2544 * We also have b_blocknr = physical block number mapping the unwritten
2545 * extent and b_bdev initialized properly.
2546 */
2547 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2548 struct buffer_head *bh, int create)
2549 {
2550 struct ext4_map_blocks map;
2551 int ret = 0;
2552 sector_t invalid_block = ~((sector_t) 0xffff);
2554 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2555 invalid_block = ~0;
2557 BUG_ON(create == 0);
2558 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
2560 map.m_lblk = iblock;
2561 map.m_len = 1;
2563 /*
2564 * first, we need to know whether the block is allocated already;
2565 * preallocated blocks are unmapped but should be treated
2566 * the same as allocated blocks.
2567 */
2568 ret = ext4_map_blocks(NULL, inode, &map, 0);
2569 if (ret < 0)
2570 return ret;
2571 if (ret == 0) {
2572 if (buffer_delay(bh))
2573 return 0; /* Not sure this could or should happen */
2574 /*
2575 * XXX: __block_write_begin() unmaps passed block, is it OK?
2576 */
2577 ret = ext4_da_reserve_space(inode, iblock);
2578 if (ret)
2579 /* not enough space to reserve */
2580 return ret;
2582 map_bh(bh, inode->i_sb, invalid_block);
2583 set_buffer_new(bh);
2584 set_buffer_delay(bh);
2585 return 0;
2586 }
2588 map_bh(bh, inode->i_sb, map.m_pblk);
2589 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
2591 if (buffer_unwritten(bh)) {
2592 /* A delayed write to unwritten bh should be marked
2593 * new and mapped. Mapped ensures that we don't do
2594 * get_block multiple times when we write to the same
2595 * offset and new ensures that we do proper zero out
2596 * for partial write.
2597 */
2598 set_buffer_new(bh);
2599 set_buffer_mapped(bh);
2600 }
2601 return 0;
2602 }
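/*
 * Illustrative sketch (added for this document, not part of the original
 * source): the delalloc write_begin path hands the callback above to
 * __block_write_begin(), which invokes it once per unmapped block in the
 * page; pos and len here are hypothetical.
 */
#if 0
	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
	/* on success every buffer in the range is either mapped or marked
	 * BH_Delay against the space reserved above */
#endif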
2604 /*
2605 * This function is used as a standard get_block_t callback function
2606 * when there is no desire to allocate any blocks. It is used as a
2607 * callback function for block_write_begin() and block_write_full_page().
2608 * These functions should only try to map a single block at a time.
2609 *
2610 * Since this function doesn't do block allocations even if the caller
2611 * requests it by passing in create=1, it is critically important that
2612 * any caller checks to make sure that any buffer heads returned by
2613 * this function are either all already mapped or marked for
2614 * delayed allocation before calling block_write_full_page(). Otherwise,
2615 * b_blocknr could be left uninitialized, and the page write functions will
2616 * be taken by surprise.
2617 */
2618 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2619 struct buffer_head *bh_result, int create)
2620 {
2621 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2622 return _ext4_get_block(inode, iblock, bh_result, 0);
2623 }
2625 static int bget_one(handle_t *handle, struct buffer_head *bh)
2626 {
2627 get_bh(bh);
2628 return 0;
2629 }
2631 static int bput_one(handle_t *handle, struct buffer_head *bh)
2632 {
2633 put_bh(bh);
2634 return 0;
2635 }
2637 static int __ext4_journalled_writepage(struct page *page,
2638 unsigned int len)
2639 {
2640 struct address_space *mapping = page->mapping;
2641 struct inode *inode = mapping->host;
2642 struct buffer_head *page_bufs;
2643 handle_t *handle = NULL;
2644 int ret = 0;
2645 int err;
2647 ClearPageChecked(page);
2648 page_bufs = page_buffers(page);
2650 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2651 /* As soon as we unlock the page, it can go away, but we have
2652 * references to buffers so we are safe */
2653 unlock_page(page);
2655 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2656 if (IS_ERR(handle)) {
2657 ret = PTR_ERR(handle);
2658 goto out;
2659 }
2661 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2662 do_journal_get_write_access);
2664 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2665 write_end_fn);
2666 if (ret == 0)
2667 ret = err;
2668 err = ext4_journal_stop(handle);
2669 if (!ret)
2670 ret = err;
2672 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2673 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2674 out:
2675 return ret;
2676 }
2678 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
2679 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
2681 /*
2682 * Note that we don't need to start a transaction unless we're journaling data
2683 * because we should have holes filled from ext4_page_mkwrite(). We even don't
2684 * need to file the inode to the transaction's list in ordered mode because if
2685 * we are writing back data added by write(), the inode is already there and if
2686 * we are writing back data modified via mmap(), no one guarantees in which
2687 * transaction the data will hit the disk. In case we are journaling data, we
2688 * cannot start a transaction directly because transaction start ranks above
2689 * page lock so we have to do some magic.
2690 *
2691 * This function can get called via...
2692 * - ext4_da_writepages after taking page lock (have journal handle)
2693 * - journal_submit_inode_data_buffers (no journal handle)
2694 * - shrink_page_list via pdflush (no journal handle)
2695 * - grab_page_cache when doing write_begin (have journal handle)
2696 *
2697 * We don't do any block allocation in this function. If we have a page with
2698 * multiple blocks we need to write those buffer_heads that are mapped. This
2699 * is important for mmapped based writes. So if we do with blocksize 1K
2700 * truncate(f, 1024);
2701 * a = mmap(f, 0, 4096);
2702 * a[0] = 'a';
2703 * truncate(f, 4096);
2704 * we have in the page the first buffer_head mapped via the page_mkwrite
2705 * callback but the other buffer_heads would be unmapped but dirty (dirtying
2706 * done via do_wp_page). So writepage should write the first block. If we
2707 * modify the mmap area beyond 1024 we will again get a page_fault and the
2708 * page_mkwrite callback will do the block allocation and mark the
2709 * buffer_heads mapped.
2710 *
2711 * We redirty the page if we have any buffer_heads that are either delayed or
2712 * unwritten in the page.
2713 *
2714 * We can get recursively called as shown below.
2715 *
2716 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2717 * ext4_writepage()
2718 *
2719 * But since we don't do any block allocation we should not deadlock.
2720 * The page also has the dirty flag cleared so we don't get recursive page_lock.
2721 */
2722 static int ext4_writepage(struct page *page,
2723 struct writeback_control *wbc)
2724 {
2725 int ret = 0, commit_write = 0;
2726 loff_t size;
2727 unsigned int len;
2728 struct buffer_head *page_bufs = NULL;
2729 struct inode *inode = page->mapping->host;
2731 trace_ext4_writepage(inode, page);
2732 size = i_size_read(inode);
2733 if (page->index == size >> PAGE_CACHE_SHIFT)
2734 len = size & ~PAGE_CACHE_MASK;
2735 else
2736 len = PAGE_CACHE_SIZE;
2738 /*
2739 * If the page does not have buffers (for whatever reason),
2740 * try to create them using __block_write_begin. If this
2741 * fails, redirty the page and move on.
2742 */
2743 if (!page_has_buffers(page)) {
2744 if (__block_write_begin(page, 0, len,
2745 noalloc_get_block_write)) {
2746 redirty_page:
2747 redirty_page_for_writepage(wbc, page);
2748 unlock_page(page);
2749 return 0;
2750 }
2751 commit_write = 1;
2752 }
2753 page_bufs = page_buffers(page);
2754 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2755 ext4_bh_delay_or_unwritten)) {
2756 /*
2757 * We don't want to do block allocation, so redirty
2758 * the page and return. We may reach here when we do
2759 * a journal commit via journal_submit_inode_data_buffers.
2760 * We can also reach here via shrink_page_list
2761 */
2762 goto redirty_page;
2763 }
2764 if (commit_write)
2765 /* now mark the buffer_heads as dirty and uptodate */
2766 block_commit_write(page, 0, len);
2768 if (PageChecked(page) && ext4_should_journal_data(inode))
2769 /*
2770 * It's mmapped pagecache. Add buffers and journal it. There
2771 * doesn't seem much point in redirtying the page here.
2772 */
2773 return __ext4_journalled_writepage(page, len);
2775 if (buffer_uninit(page_bufs)) {
2776 ext4_set_bh_endio(page_bufs, inode);
2777 ret = block_write_full_page_endio(page, noalloc_get_block_write,
2778 wbc, ext4_end_io_buffer_write);
2779 } else
2780 ret = block_write_full_page(page, noalloc_get_block_write,
2781 wbc);
2783 return ret;
2784 }
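/*
 * Illustrative sketch (added for this document, not part of the original
 * source): the userspace sequence from the comment above ext4_writepage(),
 * spelled out. With a 1K block size only the first buffer_head of the 4K
 * page gets mapped by page_mkwrite; the remaining three stay unmapped but
 * dirty, which is why writepage must write only the mapped buffers.
 */
#if 0
	int fd = open("f", O_RDWR);
	char *a;

	ftruncate(fd, 1024);
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	a[0] = 'a';		/* page_mkwrite maps block 0 only */
	ftruncate(fd, 4096);	/* blocks 1..3 remain unmapped but dirty */
#endif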
2786 /*
2787 * This is called via ext4_da_writepages() to
2788 * calculate the total number of credits to reserve to fit
2789 * a single extent allocation into a single transaction,
2790 * ext4_da_writepages() will loop calling this before
2791 * the block allocation.
2792 */
2794 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2795 {
2796 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2798 /*
2799 * With non-extent format the journal credit needed to
2800 * insert nrblocks contiguous blocks is dependent on the
2801 * number of contiguous blocks. So we will limit the
2802 * number of contiguous blocks to a sane value
2803 */
2804 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2805 (max_blocks > EXT4_MAX_TRANS_DATA))
2806 max_blocks = EXT4_MAX_TRANS_DATA;
2808 return ext4_chunk_trans_blocks(inode, max_blocks);
2809 }
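/*
 * Worked example (added for illustration; EXT4_MAX_TRANS_DATA is 64 in
 * this tree): for a non-extent inode with 4096 reserved data blocks, the
 * estimate above is first capped at 64 blocks before asking
 * ext4_chunk_trans_blocks() for journal credits, since indirect-block
 * credits grow with the number of contiguous blocks; extent-mapped inodes
 * are not capped here because one extent insertion covers the whole range.
 */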
2811 /*
2812 * write_cache_pages_da - walk the list of dirty pages of the given
2813 * address space and call the callback function (which usually writes
2814 * the pages).
2815 *
2816 * This is a forked version of write_cache_pages(). Differences:
2817 * range_cyclic is ignored.
2818 * no_nrwrite_index_update is always presumed true
2819 */
2820 static int write_cache_pages_da(struct address_space *mapping,
2821 struct writeback_control *wbc,
2822 struct mpage_da_data *mpd,
2823 pgoff_t *done_index)
2824 {
2825 int ret = 0;
2826 int done = 0;
2827 struct pagevec pvec;
2828 unsigned nr_pages;
2829 pgoff_t index;
2830 pgoff_t end; /* Inclusive */
2831 long nr_to_write = wbc->nr_to_write;
2832 int tag;
2834 pagevec_init(&pvec, 0);
2835 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2836 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2838 if (wbc->sync_mode == WB_SYNC_ALL)
2839 tag = PAGECACHE_TAG_TOWRITE;
2840 else
2841 tag = PAGECACHE_TAG_DIRTY;
2843 *done_index = index;
2844 while (!done && (index <= end)) {
2845 int i;
2847 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2848 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2849 if (nr_pages == 0)
2850 break;
2852 for (i = 0; i < nr_pages; i++) {
2853 struct page *page = pvec.pages[i];
2855 /*
2856 * At this point, the page may be truncated or
2857 * invalidated (changing page->mapping to NULL), or
2858 * even swizzled back from swapper_space to tmpfs file
2859 * mapping. However, page->index will not change
2860 * because we have a reference on the page.
2861 */
2862 if (page->index > end) {
2863 done = 1;
2864 break;
2865 }
2867 *done_index = page->index + 1;
2869 lock_page(page);
2871 /*
2872 * Page truncated or invalidated. We can freely skip it
2873 * then, even for data integrity operations: the page
2874 * has disappeared concurrently, so there could be no
2875 * real expectation of this data integrity operation
2876 * even if there is now a new, dirty page at the same
2877 * pagecache address.
2878 */
2879 if (unlikely(page->mapping != mapping)) {
2880 continue_unlock:
2881 unlock_page(page);
2882 continue;
2883 }
2885 if (!PageDirty(page)) {
2886 /* someone wrote it for us */
2887 goto continue_unlock;
2888 }
2890 if (PageWriteback(page)) {
2891 if (wbc->sync_mode != WB_SYNC_NONE)
2892 wait_on_page_writeback(page);
2893 else
2894 goto continue_unlock;
2895 }
2897 BUG_ON(PageWriteback(page));
2898 if (!clear_page_dirty_for_io(page))
2899 goto continue_unlock;
2901 ret = __mpage_da_writepage(page, wbc, mpd);
2902 if (unlikely(ret)) {
2903 if (ret == AOP_WRITEPAGE_ACTIVATE) {
2904 unlock_page(page);
2905 ret = 0;
2906 } else {
2907 done = 1;
2908 break;
2909 }
2910 }
2912 if (nr_to_write > 0) {
2913 nr_to_write--;
2914 if (nr_to_write == 0 &&
2915 wbc->sync_mode == WB_SYNC_NONE) {
2916 /*
2917 * We stop writing back only if we are
2918 * not doing integrity sync. In case of
2919 * integrity sync we have to keep going
2920 * because someone may be concurrently
2921 * dirtying pages, and we might have
2922 * synced a lot of newly appeared dirty
2923 * pages, but have not synced all of the
2924 * old dirty pages.
2925 */
2926 done = 1;
2927 break;
2928 }
2929 }
2930 }
2931 pagevec_release(&pvec);
2932 cond_resched();
2933 }
2934 return ret;
2935 }
2938 static int ext4_da_writepages(struct address_space *mapping,
2939 struct writeback_control *wbc)
2940 {
2941 pgoff_t index;
2942 int range_whole = 0;
2943 handle_t *handle = NULL;
2944 struct mpage_da_data mpd;
2945 struct inode *inode = mapping->host;
2946 int pages_written = 0;
2947 long pages_skipped;
2948 unsigned int max_pages;
2949 int range_cyclic, cycled = 1, io_done = 0;
2950 int needed_blocks, ret = 0;
2951 long desired_nr_to_write, nr_to_writebump = 0;
2952 loff_t range_start = wbc->range_start;
2953 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2954 pgoff_t done_index = 0;
2955 pgoff_t end;
2957 trace_ext4_da_writepages(inode, wbc);
2959 /*
2960 * No pages to write? This is mainly a kludge to avoid starting
2961 * a transaction for special inodes like the journal inode on last iput()
2962 * because that could violate lock ordering on umount
2963 */
2964 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2965 return 0;
2967 /*
2968 * If the filesystem has aborted, it is read-only, so return
2969 * right away instead of dumping stack traces later on that
2970 * will obscure the real source of the problem. We test
2971 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2972 * the latter could be true if the filesystem is mounted
2973 * read-only, and in that case, ext4_da_writepages should
2974 * *never* be called, so if that ever happens, we would want
2975 * the stack trace.
2976 */
2977 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2978 return -EROFS;
2980 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2981 range_whole = 1;
2983 range_cyclic = wbc->range_cyclic;
2984 if (wbc->range_cyclic) {
2985 index = mapping->writeback_index;
2986 if (index)
2987 cycled = 0;
2988 wbc->range_start = index << PAGE_CACHE_SHIFT;
2989 wbc->range_end = LLONG_MAX;
2990 wbc->range_cyclic = 0;
2991 end = -1;
2992 } else {
2993 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2994 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2995 }
2997 /*
2998 * This works around two forms of stupidity. The first is in
2999 * the writeback code, which caps the maximum number of pages
3000 * written to be 1024 pages. This is wrong on multiple
3001 * levels; different architectures have a different page size,
3002 * which changes the maximum amount of data which gets
3003 * written. Secondly, 4 megabytes is way too small. XFS
3004 * forces this value to be 16 megabytes by multiplying the
3005 * nr_to_write parameter by four, and then relies on its
3006 * allocator to allocate larger extents to make them
3007 * contiguous. Unfortunately this brings us to the second
3008 * stupidity, which is that ext4's mballoc code only allocates
3009 * at most 2048 blocks. So we force contiguous writes up to
3010 * the number of dirty blocks in the inode, or
3011 * sbi->s_max_writeback_mb_bump, whichever is smaller.
3012 */
3013 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
3014 if (!range_cyclic && range_whole) {
3015 if (wbc->nr_to_write == LONG_MAX)
3016 desired_nr_to_write = wbc->nr_to_write;
3017 else
3018 desired_nr_to_write = wbc->nr_to_write * 8;
3019 } else
3020 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
3021 max_pages);
3022 if (desired_nr_to_write > max_pages)
3023 desired_nr_to_write = max_pages;
3025 if (wbc->nr_to_write < desired_nr_to_write) {
3026 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
3027 wbc->nr_to_write = desired_nr_to_write;
3028 }
3030 mpd.wbc = wbc;
3031 mpd.inode = mapping->host;
3033 pages_skipped = wbc->pages_skipped;
3035 retry:
3036 if (wbc->sync_mode == WB_SYNC_ALL)
3037 tag_pages_for_writeback(mapping, index, end);
3039 while (!ret && wbc->nr_to_write > 0) {
3041 /*
3042 * we insert one extent at a time. So we need the
3043 * credit needed for a single extent allocation.
3044 * journalled mode is currently not supported
3045 * by delalloc
3046 */
3047 BUG_ON(ext4_should_journal_data(inode));
3048 needed_blocks = ext4_da_writepages_trans_blocks(inode);
3050 /* start a new transaction */
3051 handle = ext4_journal_start(inode, needed_blocks);
3052 if (IS_ERR(handle)) {
3053 ret = PTR_ERR(handle);
3054 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
3055 "%ld pages, ino %lu; err %d", __func__,
3056 wbc->nr_to_write, inode->i_ino, ret);
3057 goto out_writepages;
3060 /*
3061 * Now call __mpage_da_writepage to find the next
3062 * contiguous region of logical blocks that need
3063 * blocks to be allocated by ext4. We don't actually
3064 * submit the blocks for I/O here, even though
3065 * write_cache_pages thinks it will, and will set the
3066 * pages as clean for write before calling
3067 * __mpage_da_writepage().
3068 */
3069 mpd.b_size = 0;
3070 mpd.b_state = 0;
3071 mpd.b_blocknr = 0;
3072 mpd.first_page = 0;
3073 mpd.next_page = 0;
3074 mpd.io_done = 0;
3075 mpd.pages_written = 0;
3076 mpd.retval = 0;
3077 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
3078 /*
3079 * If we have a contiguous extent of pages and we
3080 * haven't done the I/O yet, map the blocks and submit
3081 * them for I/O.
3082 */
3083 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
3084 mpage_da_map_and_submit(&mpd);
3085 ret = MPAGE_DA_EXTENT_TAIL;
3086 }
3087 trace_ext4_da_write_pages(inode, &mpd);
3088 wbc->nr_to_write -= mpd.pages_written;