/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        return jbd2_journal_begin_ordered_truncate(
                                        EXT4_SB(inode->i_sb)->s_journal,
                                        &EXT4_I(inode)->jinode,
                                        new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
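 *
 * A fast symlink keeps its target inside the inode's i_data array, so it
 * owns no data blocks; i_blocks (counted in 512-byte sectors, hence the
 * >> 9 conversion) can only reflect an external xattr block.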
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling, so there's nothing to do.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
                struct buffer_head *bh, ext4_fsblk_t blocknr)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;

        might_sleep();

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %x\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks. */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext4_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call jbd2_journal_forget");
                        return ext4_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext4_journal_revoke");
        err = ext4_journal_revoke(handle, blocknr, bh);
        if (err)
                ext4_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
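 *
 * For example, with 4KiB blocks (s_blocksize_bits == 12) an inode whose
 * i_blocks is 1024 512-byte sectors covers 1024 >> 3 == 128 filesystem
 * blocks, so we ask for that many credits on top of
 * EXT4_DATA_TRANS_BLOCKS(), clamped to EXT4_MAX_TRANS_DATA.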
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext4 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;

        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext4_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext4_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (!ext4_handle_valid(handle))
                return 0;
        if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
                return 0;
        if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
        handle_t *handle;
        int err;

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);

        if (is_bad_inode(inode))
                goto no_delete;

        handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb, __func__,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks)
                ext4_truncate(inode);

        /*
         * ext4_ext_truncate() doesn't reserve any slop when it
         * restarts journal transactions; therefore there may not be
         * enough credits left in the handle to remove the inode from
         * the orphan list and set the dtime field.
         */
        if (!ext4_handle_has_enough_credits(handle, 3)) {
                err = ext4_journal_extend(handle, 3);
                if (err > 0)
                        err = ext4_journal_restart(handle, 3);
                if (err != 0) {
                        ext4_warning(inode->i_sb, __func__,
                                     "couldn't extend journal (err %d)", err);
                stop_handle:
                        ext4_journal_stop(handle);
                        goto no_delete;
                }
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

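/*
 * An Indirect describes one hop of a block-lookup chain: @p points at the
 * slot (in the inode's i_data or in an indirect block) holding a block
 * number, @key caches that little-endian number, and @bh pins the buffer
 * the slot lives in (NULL when the slot is in the inode itself).
 */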
typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

/**
 *      ext4_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *             followed (on disk) by an indirect block.
 *
 *      To store the locations of a file's data, ext4 uses a data structure
 *      common to UNIX filesystems - a tree of pointers anchored in the
 *      inode, with data blocks at the leaves and indirect blocks in the
 *      intermediate nodes.  This function translates the block number into
 *      a path in that tree - the return value is the path length and
 *      @offsets[n] is the offset of the pointer to the (n+1)th node in the
 *      nth one.  If @i_block is out of range (negative or too large), a
 *      warning is printed and zero is returned.
 *
 *      Note: the function doesn't find node addresses, so no IO is needed.
 *      All we need to know is the capacity of indirect blocks (taken from
 *      the inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
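
/*
 * Worked example (assuming a 4KiB block size, i.e. ptrs == 1024 pointers
 * per indirect block): logical block 12 + 1024 + 5 == 1041 misses the 12
 * direct slots and the single-indirect block, so it lands in the
 * double-indirect tree with offsets[] == { EXT4_DIND_BLOCK, 0, 5 } and a
 * returned depth of 3.
 */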

static int ext4_block_to_path(struct inode *inode,
                              ext4_lblk_t i_block,
                              ext4_lblk_t offsets[4], int *boundary)
{
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT4_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT4_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT4_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "ext4_block_to_path",
                             "block %lu > max in inode %lu",
                             i_block + direct_blocks +
                             indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}

static int __ext4_check_blockref(const char *function, struct inode *inode,
                                 __le32 *p, unsigned int max)
{
        __le32 *bref = p;
        unsigned int blk;

        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
                    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
                                                    blk, 1))) {
                        ext4_error(inode->i_sb, function,
                                   "invalid block reference %u "
                                   "in inode #%lu", blk, inode->i_ino);
                        return -EIO;
                }
        }
        return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
        __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
                              EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
        __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
                              EXT4_NDIR_BLOCKS)

/**
 *      ext4_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise. Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0. In other words, it holds the block
 *      numbers of the chain, addresses they were taken from (and where we can
 *      verify that chain did not change) and buffer_heads hosting these
 *      numbers.
 *
 *      Function stops when it stumbles upon zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *      Needs to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 ext4_lblk_t  *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_getblk(sb, le32_to_cpu(p->key));
                if (unlikely(!bh))
                        goto failure;

                if (!bh_uptodate_or_lock(bh)) {
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto failure;
                        }
                        /* validate block references */
                        if (ext4_check_indirect_blockref(inode, bh)) {
                                put_bh(bh);
                                goto failure;
                        }
                }

                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 *      ext4_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
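 * (For example, with 32768 blocks per group the PID hashes into one of 16
 * slots of 2048 blocks each, so two unrelated writers get goals 2048 blocks
 * apart instead of contending for the same region.)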
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        ext4_group_t block_group;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}

/**
 *      ext4_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @partial: pointer to the last triple within a chain
 *
 *      Normally this function finds the preferred place for block allocation
 *      and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                                   Indirect *partial)
{
        /*
         * XXX need to get goal block from mballoc's data structures
         */

        return ext4_find_near(inode, partial);
}

/**
 *      ext4_blks_to_allocate: Look up the block map and count the number
 *      of direct blocks that need to be allocated for the given branch.
 *
 *      @branch: chain of indirect blocks
 *      @k: number of blocks needed for indirect blocks
 *      @blks: number of data blocks to be mapped.
 *      @blocks_to_boundary:  the offset in the indirect block
 *
 *      return the total number of blocks to be allocated, including the
 *      direct and indirect blocks.
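 *
 *      For example, with blks == 8 and blocks_to_boundary == 3 (the branch
 *      head sits three slots before the end of its indirect block), a fresh
 *      branch (k > 0) counts only blocks_to_boundary + 1 == 4 blocks, since
 *      we do not allocate across an indirect-block boundary.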
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
                                 int blocks_to_boundary)
{
        unsigned int count = 0;

        /*
         * Simple case: if the [t,d]indirect block(s) have not been
         * allocated yet, then it's clear that the blocks on that path
         * have not been allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
                le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}

/**
 *      ext4_alloc_blocks: multiple allocate blocks needed for a branch
 *      @indirect_blks: the number of blocks that need to be allocated for
 *                      indirect blocks
 *
 *      @new_blocks: on return it will store the new block numbers for
 *      the indirect blocks (if needed) and the first direct block,
 *      @blks:  on return it will store the total number of allocated
 *              direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, ext4_fsblk_t goal,
                             int indirect_blks, int blks,
                             ext4_fsblk_t new_blocks[4], int *err)
{
        struct ext4_allocation_request ar;
        int target, i;
        unsigned long count = 0, blk_allocated = 0;
        int index = 0;
        ext4_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch.  That's the
         * minimum number of blocks we need to allocate (required).
         */
        /* first we try to allocate the indirect blocks */
        target = indirect_blks;
        while (target > 0) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext4_new_meta_blocks(handle, inode,
                                                        goal, &count, err);
                if (*err)
                        goto failed_out;

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }
                if (count > 0) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                        printk(KERN_INFO "%s returned more blocks than "
                                                "requested\n", __func__);
                        WARN_ON(1);
                        break;
                }
        }

        target = blks - count;
        blk_allocated = count;
        if (!target)
                goto allocated;
        /* Now allocate data blocks */
        memset(&ar, 0, sizeof(ar));
        ar.inode = inode;
        ar.goal = goal;
        ar.len = target;
        ar.logical = iblock;
        if (S_ISREG(inode->i_mode))
                /* enable in-core preallocation only for regular files */
                ar.flags = EXT4_MB_HINT_DATA;

        current_block = ext4_mb_new_blocks(handle, &ar, err);

        if (*err && (target == blks)) {
                /*
                 * if the allocation failed and we didn't allocate
                 * any blocks before
                 */
                goto failed_out;
        }
        if (!*err) {
                if (target == blks) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
        }
allocated:
        /* total number of blocks allocated for direct blocks */
        ret = blk_allocated;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
        return ret;
}

/**
 *      ext4_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @indirect_blks: number of allocated indirect blocks
 *      @blks: number of allocated direct blocks
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates blocks, zeroes out all but the last one,
 *      links them into chain and (if we are synchronous) writes them to disk.
 *      In other words, it prepares a branch that can be spliced onto the
 *      inode. It stores the information about that chain in the branch[], in
 *      the same format as ext4_get_branch() would do. We are calling it after
 *      we had read the existing part of chain and partial points to the last
 *      triple of that (one with zero ->key). Upon the exit we have the same
 *      picture as after the successful ext4_get_block(), except that in one
 *      place chain is disconnected - *branch->p is still zero (we did not
 *      set the last link), but branch->key contains the number that should
 *      be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *      as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, int indirect_blks,
                             int *blks, ext4_fsblk_t goal,
                             ext4_lblk_t *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext4_fsblk_t new_blocks[4];
        ext4_fsblk_t current_block;

        num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks;  n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
                        unlock_buffer(bh);
                        brelse(bh);
                        goto failed;
                }

                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the new allocated
                         * data blocks numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto failed;
        }
        *blks = num;
        return err;
failed:
        /* Allocation failed, free what we already allocated */
        for (i = 1; i <= n; i++) {
                BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

        ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

        return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *      ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                              ext4_lblk_t block, Indirect *where, int num,
                              int blks)
{
        int i;
        int err = 0;
        ext4_fsblk_t current_block;

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to the
         * just-allocated direct blocks
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */
        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 */
                ext4_mark_inode_dirty(handle, inode);
                jbd_debug(5, "splicing direct\n");
        }
        return err;

err_out:
        for (i = 1; i <= num; i++) {
                BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, where[i].bh);
                ext4_free_blocks(handle, inode,
                                        le32_to_cpu(where[i-1].key), 1, 0);
        }
        ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

        return err;
}

/*
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
                               ext4_lblk_t iblock, unsigned int maxblocks,
                               struct buffer_head *bh_result,
                               int flags)
{
        int err = -EIO;
        ext4_lblk_t offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext4_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        int count = 0;
        ext4_fsblk_t first_block = 0;

        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
        J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
                                   &blocks_to_boundary);

        if (depth == 0)
                goto out;

        partial = ext4_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext4_fsblk_t blk;

                        blk = le32_to_cpu(*(chain[depth-1].p + count));

                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
                goto cleanup;

        /*
         * Okay, we need to do block allocation.
         */
        goal = ext4_find_goal(inode, iblock, partial);

        /* the number of blocks we need to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;

        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext4_blks_to_allocate(partial, indirect_blks,
                                        maxblocks, blocks_to_boundary);
        /*
         * Block out ext4_truncate while we alter the tree
         */
        err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
                                &count, goal,
                                offsets + (partial - chain), partial);

        /*
         * The ext4_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case.  --sct
         */
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
                                         partial, indirect_blks, count);
        else
                goto cleanup;

        set_buffer_new(bh_result);
got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
        BUFFER_TRACE(bh_result, "returned");
out:
        return err;
}

qsize_t ext4_get_reserved_space(struct inode *inode)
{
        unsigned long long total;

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        total = EXT4_I(inode)->i_reserved_data_blocks +
                EXT4_I(inode)->i_reserved_meta_blocks;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        return total;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks data blocks for a non-extent-based file.
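 *
 * This is a worst-case estimate: e.g. with icap == 1024 pointers per
 * block, mapping 2048 new blocks reserves 2 indirect + 1 double-indirect
 * + 1 triple-indirect == 4 metadata blocks.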
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
        int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ind_blks, dind_blks, tind_blks;

        /* number of new indirect blocks needed */
        ind_blks = (blocks + icap - 1) / icap;

        dind_blks = (ind_blks + icap - 1) / icap;

        tind_blks = 1;

        return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks.
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
        if (!blocks)
                return 0;

        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
                return ext4_ext_calc_metadata_amount(inode, blocks);

        return ext4_indirect_calc_metadata_amount(inode, blocks);
}

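/*
 * Called after @used blocks of a delayed allocation have actually been
 * mapped: shrink the per-inode data-block reservation, recompute how many
 * metadata blocks the remainder still needs, and return any surplus to the
 * free-space and quota counters.
 */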
static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int total, mdb, mdb_free;

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        /* recalculate the number of metablocks that still need to be reserved */
        total = EXT4_I(inode)->i_reserved_data_blocks - used;
        mdb = ext4_calc_metadata_amount(inode, total);

        /* figure out how many metablocks to release */
        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

        if (mdb_free) {
                /* Account for allocated meta_blocks */
                mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

                /* update fs dirty blocks counter */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
                EXT4_I(inode)->i_allocated_meta_blocks = 0;
                EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        }

        /* update per-inode reservations */
        BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= used;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        /*
         * release any over-booked quota reservation for metadata blocks
         */
        if (mdb_free)
                vfs_dq_release_reservation_block(inode, mdb_free);

        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if (!total && (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
}

static int check_block_validity(struct inode *inode, sector_t logical,
                                sector_t phys, int len)
{
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
                ext4_error(inode->i_sb, "check_block_validity",
                           "inode #%lu logical block %llu mapped to %llu "
                           "(size %d)", inode->i_ino,
                           (unsigned long long) logical,
                           (unsigned long long) phys, len);
                WARN_ON(1);
                return -EIO;
        }
        return 0;
}

/*
 * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_get_blocks();
 * otherwise it calls ext4_ind_get_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped. If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (the blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
                    unsigned int max_blocks, struct buffer_head *bh,
                    int flags)
{
        int retval;

        clear_buffer_mapped(bh);
        clear_buffer_unwritten(bh);

        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                bh, 0);
        } else {
                retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
                                             bh, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        if (retval > 0 && buffer_mapped(bh)) {
                int ret = check_block_validity(inode, block,
                                               bh->b_blocknr, retval);
                if (ret != 0)
                        return ret;
        }

        /* If it is only a block(s) look up */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;

        /*
         * Return if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated,
         * ext4_ext_get_blocks() returns with create == 0
         * and the buffer head unmapped.
         */
        if (retval > 0 && buffer_mapped(bh))
                return retval;

        /*
         * When we call get_blocks without the create flag, the
         * BH_Unwritten flag could have gotten set if the blocks
         * requested were part of an uninitialized extent.  We need to
         * clear this flag now that we are committed to convert all or
         * part of the uninitialized extent to be an initialized
         * extent.  This is because we need to avoid the combination
         * of BH_Unwritten and BH_Mapped flags being simultaneously
         * set on the buffer_head.
         */
        clear_buffer_unwritten(bh);

        /*
         * Allocating new blocks and/or writing to an uninitialized extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_blocks()
         * with create == 1 flag.
         */
        down_write((&EXT4_I(inode)->i_data_sem));

        /*
         * if the caller is from the delayed allocation writeout path,
         * we have already reserved fs blocks for allocation;
         * let the underlying get_block() function know to
         * avoid double accounting
         */
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 1;
        /*
         * We need to check for EXT4 here because migrate
         * could have changed the inode type in between
         */
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                              bh, flags);
        } else {
                retval = ext4_ind_get_blocks(handle, inode, block,
                                             max_blocks, bh, flags);

                if (retval > 0 && buffer_new(bh)) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
                        EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
                                                        ~EXT4_EXT_MIGRATE;
                }
        }

        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;

        /*
         * Update reserved blocks/metadata blocks after successful
         * block allocation which had been deferred till now.
         */
        if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
                ext4_da_update_reserve_space(inode, retval);

        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && buffer_mapped(bh)) {
                int ret = check_block_validity(inode, block,
                                               bh->b_blocknr, retval);
                if (ret != 0)
                        return ret;
        }
        return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
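
/*
 * Capping one mapping at DIO_MAX_BLOCKS bounds the journal credits a single
 * direct-IO write can pin in one handle; larger requests simply come back
 * through ext4_get_block() again for the next chunk.
 */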

int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
{
        handle_t *handle = ext4_journal_current_handle();
        int ret = 0, started = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        int dio_credits;

        if (create && !handle) {
                /* Direct IO write... */
                if (max_blocks > DIO_MAX_BLOCKS)
                        max_blocks = DIO_MAX_BLOCKS;
                dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
                handle = ext4_journal_start(inode, dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }
                started = 1;
        }

        ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
                              create ? EXT4_GET_BLOCKS_CREATE : 0);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        if (started)
                ext4_journal_stop(handle);
out:
        return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int create, int *errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;
        int flags = 0;

        J_ASSERT(handle != NULL || create == 0);

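        /*
         * Map through an on-stack dummy buffer_head first; only once we
         * know the physical block number do we grab the real buffer with
         * sb_getblk() below.
         */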
        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        if (create)
                flags |= EXT4_GET_BLOCKS_CREATE;
        err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
        /*
         * ext4_get_blocks() returns number of blocks mapped. 0 in
         * case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
                        WARN_ON(1);
                err = 0;
        }
        *errp = err;
        if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (!bh) {
                        *errp = -EIO;
                        goto err;
                }
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != NULL);

                        /*
                         * Now that we do not always journal data, we should
                         * keep in mind whether this should always journal the
                         * new buffer as metadata.  For now, regular file
                         * writes use ext4_get_block instead, so it's not a
                         * problem.
                         */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext4_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                        err = ext4_handle_dirty_metadata(handle, inode, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
err:
        return NULL;
}

1373 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1374                                ext4_lblk_t block, int create, int *err)
1375 {
1376         struct buffer_head *bh;
1377
1378         bh = ext4_getblk(handle, inode, block, create, err);
1379         if (!bh)
1380                 return bh;
1381         if (buffer_uptodate(bh))
1382                 return bh;
1383         ll_rw_block(READ_META, 1, &bh);
1384         wait_on_buffer(bh);
1385         if (buffer_uptodate(bh))
1386                 return bh;
1387         put_bh(bh);
1388         *err = -EIO;
1389         return NULL;
1390 }
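
/*
 * Hedged usage sketch for ext4_bread() (the helper name is
 * hypothetical; nothing in ext4 calls it): with create == 0 no handle
 * is needed, a NULL return with *err == 0 means a hole, and the caller
 * must brelse() the buffer when done.
 */
static void ext4_example_read_block(struct inode *inode, ext4_lblk_t lblk)
{
        int err = 0;
        struct buffer_head *bh;

        bh = ext4_bread(NULL, inode, lblk, 0, &err);
        if (!bh)
                return;         /* hole (err == 0) or I/O error (err < 0) */
        /* ... use bh->b_data here ... */
        brelse(bh);
}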
1391
1392 static int walk_page_buffers(handle_t *handle,
1393                              struct buffer_head *head,
1394                              unsigned from,
1395                              unsigned to,
1396                              int *partial,
1397                              int (*fn)(handle_t *handle,
1398                                        struct buffer_head *bh))
1399 {
1400         struct buffer_head *bh;
1401         unsigned block_start, block_end;
1402         unsigned blocksize = head->b_size;
1403         int err, ret = 0;
1404         struct buffer_head *next;
1405
1406         for (bh = head, block_start = 0;
1407              ret == 0 && (bh != head || !block_start);
1408              block_start = block_end, bh = next) {
1409                 next = bh->b_this_page;
1410                 block_end = block_start + blocksize;
1411                 if (block_end <= from || block_start >= to) {
1412                         if (partial && !buffer_uptodate(bh))
1413                                 *partial = 1;
1414                         continue;
1415                 }
1416                 err = (*fn)(handle, bh);
1417                 if (!ret)
1418                         ret = err;
1419         }
1420         return ret;
1421 }
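
/*
 * Illustrative only: walk_page_buffers() applies the callback to every
 * buffer_head overlapping [from, to) in the page and stops at the
 * first nonzero return.  A trivial callback in the style of
 * do_journal_get_write_access() below might look like this
 * (hypothetical, unused by ext4):
 */
static int example_fail_on_unmapped(handle_t *handle, struct buffer_head *bh)
{
        /* a nonzero return value aborts the walk */
        return buffer_mapped(bh) ? 0 : -EIO;
}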
1422
1423 /*
1424  * To preserve ordering, it is essential that the hole instantiation and
1425  * the data write be encapsulated in a single transaction.  We cannot
1426  * close off a transaction and start a new one between the ext4_get_block()
1427  * and the commit_write().  So doing the jbd2_journal_start at the start of
1428  * prepare_write() is the right place.
1429  *
1430  * Also, this function can nest inside ext4_writepage() ->
1431  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1432  * has generated enough buffer credits to do the whole page.  So we won't
1433  * block on the journal in that case, which is good, because the caller may
1434  * be PF_MEMALLOC.
1435  *
1436  * By accident, ext4 can be reentered when a transaction is open via
1437  * quota file writes.  If we were to commit the transaction while thus
1438  * reentered, there can be a deadlock - we would be holding a quota
1439  * lock, and the commit would never complete if another thread had a
1440  * transaction open and was blocking on the quota lock - a ranking
1441  * violation.
1442  *
1443  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1444  * will _not_ run commit under these circumstances because handle->h_ref
1445  * is elevated.  We'll still have enough credits for the tiny quotafile
1446  * write.
1447  */
1448 static int do_journal_get_write_access(handle_t *handle,
1449                                        struct buffer_head *bh)
1450 {
1451         if (!buffer_mapped(bh) || buffer_freed(bh))
1452                 return 0;
1453         return ext4_journal_get_write_access(handle, bh);
1454 }
1455
1456 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1457                             loff_t pos, unsigned len, unsigned flags,
1458                             struct page **pagep, void **fsdata)
1459 {
1460         struct inode *inode = mapping->host;
1461         int ret, needed_blocks;
1462         handle_t *handle;
1463         int retries = 0;
1464         struct page *page;
1465         pgoff_t index;
1466         unsigned from, to;
1467
1468         trace_ext4_write_begin(inode, pos, len, flags);
1469         /*
1470          * Reserve one more block for addition to the orphan list in case
1471          * we allocate blocks but the write fails for some reason.
1472          */
1473         needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1474         index = pos >> PAGE_CACHE_SHIFT;
1475         from = pos & (PAGE_CACHE_SIZE - 1);
1476         to = from + len;
1477
1478 retry:
1479         handle = ext4_journal_start(inode, needed_blocks);
1480         if (IS_ERR(handle)) {
1481                 ret = PTR_ERR(handle);
1482                 goto out;
1483         }
1484
1485         /* We cannot recurse into the filesystem as the transaction is already
1486          * started */
1487         flags |= AOP_FLAG_NOFS;
1488
1489         page = grab_cache_page_write_begin(mapping, index, flags);
1490         if (!page) {
1491                 ext4_journal_stop(handle);
1492                 ret = -ENOMEM;
1493                 goto out;
1494         }
1495         *pagep = page;
1496
1497         ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1498                                 ext4_get_block);
1499
1500         if (!ret && ext4_should_journal_data(inode)) {
1501                 ret = walk_page_buffers(handle, page_buffers(page),
1502                                 from, to, NULL, do_journal_get_write_access);
1503         }
1504
1505         if (ret) {
1506                 unlock_page(page);
1507                 page_cache_release(page);
1508                 /*
1509                  * block_write_begin may have instantiated a few blocks
1510                  * outside i_size.  Trim these off again. Don't need
1511                  * i_size_read because we hold i_mutex.
1512                  *
1513                  * Add inode to orphan list in case we crash before
1514                  * truncate finishes
1515                  */
1516                 if (pos + len > inode->i_size)
1517                         ext4_orphan_add(handle, inode);
1518
1519                 ext4_journal_stop(handle);
1520                 if (pos + len > inode->i_size) {
1521                         vmtruncate(inode, inode->i_size);
1522                         /*
1523                          * If vmtruncate failed early the inode might
1524                          * still be on the orphan list; we need to
1525                          * make sure the inode is removed from the
1526                          * orphan list in that case.
1527                          */
1528                         if (inode->i_nlink)
1529                                 ext4_orphan_del(NULL, inode);
1530                 }
1531         }
1532
1533         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1534                 goto retry;
1535 out:
1536         return ret;
1537 }
1538
1539 /* For write_end() in data=journal mode */
1540 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1541 {
1542         if (!buffer_mapped(bh) || buffer_freed(bh))
1543                 return 0;
1544         set_buffer_uptodate(bh);
1545         return ext4_handle_dirty_metadata(handle, NULL, bh);
1546 }
1547
1548 static int ext4_generic_write_end(struct file *file,
1549                                   struct address_space *mapping,
1550                                   loff_t pos, unsigned len, unsigned copied,
1551                                   struct page *page, void *fsdata)
1552 {
1553         int i_size_changed = 0;
1554         struct inode *inode = mapping->host;
1555         handle_t *handle = ext4_journal_current_handle();
1556
1557         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1558
1559         /*
1560          * No need to use i_size_read() here, the i_size
1561          * cannot change under us because we hold i_mutex.
1562          *
1563          * But it's important to update i_size while still holding page lock:
1564          * page writeout could otherwise come in and zero beyond i_size.
1565          */
1566         if (pos + copied > inode->i_size) {
1567                 i_size_write(inode, pos + copied);
1568                 i_size_changed = 1;
1569         }
1570
1571         if (pos + copied >  EXT4_I(inode)->i_disksize) {
1572                 /* We need to mark the inode dirty even if
1573                  * new_i_size is less than inode->i_size
1574                  * but greater than i_disksize (hint: delalloc)
1575                  */
1576                 ext4_update_i_disksize(inode, (pos + copied));
1577                 i_size_changed = 1;
1578         }
1579         unlock_page(page);
1580         page_cache_release(page);
1581
1582         /*
1583          * Don't mark the inode dirty under page lock. First, it unnecessarily
1584          * makes the holding time of page lock longer. Second, it forces lock
1585          * ordering of page lock and transaction start for journaling
1586          * filesystems.
1587          */
1588         if (i_size_changed)
1589                 ext4_mark_inode_dirty(handle, inode);
1590
1591         return copied;
1592 }
1593
1594 /*
1595  * We need to pick up the new inode size which generic_commit_write gave us.
1596  * `file' can be NULL - eg, when called from page_symlink().
1597  *
1598  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1599  * buffers are managed internally.
1600  */
1601 static int ext4_ordered_write_end(struct file *file,
1602                                   struct address_space *mapping,
1603                                   loff_t pos, unsigned len, unsigned copied,
1604                                   struct page *page, void *fsdata)
1605 {
1606         handle_t *handle = ext4_journal_current_handle();
1607         struct inode *inode = mapping->host;
1608         int ret = 0, ret2;
1609
1610         trace_ext4_ordered_write_end(inode, pos, len, copied);
1611         ret = ext4_jbd2_file_inode(handle, inode);
1612
1613         if (ret == 0) {
1614                 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1615                                                         page, fsdata);
1616                 copied = ret2;
1617                 if (pos + len > inode->i_size)
1618                         /* If we have allocated more blocks and copied
1619                          * less, we will have blocks allocated outside
1620                          * inode->i_size, so truncate them.
1621                          */
1622                         ext4_orphan_add(handle, inode);
1623                 if (ret2 < 0)
1624                         ret = ret2;
1625         }
1626         ret2 = ext4_journal_stop(handle);
1627         if (!ret)
1628                 ret = ret2;
1629
1630         if (pos + len > inode->i_size) {
1631                 vmtruncate(inode, inode->i_size);
1632                 /*
1633                  * If vmtruncate failed early the inode might still be
1634                  * on the orphan list; we need to make sure the inode
1635                  * is removed from the orphan list in that case.
1636                  */
1637                 if (inode->i_nlink)
1638                         ext4_orphan_del(NULL, inode);
1639         }
1640
1641
1642         return ret ? ret : copied;
1643 }
1644
1645 static int ext4_writeback_write_end(struct file *file,
1646                                     struct address_space *mapping,
1647                                     loff_t pos, unsigned len, unsigned copied,
1648                                     struct page *page, void *fsdata)
1649 {
1650         handle_t *handle = ext4_journal_current_handle();
1651         struct inode *inode = mapping->host;
1652         int ret = 0, ret2;
1653
1654         trace_ext4_writeback_write_end(inode, pos, len, copied);
1655         ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1656                                                         page, fsdata);
1657         copied = ret2;
1658         if (pos + len > inode->i_size)
1659                 /* If we have allocated more blocks and copied
1660                  * less, we will have blocks allocated outside
1661                  * inode->i_size, so truncate them.
1662                  */
1663                 ext4_orphan_add(handle, inode);
1664
1665         if (ret2 < 0)
1666                 ret = ret2;
1667
1668         ret2 = ext4_journal_stop(handle);
1669         if (!ret)
1670                 ret = ret2;
1671
1672         if (pos + len > inode->i_size) {
1673                 vmtruncate(inode, inode->i_size);
1674                 /*
1675                  * If vmtruncate failed early the inode might still be
1676                  * on the orphan list; we need to make sure the inode
1677                  * is removed from the orphan list in that case.
1678                  */
1679                 if (inode->i_nlink)
1680                         ext4_orphan_del(NULL, inode);
1681         }
1682
1683         return ret ? ret : copied;
1684 }
1685
1686 static int ext4_journalled_write_end(struct file *file,
1687                                      struct address_space *mapping,
1688                                      loff_t pos, unsigned len, unsigned copied,
1689                                      struct page *page, void *fsdata)
1690 {
1691         handle_t *handle = ext4_journal_current_handle();
1692         struct inode *inode = mapping->host;
1693         int ret = 0, ret2;
1694         int partial = 0;
1695         unsigned from, to;
1696         loff_t new_i_size;
1697
1698         trace_ext4_journalled_write_end(inode, pos, len, copied);
1699         from = pos & (PAGE_CACHE_SIZE - 1);
1700         to = from + len;
1701
1702         if (copied < len) {
1703                 if (!PageUptodate(page))
1704                         copied = 0;
1705                 page_zero_new_buffers(page, from+copied, to);
1706         }
1707
1708         ret = walk_page_buffers(handle, page_buffers(page), from,
1709                                 to, &partial, write_end_fn);
1710         if (!partial)
1711                 SetPageUptodate(page);
1712         new_i_size = pos + copied;
1713         if (new_i_size > inode->i_size)
1714                 i_size_write(inode, pos+copied);
1715         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1716         if (new_i_size > EXT4_I(inode)->i_disksize) {
1717                 ext4_update_i_disksize(inode, new_i_size);
1718                 ret2 = ext4_mark_inode_dirty(handle, inode);
1719                 if (!ret)
1720                         ret = ret2;
1721         }
1722
1723         unlock_page(page);
1724         page_cache_release(page);
1725         if (pos + len > inode->i_size)
1726                 /* If we have allocated more blocks and copied
1727                  * less, we will have blocks allocated outside
1728                  * inode->i_size, so truncate them.
1729                  */
1730                 ext4_orphan_add(handle, inode);
1731
1732         ret2 = ext4_journal_stop(handle);
1733         if (!ret)
1734                 ret = ret2;
1735         if (pos + len > inode->i_size) {
1736                 vmtruncate(inode, inode->i_size);
1737                 /*
1738                  * If vmtruncate failed early the inode might still be
1739                  * on the orphan list; we need to make sure the inode
1740                  * is removed from the orphan list in that case.
1741                  */
1742                 if (inode->i_nlink)
1743                         ext4_orphan_del(NULL, inode);
1744         }
1745
1746         return ret ? ret : copied;
1747 }
1748
1749 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1750 {
1751         int retries = 0;
1752         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1753         unsigned long md_needed, mdblocks, total = 0;
1754
1755         /*
1756          * Recalculate the number of metadata blocks to reserve
1757          * in order to allocate nrblocks.
1758          * The worst case is one extent per block.
1759          */
1760 repeat:
1761         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1762         total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
1763         mdblocks = ext4_calc_metadata_amount(inode, total);
1764         BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
1765
1766         md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1767         total = md_needed + nrblocks;
1768
1769         /*
1770          * Make quota reservation here to prevent quota overflow
1771          * later. Real quota accounting is done at pages writeout
1772          * time.
1773          */
1774         if (vfs_dq_reserve_block(inode, total)) {
1775                 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1776                 return -EDQUOT;
1777         }
1778
1779         if (ext4_claim_free_blocks(sbi, total)) {
1780                 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1781                 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1782                         yield();
1783                         goto repeat;
1784                 }
1785                 vfs_dq_release_reservation_block(inode, total);
1786                 return -ENOSPC;
1787         }
1788         EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
1789         EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
1790
1791         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1792         return 0;       /* success */
1793 }
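
/*
 * A hedged sketch (hypothetical helper) of the reservation contract:
 * every successful ext4_da_reserve_space() must later be balanced,
 * either by the real allocation at writeout time or by
 * ext4_da_release_space() when the dirty data is thrown away.
 */
static int ext4_example_reserve_one(struct inode *inode)
{
        int ret = ext4_da_reserve_space(inode, 1);

        if (ret)
                return ret;     /* -ENOSPC or -EDQUOT: nothing was reserved */
        /*
         * ... mark the buffer delayed here; if the page is later
         * invalidated instead of written, undo the reservation with
         * ext4_da_release_space(inode, 1).
         */
        return 0;
}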
1794
1795 static void ext4_da_release_space(struct inode *inode, int to_free)
1796 {
1797         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1798         int total, mdb, mdb_free, release;
1799
1800         if (!to_free)
1801                 return;         /* Nothing to release, exit */
1802
1803         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1804
1805         if (!EXT4_I(inode)->i_reserved_data_blocks) {
1806                 /*
1807                  * If there are no reserved blocks, but we try to free some,
1808                  * then the counter is messed up somewhere.
1809                  * But since this function is called from the invalidate
1810                  * page path, it is harmless to return without any action.
1811                  */
1812                 printk(KERN_INFO "ext4 delalloc try to release %d reserved "
1813                             "blocks for inode %lu, but there is no reserved "
1814                             "data blocks\n", to_free, inode->i_ino);
1815                 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1816                 return;
1817         }
1818
1819         /* recalculate the number of metadata blocks still to be reserved */
1820         total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
1821         mdb = ext4_calc_metadata_amount(inode, total);
1822
1823         /* figure out how many metablocks to release */
1824         BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1825         mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1826
1827         release = to_free + mdb_free;
1828
1829         /* update fs dirty blocks counter for truncate case */
1830         percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
1831
1832         /* update per-inode reservations */
1833         BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
1834         EXT4_I(inode)->i_reserved_data_blocks -= to_free;
1835
1836         BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1837         EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1838         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1839
1840         vfs_dq_release_reservation_block(inode, release);
1841 }
1842
1843 static void ext4_da_page_release_reservation(struct page *page,
1844                                              unsigned long offset)
1845 {
1846         int to_release = 0;
1847         struct buffer_head *head, *bh;
1848         unsigned int curr_off = 0;
1849
1850         head = page_buffers(page);
1851         bh = head;
1852         do {
1853                 unsigned int next_off = curr_off + bh->b_size;
1854
1855                 if ((offset <= curr_off) && (buffer_delay(bh))) {
1856                         to_release++;
1857                         clear_buffer_delay(bh);
1858                 }
1859                 curr_off = next_off;
1860         } while ((bh = bh->b_this_page) != head);
1861         ext4_da_release_space(page->mapping->host, to_release);
1862 }
1863
1864 /*
1865  * Delayed allocation stuff
1866  */
1867
1868 struct mpage_da_data {
1869         struct inode *inode;
1870         sector_t b_blocknr;             /* start block number of extent */
1871         size_t b_size;                  /* size of extent */
1872         unsigned long b_state;          /* state of the extent */
1873         unsigned long first_page, next_page;    /* extent of pages */
1874         struct writeback_control *wbc;
1875         int io_done;
1876         int pages_written;
1877         int retval;
1878 };
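
/*
 * Illustrative only (hypothetical helper): resetting the walk state,
 * mirroring what ext4_da_writepages() below does before each pass.
 * b_size == 0 means "no block extent accumulated" and
 * first_page == next_page == 0 means "no page extent yet".
 */
static inline void example_mpd_reset(struct mpage_da_data *mpd)
{
        mpd->b_size = 0;                /* empty block extent */
        mpd->b_state = 0;
        mpd->b_blocknr = 0;
        mpd->first_page = 0;            /* empty page range */
        mpd->next_page = 0;
        mpd->io_done = 0;               /* no I/O submitted yet */
}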
1879
1880 /*
1881  * mpage_da_submit_io - walks through an extent of pages and tries to
1882  * write them with the writepage() callback
1883  *
1884  * @mpd->inode: inode
1885  * @mpd->first_page: first page of the extent
1886  * @mpd->next_page: page after the last page of the extent
1887  *
1888  * By the time mpage_da_submit_io() is called we expect all blocks
1889  * to be allocated.  This may be wrong if allocation failed.
1890  *
1891  * As pages are already locked by write_cache_pages(), we can't use it
1892  */
1893 static int mpage_da_submit_io(struct mpage_da_data *mpd)
1894 {
1895         long pages_skipped;
1896         struct pagevec pvec;
1897         unsigned long index, end;
1898         int ret = 0, err, nr_pages, i;
1899         struct inode *inode = mpd->inode;
1900         struct address_space *mapping = inode->i_mapping;
1901
1902         BUG_ON(mpd->next_page <= mpd->first_page);
1903         /*
1904          * We need to start from the first_page to the next_page - 1
1905          * to make sure we also write the mapped dirty buffer_heads.
1906          * If we look at mpd->b_blocknr we would only be looking
1907          * at the currently mapped buffer_heads.
1908          */
1909         index = mpd->first_page;
1910         end = mpd->next_page - 1;
1911
1912         pagevec_init(&pvec, 0);
1913         while (index <= end) {
1914                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1915                 if (nr_pages == 0)
1916                         break;
1917                 for (i = 0; i < nr_pages; i++) {
1918                         struct page *page = pvec.pages[i];
1919
1920                         index = page->index;
1921                         if (index > end)
1922                                 break;
1923                         index++;
1924
1925                         BUG_ON(!PageLocked(page));
1926                         BUG_ON(PageWriteback(page));
1927
1928                         pages_skipped = mpd->wbc->pages_skipped;
1929                         err = mapping->a_ops->writepage(page, mpd->wbc);
1930                         if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1931                                 /*
1932                                  * We have successfully written the page
1933                                  * without it being skipped.
1934                                  */
1935                                 mpd->pages_written++;
1936                         /*
1937                          * In error case, we have to continue because
1938                          * remaining pages are still locked
1939                          * XXX: unlock and re-dirty them?
1940                          */
1941                         if (ret == 0)
1942                                 ret = err;
1943                 }
1944                 pagevec_release(&pvec);
1945         }
1946         return ret;
1947 }
1948
1949 /*
1950  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
1951  *
1952  * @mpd->inode - inode to walk through
1953  * @exbh->b_blocknr - first block on a disk
1954  * @exbh->b_size - amount of space in bytes
1955  * @logical - first logical block to start assignment with
1956  *
1957  * The function goes through all the passed space and puts the actual
1958  * disk block numbers into buffer heads, dropping BH_Delay and BH_Unwritten.
1959  */
1960 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
1961                                  struct buffer_head *exbh)
1962 {
1963         struct inode *inode = mpd->inode;
1964         struct address_space *mapping = inode->i_mapping;
1965         int blocks = exbh->b_size >> inode->i_blkbits;
1966         sector_t pblock = exbh->b_blocknr, cur_logical;
1967         struct buffer_head *head, *bh;
1968         pgoff_t index, end;
1969         struct pagevec pvec;
1970         int nr_pages, i;
1971
1972         index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1973         end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1974         cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1975
1976         pagevec_init(&pvec, 0);
1977
1978         while (index <= end) {
1979                 /* XXX: optimize tail */
1980                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1981                 if (nr_pages == 0)
1982                         break;
1983                 for (i = 0; i < nr_pages; i++) {
1984                         struct page *page = pvec.pages[i];
1985
1986                         index = page->index;
1987                         if (index > end)
1988                                 break;
1989                         index++;
1990
1991                         BUG_ON(!PageLocked(page));
1992                         BUG_ON(PageWriteback(page));
1993                         BUG_ON(!page_has_buffers(page));
1994
1995                         bh = page_buffers(page);
1996                         head = bh;
1997
1998                         /* skip blocks out of the range */
1999                         do {
2000                                 if (cur_logical >= logical)
2001                                         break;
2002                                 cur_logical++;
2003                         } while ((bh = bh->b_this_page) != head);
2004
2005                         do {
2006                                 if (cur_logical >= logical + blocks)
2007                                         break;
2008
2009                                 if (buffer_delay(bh) ||
2010                                                 buffer_unwritten(bh)) {
2011
2012                                         BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
2013
2014                                         if (buffer_delay(bh)) {
2015                                                 clear_buffer_delay(bh);
2016                                                 bh->b_blocknr = pblock;
2017                                         } else {
2018                                                 /*
2019                                                  * An unwritten buffer should already
2020                                                  * have a blocknr assigned; verify that.
2021                                                  */
2022                                                 clear_buffer_unwritten(bh);
2023                                                 BUG_ON(bh->b_blocknr != pblock);
2024                                         }
2025
2026                                 } else if (buffer_mapped(bh))
2027                                         BUG_ON(bh->b_blocknr != pblock);
2028
2029                                 cur_logical++;
2030                                 pblock++;
2031                         } while ((bh = bh->b_this_page) != head);
2032                 }
2033                 pagevec_release(&pvec);
2034         }
2035 }
2036
2037
2038 /*
2039  * __unmap_underlying_blocks - just a helper function to unmap
2040  * a set of blocks described by @bh
2041  */
2042 static inline void __unmap_underlying_blocks(struct inode *inode,
2043                                              struct buffer_head *bh)
2044 {
2045         struct block_device *bdev = inode->i_sb->s_bdev;
2046         int blocks, i;
2047
2048         blocks = bh->b_size >> inode->i_blkbits;
2049         for (i = 0; i < blocks; i++)
2050                 unmap_underlying_metadata(bdev, bh->b_blocknr + i);
2051 }
2052
2053 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2054                                         sector_t logical, long blk_cnt)
2055 {
2056         int nr_pages, i;
2057         pgoff_t index, end;
2058         struct pagevec pvec;
2059         struct inode *inode = mpd->inode;
2060         struct address_space *mapping = inode->i_mapping;
2061
2062         index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2063         end   = (logical + blk_cnt - 1) >>
2064                                 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2065         while (index <= end) {
2066                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2067                 if (nr_pages == 0)
2068                         break;
2069                 for (i = 0; i < nr_pages; i++) {
2070                         struct page *page = pvec.pages[i];
2071                         index = page->index;
2072                         if (index > end)
2073                                 break;
2074                         index++;
2075
2076                         BUG_ON(!PageLocked(page));
2077                         BUG_ON(PageWriteback(page));
2078                         block_invalidatepage(page, 0);
2079                         ClearPageUptodate(page);
2080                         unlock_page(page);
2081                 }
2082         }
2083         return;
2084 }
2085
2086 static void ext4_print_free_blocks(struct inode *inode)
2087 {
2088         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2089         printk(KERN_EMERG "Total free blocks count %lld\n",
2090                         ext4_count_free_blocks(inode->i_sb));
2091         printk(KERN_EMERG "Free/Dirty block details\n");
2092         printk(KERN_EMERG "free_blocks=%lld\n",
2093                         (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
2094         printk(KERN_EMERG "dirty_blocks=%lld\n",
2095                         (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2096         printk(KERN_EMERG "Block reservation details\n");
2097         printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
2098                         EXT4_I(inode)->i_reserved_data_blocks);
2099         printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
2100                         EXT4_I(inode)->i_reserved_meta_blocks);
2101         return;
2102 }
2103
2104 /*
2105  * mpage_da_map_blocks - go through given space
2106  *
2107  * @mpd - bh describing space
2108  *
2109  * The function skips space we know is already mapped to disk blocks.
2110  *
2111  */
2112 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2113 {
2114         int err, blks, get_blocks_flags;
2115         struct buffer_head new;
2116         sector_t next = mpd->b_blocknr;
2117         unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2118         loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2119         handle_t *handle = NULL;
2120
2121         /*
2122          * We consider only non-mapped and non-allocated blocks
2123          */
2124         if ((mpd->b_state  & (1 << BH_Mapped)) &&
2125                 !(mpd->b_state & (1 << BH_Delay)) &&
2126                 !(mpd->b_state & (1 << BH_Unwritten)))
2127                 return 0;
2128
2129         /*
2130          * If we didn't accumulate anything to write, simply return
2131          */
2132         if (!mpd->b_size)
2133                 return 0;
2134
2135         handle = ext4_journal_current_handle();
2136         BUG_ON(!handle);
2137
2138         /*
2139          * Call ext4_get_blocks() to allocate any delayed allocation
2140          * blocks, or to convert an uninitialized extent to be
2141          * initialized (in the case where we have written into
2142          * one or more preallocated blocks).
2143          *
2144          * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2145          * indicate that we are on the delayed allocation path.  This
2146          * affects functions in many different parts of the allocation
2147          * call path.  This flag exists primarily because we don't
2148          * want to change *many* call functions, so ext4_get_blocks()
2149          * will set the magic i_delalloc_reserved_flag once the
2150          * inode's allocation semaphore is taken.
2151          *
2152          * If the blocks in question were delalloc blocks, set
2153          * EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE so the delalloc accounting
2154          * variables are updated after the blocks have been allocated.
2155          */
2156         new.b_state = 0;
2157         get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
2158                             EXT4_GET_BLOCKS_DELALLOC_RESERVE);
2159         if (mpd->b_state & (1 << BH_Delay))
2160                 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
2161         blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
2162                                &new, get_blocks_flags);
2163         if (blks < 0) {
2164                 err = blks;
2165                 /*
2166                  * If ext4_get_blocks() returns an error, we simply
2167                  * return. Later writepage will redirty the page and
2168                  * writepages will find the dirty page again
2169                  */
2170                 if (err == -EAGAIN)
2171                         return 0;
2172
2173                 if (err == -ENOSPC &&
2174                     ext4_count_free_blocks(mpd->inode->i_sb)) {
2175                         mpd->retval = err;
2176                         return 0;
2177                 }
2178
2179                 /*
2180                  * get block failure will cause us to loop in
2181                  * writepages, because a_ops->writepage won't be able
2182                  * to make progress. The page will be redirtied by
2183                  * writepage and writepages will again try to write
2184                  * the same.
2185                  */
2186                 printk(KERN_EMERG "%s block allocation failed for inode %lu "
2187                                   "at logical offset %llu with max blocks "
2188                                   "%zd with error %d\n",
2189                                   __func__, mpd->inode->i_ino,
2190                                   (unsigned long long)next,
2191                                   mpd->b_size >> mpd->inode->i_blkbits, err);
2192                 printk(KERN_EMERG "This should not happen.!! "
2193                                         "Data will be lost\n");
2194                 if (err == -ENOSPC) {
2195                         ext4_print_free_blocks(mpd->inode);
2196                 }
2197                 /* invalidate all the pages */
2198                 ext4_da_block_invalidatepages(mpd, next,
2199                                 mpd->b_size >> mpd->inode->i_blkbits);
2200                 return err;
2201         }
2202         BUG_ON(blks == 0);
2203
2204         new.b_size = (blks << mpd->inode->i_blkbits);
2205
2206         if (buffer_new(&new))
2207                 __unmap_underlying_blocks(mpd->inode, &new);
2208
2209         /*
2210          * If the blocks are marked delayed, we need to put in
2211          * the actual block numbers and drop the delayed bit.
2212          */
2213         if ((mpd->b_state & (1 << BH_Delay)) ||
2214             (mpd->b_state & (1 << BH_Unwritten)))
2215                 mpage_put_bnr_to_bhs(mpd, next, &new);
2216
2217         if (ext4_should_order_data(mpd->inode)) {
2218                 err = ext4_jbd2_file_inode(handle, mpd->inode);
2219                 if (err)
2220                         return err;
2221         }
2222
2223         /*
2224          * Update on-disk size along with block allocation.
2225          */
2226         disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2227         if (disksize > i_size_read(mpd->inode))
2228                 disksize = i_size_read(mpd->inode);
2229         if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2230                 ext4_update_i_disksize(mpd->inode, disksize);
2231                 return ext4_mark_inode_dirty(handle, mpd->inode);
2232         }
2233
2234         return 0;
2235 }
2236
2237 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2238                 (1 << BH_Delay) | (1 << BH_Unwritten))
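
/*
 * Illustrative only: BH_FLAGS masks the buffer state bits that matter
 * when deciding whether two buffers belong in the same extent; see the
 * merge test in mpage_add_bh_to_extent() below.  Hypothetical helper:
 */
static inline int example_bh_states_match(unsigned long a, unsigned long b)
{
        return (a & BH_FLAGS) == (b & BH_FLAGS);
}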
2239
2240 /*
2241  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2242  *
2243  * @mpd - extent of blocks (b_blocknr, b_size, b_state)
2244  * @logical - logical number of the block in the file
2245  * @bh - bh of the block (used to access block's state)
2246  *
2247  * The function is used to collect contiguous blocks in the same state.
2248  */
2249 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2250                                    sector_t logical, size_t b_size,
2251                                    unsigned long b_state)
2252 {
2253         sector_t next;
2254         int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2255
2256         /* check if the reserved journal credits might overflow */
2257         if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
2258                 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2259                         /*
2260                          * With non-extent format we are limited by the journal
2261                          * credit available.  Total credit needed to insert
2262                          * nrblocks contiguous blocks is dependent on the
2263                          * nrblocks.  So limit nrblocks.
2264                          */
2265                         goto flush_it;
2266                 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2267                                 EXT4_MAX_TRANS_DATA) {
2268                         /*
2269                          * Adding the new buffer_head would make it cross the
2270                          * allowed limit for which we have journal credit
2271                          * reserved. So limit the new bh->b_size
2272                          */
2273                         b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2274                                                 mpd->inode->i_blkbits;
2275                         /* we will do mpage_da_submit_io in the next loop */
2276                 }
2277         }
2278         /*
2279          * First block in the extent
2280          */
2281         if (mpd->b_size == 0) {
2282                 mpd->b_blocknr = logical;
2283                 mpd->b_size = b_size;
2284                 mpd->b_state = b_state & BH_FLAGS;
2285                 return;
2286         }
2287
2288         next = mpd->b_blocknr + nrblocks;
2289         /*
2290          * Can we merge the block into our big extent?
2291          */
2292         if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2293                 mpd->b_size += b_size;
2294                 return;
2295         }
2296
2297 flush_it:
2298         /*
2299          * We couldn't merge the block into our extent, so we
2300          * need to flush the current extent and start a new one.
2301          */
2302         if (mpage_da_map_blocks(mpd) == 0)
2303                 mpage_da_submit_io(mpd);
2304         mpd->io_done = 1;
2305         return;
2306 }
2307
2308 static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
2309 {
2310         /*
2311          * An unmapped buffer is possible for holes.
2312          * A delayed buffer is possible with delayed allocation.
2313          * We also need to consider an unwritten buffer as unmapped.
2314          */
2315         return (!buffer_mapped(bh) || buffer_delay(bh) ||
2316                                 buffer_unwritten(bh)) && buffer_dirty(bh);
2317 }
2318
2319 /*
2320  * __mpage_da_writepage - finds extent of pages and blocks
2321  *
2322  * @page: page to consider
2323  * @wbc: not used, we just follow rules
2324  * @data: context
2325  *
2326  * The function finds extents of pages and scans them for all blocks.
2327  */
2328 static int __mpage_da_writepage(struct page *page,
2329                                 struct writeback_control *wbc, void *data)
2330 {
2331         struct mpage_da_data *mpd = data;
2332         struct inode *inode = mpd->inode;
2333         struct buffer_head *bh, *head;
2334         sector_t logical;
2335
2336         if (mpd->io_done) {
2337                 /*
2338                  * Redirty and skip the rest of the pages
2339                  * in the page_vec.  We will try to write
2340                  * them again after starting a new
2341                  * transaction.
2342                  */
2343                 redirty_page_for_writepage(wbc, page);
2344                 unlock_page(page);
2345                 return MPAGE_DA_EXTENT_TAIL;
2346         }
2347         /*
2348          * Can we merge this page into the current extent?
2349          */
2350         if (mpd->next_page != page->index) {
2351                 /*
2352                  * Nope, we can't. So, we map non-allocated blocks
2353                  * and start IO on them using writepage()
2354                  */
2355                 if (mpd->next_page != mpd->first_page) {
2356                         if (mpage_da_map_blocks(mpd) == 0)
2357                                 mpage_da_submit_io(mpd);
2358                         /*
2359                          * skip rest of the page in the page_vec
2360                          */
2361                         mpd->io_done = 1;
2362                         redirty_page_for_writepage(wbc, page);
2363                         unlock_page(page);
2364                         return MPAGE_DA_EXTENT_TAIL;
2365                 }
2366
2367                 /*
2368                  * Start next extent of pages ...
2369                  */
2370                 mpd->first_page = page->index;
2371
2372                 /*
2373                  * ... and blocks
2374                  */
2375                 mpd->b_size = 0;
2376                 mpd->b_state = 0;
2377                 mpd->b_blocknr = 0;
2378         }
2379
2380         mpd->next_page = page->index + 1;
2381         logical = (sector_t) page->index <<
2382                   (PAGE_CACHE_SHIFT - inode->i_blkbits);
2383
2384         if (!page_has_buffers(page)) {
2385                 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2386                                        (1 << BH_Dirty) | (1 << BH_Uptodate));
2387                 if (mpd->io_done)
2388                         return MPAGE_DA_EXTENT_TAIL;
2389         } else {
2390                 /*
2391                  * Page with regular buffer heads, just add all dirty ones
2392                  */
2393                 head = page_buffers(page);
2394                 bh = head;
2395                 do {
2396                         BUG_ON(buffer_locked(bh));
2397                         /*
2398                          * We need to try to allocate
2399                          * unmapped blocks in the same page.
2400                          * Otherwise we won't make progress
2401                          * with the page in ext4_da_writepage
2402                          */
2403                         if (ext4_bh_unmapped_or_delay(NULL, bh)) {
2404                                 mpage_add_bh_to_extent(mpd, logical,
2405                                                        bh->b_size,
2406                                                        bh->b_state);
2407                                 if (mpd->io_done)
2408                                         return MPAGE_DA_EXTENT_TAIL;
2409                         } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2410                                 /*
2411                                  * mapped dirty buffer. We need to update
2412                                  * the b_state because we look at
2413                                  * b_state in mpage_da_map_blocks. We don't
2414                                  * update b_size because if we find an
2415                                  * unmapped buffer_head later we need to
2416                                  * use the b_state flag of that buffer_head.
2417                                  */
2418                                 if (mpd->b_size == 0)
2419                                         mpd->b_state = bh->b_state & BH_FLAGS;
2420                         }
2421                         logical++;
2422                 } while ((bh = bh->b_this_page) != head);
2423         }
2424
2425         return 0;
2426 }
2427
2428 /*
2429  * This is a special get_blocks_t callback which is used by
2430  * ext4_da_write_begin().  It will either return a mapped block or
2431  * reserve space for a single block.
2432  *
2433  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2434  * We also have b_blocknr = -1 and b_bdev initialized properly
2435  *
2436  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2437  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2438  * initialized properly.
2439  */
2440 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2441                                   struct buffer_head *bh_result, int create)
2442 {
2443         int ret = 0;
2444         sector_t invalid_block = ~((sector_t) 0xffff);
2445
2446         if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2447                 invalid_block = ~0;
2448
2449         BUG_ON(create == 0);
2450         BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2451
2452         /*
2453          * First, we need to know whether the block is already allocated;
2454          * preallocated blocks are unmapped but should be treated
2455          * the same as allocated blocks.
2456          */
2457         ret = ext4_get_blocks(NULL, inode, iblock, 1,  bh_result, 0);
2458         if ((ret == 0) && !buffer_delay(bh_result)) {
2459                 /* the block isn't (pre)allocated yet, let's reserve space */
2460                 /*
2461                  * XXX: __block_prepare_write() unmaps passed block,
2462                  * is it OK?
2463                  */
2464                 ret = ext4_da_reserve_space(inode, 1);
2465                 if (ret)
2466                         /* not enough space to reserve */
2467                         return ret;
2468
2469                 map_bh(bh_result, inode->i_sb, invalid_block);
2470                 set_buffer_new(bh_result);
2471                 set_buffer_delay(bh_result);
2472         } else if (ret > 0) {
2473                 bh_result->b_size = (ret << inode->i_blkbits);
2474                 if (buffer_unwritten(bh_result)) {
2475                         /* A delayed write to unwritten bh should
2476                          * be marked new and mapped.  Mapped ensures
2477                          * that we don't do get_block multiple times
2478                          * when we write to the same offset and new
2479                          * ensures that we do proper zero out for
2480                          * partial write.
2481                          */
2482                         set_buffer_new(bh_result);
2483                         set_buffer_mapped(bh_result);
2484                 }
2485                 ret = 0;
2486         }
2487
2488         return ret;
2489 }
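
/*
 * Hedged illustration (hypothetical helper): ext4_da_get_block_prep()
 * is meant to be handed to block_write_begin() as the get_block_t
 * callback on the delalloc write path, just as ext4_write_begin()
 * above passes ext4_get_block:
 */
static int example_da_write_begin(struct file *file,
                                  struct address_space *mapping,
                                  loff_t pos, unsigned len, unsigned flags,
                                  struct page **pagep, void **fsdata)
{
        /* reserve (or map) one block at a time through the callback */
        return block_write_begin(file, mapping, pos, len, flags,
                                 pagep, fsdata, ext4_da_get_block_prep);
}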
2490
2491 /*
2492  * This function is used as a standard get_block_t callback function
2493  * when there is no desire to allocate any blocks.  It is used as a
2494  * callback function for block_prepare_write(), nobh_writepage(), and
2495  * block_write_full_page().  These functions should only try to map a
2496  * single block at a time.
2497  *
2498  * Since this function doesn't do block allocations even if the caller
2499  * requests it by passing in create=1, it is critically important that
2500  * any caller checks to make sure that any buffer heads returned
2501  * by this function are either all already mapped or marked for
2502  * delayed allocation before calling nobh_writepage() or
2503  * block_write_full_page().  Otherwise, b_blocknr could be left
2504  * uninitialized, and the page write functions will be taken by
2505  * surprise.
2506  */
2507 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2508                                    struct buffer_head *bh_result, int create)
2509 {
2510         int ret = 0;
2511         unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2512
2513         BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2514
2515         /*
2516          * We don't want to do block allocation in writepage,
2517          * so call ext4_get_blocks() with create = 0.
2518          */
2519         ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
2520         BUG_ON(create && ret == 0);
2521         if (ret > 0) {
2522                 bh_result->b_size = (ret << inode->i_blkbits);
2523                 ret = 0;
2524         }
2525         return ret;
2526 }
2527
2528 /*
2529  * This function can get called via...
2530  *   - ext4_da_writepages after taking page lock (have journal handle)
2531  *   - journal_submit_inode_data_buffers (no journal handle)
2532  *   - shrink_page_list via pdflush (no journal handle)
2533  *   - grab_page_cache when doing write_begin (have journal handle)
2534  */
2535 static int ext4_da_writepage(struct page *page,
2536                                 struct writeback_control *wbc)
2537 {
2538         int ret = 0;
2539         loff_t size;
2540         unsigned int len;
2541         struct buffer_head *page_bufs;
2542         struct inode *inode = page->mapping->host;
2543
2544         trace_ext4_da_writepage(inode, page);
2545         size = i_size_read(inode);
2546         if (page->index == size >> PAGE_CACHE_SHIFT)
2547                 len = size & ~PAGE_CACHE_MASK;
2548         else
2549                 len = PAGE_CACHE_SIZE;
2550
2551         if (page_has_buffers(page)) {
2552                 page_bufs = page_buffers(page);
2553                 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2554                                         ext4_bh_unmapped_or_delay)) {
2555                         /*
2556                          * We don't want to do block allocation,
2557                          * so redirty the page and return.
2558                          * We may reach here when we do a journal commit
2559                          * via journal_submit_inode_data_buffers.
2560                          * If we don't have a mapped block we just ignore
2561                          * them.  We can also reach here via shrink_page_list.
2562                          */
2563                         redirty_page_for_writepage(wbc, page);
2564                         unlock_page(page);
2565                         return 0;
2566                 }
2567         } else {
2568                 /*
2569                  * The test for page_has_buffers() is subtle:
2570                  * We know the page is dirty but it lost buffers. That means
2571                  * that at some moment in time after write_begin()/write_end()
2572                  * has been called all buffers have been clean and thus they
2573                  * must have been written at least once. So they are all
2574                  * mapped and we can happily proceed with mapping them
2575                  * and writing the page.
2576                  *
2577                  * Try to initialize the buffer_heads and check whether
2578                  * all are mapped and non delay. We don't want to
2579                  * do block allocation here.
2580                  */
2581                 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2582                                           noalloc_get_block_write);
2583                 if (!ret) {
2584                         page_bufs = page_buffers(page);
2585                         /* check whether all are mapped and non-delay */
2586                         if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2587                                                 ext4_bh_unmapped_or_delay)) {
2588                                 redirty_page_for_writepage(wbc, page);
2589                                 unlock_page(page);
2590                                 return 0;
2591                         }
2592                 } else {
2593                         /*
2594                          * We can't do block allocation here,
2595                          * so just redirty the page, unlock it,
2596                          * and return.
2597                          */
2598                         redirty_page_for_writepage(wbc, page);
2599                         unlock_page(page);
2600                         return 0;
2601                 }
2602                 /* now mark the buffer_heads as dirty and uptodate */
2603                 block_commit_write(page, 0, PAGE_CACHE_SIZE);
2604         }
2605
2606         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2607                 ret = nobh_writepage(page, noalloc_get_block_write, wbc);
2608         else
2609                 ret = block_write_full_page(page, noalloc_get_block_write,
2610                                             wbc);
2611
2612         return ret;
2613 }
2614
2615 /*
2616  * This is called via ext4_da_writepages() to
2617  * calculate the total number of credits to reserve to fit
2618  * a single extent allocation into a single transaction.
2619  * ext4_da_writepages() will loop calling this before
2620  * the block allocation.
2621  */
2622
2623 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2624 {
2625         int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2626
2627         /*
2628          * With non-extent format the journal credits needed to
2629          * insert nrblocks contiguous blocks are dependent on the
2630          * number of contiguous blocks.  So we will limit the
2631          * number of contiguous blocks to a sane value.
2632          */
2633         if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
2634             (max_blocks > EXT4_MAX_TRANS_DATA))
2635                 max_blocks = EXT4_MAX_TRANS_DATA;
2636
2637         return ext4_chunk_trans_blocks(inode, max_blocks);
2638 }
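
/*
 * Hedged usage sketch (hypothetical helper), mirroring the transaction
 * setup in ext4_da_writepages() below: size the handle for one extent
 * allocation using the credit estimate above.
 */
static handle_t *example_start_da_handle(struct inode *inode)
{
        int needed_blocks = ext4_da_writepages_trans_blocks(inode);

        /* returns ERR_PTR() on failure, like any ext4_journal_start() */
        return ext4_journal_start(inode, needed_blocks);
}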
2639
2640 static int ext4_da_writepages(struct address_space *mapping,
2641                               struct writeback_control *wbc)
2642 {
2643         pgoff_t index;
2644         int range_whole = 0;
2645         handle_t *handle = NULL;
2646         struct mpage_da_data mpd;
2647         struct inode *inode = mapping->host;
2648         int no_nrwrite_index_update;
2649         int pages_written = 0;
2650         long pages_skipped;
2651         int range_cyclic, cycled = 1, io_done = 0;
2652         int needed_blocks, ret = 0, nr_to_writebump = 0;
2653         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2654
2655         trace_ext4_da_writepages(inode, wbc);
2656
2657         /*
2658          * No pages to write? This is mainly a kludge to avoid starting
2659          * a transaction for special inodes like journal inode on last iput()
2660          * because that could violate lock ordering on umount
2661          */
2662         if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2663                 return 0;
2664
2665         /*
2666          * If the filesystem has aborted, it is read-only, so return
2667          * right away instead of dumping stack traces later on that
2668          * will obscure the real source of the problem.  We test
2669          * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2670          * the latter could be true if the filesystem is mounted
2671          * read-only, and in that case, ext4_da_writepages should
2672          * *never* be called, so if that ever happens, we would want
2673          * the stack trace.
2674          */
2675         if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2676                 return -EROFS;
2677
2678         /*
2679          * Make sure nr_to_write is >= sbi->s_mb_stream_request.
2680          * This makes sure a small file's blocks are allocated in a
2681          * single attempt, which keeps small files
2682          * less fragmented.
2683          */
2684         if (wbc->nr_to_write < sbi->s_mb_stream_request) {
2685                 nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
2686                 wbc->nr_to_write = sbi->s_mb_stream_request;
2687         }
2688         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2689                 range_whole = 1;
2690
2691         range_cyclic = wbc->range_cyclic;
2692         if (wbc->range_cyclic) {
2693                 index = mapping->writeback_index;
2694                 if (index)
2695                         cycled = 0;
2696                 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2697                 wbc->range_end  = LLONG_MAX;
2698                 wbc->range_cyclic = 0;
2699         } else
2700                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
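        /*
         * Note on the cyclic scheme above: the first pass covers
         * [writeback_index, EOF].  If no I/O got done and we did not
         * start from page 0 (cycled == 0), the retry logic further down
         * wraps around and scans [0, writeback_index) as a second pass.
         */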
2701
2702         mpd.wbc = wbc;
2703         mpd.inode = mapping->host;
2704
2705         /*
2706          * we don't want write_cache_pages to update
2707          * nr_to_write and writeback_index
2708          */
2709         no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2710         wbc->no_nrwrite_index_update = 1;
2711         pages_skipped = wbc->pages_skipped;
2712
2713 retry:
2714         while (!ret && wbc->nr_to_write > 0) {
2715
2716                 /*
2717                  * We insert one extent at a time, so we need the
2718                  * credits for a single extent allocation.
2719                  * Journalled mode is currently not supported
2720                  * by delalloc.
2721                  */
2722                 BUG_ON(ext4_should_journal_data(inode));
2723                 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2724
2725                 /* start a new transaction*/
2726                 handle = ext4_journal_start(inode, needed_blocks);
2727                 if (IS_ERR(handle)) {
2728                         ret = PTR_ERR(handle);
2729                         printk(KERN_CRIT "%s: jbd2_start: "
2730                                "%ld pages, ino %lu; err %d\n", __func__,
2731                                 wbc->nr_to_write, inode->i_ino, ret);
2732                         dump_stack();
2733                         goto out_writepages;
2734                 }
2735
2736                 /*
2737                  * Now call __mpage_da_writepage to find the next
2738                  * contiguous region of logical blocks that need
2739                  * blocks to be allocated by ext4.  We don't actually
2740                  * submit the blocks for I/O here, even though
2741                  * write_cache_pages thinks it will, and even though it
2742                  * marks the pages clean for writeout before calling
2743                  * __mpage_da_writepage().
2744                  */
2745                 mpd.b_size = 0;
2746                 mpd.b_state = 0;
2747                 mpd.b_blocknr = 0;
2748                 mpd.first_page = 0;
2749                 mpd.next_page = 0;
2750                 mpd.io_done = 0;
2751                 mpd.pages_written = 0;
2752                 mpd.retval = 0;
2753                 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
2754                                         &mpd);
2755                 /*
2756                  * If we have a contiguous extent of pages and we
2757                  * haven't done the I/O yet, map the blocks and submit
2758                  * them for I/O.
2759                  */
2760                 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2761                         if (mpage_da_map_blocks(&mpd) == 0)
2762                                 mpage_da_submit_io(&mpd);
2763                         mpd.io_done = 1;
2764                         ret = MPAGE_DA_EXTENT_TAIL;
2765                 }
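                /*
                 * Note: MPAGE_DA_EXTENT_TAIL (0x01) is a sentinel, not an
                 * errno; it records that we mapped and submitted one extent,
                 * and the MPAGE_DA_EXTENT_TAIL branch below resets ret to 0
                 * so the loop continues with the remaining dirty pages.
                 */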
2766                 wbc->nr_to_write -= mpd.pages_written;
2767
2768                 ext4_journal_stop(handle);
2769
2770                 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2771                         /* commit the transaction, which will
2772                          * free the blocks released in it,
2773                          * and try again
2774                          */
2775                         jbd2_journal_force_commit_nested(sbi->s_journal);
2776                         wbc->pages_skipped = pages_skipped;
2777                         ret = 0;
2778                 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2779                         /*
2780                          * Got one extent; now try with the
2781                          * rest of the pages.
2782                          */
2783                         pages_written += mpd.pages_written;
2784                         wbc->pages_skipped = pages_skipped;
2785                         ret = 0;
2786                         io_done = 1;
2787                 } else if (wbc->nr_to_write)
2788                         /*
2789                          * There is no more writeout needed,
2790                          * or we requested a non-blocking writeout
2791                          * and found the device congested.
2792                          */
2793                         break;
2794         }
2795         if (!io_done && !cycled) {
2796                 cycled = 1;
2797                 index = 0;
2798                 wbc->range_start = index << PAGE_CACHE_SHIFT;
2799                 wbc->range_end  = mapping->writeback_index - 1;
2800                 goto retry;
2801         }
2802         if (pages_skipped != wbc->pages_skipped)
2803                 printk(KERN_EMERG "This should not happen: leaving %s "
2804                                 "with nr_to_write = %ld ret = %d\n",
2805                                 __func__, wbc->nr_to_write, ret);
2806
2807         /* Update index */
2808         index += pages_written;
2809         wbc->range_cyclic = range_cyclic;
2810         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2811                 /*
2812                  * set the writeback_index so that range_cyclic
2813                  * mode will write it back later
2814                  */
2815                 mapping->writeback_index = index;
2816
2817 out_writepages:
2818         if (!no_nrwrite_index_update)
2819                 wbc->no_nrwrite_index_update = 0;
2820         wbc->nr_to_write -= nr_to_writebump;
2821         trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2822         return ret;
2823 }
2824
2825 #define FALL_BACK_TO_NONDELALLOC 1
2826 static int ext4_nonda_switch(struct super_block *sb)
2827 {
2828         s64 free_blocks, dirty_blocks;
2829         struct ext4_sb_info *sbi = EXT4_SB(sb);
2830
2831         /*
2832          * Switch to non-delalloc mode if we are running low
2833          * on free blocks. The free-block accounting via percpu
2834          * counters can drift slightly, with up to percpu_counter_batch
2835          * accumulated on each CPU before the global counter is updated.
2836          * Delalloc needs accurate free-block accounting, so switch
2837          * to non-delalloc when we are near the error range.
2838          */
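        /*
         * Worked example with illustrative numbers: free_blocks = 1400,
         * dirty_blocks = 1000.  Then 2 * 1400 = 2800 < 3 * 1000 = 3000,
         * i.e. free is below 150% of dirty, so we return 1 and fall back
         * to non-delalloc writes.
         */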
2839         free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
2840         dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
2841         if (2 * free_blocks < 3 * dirty_blocks ||
2842                 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
2843                 /*
2844                  * free block count is less than 150% of dirty blocks,
2845                  * or free blocks is less than the watermark
2846                  */
2847                 return 1;
2848         }
2849         return 0;
2850 }
2851
2852 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2853                                loff_t pos, unsigned len, unsigned flags,
2854                                struct page **pagep, void **fsdata)
2855 {
2856         int ret, retries = 0;
2857         struct page *page;
2858         pgoff_t index;
2859         unsigned from, to;
2860         struct inode *inode = mapping->host;
2861         handle_t *handle;
2862
2863         index = pos >> PAGE_CACHE_SHIFT;
2864         from = pos & (PAGE_CACHE_SIZE - 1);
2865         to = from + len;
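        /*
         * Example with illustrative numbers, assuming 4k pages:
         * pos = 5000 and len = 100 give index = 1 (the second page),
         * from = 904 and to = 1004, the byte range of this write
         * within that page.
         */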
2866
2867         if (ext4_nonda_switch(inode->i_sb)) {
2868                 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2869                 return ext4_write_begin(file, mapping, pos,
2870                                         len, flags, pagep, fsdata);
2871         }
2872         *fsdata = (void *)0;
2873         trace_ext4_da_write_begin(inode, pos, len, flags);
2874 retry:
2875         /*
2876          * With delayed allocation, we don't log the i_disksize update
2877          * if there is delayed block allocation. But we still need
2878          * to journal the i_disksize update if the write to the end
2879          * of the file lands in an already-mapped buffer.
2880          */
2881         handle = ext4_journal_start(inode, 1);
2882         if (IS_ERR(handle)) {
2883                 ret = PTR_ERR(handle);
2884                 goto out;
2885         }
2886         /* We cannot recurse into the filesystem as the transaction is already
2887          * started */
2888         flags |= AOP_FLAG_NOFS;
2889
2890         page = grab_cache_page_write_begin(mapping, index, flags);
2891         if (!page) {
2892                 ext4_journal_stop(handle);
2893                 ret = -ENOMEM;
2894                 goto out;
2895         }
2896         *pagep = page;
2897
2898         ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
2899                                 ext4_da_get_block_prep);
2900         if (ret < 0) {
2901                 unlock_page(page);
2902                 ext4_journal_stop(handle);
2903                 page_cache_release(page);
2904                 /*
2905                  * block_write_begin may have instantiated a few blocks
2906                  * outside i_size.  Trim these off again. Don't need
2907                  * i_size_read because we hold i_mutex.
2908                  */
2909                 if (pos + len > inode->i_size)
2910                         vmtruncate(inode, inode->i_size);
2911         }
2912
2913         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2914                 goto retry;
2915 out:
2916         return ret;
2917 }
2918
2919 /*
2920  * Check if we should update i_disksize
2921  * when a write to the end of the file does not require block allocation
2922  */
2923 static int ext4_da_should_update_i_disksize(struct page *page,
2924                                             unsigned long offset)
2925 {
2926         struct buffer_head *bh;
2927         struct inode *inode = page->mapping->host;
2928         unsigned int idx;
2929         int i;
2930
2931         bh = page_buffers(page);
2932         idx = offset >> inode->i_blkbits;
2933
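        /*
         * Example with illustrative numbers: with 1k blocks
         * (i_blkbits == 10), offset = 2500 gives idx = 2, so the loop
         * below advances to the third buffer_head on the page's
         * circular b_this_page list before testing its state.
         */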
2934         for (i = 0; i < idx; i++)
2935                 bh = bh->b_this_page;
2936
2937         if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2938                 return 0;
2939         return 1;
2940 }
2941
2942 static int ext4_da_write_end(struct file *file,
2943                              struct address_space *mapping,
2944                              loff_t pos, unsigned len, unsigned copied,
2945                              struct page *page, void *fsdata)
2946 {
2947         struct inode *inode = mapping->host;
2948         int ret = 0, ret2;
2949         handle_t *handle = ext4_journal_current_handle();
2950         loff_t new_i_size;
2951         unsigned long start, end;
2952         int write_mode = (int)(unsigned long)fsdata;
2953
2954         if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2955                 if (ext4_should_order_data(inode)) {
2956                         return ext4_ordered_write_end(file, mapping, pos,
2957                                         len, copied, page, fsdata);
2958                 } else if (ext4_should_writeback_data(inode)) {
2959                         return ext4_writeback_write_end(file, mapping, pos,
2960                                         len, copied, page, fsdata);
2961                 } else {
2962                         BUG();
2963                 }
2964         }
2965
2966         trace_ext4_da_write_end(inode, pos, len, copied);
2967         start = pos & (PAGE_CACHE_SIZE - 1);
2968         end = start + copied - 1;
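        /*
         * Example with illustrative numbers, assuming 4k pages:
         * pos = 5000 and copied = 100 give start = 904 and end = 1003,
         * the offset of the last copied byte in this page; 'end' is
         * what ext4_da_should_update_i_disksize() inspects below.
         */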
2969
2970         /*
2971          * generic_write_end() will run mark_inode_dirty() if i_size
2972          * changes.  So let's piggyback the i_disksize mark_inode_dirty
2973          * into that.
2974          */
2975
2976         new_i_size = pos + copied;
2977         if (new_i_size > EXT4_I(inode)->i_disksize) {
2978                 if (ext4_da_should_update_i_disksize(page, end)) {
2979                         down_write(&EXT4_I(inode)->i_data_sem);
2980                         if (new_i_size > EXT4_I(inode)->i_disksize) {
2981                                 /*
2982                                  * Updating i_disksize when extending file
2983                                  * without needing block allocation
2984                                  */
2985                                 if (ext4_should_order_data(inode))
2986                                         ret = ext4_jbd2_file_inode(handle,
2987                                                                    inode);
2988
2989                                 EXT4_I(inode)->i_disksize = new_i_size;
2990                         }
2991                         up_write(&EXT4_I(inode)->i_data_sem);
2992                         /* We need to mark the inode dirty even if
2993                          * new_i_size is less than inode->i_size
2994                          * but greater than i_disksize (hint: delalloc).
2995                          */
2996                         ext4_mark_inode_dirty(handle, inode);
2997                 }
2998         }
2999         ret2 = generic_write_end(file, mapping, pos, len, copied,
3000                                                         page, fsdata);
3001         copied = ret2;
3002         if (ret2 < 0)
3003                 ret = ret2;
3004         ret2 = ext4_journal_stop(handle);
3005         if (!ret)
3006                 ret = ret2;
3007
3008         return ret ? ret : copied;
3009 }
3010
3011 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
3012 {
3013         /*
3014          * Drop reserved blocks
3015          */
3016         BUG_ON(!PageLocked(page));
3017         if (!page_has_buffers(page))
3018                 goto out;
3019
3020         ext4_da_page_release_reservation(page, offset);
3021
3022 out:
3023         ext4_invalidatepage(page, offset);
3024
3025         return;
3026 }
3027
3028 /*
3029  * Force all delayed allocation blocks to be allocated for a given inode.
3030  */
3031 int ext4_alloc_da_blocks(struct inode *inode)
3032 {
3033         if (!EXT4_I(inode)->i_reserved_data_blocks &&
3034             !EXT4_I(inode)->i_reserved_meta_blocks)
3035                 return 0;
3036
3037         /*
3038          * We do something simple for now.  The filemap_flush() will
3039          * also start triggering a write of the data blocks, which is
3040          * not strictly speaking necessary (and for users of
3041          * laptop_mode, not even desirable).  However, to do otherwise
3042          * would require replicating code paths in:
3043          *
3044          * ext4_da_writepages() ->
3045          *    write_cache_pages() ---> (via passed in callback function)
3046          *        __mpage_da_writepage() -->
3047          *           mpage_add_bh_to_extent()
3048          *           mpage_da_map_blocks()
3049          *
3050          * The problem is that write_cache_pages(), located in
3051          * mm/page-writeback.c, marks pages clean in preparation for
3052          * doing I/O, which is not desirable if we're not planning on
3053          * doing I/O at all.
3054          *
3055          * We could call write_cache_pages(), and then redirty all of
3056          * the pages by calling redirty_page_for_writepage() but that
3057          * would be ugly in the extreme.  So instead we would need to
3058          * replicate parts of the code in the above functions,
3059          * simplifying them because we wouldn't actually intend to
3060          * write out the pages, but rather only collect contiguous
3061          * logical block extents, call the multi-block allocator, and
3062          * then update the buffer heads with the block allocations.
3063          *
3064          * For now, though, we'll cheat by calling filemap_flush(),
3065          * which will map the blocks, and start the I/O, but not
3066          * actually wait for the I/O to complete.
3067          */
3068         return filemap_flush(inode->i_mapping);
3069 }
3070
3071 /*
3072  * bmap() is special.  It gets used by applications such as lilo and by
3073  * the swapper to find the on-disk block of a specific piece of data.
3074  *
3075  * Naturally, this is dangerous if the block concerned is still in the
3076  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3077  * filesystem and enables swap, then they may get a nasty shock when the
3078  * data getting swapped to that swapfile suddenly gets overwritten by
3079  * the original zeros written out previously to the journal and
3080  * awaiting writeback in the kernel's buffer cache.
3081  *
3082  * So, if we see any bmap calls here on a modified, data-journaled file,
3083  * take extra steps to flush any blocks which might be in the cache.
3084  */
3085 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3086 {
3087         struct inode *inode = mapping->host;
3088         journal_t *journal;
3089         int err;
3090
3091         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3092                         test_opt(inode->i_sb, DELALLOC)) {
3093                 /*
3094                  * With delalloc we want to sync the file
3095                  * so that we can make sure we allocate
3096                  * blocks for the file
3097                  */
3098                 filemap_write_and_wait(mapping);
3099         }
3100
3101         if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
3102                 /*
3103                  * This is a REALLY heavyweight approach, but the use of
3104                  * bmap on dirty files is expected to be extremely rare:
3105                  * only if we run lilo or swapon on a freshly made file
3106                  * do we expect this to happen.
3107                  *
3108                  * (bmap requires CAP_SYS_RAWIO so this does not
3109                  * represent an unprivileged user DOS attack --- we'd be
3110                  * in trouble if mortal users could trigger this path at
3111                  * will.)
3112                  *
3113                  * NB. EXT4_STATE_JDATA is not set on files other than
3114                  * regular files.  If somebody wants to bmap a directory
3115                  * or symlink and gets confused because the buffer
3116                  * hasn't yet been flushed to disk, they deserve
3117                  * everything they get.
3118                  */
3119
3120                 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
3121                 journal = EXT4_JOURNAL(inode);
3122                 jbd2_journal_lock_updates(journal);
3123                 err = jbd2_journal_flush(journal);
3124                 jbd2_journal_unlock_updates(journal);
3125
3126                 if (err)
3127                         return 0;
3128         }
3129
3130         return generic_block_bmap(mapping, block, ext4_get_block);
3131 }
3132
3133 static int bget_one(handle_t *handle, struct buffer_head *bh)
3134 {
3135         get_bh(bh);
3136         return 0;
3137 }
3138
3139 static int bput_one(handle_t *handle, struct buffer_head *bh)
3140 {
3141         put_bh(bh);
3142         return 0;
3143 }
3144
3145 /*
3146  * Note that we don't need to start a transaction unless we're journaling data
3147  * because we should have holes filled from ext4_page_mkwrite(). We don't even
3148  * need to add the inode to the transaction's list in ordered mode, because if
3149  * we are writing back data added by write(), the inode is already there, and if
3150  * we are writing back data modified via mmap(), no one guarantees in which
3151  * transaction the data will hit the disk. In case we are journaling data, we
3152  * cannot start a transaction directly, because transaction start ranks above page
3153  * lock so we have to do some magic.
3154  *
3155  * In all journaling modes block_write_full_page() will start the I/O.
3156  *
3157  * Problem:
3158  *
3159  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
3160  *              ext4_writepage()
3161  *
3162  * Similar for:
3163  *
3164  *      ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
3165  *
3166  * Same applies to ext4_get_block().  We will deadlock on various things like
3167  * lock_journal and i_data_sem
3168  *
3169  * Setting PF_MEMALLOC here doesn't work - too many internal memory
3170  * allocations fail.
3171  *
3172  * 16May01: If we're reentered then journal_current_handle() will be
3173  *          non-zero. We simply *return*.
3174  *
3175  * 1 July 2001: @@@ FIXME:
3176  *   In journalled data mode, a data buffer may be metadata against the
3177  *   current transaction.  But the same file is part of a shared mapping
3178  *   and someone does a writepage() on it.
3179  *
3180  *   We will move the buffer onto the async_data list, but *after* it has
3181  *   been dirtied. So there's a small window where we have dirty data on
3182  *   BJ_Metadata.
3183  *
3184  *   Note that this only applies to the last partial page in the file.  The
3185  *   bit which block_write_full_page() uses prepare/commit for.  (That's
3186  *   broken code anyway: it's wrong for msync()).
3187  *
3188  *   It's a rare case: affects the final partial page, for journalled data
3189  *   where the file is subject to both write() and writepage() in the same
3190  *   transaction.  To fix it we'll need a custom block_write_full_page().
3191  *   We'll probably need that anyway for journalling writepage() output.
3192  *
3193  * We don't honour synchronous mounts for writepage().  That would be
3194  * disastrous.  Any write() or metadata operation will sync the fs for
3195  * us.
3196  *
3197  */
3198 static int __ext4_normal_writepage(struct page *page,
3199                                    struct writeback_control *wbc)
3200 {
3201         struct inode *inode = page->mapping->host;
3202
3203         if (test_opt(inode->i_sb, NOBH))
3204                 return nobh_writepage(page, noalloc_get_block_write, wbc);
3205         else
3206                 return block_write_full_page(page, noalloc_get_block_write,
3207                                              wbc);
3208 }
3209
3210 static int ext4_normal_writepage(struct page *page,
3211                                  struct writeback_control *wbc)
3212 {
3213         struct inode *inode = page->mapping->host;
3214         loff_t size = i_size_read(inode);
3215         loff_t len;
3216
3217         trace_ext4_normal_writepage(inode, page);
3218         J_ASSERT(PageLocked(page));
3219         if (page->index == size >> PAGE_CACHE_SHIFT)
3220                 len = size & ~PAGE_CACHE_MASK;
3221         else
3222                 len = PAGE_CACHE_SIZE;
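        /*
         * Example with illustrative numbers, assuming 4k pages: for a
         * file of size 10000, the tail page has index 2 (10000 >> 12)
         * and len = 10000 & 4095 = 1808 valid bytes; for every other
         * page the full PAGE_CACHE_SIZE is checked below.
         */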
3223
3224         if (page_has_buffers(page)) {
3225                 /* If the page has buffers, they should all be mapped
3226                  * and allocated. If there are no buffers attached
3227                  * to the page, we know the page is dirty but it lost its
3228                  * buffers. That means that at some moment in time
3229                  * after write_begin() / write_end() was called,
3230                  * all buffers were clean, and thus they must have been
3231                  * written at least once. So they are all mapped and we can
3232                  * happily proceed with mapping them and writing the page.
3233                  */
3234                 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3235                                         ext4_bh_unmapped_or_delay));
3236         }
3237
3238         if (!ext4_journal_current_handle())
3239                 return __ext4_normal_writepage(page, wbc);
3240
3241         redirty_page_for_writepage(wbc, page);
3242         unlock_page(page);
3243         return 0;
3244 }
3245
3246 static int __ext4_journalled_writepage(struct page *page,
3247                                        struct writeback_control *wbc)
3248 {
3249         struct address_space *mapping = page->mapping;
3250         struct inode *inode = mapping->host;
3251         struct buffer_head *page_bufs;
3252         handle_t *handle = NULL;
3253         int ret = 0;
3254         int err;
3255
3256         ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
3257                                   noalloc_get_block_write);
3258         if (ret != 0)
3259                 goto out_unlock;
3260
3261         page_bufs = page_buffers(page);
3262         walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
3263                                                                 bget_one);
3264         /* As soon as we unlock the page, it can go away, but we have
3265          * references to buffers so we are safe */
3266         unlock_page(page);
3267
3268         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
3269         if (IS_ERR(handle)) {
3270                 ret = PTR_ERR(handle);
3271                 goto out;
3272         }
3273
3274         ret = walk_page_buffers(handle, page_bufs, 0,
3275                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
3276
3277         err = walk_page_buffers(handle, page_bufs, 0,
3278                                 PAGE_CACHE_SIZE, NULL, write_end_fn);
3279         if (ret == 0)
3280                 ret = err;
3281         err = ext4_journal_stop(handle);
3282         if (!ret)
3283                 ret = err;
3284
3285         walk_page_buffers(handle, page_bufs, 0,
3286                                 PAGE_CACHE_SIZE, NULL, bput_one);
3287         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
3288         goto out;
3289
3290 out_unlock:
3291         unlock_page(page);
3292 out:
3293         return ret;
3294 }
3295
3296 static int ext4_journalled_writepage(struct page *page,
3297                                      struct writeback_control *wbc)
3298 {
3299         struct inode *inode = page->mapping->host;
3300         loff_t size = i_size_read(inode);
3301         loff_t len;
3302
3303         trace_ext4_journalled_writepage(inode, page);
3304         J_ASSERT(PageLocked(page));
3305         if (page->index == size >> PAGE_CACHE_SHIFT)
3306                 len = size & ~PAGE_CACHE_MASK;
3307         else
3308                 len = PAGE_CACHE_SIZE;
3309
3310         if (page_has_buffers(page)) {
3311                 /* If the page has buffers, they should all be mapped
3312                  * and allocated. If there are no buffers attached
3313                  * to the page, we know the page is dirty but it lost its
3314                  * buffers. That means that at some moment in time
3315                  * after write_begin() / write_end() was called,
3316                  * all buffers were clean, and thus they must have been
3317                  * written at least once. So they are all mapped and we can
3318                  * happily proceed with mapping them and writing the page.
3319                  */
3320                 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3321                                         ext4_bh_unmapped_or_delay));
3322         }
3323
3324         if (ext4_journal_current_handle())
3325                 goto no_write;
3326
3327         if (PageChecked(page)) {
3328                 /*
3329                  * It's mmapped pagecache.  Add buffers and journal it.  There
3330                  * doesn't seem much point in redirtying the page here.
3331                  */
3332                 ClearPageChecked(page);
3333                 return __ext4_journalled_writepage(page, wbc);
3334         } else {
3335                 /*
3336                  * It may be a page full of checkpoint-mode buffers.  We don't
3337                  * really know unless we go poke around in the buffer_heads.
3338                  * But block_write_full_page will do the right thing.
3339                  */
3340                 return block_write_full_page(page, noalloc_get_block_write,
3341                                              wbc);
3342         }
3343 no_write:
3344         redirty_page_for_writepage(wbc, page);
3345         unlock_page(page);
3346         return 0;
3347 }
3348
3349 static int ext4_readpage(struct file *file, struct page *page)
3350 {
3351         return mpage_readpage(page, ext4_get_block);
3352 }
3353
3354 static int
3355 ext4_readpages(struct file *file, struct address_space *mapping,
3356                 struct list_head *pages, unsigned nr_pages)
3357 {
3358         return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3359 }
3360
3361 static void ext4_invalidatepage(struct page *page, unsigned long offset)
3362 {
3363         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3364
3365         /*
3366          * If it's a full truncate we just forget about the pending dirtying
3367          */
3368         if (offset == 0)
3369                 ClearPageChecked(page);
3370
3371         if (journal)
3372                 jbd2_journal_invalidatepage(journal, page, offset);
3373         else
3374                 block_invalidatepage(page, offset);
3375 }
3376
3377 static int ext4_releasepage(struct page *page, gfp_t wait)
3378 {
3379         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3380
3381         WARN_ON(PageChecked(page));
3382         if (!page_has_buffers(page))
3383                 return 0;
3384         if (journal)
3385                 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3386         else
3387                 return try_to_free_buffers(page);
3388 }
3389
3390 /*
3391  * If the O_DIRECT write will extend the file then add this inode to the
3392  * orphan list.  So recovery will truncate it back to the original size
3393  * if the machine crashes during the write.
3394  *
3395  * If the O_DIRECT write is instantiating holes inside i_size and the machine
3396  * crashes then stale disk data _may_ be exposed inside the file. But current
3397  * VFS code falls back to the buffered path in that case, so we are safe.
3398  */
3399 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3400                               const struct iovec *iov, loff_t offset,
3401                               unsigned long nr_segs)
3402 {
3403         struct file *file = iocb->ki_filp;
3404         struct inode *inode = file->f_mapping->host;
3405         struct ext4_inode_info *ei = EXT4_I(inode);
3406         handle_t *handle;
3407         ssize_t ret;
3408         int orphan = 0;
3409         size_t count = iov_length(iov, nr_segs);
3410
3411         if (rw == WRITE) {
3412                 loff_t final_size = offset + count;
3413
3414                 if (final_size > inode->i_size) {
3415                         /* Credits for sb + inode write */
3416                         handle = ext4_journal_start(inode, 2);
3417                         if (IS_ERR(handle)) {
3418                                 ret = PTR_ERR(handle);
3419                                 goto out;
3420                         }
3421                         ret = ext4_orphan_add(handle, inode);
3422                         if (ret) {
3423                                 ext4_journal_stop(handle);
3424                                 goto out;
3425                         }
3426                         orphan = 1;
3427                         ei->i_disksize = inode->i_size;
3428                         ext4_journal_stop(handle);
3429                 }
3430         }
3431
3432         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3433                                  offset, nr_segs,
3434                                  ext4_get_block, NULL);
3435
3436         if (orphan) {
3437                 int err;
3438
3439                 /* Credits for sb + inode write */
3440                 handle = ext4_journal_start(inode, 2);
3441                 if (IS_ERR(handle)) {
3442                         /* This is really bad luck. We've written the data
3443                          * but cannot extend i_size. Bail out and pretend
3444                          * the write failed... */
3445                         ret = PTR_ERR(handle);
3446                         goto out;
3447                 }
3448                 if (inode->i_nlink)
3449                         ext4_orphan_del(handle, inode);
3450                 if (ret > 0) {
3451                         loff_t end = offset + ret;
3452                         if (end > inode->i_size) {
3453                                 ei->i_disksize = end;
3454                                 i_size_write(inode, end);
3455                                 /*
3456                                  * We're going to return a positive `ret'
3457                                  * here due to non-zero-length I/O, so there's
3458                                  * no way of reporting error returns from
3459                                  * ext4_mark_inode_dirty() to userspace.  So
3460                                  * ignore it.
3461                                  */
3462                                 ext4_mark_inode_dirty(handle, inode);
3463                         }
3464                 }
3465                 err = ext4_journal_stop(handle);
3466                 if (ret == 0)
3467                         ret = err;
3468         }
3469 out:
3470         return ret;
3471 }
3472
3473 /*
3474  * Pages can be marked dirty completely asynchronously from ext4's journalling
3475  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3476  * much here because ->set_page_dirty is called under VFS locks.  The page is
3477  * not necessarily locked.
3478  *
3479  * We cannot just dirty the page and leave attached buffers clean, because the
3480  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3481  * or jbddirty because all the journalling code will explode.
3482  *
3483  * So what we do is to mark the page "pending dirty" and next time writepage
3484  * is called, propagate that into the buffers appropriately.
3485  */
3486 static int ext4_journalled_set_page_dirty(struct page *page)
3487 {
3488         SetPageChecked(page);
3489         return __set_page_dirty_nobuffers(page);
3490 }
3491
3492 static const struct address_space_operations ext4_ordered_aops = {
3493         .readpage               = ext4_readpage,
3494         .readpages              = ext4_readpages,
3495         .writepage              = ext4_normal_writepage,
3496         .sync_page              = block_sync_page,
3497         .write_begin            = ext4_write_begin,
3498         .write_end              = ext4_ordered_write_end,
3499         .bmap                   = ext4_bmap,
3500         .invalidatepage         = ext4_invalidatepage,
3501         .releasepage            = ext4_releasepage,
3502         .direct_IO              = ext4_direct_IO,
3503         .migratepage            = buffer_migrate_page,
3504         .is_partially_uptodate  = block_is_partially_uptodate,
3505 };
3506
3507 static const struct address_space_operations ext4_writeback_aops = {
3508         .readpage               = ext4_readpage,
3509         .readpages              = ext4_readpages,
3510         .writepage              = ext4_normal_writepage,
3511         .sync_page              = block_sync_page,
3512         .write_begin            = ext4_write_begin,
3513         .write_end              = ext4_writeback_write_end,
3514         .bmap                   = ext4_bmap,
3515         .invalidatepage         = ext4_invalidatepage,
3516         .releasepage            = ext4_releasepage,
3517         .direct_IO              = ext4_direct_IO,
3518         .migratepage            = buffer_migrate_page,
3519         .is_partially_uptodate  = block_is_partially_uptodate,
3520 };
3521
3522 static const struct address_space_operations ext4_journalled_aops = {
3523         .readpage               = ext4_readpage,
3524         .readpages              = ext4_readpages,
3525         .writepage              = ext4_journalled_writepage,
3526         .sync_page              = block_sync_page,
3527         .write_begin            = ext4_write_begin,
3528         .write_end              = ext4_journalled_write_end,
3529         .set_page_dirty         = ext4_journalled_set_page_dirty,
3530         .bmap                   = ext4_bmap,
3531         .invalidatepage         = ext4_invalidatepage,
3532         .releasepage            = ext4_releasepage,
3533         .is_partially_uptodate  = block_is_partially_uptodate,
3534 };
3535
3536 static const struct address_space_operations ext4_da_aops = {
3537         .readpage               = ext4_readpage,
3538         .readpages              = ext4_readpages,
3539         .writepage              = ext4_da_writepage,
3540         .writepages             = ext4_da_writepages,
3541         .sync_page              = block_sync_page,
3542         .write_begin            = ext4_da_write_begin,
3543         .write_end              = ext4_da_write_end,
3544         .bmap                   = ext4_bmap,
3545         .invalidatepage         = ext4_da_invalidatepage,
3546         .releasepage            = ext4_releasepage,
3547         .direct_IO              = ext4_direct_IO,
3548         .migratepage            = buffer_migrate_page,
3549         .is_partially_uptodate  = block_is_partially_uptodate,
3550 };
3551
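/*
 * Summary of the selection below: an inode's address_space_operations
 * follow its data journaling mode and the DELALLOC mount option:
 *
 *   delalloc + data=ordered or data=writeback  ->  ext4_da_aops
 *   data=ordered, no delalloc                  ->  ext4_ordered_aops
 *   data=writeback, no delalloc                ->  ext4_writeback_aops
 *   data=journal                               ->  ext4_journalled_aops
 */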
3552 void ext4_set_aops(struct inode *inode)
3553 {
3554         if (ext4_should_order_data(inode) &&
3555                 test_opt(inode->i_sb, DELALLOC))
3556                 inode->i_mapping->a_ops = &ext4_da_aops;
3557         else if (ext4_should_order_data(inode))
3558                 inode->i_mapping->a_ops = &ext4_ordered_aops;
3559         else if (ext4_should_writeback_data(inode) &&
3560                  test_opt(inode->i_sb, DELALLOC))
3561                 inode->i_mapping->a_ops = &ext4_da_aops;
3562         else if (ext4_should_writeback_data(inode))
3563                 inode->i_mapping->a_ops = &ext4_writeback_aops;
3564         else
3565                 inode->i_mapping->a_ops = &ext4_journalled_aops;
3566 }
3567
3568 /*
3569  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3570  * up to the end of the block which corresponds to `from'.
3571  * This is required during truncate. We need to physically zero the tail end
3572  * of that block so it doesn't yield old data if the file is later grown.
3573  */
3574 int ext4_block_truncate_page(handle_t *handle,
3575                 struct address_space *mapping, loff_t from)
3576 {
3577         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3578         unsigned offset = from & (PAGE_CACHE_SIZE-1);
3579         unsigned blocksize, length, pos;
3580         ext4_lblk_t iblock;
3581         struct inode *inode = mapping->host;
3582         struct buffer_head *bh;
3583         struct page *page;
3584         int err = 0;
3585
3586         page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
3587         if (!page)
3588                 return -EINVAL;
3589
3590         blocksize = inode->i_sb->s_blocksize;
3591         length = blocksize - (offset & (blocksize - 1));
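        /*
         * Example with illustrative numbers, assuming 4k pages and a 4k
         * block size: from = 5000 gives offset = 904 within the page,
         * and length = 4096 - 904 = 3192, the number of bytes from
         * `from' to the end of its block that may need zeroing.
         */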
3592         iblock = index <