ext4: simplify ext4_writepage()
1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *      (sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *      (jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #include <linux/mpage.h>
37 #include <linux/namei.h>
38 #include <linux/uio.h>
39 #include <linux/bio.h>
40 #include <linux/workqueue.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43
44 #include "ext4_jbd2.h"
45 #include "xattr.h"
46 #include "acl.h"
47 #include "ext4_extents.h"
48
49 #include <trace/events/ext4.h>
50
51 #define MPAGE_DA_EXTENT_TAIL 0x01
52
53 static inline int ext4_begin_ordered_truncate(struct inode *inode,
54                                               loff_t new_size)
55 {
56         return jbd2_journal_begin_ordered_truncate(
57                                         EXT4_SB(inode->i_sb)->s_journal,
58                                         &EXT4_I(inode)->jinode,
59                                         new_size);
60 }
61
62 static void ext4_invalidatepage(struct page *page, unsigned long offset);
63 static int ext4_writepage(struct page *page, struct writeback_control *wbc);
64
65 /*
66  * Test whether an inode is a fast symlink, i.e. one whose target lives in i_data.
67  */
68 static int ext4_inode_is_fast_symlink(struct inode *inode)
69 {
70         int ea_blocks = EXT4_I(inode)->i_file_acl ?
71                 (inode->i_sb->s_blocksize >> 9) : 0;
72
73         return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
74 }
75
76 /*
77  * Work out how many blocks we need to proceed with the next chunk of a
78  * truncate transaction.
79  */
80 static unsigned long blocks_for_truncate(struct inode *inode)
81 {
82         ext4_lblk_t needed;
83
84         needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
85
86         /* Give ourselves just enough room to cope with inodes in which
87          * i_blocks is corrupt: we've seen disk corruptions in the past
88          * which resulted in random data in an inode which looked enough
89          * like a regular file for ext4 to try to delete it.  Things
90          * will go a bit crazy if that happens, but at least we should
91          * try not to panic the whole kernel. */
92         if (needed < 2)
93                 needed = 2;
94
95         /* But we need to bound the transaction so we don't overflow the
96          * journal. */
97         if (needed > EXT4_MAX_TRANS_DATA)
98                 needed = EXT4_MAX_TRANS_DATA;
99
100         return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
101 }
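
/*
 * Worked example (an illustrative sketch, not part of the original
 * file): on a 4KiB-block filesystem s_blocksize_bits is 12 and
 * i_blocks is counted in 512-byte sectors, so a file with
 * i_blocks == 81920 (about 40MiB) gives needed = 81920 >> 3 == 10240,
 * which is then clamped to EXT4_MAX_TRANS_DATA before the fixed
 * EXT4_DATA_TRANS_BLOCKS() overhead is added.
 */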
102
103 /*
104  * Truncate transactions can be complex and absolutely huge.  So we need to
105  * be able to restart the transaction at a convenient checkpoint to make
106  * sure we don't overflow the journal.
107  *
108  * start_transaction gets us a new handle for a truncate transaction,
109  * and extend_transaction tries to extend the existing one a bit.  If
110  * extend fails, we need to propagate the failure up and restart the
111  * transaction in the top-level truncate loop. --sct
112  */
113 static handle_t *start_transaction(struct inode *inode)
114 {
115         handle_t *result;
116
117         result = ext4_journal_start(inode, blocks_for_truncate(inode));
118         if (!IS_ERR(result))
119                 return result;
120
121         ext4_std_error(inode->i_sb, PTR_ERR(result));
122         return result;
123 }
124
125 /*
126  * Try to extend this transaction for the purposes of truncation.
127  *
128  * Returns 0 if we managed to create more room.  If we can't create more
129  * room, the transaction must be restarted, and we return 1.
130  */
131 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
132 {
133         if (!ext4_handle_valid(handle))
134                 return 0;
135         if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
136                 return 0;
137         if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
138                 return 0;
139         return 1;
140 }
141
142 /*
143  * Restart the transaction associated with *handle.  This does a commit,
144  * so before we call here everything must be consistently dirtied against
145  * this transaction.
146  */
147 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
148                                  int nblocks)
149 {
150         int ret;
151
152         /*
153          * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
154          * moment, get_block can be called only for blocks inside i_size since
155          * page cache has been already dropped and writes are blocked by
156          * i_mutex. So we can safely drop the i_data_sem here.
157          */
158         BUG_ON(EXT4_JOURNAL(inode) == NULL);
159         jbd_debug(2, "restarting handle %p\n", handle);
160         up_write(&EXT4_I(inode)->i_data_sem);
161         ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
162         down_write(&EXT4_I(inode)->i_data_sem);
163         ext4_discard_preallocations(inode);
164
165         return ret;
166 }
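
/*
 * Typical call pattern in a truncate loop (an illustrative sketch,
 * assuming everything has been dirtied consistently first):
 *
 *      if (try_to_extend_transaction(handle, inode))
 *              err = ext4_truncate_restart_trans(handle, inode,
 *                                      blocks_for_truncate(inode));
 *
 * i.e. we only pay for a commit and restart when the current handle
 * can neither satisfy nor extend to the needed credits.
 */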
167
168 /*
169  * Called at the last iput() if i_nlink is zero.
170  */
171 void ext4_evict_inode(struct inode *inode)
172 {
173         handle_t *handle;
174         int err;
175
176         if (inode->i_nlink) {
177                 truncate_inode_pages(&inode->i_data, 0);
178                 goto no_delete;
179         }
180
181         if (!is_bad_inode(inode))
182                 dquot_initialize(inode);
183
184         if (ext4_should_order_data(inode))
185                 ext4_begin_ordered_truncate(inode, 0);
186         truncate_inode_pages(&inode->i_data, 0);
187
188         if (is_bad_inode(inode))
189                 goto no_delete;
190
191         handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
192         if (IS_ERR(handle)) {
193                 ext4_std_error(inode->i_sb, PTR_ERR(handle));
194                 /*
195                  * If we're going to skip the normal cleanup, we still need to
196                  * make sure that the in-core orphan linked list is properly
197                  * cleaned up.
198                  */
199                 ext4_orphan_del(NULL, inode);
200                 goto no_delete;
201         }
202
203         if (IS_SYNC(inode))
204                 ext4_handle_sync(handle);
205         inode->i_size = 0;
206         err = ext4_mark_inode_dirty(handle, inode);
207         if (err) {
208                 ext4_warning(inode->i_sb,
209                              "couldn't mark inode dirty (err %d)", err);
210                 goto stop_handle;
211         }
212         if (inode->i_blocks)
213                 ext4_truncate(inode);
214
215         /*
216          * ext4_ext_truncate() doesn't reserve any slop when it
217          * restarts journal transactions; therefore there may not be
218          * enough credits left in the handle to remove the inode from
219          * the orphan list and set the dtime field.
220          */
221         if (!ext4_handle_has_enough_credits(handle, 3)) {
222                 err = ext4_journal_extend(handle, 3);
223                 if (err > 0)
224                         err = ext4_journal_restart(handle, 3);
225                 if (err != 0) {
226                         ext4_warning(inode->i_sb,
227                                      "couldn't extend journal (err %d)", err);
228                 stop_handle:
229                         ext4_journal_stop(handle);
230                         ext4_orphan_del(NULL, inode);
231                         goto no_delete;
232                 }
233         }
234
235         /*
236          * Kill off the orphan record which ext4_truncate created.
237          * AKPM: I think this can be inside the above `if'.
238          * Note that ext4_orphan_del() has to be able to cope with the
239          * deletion of a non-existent orphan - this is because we don't
240          * know if ext4_truncate() actually created an orphan record.
241          * (Well, we could do this if we need to, but heck - it works)
242          */
243         ext4_orphan_del(handle, inode);
244         EXT4_I(inode)->i_dtime  = get_seconds();
245
246         /*
247          * One subtle ordering requirement: if anything has gone wrong
248          * (transaction abort, IO errors, whatever), then we can still
249          * do these next steps (the fs will already have been marked as
250          * having errors), but we can't free the inode if the mark_dirty
251          * fails.
252          */
253         if (ext4_mark_inode_dirty(handle, inode))
254                 /* If that failed, just do the required in-core inode clear. */
255                 ext4_clear_inode(inode);
256         else
257                 ext4_free_inode(handle, inode);
258         ext4_journal_stop(handle);
259         return;
260 no_delete:
261         ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
262 }
263
264 typedef struct {
265         __le32  *p;
266         __le32  key;
267         struct buffer_head *bh;
268 } Indirect;
269
270 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
271 {
272         p->key = *(p->p = v);
273         p->bh = bh;
274 }
275
276 /**
277  *      ext4_block_to_path - parse the block number into array of offsets
278  *      @inode: inode in question (we are only interested in its superblock)
279  *      @i_block: block number to be parsed
280  *      @offsets: array to store the offsets in
281  *      @boundary: set this non-zero if the referred-to block is likely to be
282  *             followed (on disk) by an indirect block.
283  *
284  *      To store the locations of a file's data, ext4 uses a data structure
285  *      common to UNIX filesystems - a tree of pointers anchored in the inode,
286  *      with data blocks at leaves and indirect blocks in intermediate nodes.
287  *      This function translates the block number into a path in that tree -
288  *      the return value is the path length and @offsets[n] is the offset of the
289  *      pointer to the (n+1)-th node in the n-th one. If @i_block is out of range
290  *      (negative or too large), a warning is printed and zero is returned.
291  *
292  *      Note: function doesn't find node addresses, so no IO is needed. All
293  *      we need to know is the capacity of indirect blocks (taken from the
294  *      inode->i_sb).
295  */
296
297 /*
298  * Portability note: the last comparison (check that we fit into triple
299  * indirect block) is spelled differently, because otherwise on an
300  * architecture with 32-bit longs and 8Kb pages we might get into trouble
301  * if our filesystem had 8Kb blocks. We might use long long, but that would
302  * kill us on x86. Oh, well, at least the sign propagation does not matter -
303  * i_block would have to be negative in the very beginning, so we would not
304  * get there at all.
305  */
306
307 static int ext4_block_to_path(struct inode *inode,
308                               ext4_lblk_t i_block,
309                               ext4_lblk_t offsets[4], int *boundary)
310 {
311         int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
312         int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
313         const long direct_blocks = EXT4_NDIR_BLOCKS,
314                 indirect_blocks = ptrs,
315                 double_blocks = (1 << (ptrs_bits * 2));
316         int n = 0;
317         int final = 0;
318
319         if (i_block < direct_blocks) {
320                 offsets[n++] = i_block;
321                 final = direct_blocks;
322         } else if ((i_block -= direct_blocks) < indirect_blocks) {
323                 offsets[n++] = EXT4_IND_BLOCK;
324                 offsets[n++] = i_block;
325                 final = ptrs;
326         } else if ((i_block -= indirect_blocks) < double_blocks) {
327                 offsets[n++] = EXT4_DIND_BLOCK;
328                 offsets[n++] = i_block >> ptrs_bits;
329                 offsets[n++] = i_block & (ptrs - 1);
330                 final = ptrs;
331         } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
332                 offsets[n++] = EXT4_TIND_BLOCK;
333                 offsets[n++] = i_block >> (ptrs_bits * 2);
334                 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
335                 offsets[n++] = i_block & (ptrs - 1);
336                 final = ptrs;
337         } else {
338                 ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
339                              i_block + direct_blocks +
340                              indirect_blocks + double_blocks, inode->i_ino);
341         }
342         if (boundary)
343                 *boundary = final - 1 - (i_block & (ptrs - 1));
344         return n;
345 }
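
/*
 * Worked example (a sketch assuming a 4KiB block size, so ptrs == 1024,
 * ptrs_bits == 10 and EXT4_NDIR_BLOCKS == 12):
 *
 *      i_block = 5       -> offsets = { 5 }             n = 1 (direct)
 *      i_block = 12      -> offsets = { 12, 0 }         n = 2 (indirect)
 *      i_block = 1036    -> offsets = { 13, 0, 0 }      n = 3 (double)
 *      i_block = 1049612 -> offsets = { 14, 0, 0, 0 }   n = 4 (triple)
 */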
346
347 static int __ext4_check_blockref(const char *function, unsigned int line,
348                                  struct inode *inode,
349                                  __le32 *p, unsigned int max)
350 {
351         struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
352         __le32 *bref = p;
353         unsigned int blk;
354
355         while (bref < p+max) {
356                 blk = le32_to_cpu(*bref++);
357                 if (blk &&
358                     unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
359                                                     blk, 1))) {
360                         es->s_last_error_block = cpu_to_le64(blk);
361                         ext4_error_inode(inode, function, line, blk,
362                                          "invalid block");
363                         return -EIO;
364                 }
365         }
366         return 0;
367 }
368
369
370 #define ext4_check_indirect_blockref(inode, bh)                         \
371         __ext4_check_blockref(__func__, __LINE__, inode,                \
372                               (__le32 *)(bh)->b_data,                   \
373                               EXT4_ADDR_PER_BLOCK((inode)->i_sb))
374
375 #define ext4_check_inode_blockref(inode)                                \
376         __ext4_check_blockref(__func__, __LINE__, inode,                \
377                               EXT4_I(inode)->i_data,                    \
378                               EXT4_NDIR_BLOCKS)
379
380 /**
381  *      ext4_get_branch - read the chain of indirect blocks leading to data
382  *      @inode: inode in question
383  *      @depth: depth of the chain (1 - direct pointer, etc.)
384  *      @offsets: offsets of pointers in inode/indirect blocks
385  *      @chain: place to store the result
386  *      @err: here we store the error value
387  *
388  *      Function fills the array of triples <key, p, bh> and returns %NULL
389  *      if everything went OK or the pointer to the last filled triple
390  *      (incomplete one) otherwise. Upon the return chain[i].key contains
391  *      the number of (i+1)-th block in the chain (as it is stored in memory,
392  *      i.e. little-endian 32-bit), chain[i].p contains the address of that
393  *      number (it points into struct inode for i==0 and into the bh->b_data
394  *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
395  *      block for i>0 and NULL for i==0. In other words, it holds the block
396  *      numbers of the chain, addresses they were taken from (and where we can
397  *      verify that chain did not change) and buffer_heads hosting these
398  *      numbers.
399  *
400  *      Function stops when it stumbles upon zero pointer (absent block)
401  *              (pointer to last triple returned, *@err == 0)
402  *      or when it gets an IO error reading an indirect block
403  *              (ditto, *@err == -EIO)
404  *      or when it reads all @depth-1 indirect blocks successfully and finds
405  *      the whole chain, all the way to the data (returns %NULL, *err == 0).
406  *
407  *      Needs to be called with
408  *      down_read(&EXT4_I(inode)->i_data_sem)
409  */
410 static Indirect *ext4_get_branch(struct inode *inode, int depth,
411                                  ext4_lblk_t  *offsets,
412                                  Indirect chain[4], int *err)
413 {
414         struct super_block *sb = inode->i_sb;
415         Indirect *p = chain;
416         struct buffer_head *bh;
417
418         *err = 0;
419         /* i_data is not going away, no lock needed */
420         add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
421         if (!p->key)
422                 goto no_block;
423         while (--depth) {
424                 bh = sb_getblk(sb, le32_to_cpu(p->key));
425                 if (unlikely(!bh))
426                         goto failure;
427
428                 if (!bh_uptodate_or_lock(bh)) {
429                         if (bh_submit_read(bh) < 0) {
430                                 put_bh(bh);
431                                 goto failure;
432                         }
433                         /* validate block references */
434                         if (ext4_check_indirect_blockref(inode, bh)) {
435                                 put_bh(bh);
436                                 goto failure;
437                         }
438                 }
439
440                 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
441                 /* Reader: end */
442                 if (!p->key)
443                         goto no_block;
444         }
445         return NULL;
446
447 failure:
448         *err = -EIO;
449 no_block:
450         return p;
451 }
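
/*
 * Example of a fully resolved chain for a depth-3 (double indirect)
 * lookup, i.e. the state the comment above describes (a sketch):
 *
 *      chain[0].p  -> &EXT4_I(inode)->i_data[EXT4_DIND_BLOCK], .bh == NULL
 *      chain[1].p  -> slot in the double indirect block's b_data,
 *                     .bh == that block's buffer_head
 *      chain[2].p  -> slot in the indirect block's b_data,
 *                     .bh == that block's buffer_head
 *
 * with each chain[i].key caching the little-endian block number that
 * *chain[i].p held when it was read.
 */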
452
453 /**
454  *      ext4_find_near - find a place for allocation with sufficient locality
455  *      @inode: owner
456  *      @ind: descriptor of indirect block.
457  *
458  *      This function returns the preferred place for block allocation.
459  *      It is used when the heuristic for sequential allocation fails.
460  *      Rules are:
461  *        + if there is a block to the left of our position - allocate near it.
462  *        + if pointer will live in indirect block - allocate near that block.
463  *        + if pointer will live in inode - allocate in the same
464  *          cylinder group.
465  *
466  * In the latter case we colour the starting block by the caller's PID to
467  * prevent it from clashing with concurrent allocations for a different inode
468  * in the same block group.   The PID is used here so that functionally related
469  * files will be close-by on-disk.
470  *
471  *      Caller must make sure that @ind is valid and will stay that way.
472  */
473 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
474 {
475         struct ext4_inode_info *ei = EXT4_I(inode);
476         __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
477         __le32 *p;
478         ext4_fsblk_t bg_start;
479         ext4_fsblk_t last_block;
480         ext4_grpblk_t colour;
481         ext4_group_t block_group;
482         int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
483
484         /* Try to find previous block */
485         for (p = ind->p - 1; p >= start; p--) {
486                 if (*p)
487                         return le32_to_cpu(*p);
488         }
489
490         /* No such thing, so let's try location of indirect block */
491         if (ind->bh)
492                 return ind->bh->b_blocknr;
493
494         /*
495          * It is going to be referred to from the inode itself? OK, just put it
496          * into the same cylinder group then.
497          */
498         block_group = ei->i_block_group;
499         if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
500                 block_group &= ~(flex_size-1);
501                 if (S_ISREG(inode->i_mode))
502                         block_group++;
503         }
504         bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
505         last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
506
507         /*
508          * If we are doing delayed allocation, we don't need to take
509          * colour into account.
510          */
511         if (test_opt(inode->i_sb, DELALLOC))
512                 return bg_start;
513
514         if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
515                 colour = (current->pid % 16) *
516                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
517         else
518                 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
519         return bg_start + colour;
520 }
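
/*
 * Example of the colouring step (a sketch assuming 32768 blocks per
 * group, no flex_bg and no delalloc): a task with pid 42 gets
 * colour = (42 % 16) * (32768 / 16) = 20480, so its allocations start
 * 20480 blocks into the group, away from tasks with a different
 * pid % 16.
 */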
521
522 /**
523  *      ext4_find_goal - find a preferred place for allocation.
524  *      @inode: owner
525  *      @block:  block we want
526  *      @partial: pointer to the last triple within a chain
527  *
528  *      Normally this function finds the preferred place for block allocation
529  *      and returns it.
530  *      Because this is only used for non-extent files, we limit the block nr
531  *      to 32 bits.
532  */
533 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
534                                    Indirect *partial)
535 {
536         ext4_fsblk_t goal;
537
538         /*
539          * XXX need to get goal block from mballoc's data structures
540          */
541
542         goal = ext4_find_near(inode, partial);
543         goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
544         return goal;
545 }
546
547 /**
548  *      ext4_blks_to_allocate: Look up the block map and count the number
549  *      of direct blocks that need to be allocated for the given branch.
550  *
551  *      @branch: chain of indirect blocks
552  *      @k: number of blocks needed for indirect blocks
553  *      @blks: number of data blocks to be mapped.
554  *      @blocks_to_boundary:  the offset in the indirect block
555  *
556  *      return the total number of blocks to be allocated, including the
557  *      direct and indirect blocks.
558  */
559 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
560                                  int blocks_to_boundary)
561 {
562         unsigned int count = 0;
563
564         /*
565          * Simple case: if the [t,d]indirect block(s) have not been allocated
566          * yet, then clearly no blocks on that path have been allocated either.
567          */
568         if (k > 0) {
569                 /* right now we don't handle cross boundary allocation */
570                 if (blks < blocks_to_boundary + 1)
571                         count += blks;
572                 else
573                         count += blocks_to_boundary + 1;
574                 return count;
575         }
576
577         count++;
578         while (count < blks && count <= blocks_to_boundary &&
579                 le32_to_cpu(*(branch[0].p + count)) == 0) {
580                 count++;
581         }
582         return count;
583 }
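
/*
 * Worked example (a sketch): with k == 0, blks == 8 and
 * blocks_to_boundary == 5, the loop scans branch[0].p[1..] and stops
 * at the first slot that is already in use or at the boundary, so at
 * most blocks_to_boundary + 1 == 6 blocks are requested in one go.
 */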
584
585 /**
586  *      ext4_alloc_blocks: multiple allocate blocks needed for a branch
587  *      @indirect_blks: the number of blocks that need to be allocated for indirect
588  *                      blocks
589  *
590  *      @new_blocks: on return it will store the new block numbers for
591  *      the indirect blocks(if needed) and the first direct block,
592  *      @blks:  on return it will store the total number of allocated
593  *              direct blocks
594  */
595 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
596                              ext4_lblk_t iblock, ext4_fsblk_t goal,
597                              int indirect_blks, int blks,
598                              ext4_fsblk_t new_blocks[4], int *err)
599 {
600         struct ext4_allocation_request ar;
601         int target, i;
602         unsigned long count = 0, blk_allocated = 0;
603         int index = 0;
604         ext4_fsblk_t current_block = 0;
605         int ret = 0;
606
607         /*
608          * Here we try to allocate the requested multiple blocks at once,
609          * on a best-effort basis.
610          * To build a branch, we need to allocate blocks for
611          * the indirect blocks (if not allocated yet), and at least
612          * the first direct block of this branch.  That's the
613          * minimum number of blocks that must be allocated (required).
614          */
615         /* first we try to allocate the indirect blocks */
616         target = indirect_blks;
617         while (target > 0) {
618                 count = target;
619                 /* allocating blocks for indirect blocks and direct blocks */
620                 current_block = ext4_new_meta_blocks(handle, inode,
621                                                         goal, &count, err);
622                 if (*err)
623                         goto failed_out;
624
625                 if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
626                         EXT4_ERROR_INODE(inode,
627                                          "current_block %llu + count %lu > %d!",
628                                          current_block, count,
629                                          EXT4_MAX_BLOCK_FILE_PHYS);
630                         *err = -EIO;
631                         goto failed_out;
632                 }
633
634                 target -= count;
635                 /* allocate blocks for indirect blocks */
636                 while (index < indirect_blks && count) {
637                         new_blocks[index++] = current_block++;
638                         count--;
639                 }
640                 if (count > 0) {
641                         /*
642                          * save the new block number
643                          * for the first direct block
644                          */
645                         new_blocks[index] = current_block;
646                         printk(KERN_INFO "%s returned more blocks than "
647                                                 "requested\n", __func__);
648                         WARN_ON(1);
649                         break;
650                 }
651         }
652
653         target = blks - count;
654         blk_allocated = count;
655         if (!target)
656                 goto allocated;
657         /* Now allocate data blocks */
658         memset(&ar, 0, sizeof(ar));
659         ar.inode = inode;
660         ar.goal = goal;
661         ar.len = target;
662         ar.logical = iblock;
663         if (S_ISREG(inode->i_mode))
664                 /* enable in-core preallocation only for regular files */
665                 ar.flags = EXT4_MB_HINT_DATA;
666
667         current_block = ext4_mb_new_blocks(handle, &ar, err);
668         if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
669                 EXT4_ERROR_INODE(inode,
670                                  "current_block %llu + ar.len %d > %d!",
671                                  current_block, ar.len,
672                                  EXT4_MAX_BLOCK_FILE_PHYS);
673                 *err = -EIO;
674                 goto failed_out;
675         }
676
677         if (*err && (target == blks)) {
678                 /*
679                  * if the allocation failed and we didn't allocate
680                  * any blocks before
681                  */
682                 goto failed_out;
683         }
684         if (!*err) {
685                 if (target == blks) {
686                         /*
687                          * save the new block number
688                          * for the first direct block
689                          */
690                         new_blocks[index] = current_block;
691                 }
692                 blk_allocated += ar.len;
693         }
694 allocated:
695         /* total number of blocks allocated for direct blocks */
696         ret = blk_allocated;
697         *err = 0;
698         return ret;
699 failed_out:
700         for (i = 0; i < index; i++)
701                 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
702         return ret;
703 }
704
705 /**
706  *      ext4_alloc_branch - allocate and set up a chain of blocks.
707  *      @inode: owner
708  *      @indirect_blks: number of allocated indirect blocks
709  *      @blks: number of allocated direct blocks
710  *      @offsets: offsets (in the blocks) to store the pointers to next.
711  *      @branch: place to store the chain in.
712  *
713  *      This function allocates blocks, zeroes out all but the last one,
714  *      links them into chain and (if we are synchronous) writes them to disk.
715  *      In other words, it prepares a branch that can be spliced onto the
716  *      inode. It stores the information about that chain in the branch[], in
717  *      the same format as ext4_get_branch() would do. We call it after we have
718  *      read the existing part of the chain, with partial pointing to the last
719  *      triple of that (the one with zero ->key). Upon exit we have the same
720  *      picture as after the successful ext4_get_block(), except that in one
721  *      place chain is disconnected - *branch->p is still zero (we did not
722  *      set the last link), but branch->key contains the number that should
723  *      be placed into *branch->p to fill that gap.
724  *
725  *      If allocation fails we free all blocks we've allocated (and forget
726  *      their buffer_heads) and return the error value from the failed
727  *      ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
728  *      as described above and return 0.
729  */
730 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
731                              ext4_lblk_t iblock, int indirect_blks,
732                              int *blks, ext4_fsblk_t goal,
733                              ext4_lblk_t *offsets, Indirect *branch)
734 {
735         int blocksize = inode->i_sb->s_blocksize;
736         int i, n = 0;
737         int err = 0;
738         struct buffer_head *bh;
739         int num;
740         ext4_fsblk_t new_blocks[4];
741         ext4_fsblk_t current_block;
742
743         num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
744                                 *blks, new_blocks, &err);
745         if (err)
746                 return err;
747
748         branch[0].key = cpu_to_le32(new_blocks[0]);
749         /*
750          * metadata blocks and data blocks are allocated.
751          */
752         for (n = 1; n <= indirect_blks;  n++) {
753                 /*
754                  * Get buffer_head for parent block, zero it out
755                  * and set the pointer to new one, then send
756                  * parent to disk.
757                  */
758                 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
759                 branch[n].bh = bh;
760                 lock_buffer(bh);
761                 BUFFER_TRACE(bh, "call get_create_access");
762                 err = ext4_journal_get_create_access(handle, bh);
763                 if (err) {
764                         /* Don't brelse(bh) here; it's done in
765                          * ext4_journal_forget() below */
766                         unlock_buffer(bh);
767                         goto failed;
768                 }
769
770                 memset(bh->b_data, 0, blocksize);
771                 branch[n].p = (__le32 *) bh->b_data + offsets[n];
772                 branch[n].key = cpu_to_le32(new_blocks[n]);
773                 *branch[n].p = branch[n].key;
774                 if (n == indirect_blks) {
775                         current_block = new_blocks[n];
776                         /*
777                          * End of chain, update the last new metablock of
778                          * the chain to point to the new allocated
779                          * data blocks numbers
780                          */
781                         for (i = 1; i < num; i++)
782                                 *(branch[n].p + i) = cpu_to_le32(++current_block);
783                 }
784                 BUFFER_TRACE(bh, "marking uptodate");
785                 set_buffer_uptodate(bh);
786                 unlock_buffer(bh);
787
788                 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
789                 err = ext4_handle_dirty_metadata(handle, inode, bh);
790                 if (err)
791                         goto failed;
792         }
793         *blks = num;
794         return err;
795 failed:
796         /* Allocation failed, free what we already allocated */
797         ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
798         for (i = 1; i <= n ; i++) {
799                 /*
800                  * branch[i].bh is newly allocated, so there is no
801                  * need to revoke the block, which is why we don't
802                  * need to set EXT4_FREE_BLOCKS_METADATA.
803                  */
804                 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
805                                  EXT4_FREE_BLOCKS_FORGET);
806         }
807         for (i = n+1; i < indirect_blks; i++)
808                 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
809
810         ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
811
812         return err;
813 }
814
815 /**
816  * ext4_splice_branch - splice the allocated branch onto inode.
817  * @inode: owner
818  * @block: (logical) number of block we are adding
819  * @chain: chain of indirect blocks (with a missing link - see
820  *      ext4_alloc_branch)
821  * @where: location of missing link
822  * @num:   number of indirect blocks we are adding
823  * @blks:  number of direct blocks we are adding
824  *
825  * This function fills the missing link and does all housekeeping needed in
826  * inode (->i_blocks, etc.). In case of success we end up with the full
827  * chain to new block and return 0.
828  */
829 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
830                               ext4_lblk_t block, Indirect *where, int num,
831                               int blks)
832 {
833         int i;
834         int err = 0;
835         ext4_fsblk_t current_block;
836
837         /*
838          * If we're splicing into a [td]indirect block (as opposed to the
839          * inode) then we need to get write access to the [td]indirect block
840          * before the splice.
841          */
842         if (where->bh) {
843                 BUFFER_TRACE(where->bh, "get_write_access");
844                 err = ext4_journal_get_write_access(handle, where->bh);
845                 if (err)
846                         goto err_out;
847         }
848         /* That's it */
849
850         *where->p = where->key;
851
852         /*
853          * Update the host buffer_head or inode to point to the just-allocated
854          * direct blocks
855          */
856         if (num == 0 && blks > 1) {
857                 current_block = le32_to_cpu(where->key) + 1;
858                 for (i = 1; i < blks; i++)
859                         *(where->p + i) = cpu_to_le32(current_block++);
860         }
861
862         /* We are done with atomic stuff, now do the rest of housekeeping */
863         /* had we spliced it onto indirect block? */
864         if (where->bh) {
865                 /*
866                  * If we spliced it onto an indirect block, we haven't
867                  * altered the inode.  Note however that if it is being spliced
868                  * onto an indirect block at the very end of the file (the
869                  * file is growing) then we *will* alter the inode to reflect
870                  * the new i_size.  But that is not done here - it is done in
871                  * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
872                  */
873                 jbd_debug(5, "splicing indirect only\n");
874                 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
875                 err = ext4_handle_dirty_metadata(handle, inode, where->bh);
876                 if (err)
877                         goto err_out;
878         } else {
879                 /*
880                  * OK, we spliced it into the inode itself on a direct block.
881                  */
882                 ext4_mark_inode_dirty(handle, inode);
883                 jbd_debug(5, "splicing direct\n");
884         }
885         return err;
886
887 err_out:
888         for (i = 1; i <= num; i++) {
889                 /*
890                  * branch[i].bh is newly allocated, so there is no
891                  * need to revoke the block, which is why we don't
892                  * need to set EXT4_FREE_BLOCKS_METADATA.
893                  */
894                 ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
895                                  EXT4_FREE_BLOCKS_FORGET);
896         }
897         ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
898                          blks, 0);
899
900         return err;
901 }
902
903 /*
904  * The ext4_ind_map_blocks() function handles non-extent inodes
905  * (i.e., using the traditional indirect/double-indirect i_blocks
906  * scheme) for ext4_map_blocks().
907  *
908  * Allocation strategy is simple: if we have to allocate something, we will
909  * have to go the whole way to leaf. So let's do it before attaching anything
910  * to tree, set linkage between the newborn blocks, write them if sync is
911  * required, recheck the path, free and repeat if check fails, otherwise
912  * set the last missing link (that will protect us from any truncate-generated
913  * removals - all blocks on the path are immune now) and possibly force the
914  * write on the parent block.
915  * That has a nice additional property: no special recovery from the failed
916  * allocations is needed - we simply release blocks and do not touch anything
917  * reachable from inode.
918  *
919  * `handle' can be NULL if create == 0.
920  *
921  * return > 0, # of blocks mapped or allocated.
922  * return = 0, if plain lookup failed.
923  * return < 0, error case.
924  *
925  * The ext4_ind_map_blocks() function should be called with
926  * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
927  * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
928  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
929  * blocks.
930  */
931 static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
932                                struct ext4_map_blocks *map,
933                                int flags)
934 {
935         int err = -EIO;
936         ext4_lblk_t offsets[4];
937         Indirect chain[4];
938         Indirect *partial;
939         ext4_fsblk_t goal;
940         int indirect_blks;
941         int blocks_to_boundary = 0;
942         int depth;
943         int count = 0;
944         ext4_fsblk_t first_block = 0;
945
946         J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
947         J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
948         depth = ext4_block_to_path(inode, map->m_lblk, offsets,
949                                    &blocks_to_boundary);
950
951         if (depth == 0)
952                 goto out;
953
954         partial = ext4_get_branch(inode, depth, offsets, chain, &err);
955
956         /* Simplest case - block found, no allocation needed */
957         if (!partial) {
958                 first_block = le32_to_cpu(chain[depth - 1].key);
959                 count++;
960                 /* map more blocks */
961                 while (count < map->m_len && count <= blocks_to_boundary) {
962                         ext4_fsblk_t blk;
963
964                         blk = le32_to_cpu(*(chain[depth-1].p + count));
965
966                         if (blk == first_block + count)
967                                 count++;
968                         else
969                                 break;
970                 }
971                 goto got_it;
972         }
973
974         /* Next simple case - plain lookup or failed read of indirect block */
975         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
976                 goto cleanup;
977
978         /*
979          * Okay, we need to do block allocation.
980          */
981         goal = ext4_find_goal(inode, map->m_lblk, partial);
982
983         /* the number of blocks we need to allocate for [d,t]indirect blocks */
984         indirect_blks = (chain + depth) - partial - 1;
985
986         /*
987          * Next look up the indirect map to count the total number of
988          * direct blocks to allocate for this branch.
989          */
990         count = ext4_blks_to_allocate(partial, indirect_blks,
991                                       map->m_len, blocks_to_boundary);
992         /*
993          * Block out ext4_truncate while we alter the tree
994          */
995         err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
996                                 &count, goal,
997                                 offsets + (partial - chain), partial);
998
999         /*
1000          * The ext4_splice_branch call will free and forget any buffers
1001          * on the new chain if there is a failure, but that risks using
1002          * up transaction credits, especially for bitmaps where the
1003          * credits cannot be returned.  Can we handle this somehow?  We
1004          * may need to return -EAGAIN upwards in the worst case.  --sct
1005          */
1006         if (!err)
1007                 err = ext4_splice_branch(handle, inode, map->m_lblk,
1008                                          partial, indirect_blks, count);
1009         if (err)
1010                 goto cleanup;
1011
1012         map->m_flags |= EXT4_MAP_NEW;
1013
1014         ext4_update_inode_fsync_trans(handle, inode, 1);
1015 got_it:
1016         map->m_flags |= EXT4_MAP_MAPPED;
1017         map->m_pblk = le32_to_cpu(chain[depth-1].key);
1018         map->m_len = count;
1019         if (count > blocks_to_boundary)
1020                 map->m_flags |= EXT4_MAP_BOUNDARY;
1021         err = count;
1022         /* Clean up and exit */
1023         partial = chain + depth - 1;    /* the whole chain */
1024 cleanup:
1025         while (partial > chain) {
1026                 BUFFER_TRACE(partial->bh, "call brelse");
1027                 brelse(partial->bh);
1028                 partial--;
1029         }
1030 out:
1031         return err;
1032 }
1033
1034 #ifdef CONFIG_QUOTA
1035 qsize_t *ext4_get_reserved_space(struct inode *inode)
1036 {
1037         return &EXT4_I(inode)->i_reserved_quota;
1038 }
1039 #endif
1040
1041 /*
1042  * Calculate the number of metadata blocks that need to be reserved
1043  * to allocate a new block at @lblock for a non-extent-based file
1044  */
1045 static int ext4_indirect_calc_metadata_amount(struct inode *inode,
1046                                               sector_t lblock)
1047 {
1048         struct ext4_inode_info *ei = EXT4_I(inode);
1049         sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
1050         int blk_bits;
1051
1052         if (lblock < EXT4_NDIR_BLOCKS)
1053                 return 0;
1054
1055         lblock -= EXT4_NDIR_BLOCKS;
1056
1057         if (ei->i_da_metadata_calc_len &&
1058             (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
1059                 ei->i_da_metadata_calc_len++;
1060                 return 0;
1061         }
1062         ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
1063         ei->i_da_metadata_calc_len = 1;
1064         blk_bits = order_base_2(lblock);
1065         return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
1066 }
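
/*
 * Worked example (a sketch assuming a 4KiB block size, so
 * EXT4_ADDR_PER_BLOCK_BITS == 10): for the first block past the direct
 * range the adjusted lblock is 0, giving 0/10 + 1 == 1 (one indirect
 * block); at adjusted lblock 1024 it is 10/10 + 1 == 2 (indirect +
 * double indirect); from adjusted lblock 2^20 onwards it is 3 (up to
 * the triple indirect).  Consecutive blocks covered by the same
 * indirect block hit the i_da_metadata_calc_len fast path above and
 * cost nothing extra.
 */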
1067
1068 /*
1069  * Calculate the number of metadata blocks that need to be reserved
1070  * to allocate a block located at @lblock
1071  */
1072 static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
1073 {
1074         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1075                 return ext4_ext_calc_metadata_amount(inode, lblock);
1076
1077         return ext4_indirect_calc_metadata_amount(inode, lblock);
1078 }
1079
1080 /*
1081  * Called with i_data_sem down, which is important since we can call
1082  * ext4_discard_preallocations() from here.
1083  */
1084 void ext4_da_update_reserve_space(struct inode *inode,
1085                                         int used, int quota_claim)
1086 {
1087         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1088         struct ext4_inode_info *ei = EXT4_I(inode);
1089
1090         spin_lock(&ei->i_block_reservation_lock);
1091         trace_ext4_da_update_reserve_space(inode, used);
1092         if (unlikely(used > ei->i_reserved_data_blocks)) {
1093                 ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
1094                          "with only %d reserved data blocks\n",
1095                          __func__, inode->i_ino, used,
1096                          ei->i_reserved_data_blocks);
1097                 WARN_ON(1);
1098                 used = ei->i_reserved_data_blocks;
1099         }
1100
1101         /* Update per-inode reservations */
1102         ei->i_reserved_data_blocks -= used;
1103         ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
1104         percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1105                            used + ei->i_allocated_meta_blocks);
1106         ei->i_allocated_meta_blocks = 0;
1107
1108         if (ei->i_reserved_data_blocks == 0) {
1109                 /*
1110                  * We can release all of the reserved metadata blocks
1111                  * only when we have written all of the delayed
1112                  * allocation blocks.
1113                  */
1114                 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1115                                    ei->i_reserved_meta_blocks);
1116                 ei->i_reserved_meta_blocks = 0;
1117                 ei->i_da_metadata_calc_len = 0;
1118         }
1119         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1120
1121         /* Update quota subsystem for data blocks */
1122         if (quota_claim)
1123                 dquot_claim_block(inode, used);
1124         else {
1125                 /*
1126                  * We did fallocate with an offset that is already delayed
1127                  * allocated. So on delayed allocated writeback we should
1128                  * not re-claim the quota for fallocated blocks.
1129                  */
1130                 dquot_release_reservation_block(inode, used);
1131         }
1132
1133         /*
1134          * If we have done all the pending block allocations and if
1135          * there aren't any writers on the inode, we can discard the
1136          * inode's preallocations.
1137          */
1138         if ((ei->i_reserved_data_blocks == 0) &&
1139             (atomic_read(&inode->i_writecount) == 0))
1140                 ext4_discard_preallocations(inode);
1141 }
1142
1143 static int __check_block_validity(struct inode *inode, const char *func,
1144                                 unsigned int line,
1145                                 struct ext4_map_blocks *map)
1146 {
1147         if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
1148                                    map->m_len)) {
1149                 ext4_error_inode(inode, func, line, map->m_pblk,
1150                                  "lblock %lu mapped to illegal pblock "
1151                                  "(length %d)", (unsigned long) map->m_lblk,
1152                                  map->m_len);
1153                 return -EIO;
1154         }
1155         return 0;
1156 }
1157
1158 #define check_block_validity(inode, map)        \
1159         __check_block_validity((inode), __func__, __LINE__, (map))
1160
1161 /*
1162  * Return the number of contiguous dirty pages in a given inode
1163  * starting at page frame idx.
1164  */
1165 static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1166                                     unsigned int max_pages)
1167 {
1168         struct address_space *mapping = inode->i_mapping;
1169         pgoff_t index;
1170         struct pagevec pvec;
1171         pgoff_t num = 0;
1172         int i, nr_pages, done = 0;
1173
1174         if (max_pages == 0)
1175                 return 0;
1176         pagevec_init(&pvec, 0);
1177         while (!done) {
1178                 index = idx;
1179                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1180                                               PAGECACHE_TAG_DIRTY,
1181                                               (pgoff_t)PAGEVEC_SIZE);
1182                 if (nr_pages == 0)
1183                         break;
1184                 for (i = 0; i < nr_pages; i++) {
1185                         struct page *page = pvec.pages[i];
1186                         struct buffer_head *bh, *head;
1187
1188                         lock_page(page);
1189                         if (unlikely(page->mapping != mapping) ||
1190                             !PageDirty(page) ||
1191                             PageWriteback(page) ||
1192                             page->index != idx) {
1193                                 done = 1;
1194                                 unlock_page(page);
1195                                 break;
1196                         }
1197                         if (page_has_buffers(page)) {
1198                                 bh = head = page_buffers(page);
1199                                 do {
1200                                         if (!buffer_delay(bh) &&
1201                                             !buffer_unwritten(bh))
1202                                                 done = 1;
1203                                         bh = bh->b_this_page;
1204                                 } while (!done && (bh != head));
1205                         }
1206                         unlock_page(page);
1207                         if (done)
1208                                 break;
1209                         idx++;
1210                         num++;
1211                         if (num >= max_pages) {
1212                                 done = 1;
1213                                 break;
1214                         }
1215                 }
1216                 pagevec_release(&pvec);
1217         }
1218         return num;
1219 }
1220
1221 /*
1222  * The ext4_map_blocks() function tries to look up the requested blocks,
1223  * and returns without allocating if the blocks are already mapped.
1224  *
1225  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
1226  * stores the allocated blocks in the result buffer head, and marks it
1227  * mapped.
1228  *
1229  * If the file is extent-based, it calls ext4_ext_map_blocks();
1230  * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
1231  * files.
1232  *
1233  * On success, it returns the number of blocks mapped or allocated.
1234  * If create == 0 and the blocks are pre-allocated and uninitialized,
1235  * the result buffer head is unmapped. If create == 1, it will make sure
1236  * the buffer head is mapped.
1237  *
1238  * It returns 0 if a plain lookup failed (blocks have not been allocated);
1239  * in that case, the buffer head is unmapped.
1240  *
1241  * It returns the error in case of allocation failure.
1242  */
1243 int ext4_map_blocks(handle_t *handle, struct inode *inode,
1244                     struct ext4_map_blocks *map, int flags)
1245 {
1246         int retval;
1247
1248         map->m_flags = 0;
1249         ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
1250                   "logical block %lu\n", inode->i_ino, flags, map->m_len,
1251                   (unsigned long) map->m_lblk);
1252         /*
1253          * Try to see if we can get the block without requesting a new
1254          * file system block.
1255          */
1256         down_read((&EXT4_I(inode)->i_data_sem));
1257         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1258                 retval = ext4_ext_map_blocks(handle, inode, map, 0);
1259         } else {
1260                 retval = ext4_ind_map_blocks(handle, inode, map, 0);
1261         }
1262         up_read((&EXT4_I(inode)->i_data_sem));
1263
1264         if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1265                 int ret = check_block_validity(inode, map);
1266                 if (ret != 0)
1267                         return ret;
1268         }
1269
1270         /* If it is only a block(s) look up */
1271         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
1272                 return retval;
1273
1274         /*
1275          * Return early if the blocks have already been allocated.
1276          *
1277          * Note that if blocks have been preallocated,
1278          * ext4_ext_map_blocks() returns with create = 0
1279          * and the buffer head unmapped.
1280          */
1281         if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
1282                 return retval;
1283
1284         /*
1285          * When we call get_blocks without the create flag, the
1286          * BH_Unwritten flag could have gotten set if the blocks
1287          * requested were part of an uninitialized extent.  We need to
1288          * clear this flag now that we are committed to convert all or
1289          * part of the uninitialized extent to be an initialized
1290          * extent.  This is because we need to avoid the combination
1291          * of BH_Unwritten and BH_Mapped flags being simultaneously
1292          * set on the buffer_head.
1293          */
1294         map->m_flags &= ~EXT4_MAP_UNWRITTEN;
1295
1296         /*
1297          * Allocating new blocks and/or writing to an uninitialized extent
1298          * will possibly result in updating i_data, so we take
1299          * the write lock of i_data_sem, and call get_blocks()
1300          * with create == 1 flag.
1301          */
1302         down_write((&EXT4_I(inode)->i_data_sem));
1303
1304         /*
1305          * If the caller is from the delayed allocation writeout path,
1306          * we have already reserved fs blocks for the allocation;
1307          * let the underlying get_block() function know, to
1308          * avoid double accounting.
1309          */
1310         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1311                 EXT4_I(inode)->i_delalloc_reserved_flag = 1;
1312         /*
1313          * We need to check for EXT4 here because migrate
1314          * could have changed the inode type in between
1315          */
1316         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1317                 retval = ext4_ext_map_blocks(handle, inode, map, flags);
1318         } else {
1319                 retval = ext4_ind_map_blocks(handle, inode, map, flags);
1320
1321                 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
1322                         /*
1323                          * We allocated new blocks which will result in
1324                          * i_data's format changing.  Force the migrate
1325                          * to fail by clearing migrate flags
1326                          */
1327                         ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
1328                 }
1329
1330                 /*
1331                  * Update reserved blocks/metadata blocks after successful
1332                  * block allocation which had been deferred till now. We don't
1333                  * support fallocate for non extent files. So we can update
1334                  * reserve space here.
1335                  */
1336                 if ((retval > 0) &&
1337                         (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
1338                         ext4_da_update_reserve_space(inode, retval, 1);
1339         }
1340         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1341                 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1342
1343         up_write((&EXT4_I(inode)->i_data_sem));
1344         if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1345                 int ret = check_block_validity(inode, map);
1346                 if (ret != 0)
1347                         return ret;
1348         }
1349         return retval;
1350 }
1351
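/*
 * Editor's sketch (not in the original source): a minimal read-only
 * lookup through ext4_map_blocks(), assuming the conventions used
 * above -- a positive return is the number of contiguous blocks
 * mapped, zero means a hole or a delayed block, negative is an error.
 * The helper name example_lookup_block() is hypothetical.
 */
static int example_lookup_block(struct inode *inode, ext4_lblk_t lblk,
                                ext4_fsblk_t *pblk)
{
        struct ext4_map_blocks map;
        int ret;

        map.m_lblk = lblk;
        map.m_len = 1;
        /* create == 0: no handle is needed, only the read lock is taken */
        ret = ext4_map_blocks(NULL, inode, &map, 0);
        if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
                *pblk = map.m_pblk;
        return ret;
}
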
1352 /* Maximum number of blocks we map for direct IO at once. */
1353 #define DIO_MAX_BLOCKS 4096
1354
1355 static int _ext4_get_block(struct inode *inode, sector_t iblock,
1356                            struct buffer_head *bh, int flags)
1357 {
1358         handle_t *handle = ext4_journal_current_handle();
1359         struct ext4_map_blocks map;
1360         int ret = 0, started = 0;
1361         int dio_credits;
1362
1363         map.m_lblk = iblock;
1364         map.m_len = bh->b_size >> inode->i_blkbits;
1365
1366         if (flags && !handle) {
1367                 /* Direct IO write... */
1368                 if (map.m_len > DIO_MAX_BLOCKS)
1369                         map.m_len = DIO_MAX_BLOCKS;
1370                 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
1371                 handle = ext4_journal_start(inode, dio_credits);
1372                 if (IS_ERR(handle)) {
1373                         ret = PTR_ERR(handle);
1374                         return ret;
1375                 }
1376                 started = 1;
1377         }
1378
1379         ret = ext4_map_blocks(handle, inode, &map, flags);
1380         if (ret > 0) {
1381                 map_bh(bh, inode->i_sb, map.m_pblk);
1382                 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1383                 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
1384                 ret = 0;
1385         }
1386         if (started)
1387                 ext4_journal_stop(handle);
1388         return ret;
1389 }
1390
1391 int ext4_get_block(struct inode *inode, sector_t iblock,
1392                    struct buffer_head *bh, int create)
1393 {
1394         return _ext4_get_block(inode, iblock, bh,
1395                                create ? EXT4_GET_BLOCKS_CREATE : 0);
1396 }
1397
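/*
 * Editor's sketch (not in the original source): probing one block with
 * ext4_get_block(), the get_block_t that the buffer layer calls.  Note
 * that b_size must be preset so the callee knows how many blocks are
 * requested.  example_probe_block() is a hypothetical helper, not an
 * ext4 API.
 */
static int example_probe_block(struct inode *inode, sector_t iblock)
{
        struct buffer_head bh;
        int ret;

        memset(&bh, 0, sizeof(bh));
        bh.b_size = inode->i_sb->s_blocksize;           /* one block */
        ret = ext4_get_block(inode, iblock, &bh, 0);    /* no create */
        if (ret)
                return ret;
        return buffer_mapped(&bh) ? 1 : 0;              /* mapped vs. hole */
}
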
1398 /*
1399  * `handle' can be NULL if create is zero
1400  */
1401 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1402                                 ext4_lblk_t block, int create, int *errp)
1403 {
1404         struct ext4_map_blocks map;
1405         struct buffer_head *bh;
1406         int fatal = 0, err;
1407
1408         J_ASSERT(handle != NULL || create == 0);
1409
1410         map.m_lblk = block;
1411         map.m_len = 1;
1412         err = ext4_map_blocks(handle, inode, &map,
1413                               create ? EXT4_GET_BLOCKS_CREATE : 0);
1414
1415         if (err < 0)
1416                 *errp = err;
1417         if (err <= 0)
1418                 return NULL;
1419         *errp = 0;
1420
1421         bh = sb_getblk(inode->i_sb, map.m_pblk);
1422         if (!bh) {
1423                 *errp = -EIO;
1424                 return NULL;
1425         }
1426         if (map.m_flags & EXT4_MAP_NEW) {
1427                 J_ASSERT(create != 0);
1428                 J_ASSERT(handle != NULL);
1429
1430                 /*
1431                  * Now that we do not always journal data, we should
1432                  * keep in mind whether this should always journal the
1433                  * new buffer as metadata.  For now, regular file
1434                  * writes use ext4_get_block instead, so it's not a
1435                  * problem.
1436                  */
1437                 lock_buffer(bh);
1438                 BUFFER_TRACE(bh, "call get_create_access");
1439                 fatal = ext4_journal_get_create_access(handle, bh);
1440                 if (!fatal && !buffer_uptodate(bh)) {
1441                         memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1442                         set_buffer_uptodate(bh);
1443                 }
1444                 unlock_buffer(bh);
1445                 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1446                 err = ext4_handle_dirty_metadata(handle, inode, bh);
1447                 if (!fatal)
1448                         fatal = err;
1449         } else {
1450                 BUFFER_TRACE(bh, "not a new buffer");
1451         }
1452         if (fatal) {
1453                 *errp = fatal;
1454                 brelse(bh);
1455                 bh = NULL;
1456         }
1457         return bh;
1458 }
1459
1460 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1461                                ext4_lblk_t block, int create, int *err)
1462 {
1463         struct buffer_head *bh;
1464
1465         bh = ext4_getblk(handle, inode, block, create, err);
1466         if (!bh)
1467                 return bh;
1468         if (buffer_uptodate(bh))
1469                 return bh;
1470         ll_rw_block(READ_META, 1, &bh);
1471         wait_on_buffer(bh);
1472         if (buffer_uptodate(bh))
1473                 return bh;
1474         put_bh(bh);
1475         *err = -EIO;
1476         return NULL;
1477 }
1478
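/*
 * Editor's sketch (not in the original source): typical ext4_bread()
 * usage, as in the directory code -- read logical block 0 without
 * allocating, and release the buffer when done.  err must be
 * preinitialized: for a hole, ext4_getblk() returns NULL without
 * touching *errp.
 */
static int example_read_first_block(struct inode *inode)
{
        struct buffer_head *bh;
        int err = 0;

        bh = ext4_bread(NULL, inode, 0, 0, &err);
        if (!bh)
                return err;     /* 0 for a hole, negative on I/O error */
        /* ... examine bh->b_data here ... */
        brelse(bh);
        return 0;
}
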
1479 static int walk_page_buffers(handle_t *handle,
1480                              struct buffer_head *head,
1481                              unsigned from,
1482                              unsigned to,
1483                              int *partial,
1484                              int (*fn)(handle_t *handle,
1485                                        struct buffer_head *bh))
1486 {
1487         struct buffer_head *bh;
1488         unsigned block_start, block_end;
1489         unsigned blocksize = head->b_size;
1490         int err, ret = 0;
1491         struct buffer_head *next;
1492
1493         for (bh = head, block_start = 0;
1494              ret == 0 && (bh != head || !block_start);
1495              block_start = block_end, bh = next) {
1496                 next = bh->b_this_page;
1497                 block_end = block_start + blocksize;
1498                 if (block_end <= from || block_start >= to) {
1499                         if (partial && !buffer_uptodate(bh))
1500                                 *partial = 1;
1501                         continue;
1502                 }
1503                 err = (*fn)(handle, bh);
1504                 if (!ret)
1505                         ret = err;
1506         }
1507         return ret;
1508 }
1509
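/*
 * Editor's sketch (not used by the original source): walk_page_buffers()
 * stops at the first nonzero callback return, so a simple predicate
 * turns it into an "any buffer in range matches" test.  The page must
 * be locked and have buffers; both example_* names are hypothetical.
 */
static int example_bh_is_dirty(handle_t *handle, struct buffer_head *bh)
{
        return buffer_dirty(bh);
}

static int example_range_has_dirty(struct page *page, unsigned from,
                                   unsigned to)
{
        return walk_page_buffers(NULL, page_buffers(page), from, to,
                                 NULL, example_bh_is_dirty);
}
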
1510 /*
1511  * To preserve ordering, it is essential that the hole instantiation and
1512  * the data write be encapsulated in a single transaction.  We cannot
1513  * close off a transaction and start a new one between the ext4_get_block()
1514  * and the commit_write().  So doing the jbd2_journal_start at the start of
1515  * prepare_write() is the right place.
1516  *
1517  * Also, this function can nest inside ext4_writepage() ->
1518  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1519  * has generated enough buffer credits to do the whole page.  So we won't
1520  * block on the journal in that case, which is good, because the caller may
1521  * be PF_MEMALLOC.
1522  *
1523  * By accident, ext4 can be reentered when a transaction is open via
1524  * quota file writes.  If we were to commit the transaction while thus
1525  * reentered, there can be a deadlock - we would be holding a quota
1526  * lock, and the commit would never complete if another thread had a
1527  * transaction open and was blocking on the quota lock - a ranking
1528  * violation.
1529  *
1530  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1531  * will _not_ run commit under these circumstances because handle->h_ref
1532  * is elevated.  We'll still have enough credits for the tiny quotafile
1533  * write.
1534  */
1535 static int do_journal_get_write_access(handle_t *handle,
1536                                        struct buffer_head *bh)
1537 {
1538         int dirty = buffer_dirty(bh);
1539         int ret;
1540
1541         if (!buffer_mapped(bh) || buffer_freed(bh))
1542                 return 0;
1543         /*
1544          * __block_prepare_write() could have dirtied some buffers. Clean
1545          * the dirty bit as jbd2_journal_get_write_access() could complain
1546          * otherwise about fs integrity issues. Setting of the dirty bit
1547          * by __block_prepare_write() isn't a real problem here as we clear
1548          * the bit before releasing a page lock and thus writeback cannot
1549          * ever write the buffer.
1550          */
1551         if (dirty)
1552                 clear_buffer_dirty(bh);
1553         ret = ext4_journal_get_write_access(handle, bh);
1554         if (!ret && dirty)
1555                 ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1556         return ret;
1557 }
1558
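/*
 * Editor's sketch (not in the original source): the canonical jbd2
 * metadata update sequence that do_journal_get_write_access() is a
 * variant of -- declare write intent first, modify the buffer under
 * the handle, then mark it as dirty metadata.
 */
static int example_journal_modify(handle_t *handle, struct inode *inode,
                                  struct buffer_head *bh)
{
        int err;

        err = ext4_journal_get_write_access(handle, bh);
        if (err)
                return err;
        /* ... modify bh->b_data while the handle is open ... */
        return ext4_handle_dirty_metadata(handle, inode, bh);
}
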
1559 /*
1560  * Truncate blocks that were not used by write. We have to truncate the
1561  * pagecache as well so that corresponding buffers get properly unmapped.
1562  */
1563 static void ext4_truncate_failed_write(struct inode *inode)
1564 {
1565         truncate_inode_pages(inode->i_mapping, inode->i_size);
1566         ext4_truncate(inode);
1567 }
1568
1569 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
1570                    struct buffer_head *bh_result, int create);
1571 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1572                             loff_t pos, unsigned len, unsigned flags,
1573                             struct page **pagep, void **fsdata)
1574 {
1575         struct inode *inode = mapping->host;
1576         int ret, needed_blocks;
1577         handle_t *handle;
1578         int retries = 0;
1579         struct page *page;
1580         pgoff_t index;
1581         unsigned from, to;
1582
1583         trace_ext4_write_begin(inode, pos, len, flags);
1584         /*
1585          * Reserve one block more for addition to orphan list in case
1586          * we allocate blocks but write fails for some reason
1587          */
1588         needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1589         index = pos >> PAGE_CACHE_SHIFT;
1590         from = pos & (PAGE_CACHE_SIZE - 1);
1591         to = from + len;
1592
1593 retry:
1594         handle = ext4_journal_start(inode, needed_blocks);
1595         if (IS_ERR(handle)) {
1596                 ret = PTR_ERR(handle);
1597                 goto out;
1598         }
1599
1600         /* We cannot recurse into the filesystem as the transaction is already
1601          * started */
1602         flags |= AOP_FLAG_NOFS;
1603
1604         page = grab_cache_page_write_begin(mapping, index, flags);
1605         if (!page) {
1606                 ext4_journal_stop(handle);
1607                 ret = -ENOMEM;
1608                 goto out;
1609         }
1610         *pagep = page;
1611
1612         if (ext4_should_dioread_nolock(inode))
1613                 ret = __block_write_begin(page, pos, len, ext4_get_block_write);
1614         else
1615                 ret = __block_write_begin(page, pos, len, ext4_get_block);
1616
1617         if (!ret && ext4_should_journal_data(inode)) {
1618                 ret = walk_page_buffers(handle, page_buffers(page),
1619                                 from, to, NULL, do_journal_get_write_access);
1620         }
1621
1622         if (ret) {
1623                 unlock_page(page);
1624                 page_cache_release(page);
1625                 /*
1626                  * __block_write_begin may have instantiated a few blocks
1627                  * outside i_size.  Trim these off again. Don't need
1628                  * i_size_read because we hold i_mutex.
1629                  *
1630                  * Add inode to orphan list in case we crash before
1631                  * truncate finishes
1632                  */
1633                 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1634                         ext4_orphan_add(handle, inode);
1635
1636                 ext4_journal_stop(handle);
1637                 if (pos + len > inode->i_size) {
1638                         ext4_truncate_failed_write(inode);
1639                         /*
1640                          * If truncate failed early the inode might
1641                          * still be on the orphan list; we need to
1642                          * make sure the inode is removed from the
1643                          * orphan list in that case.
1644                          */
1645                         if (inode->i_nlink)
1646                                 ext4_orphan_del(NULL, inode);
1647                 }
1648         }
1649
1650         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1651                 goto retry;
1652 out:
1653         return ret;
1654 }
1655
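/*
 * Editor's sketch (not in the original source): the ENOSPC retry idiom
 * used by ext4_write_begin() above, reduced to its skeleton.  A failed
 * allocation is retried as long as ext4_should_retry_alloc() thinks a
 * journal commit may free blocks; example_do_work is hypothetical.
 */
static int example_with_retry(struct inode *inode, int needed_blocks,
                              int (*example_do_work)(handle_t *,
                                                     struct inode *))
{
        handle_t *handle;
        int ret, retries = 0;
retry:
        handle = ext4_journal_start(inode, needed_blocks);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        ret = example_do_work(handle, inode);
        ext4_journal_stop(handle);
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
        return ret;
}
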
1656 /* For write_end() in data=journal mode */
1657 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1658 {
1659         if (!buffer_mapped(bh) || buffer_freed(bh))
1660                 return 0;
1661         set_buffer_uptodate(bh);
1662         return ext4_handle_dirty_metadata(handle, NULL, bh);
1663 }
1664
1665 static int ext4_generic_write_end(struct file *file,
1666                                   struct address_space *mapping,
1667                                   loff_t pos, unsigned len, unsigned copied,
1668                                   struct page *page, void *fsdata)
1669 {
1670         int i_size_changed = 0;
1671         struct inode *inode = mapping->host;
1672         handle_t *handle = ext4_journal_current_handle();
1673
1674         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1675
1676         /*
1677          * No need to use i_size_read() here, the i_size
1678          * cannot change under us because we hold i_mutex.
1679          *
1680          * But it's important to update i_size while still holding page lock:
1681          * page writeout could otherwise come in and zero beyond i_size.
1682          */
1683         if (pos + copied > inode->i_size) {
1684                 i_size_write(inode, pos + copied);
1685                 i_size_changed = 1;
1686         }
1687
1688         if (pos + copied > EXT4_I(inode)->i_disksize) {
1689                 /* We need to mark the inode dirty even if
1690                  * new_i_size is less than inode->i_size
1691                  * but greater than i_disksize (hint: delalloc).
1692                  */
1693                 ext4_update_i_disksize(inode, (pos + copied));
1694                 i_size_changed = 1;
1695         }
1696         unlock_page(page);
1697         page_cache_release(page);
1698
1699         /*
1700          * Don't mark the inode dirty under page lock. First, it unnecessarily
1701          * makes the holding time of page lock longer. Second, it forces lock
1702          * ordering of page lock and transaction start for journaling
1703          * filesystems.
1704          */
1705         if (i_size_changed)
1706                 ext4_mark_inode_dirty(handle, inode);
1707
1708         return copied;
1709 }
1710
1711 /*
1712  * We need to pick up the new inode size which generic_commit_write gave us.
1713  * `file' can be NULL - eg, when called from page_symlink().
1714  *
1715  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1716  * buffers are managed internally.
1717  */
1718 static int ext4_ordered_write_end(struct file *file,
1719                                   struct address_space *mapping,
1720                                   loff_t pos, unsigned len, unsigned copied,
1721                                   struct page *page, void *fsdata)
1722 {
1723         handle_t *handle = ext4_journal_current_handle();
1724         struct inode *inode = mapping->host;
1725         int ret = 0, ret2;
1726
1727         trace_ext4_ordered_write_end(inode, pos, len, copied);
1728         ret = ext4_jbd2_file_inode(handle, inode);
1729
1730         if (ret == 0) {
1731                 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1732                                                         page, fsdata);
1733                 copied = ret2;
1734                 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1735                         /* If we have allocated more blocks and copied
1736                          * less, we will have blocks allocated outside
1737                          * inode->i_size.  So truncate them.
1738                          */
1739                         ext4_orphan_add(handle, inode);
1740                 if (ret2 < 0)
1741                         ret = ret2;
1742         }
1743         ret2 = ext4_journal_stop(handle);
1744         if (!ret)
1745                 ret = ret2;
1746
1747         if (pos + len > inode->i_size) {
1748                 ext4_truncate_failed_write(inode);
1749                 /*
1750                  * If truncate failed early the inode might still be
1751                  * on the orphan list; we need to make sure the inode
1752                  * is removed from the orphan list in that case.
1753                  */
1754                 if (inode->i_nlink)
1755                         ext4_orphan_del(NULL, inode);
1756         }
1757
1759         return ret ? ret : copied;
1760 }
1761
1762 static int ext4_writeback_write_end(struct file *file,
1763                                     struct address_space *mapping,
1764                                     loff_t pos, unsigned len, unsigned copied,
1765                                     struct page *page, void *fsdata)
1766 {
1767         handle_t *handle = ext4_journal_current_handle();
1768         struct inode *inode = mapping->host;
1769         int ret = 0, ret2;
1770
1771         trace_ext4_writeback_write_end(inode, pos, len, copied);
1772         ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1773                                                         page, fsdata);
1774         copied = ret2;
1775         if (pos + len > inode->i_size && ext4_can_truncate(inode))
1776                 /* If we have allocated more blocks and copied
1777                  * less, we will have blocks allocated outside
1778                  * inode->i_size.  So truncate them.
1779                  */
1780                 ext4_orphan_add(handle, inode);
1781
1782         if (ret2 < 0)
1783                 ret = ret2;
1784
1785         ret2 = ext4_journal_stop(handle);
1786         if (!ret)
1787                 ret = ret2;
1788
1789         if (pos + len > inode->i_size) {
1790                 ext4_truncate_failed_write(inode);
1791                 /*
1792                  * If truncate failed early the inode might still be
1793                  * on the orphan list; we need to make sure the inode
1794                  * is removed from the orphan list in that case.
1795                  */
1796                 if (inode->i_nlink)
1797                         ext4_orphan_del(NULL, inode);
1798         }
1799
1800         return ret ? ret : copied;
1801 }
1802
1803 static int ext4_journalled_write_end(struct file *file,
1804                                      struct address_space *mapping,
1805                                      loff_t pos, unsigned len, unsigned copied,
1806                                      struct page *page, void *fsdata)
1807 {
1808         handle_t *handle = ext4_journal_current_handle();
1809         struct inode *inode = mapping->host;
1810         int ret = 0, ret2;
1811         int partial = 0;
1812         unsigned from, to;
1813         loff_t new_i_size;
1814
1815         trace_ext4_journalled_write_end(inode, pos, len, copied);
1816         from = pos & (PAGE_CACHE_SIZE - 1);
1817         to = from + len;
1818
1819         if (copied < len) {
1820                 if (!PageUptodate(page))
1821                         copied = 0;
1822                 page_zero_new_buffers(page, from+copied, to);
1823         }
1824
1825         ret = walk_page_buffers(handle, page_buffers(page), from,
1826                                 to, &partial, write_end_fn);
1827         if (!partial)
1828                 SetPageUptodate(page);
1829         new_i_size = pos + copied;
1830         if (new_i_size > inode->i_size)
1831                 i_size_write(inode, pos+copied);
1832         ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1833         if (new_i_size > EXT4_I(inode)->i_disksize) {
1834                 ext4_update_i_disksize(inode, new_i_size);
1835                 ret2 = ext4_mark_inode_dirty(handle, inode);
1836                 if (!ret)
1837                         ret = ret2;
1838         }
1839
1840         unlock_page(page);
1841         page_cache_release(page);
1842         if (pos + len > inode->i_size && ext4_can_truncate(inode))
1843                 /* If we have allocated more blocks and copied
1844                  * less, we will have blocks allocated outside
1845                  * inode->i_size.  So truncate them.
1846                  */
1847                 ext4_orphan_add(handle, inode);
1848
1849         ret2 = ext4_journal_stop(handle);
1850         if (!ret)
1851                 ret = ret2;
1852         if (pos + len > inode->i_size) {
1853                 ext4_truncate_failed_write(inode);
1854                 /*
1855                  * If truncate failed early the inode might still be
1856                  * on the orphan list; we need to make sure the inode
1857                  * is removed from the orphan list in that case.
1858                  */
1859                 if (inode->i_nlink)
1860                         ext4_orphan_del(NULL, inode);
1861         }
1862
1863         return ret ? ret : copied;
1864 }
1865
1866 /*
1867  * Reserve a single block located at lblock
1868  */
1869 static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
1870 {
1871         int retries = 0;
1872         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1873         struct ext4_inode_info *ei = EXT4_I(inode);
1874         unsigned long md_needed;
1875         int ret;
1876
1877         /*
1878          * Recalculate the amount of metadata blocks to reserve
1879          * in order to allocate nrblocks; the worst case is one
1880          * extent per block.
1881          */
1882 repeat:
1883         spin_lock(&ei->i_block_reservation_lock);
1884         md_needed = ext4_calc_metadata_amount(inode, lblock);
1885         trace_ext4_da_reserve_space(inode, md_needed);
1886         spin_unlock(&ei->i_block_reservation_lock);
1887
1888         /*
1889          * We will charge metadata quota at writeout time; this saves
1890          * us from metadata over-estimation, though we may go over by
1891          * a small amount in the end.  Here we just reserve for data.
1892          */
1893         ret = dquot_reserve_block(inode, 1);
1894         if (ret)
1895                 return ret;
1896         /*
1897          * We do still charge estimated metadata to the sb though;
1898          * we cannot afford to run out of free blocks.
1899          */
1900         if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
1901                 dquot_release_reservation_block(inode, 1);
1902                 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1903                         yield();
1904                         goto repeat;
1905                 }
1906                 return -ENOSPC;
1907         }
1908         spin_lock(&ei->i_block_reservation_lock);
1909         ei->i_reserved_data_blocks++;
1910         ei->i_reserved_meta_blocks += md_needed;
1911         spin_unlock(&ei->i_block_reservation_lock);
1912
1913         return 0;       /* success */
1914 }
1915
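/*
 * Editor's sketch (not in the original source): each block that goes
 * delayed costs one data-block reservation plus its share of estimated
 * metadata, and the reservation must eventually be consumed by writeout
 * or given back via ext4_da_release_space() below.  A hypothetical
 * caller looks roughly like this:
 */
static int example_reserve_for_delalloc(struct inode *inode, sector_t lblock)
{
        int ret = ext4_da_reserve_space(inode, lblock);

        if (ret)
                return ret;     /* usually -ENOSPC after the retry loop */
        /* ... set BH_Delay on the buffer; if the page is later thrown
         * away unwritten, ext4_da_release_space(inode, 1) returns the
         * reservation ... */
        return 0;
}
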
1916 static void ext4_da_release_space(struct inode *inode, int to_free)
1917 {
1918         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1919         struct ext4_inode_info *ei = EXT4_I(inode);
1920
1921         if (!to_free)
1922                 return;         /* Nothing to release, exit */
1923
1924         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1925
1926         trace_ext4_da_release_space(inode, to_free);
1927         if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1928                 /*
1929                  * If there aren't enough reserved blocks, then the
1930                  * counter is messed up somewhere.  Since this
1931                  * function is called from invalidatepage, it's
1932                  * harmless to return without any action.
1933                  */
1934                 ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
1935                          "ino %lu, to_free %d with only %d reserved "
1936                          "data blocks\n", inode->i_ino, to_free,
1937                          ei->i_reserved_data_blocks);
1938                 WARN_ON(1);
1939                 to_free = ei->i_reserved_data_blocks;
1940         }
1941         ei->i_reserved_data_blocks -= to_free;
1942
1943         if (ei->i_reserved_data_blocks == 0) {
1944                 /*
1945                  * We can release all of the reserved metadata blocks
1946                  * only when we have written all of the delayed
1947                  * allocation blocks.
1948                  */
1949                 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1950                                    ei->i_reserved_meta_blocks);
1951                 ei->i_reserved_meta_blocks = 0;
1952                 ei->i_da_metadata_calc_len = 0;
1953         }
1954
1955         /* update fs dirty data blocks counter */
1956         percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
1957
1958         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1959
1960         dquot_release_reservation_block(inode, to_free);
1961 }
1962
1963 static void ext4_da_page_release_reservation(struct page *page,
1964                                              unsigned long offset)
1965 {
1966         int to_release = 0;
1967         struct buffer_head *head, *bh;
1968         unsigned int curr_off = 0;
1969
1970         head = page_buffers(page);
1971         bh = head;
1972         do {
1973                 unsigned int next_off = curr_off + bh->b_size;
1974
1975                 if ((offset <= curr_off) && (buffer_delay(bh))) {
1976                         to_release++;
1977                         clear_buffer_delay(bh);
1978                 }
1979                 curr_off = next_off;
1980         } while ((bh = bh->b_this_page) != head);
1981         ext4_da_release_space(page->mapping->host, to_release);
1982 }
1983
1984 /*
1985  * Delayed allocation stuff
1986  */
1987
1988 /*
1989  * mpage_da_submit_io - walks through the extent of pages and tries to
1990  * write them out with the writepage() callback
1991  *
1992  * @mpd->inode: inode
1993  * @mpd->first_page: first page of the extent
1994  * @mpd->next_page: page after the last page of the extent
1995  *
1996  * By the time mpage_da_submit_io() is called we expect all blocks
1997  * to be allocated.  This may be wrong if allocation failed.
1998  *
1999  * As pages are already locked by write_cache_pages(), we can't use it here.
2000  */
2001 static int mpage_da_submit_io(struct mpage_da_data *mpd)
2002 {
2003         long pages_skipped;
2004         struct pagevec pvec;
2005         unsigned long index, end;
2006         int ret = 0, err, nr_pages, i;
2007         struct inode *inode = mpd->inode;
2008         struct address_space *mapping = inode->i_mapping;
2009
2010         BUG_ON(mpd->next_page <= mpd->first_page);
2011         /*
2012          * We need to start from the first_page to the next_page - 1
2013          * to make sure we also write the mapped dirty buffer_heads.
2014          * If we look at mpd->b_blocknr we would only be looking
2015          * at the currently mapped buffer_heads.
2016          */
2017         index = mpd->first_page;
2018         end = mpd->next_page - 1;
2019
2020         pagevec_init(&pvec, 0);
2021         while (index <= end) {
2022                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2023                 if (nr_pages == 0)
2024                         break;
2025                 for (i = 0; i < nr_pages; i++) {
2026                         struct page *page = pvec.pages[i];
2027
2028                         index = page->index;
2029                         if (index > end)
2030                                 break;
2031                         index++;
2032
2033                         BUG_ON(!PageLocked(page));
2034                         BUG_ON(PageWriteback(page));
2035
2036                         pages_skipped = mpd->wbc->pages_skipped;
2037                         err = ext4_writepage(page, mpd->wbc);
2038                         if (!err && (pages_skipped == mpd->wbc->pages_skipped))
2039                                 /*
2040                                  * We have successfully written the
2041                                  * page without skipping it.
2042                                  */
2043                                 mpd->pages_written++;
2044                         /*
2045                          * In error case, we have to continue because
2046                          * remaining pages are still locked
2047                          * XXX: unlock and re-dirty them?
2048                          */
2049                         if (ret == 0)
2050                                 ret = err;
2051                 }
2052                 pagevec_release(&pvec);
2053         }
2054         return ret;
2055 }
2056
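/*
 * Editor's sketch (not in the original source): the bare pagevec walk
 * used by mpage_da_submit_io() above -- look pages up in batches of
 * PAGEVEC_SIZE, advance the index past each page found, and stop once
 * past the end of the range.  example_walk_page_range() is hypothetical.
 */
static void example_walk_page_range(struct address_space *mapping,
                                    pgoff_t index, pgoff_t end)
{
        struct pagevec pvec;
        int nr_pages, i;

        pagevec_init(&pvec, 0);
        while (index <= end) {
                nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        index = page->index + 1;
                        /* ... operate on the referenced page ... */
                }
                pagevec_release(&pvec);
        }
}
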
2057 /*
2058  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
2059  *
2060  * The function goes through all the passed space and puts the actual disk
2061  * block numbers into the buffer heads, dropping BH_Delay and BH_Unwritten.
2062  */
2063 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
2064                                  struct ext4_map_blocks *map)
2065 {
2066         struct inode *inode = mpd->inode;
2067         struct address_space *mapping = inode->i_mapping;
2068         int blocks = map->m_len;
2069         sector_t pblock = map->m_pblk, cur_logical;
2070         struct buffer_head *head, *bh;
2071         pgoff_t index, end;
2072         struct pagevec pvec;
2073         int nr_pages, i;
2074
2075         index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2076         end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2077         cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2078
2079         pagevec_init(&pvec, 0);
2080
2081         while (index <= end) {
2082                 /* XXX: optimize tail */
2083                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2084                 if (nr_pages == 0)
2085                         break;
2086                 for (i = 0; i < nr_pages; i++) {
2087                         struct page *page = pvec.pages[i];
2088
2089                         index = page->index;
2090                         if (index > end)
2091                                 break;
2092                         index++;
2093
2094                         BUG_ON(!PageLocked(page));
2095                         BUG_ON(PageWriteback(page));
2096                         BUG_ON(!page_has_buffers(page));
2097
2098                         bh = page_buffers(page);
2099                         head = bh;
2100
2101                         /* skip blocks out of the range */
2102                         do {
2103                                 if (cur_logical >= map->m_lblk)
2104                                         break;
2105                                 cur_logical++;
2106                         } while ((bh = bh->b_this_page) != head);
2107
2108                         do {
2109                                 if (cur_logical > map->m_lblk + (blocks - 1))
2110                                         break;
2111
2112                                 if (buffer_delay(bh) || buffer_unwritten(bh)) {
2113
2114                                         BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
2115
2116                                         if (buffer_delay(bh)) {
2117                                                 clear_buffer_delay(bh);
2118                                                 bh->b_blocknr = pblock;
2119                                         } else {
2120                                                 /*
2121                                                  * An unwritten bh should already
2122                                                  * have its blocknr assigned. Verify that.
2123                                                  */
2124                                                 clear_buffer_unwritten(bh);
2125                                                 BUG_ON(bh->b_blocknr != pblock);
2126                                         }
2127
2128                                 } else if (buffer_mapped(bh))
2129                                         BUG_ON(bh->b_blocknr != pblock);
2130
2131                                 if (map->m_flags & EXT4_MAP_UNINIT)
2132                                         set_buffer_uninit(bh);
2133                                 cur_logical++;
2134                                 pblock++;
2135                         } while ((bh = bh->b_this_page) != head);
2136                 }
2137                 pagevec_release(&pvec);
2138         }
2139 }
2140
2141
2142 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2143                                         sector_t logical, long blk_cnt)
2144 {
2145         int nr_pages, i;
2146         pgoff_t index, end;
2147         struct pagevec pvec;
2148         struct inode *inode = mpd->inode;
2149         struct address_space *mapping = inode->i_mapping;
2150
2151         index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2152         end   = (logical + blk_cnt - 1) >>
2153                                 (PAGE_CACHE_SHIFT - inode->i_blkbits);
        pagevec_init(&pvec, 0);
2154         while (index <= end) {
2155                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2156                 if (nr_pages == 0)
2157                         break;
2158                 for (i = 0; i < nr_pages; i++) {
2159                         struct page *page = pvec.pages[i];
2160                         if (page->index > end)
2161                                 break;
2162                         BUG_ON(!PageLocked(page));
2163                         BUG_ON(PageWriteback(page));
2164                         block_invalidatepage(page, 0);
2165                         ClearPageUptodate(page);
2166                         unlock_page(page);
2167                 }
2168                 index = pvec.pages[nr_pages - 1]->index + 1;
2169                 pagevec_release(&pvec);
2170         }
2171         return;
2172 }
2173
2174 static void ext4_print_free_blocks(struct inode *inode)
2175 {
2176         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2177         printk(KERN_CRIT "Total free blocks count %lld\n",
2178                ext4_count_free_blocks(inode->i_sb));
2179         printk(KERN_CRIT "Free/Dirty block details\n");
2180         printk(KERN_CRIT "free_blocks=%lld\n",
2181                (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2182         printk(KERN_CRIT "dirty_blocks=%lld\n",
2183                (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2184         printk(KERN_CRIT "Block reservation details\n");
2185         printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2186                EXT4_I(inode)->i_reserved_data_blocks);
2187         printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2188                EXT4_I(inode)->i_reserved_meta_blocks);
2189         return;
2190 }
2191
2192 /*
2193  * mpage_da_map_and_submit - go through the given space, map it
2194  *       if necessary, and then submit it for I/O
2195  *
2196  * @mpd - bh describing space
2197  *
2198  * The function skips space we know is already mapped to disk blocks.
2199  *
2200  */
2201 static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
2202 {
2203         int err, blks, get_blocks_flags;
2204         struct ext4_map_blocks map;
2205         sector_t next = mpd->b_blocknr;
2206         unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2207         loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2208         handle_t *handle = NULL;
2209
2210         /*
2211          * If the blocks are mapped already, or we couldn't accumulate
2212          * any blocks, then proceed immediately to the submission stage.
2213          */
2214         if ((mpd->b_size == 0) ||
2215             ((mpd->b_state  & (1 << BH_Mapped)) &&
2216              !(mpd->b_state & (1 << BH_Delay)) &&
2217              !(mpd->b_state & (1 << BH_Unwritten))))
2218                 goto submit_io;
2219
2220         handle = ext4_journal_current_handle();
2221         BUG_ON(!handle);
2222
2223         /*
2224          * Call ext4_map_blocks() to allocate any delayed allocation
2225          * blocks, or to convert an uninitialized extent to be
2226          * initialized (in the case where we have written into
2227          * one or more preallocated blocks).
2228          *
2229          * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2230          * indicate that we are on the delayed allocation path.  This
2231          * affects functions in many different parts of the allocation
2232          * call path.  This flag exists primarily because we don't
2233          * want to change *many* call functions, so ext4_map_blocks()
2234          * will set the magic i_delalloc_reserved_flag once the
2235          * inode's allocation semaphore is taken.
2236          *
2237          * If the blocks in questions were delalloc blocks, set
2238          * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2239          * variables are updated after the blocks have been allocated.
2240          */
2241         map.m_lblk = next;
2242         map.m_len = max_blocks;
2243         get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
2244         if (ext4_should_dioread_nolock(mpd->inode))
2245                 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2246         if (mpd->b_state & (1 << BH_Delay))
2247                 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2248
2249         blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
2250         if (blks < 0) {
2251                 struct super_block *sb = mpd->inode->i_sb;
2252
2253                 err = blks;
2254                 /*
2255                  * If get block returns EAGAIN or ENOSPC and there
2256                  * appear to be free blocks, we will call
2257                  * ext4_writepage() for all of the pages, which will
2258                  * just redirty the pages.
2259                  */
2260                 if (err == -EAGAIN)
2261                         goto submit_io;
2262
2263                 if (err == -ENOSPC &&
2264                     ext4_count_free_blocks(sb)) {
2265                         mpd->retval = err;
2266                         goto submit_io;
2267                 }
2268
2269                 /*
2270                  * get block failure will cause us to loop in
2271                  * writepages, because a_ops->writepage won't be able
2272                  * to make progress. The page will be redirtied by
2273                  * writepage and writepages will again try to write
2274                  * the same.
2275                  */
2276                 if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2277                         ext4_msg(sb, KERN_CRIT,
2278                                  "delayed block allocation failed for inode %lu "
2279                                  "at logical offset %llu with max blocks %zd "
2280                                  "with error %d", mpd->inode->i_ino,
2281                                  (unsigned long long) next,
2282                                  mpd->b_size >> mpd->inode->i_blkbits, err);
2283                         ext4_msg(sb, KERN_CRIT,
2284                                 "This should not happen!! Data will be lost\n");
2285                         if (err == -ENOSPC)
2286                                 ext4_print_free_blocks(mpd->inode);
2287                 }
2288                 /* invalidate all the pages */
2289                 ext4_da_block_invalidatepages(mpd, next,
2290                                 mpd->b_size >> mpd->inode->i_blkbits);
2291                 return;
2292         }
2293         BUG_ON(blks == 0);
2294
2295         if (map.m_flags & EXT4_MAP_NEW) {
2296                 struct block_device *bdev = mpd->inode->i_sb->s_bdev;
2297                 int i;
2298
2299                 for (i = 0; i < map.m_len; i++)
2300                         unmap_underlying_metadata(bdev, map.m_pblk + i);
2301         }
2302
2303         /*
2304          * If the blocks are marked delayed, we need to put in
2305          * the actual block number and drop the delayed bit.
2306          */
2307         if ((mpd->b_state & (1 << BH_Delay)) ||
2308             (mpd->b_state & (1 << BH_Unwritten)))
2309                 mpage_put_bnr_to_bhs(mpd, &map);
2310
2311         if (ext4_should_order_data(mpd->inode)) {
2312                 err = ext4_jbd2_file_inode(handle, mpd->inode);
2313                 if (err)
2314                         /* This only happens if the journal is aborted */
2315                         return;
2316         }
2317
2318         /*
2319          * Update on-disk size along with block allocation.
2320          */
2321         disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2322         if (disksize > i_size_read(mpd->inode))
2323                 disksize = i_size_read(mpd->inode);
2324         if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2325                 ext4_update_i_disksize(mpd->inode, disksize);
2326                 err = ext4_mark_inode_dirty(handle, mpd->inode);
2327                 if (err)
2328                         ext4_error(mpd->inode->i_sb,
2329                                    "Failed to mark inode %lu dirty",
2330                                    mpd->inode->i_ino);
2331         }
2332
2333 submit_io:
2334         mpage_da_submit_io(mpd);
2335         mpd->io_done = 1;
2336 }
2337
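/*
 * Editor's sketch (not in the original source): the i_disksize update
 * rule applied at the end of mpage_da_map_and_submit(), in isolation --
 * the on-disk size may only grow, and never past the in-core i_size.
 */
static loff_t example_new_disksize(struct inode *inode, sector_t next,
                                   int blks)
{
        loff_t disksize = ((loff_t) next + blks) << inode->i_blkbits;

        if (disksize > i_size_read(inode))
                disksize = i_size_read(inode);
        if (disksize <= EXT4_I(inode)->i_disksize)
                disksize = EXT4_I(inode)->i_disksize;   /* never shrink */
        return disksize;
}
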
2338 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2339                 (1 << BH_Delay) | (1 << BH_Unwritten))
2340
2341 /*
2342  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2343  *
2344  * @mpd->lbh - extent of blocks
2345  * @logical - logical number of the block in the file
2346  * @b_size, @b_state - size and state of the block(s) being added
2347  *
2348  * The function is used to collect contiguous blocks in the same state.
2349  */
2350 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2351                                    sector_t logical, size_t b_size,
2352                                    unsigned long b_state)
2353 {
2354         sector_t next;
2355         int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2356
2357         /*
2358          * XXX Don't go larger than mballoc is willing to allocate
2359          * This is a stopgap solution.  We eventually need to fold
2360          * mpage_da_submit_io() into this function and then call
2361          * ext4_map_blocks() multiple times in a loop
2362          */
2363         if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
2364                 goto flush_it;
2365
2366         /* Check if the reserved journal credits might overflow */
2367         if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
2368                 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2369                         /*
2370                          * With non-extent format we are limited by the journal
2371                          * credit available.  Total credit needed to insert
2372                          * nrblocks contiguous blocks is dependent on the
2373                          * nrblocks.  So limit nrblocks.
2374                          */
2375                         goto flush_it;
2376                 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2377                                 EXT4_MAX_TRANS_DATA) {
2378                         /*
2379                          * Adding the new buffer_head would make it cross the
2380                          * allowed limit for which we have journal credit
2381                          * reserved. So limit the new bh->b_size
2382                          */
2383                         b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2384                                                 mpd->inode->i_blkbits;
2385                         /* we will do mpage_da_submit_io in the next loop */
2386                 }
2387         }
2388         /*
2389          * First block in the extent
2390          */
2391         if (mpd->b_size == 0) {
2392                 mpd->b_blocknr = logical;
2393                 mpd->b_size = b_size;
2394                 mpd->b_state = b_state & BH_FLAGS;
2395                 return;
2396         }
2397
2398         next = mpd->b_blocknr + nrblocks;
2399         /*
2400          * Can we merge the block to our big extent?
2401          */
2402         if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2403                 mpd->b_size += b_size;
2404                 return;
2405         }
2406
2407 flush_it:
2408         /*
2409          * We couldn't merge the block into our extent, so we
2410          * need to flush the current extent and start a new one.
2411          */
2412         mpage_da_map_and_submit(mpd);
2413         return;
2414 }
2415
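/*
 * Editor's sketch (not in the original source): the merge condition
 * from mpage_add_bh_to_extent() expressed as a boolean -- a chunk can
 * join the accumulated extent only when nothing is accumulated yet, or
 * when it is logically contiguous and carries the same BH_FLAGS state.
 * example_can_merge() is hypothetical.
 */
static int example_can_merge(struct mpage_da_data *mpd, sector_t logical,
                             unsigned long b_state)
{
        sector_t next = mpd->b_blocknr +
                        (mpd->b_size >> mpd->inode->i_blkbits);

        return mpd->b_size == 0 ||
               (logical == next && (b_state & BH_FLAGS) == mpd->b_state);
}
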
2416 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2417 {
2418         return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2419 }
2420
2421 /*
2422  * __mpage_da_writepage - finds extent of pages and blocks
2423  *
2424  * @page: page to consider
2425  * @wbc: not used, we just follow rules
2426  * @data: context
2427  *
2428  * The function finds extents of pages and scans them for all blocks.
2429  */
2430 static int __mpage_da_writepage(struct page *page,
2431                                 struct writeback_control *wbc, void *data)
2432 {
2433         struct mpage_da_data *mpd = data;
2434         struct inode *inode = mpd->inode;
2435         struct buffer_head *bh, *head;
2436         sector_t logical;
2437
2438         /*
2439          * Can we merge this page to current extent?
2440          */
2441         if (mpd->next_page != page->index) {
2442                 /*
2443                  * Nope, we can't. So, we map non-allocated blocks
2444                  * and start IO on them
2445                  */
2446                 if (mpd->next_page != mpd->first_page) {
2447                         mpage_da_map_and_submit(mpd);
2448                         /*
2449                          * skip rest of the page in the page_vec
2450                          */
2451                         redirty_page_for_writepage(wbc, page);
2452                         unlock_page(page);
2453                         return MPAGE_DA_EXTENT_TAIL;
2454                 }
2455
2456                 /*
2457                  * Start next extent of pages ...
2458                  */
2459                 mpd->first_page = page->index;
2460
2461                 /*
2462                  * ... and blocks
2463                  */
2464                 mpd->b_size = 0;
2465                 mpd->b_state = 0;
2466                 mpd->b_blocknr = 0;
2467         }
2468
2469         mpd->next_page = page->index + 1;
2470         logical = (sector_t) page->index <<
2471                   (PAGE_CACHE_SHIFT - inode->i_blkbits);
2472
2473         if (!page_has_buffers(page)) {
2474                 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2475                                        (1 << BH_Dirty) | (1 << BH_Uptodate));
2476                 if (mpd->io_done)
2477                         return MPAGE_DA_EXTENT_TAIL;
2478         } else {
2479                 /*
2480                  * Page with regular buffer heads, just add all dirty ones
2481                  */
2482                 head = page_buffers(page);
2483                 bh = head;
2484                 do {
2485                         BUG_ON(buffer_locked(bh));
2486                         /*
2487                          * We need to try to allocate
2488                          * unmapped blocks in the same page.
2489                          * Otherwise we won't make progress
2490                          * with the page in ext4_writepage.
2491                          */
2492                         if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2493                                 mpage_add_bh_to_extent(mpd, logical,
2494                                                        bh->b_size,
2495                                                        bh->b_state);
2496                                 if (mpd->io_done)
2497                                         return MPAGE_DA_EXTENT_TAIL;
2498                         } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2499                                 /*
2500                                  * mapped dirty buffer. We need to update
2501                                  * the b_state because we look at
2502                          * b_state in mpage_da_map_and_submit(). We don't
2503                                  * update b_size because if we find an
2504                                  * unmapped buffer_head later we need to
2505                                  * use the b_state flag of that buffer_head.
2506                                  */
2507                                 if (mpd->b_size == 0)
2508                                         mpd->b_state = bh->b_state & BH_FLAGS;
2509                         }
2510                         logical++;
2511                 } while ((bh = bh->b_this_page) != head);
2512         }
2513
2514         return 0;
2515 }
2516
2517 /*
2518  * This is a special get_blocks_t callback which is used by
2519  * ext4_da_write_begin().  It will either return a mapped block or
2520  * reserve space for a single block.
2521  *
2522  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2523  * We also have b_blocknr = -1 and b_bdev initialized properly
2524  *
2525  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2526  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2527  * initialized properly.
2528  */
2529 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2530                                   struct buffer_head *bh, int create)
2531 {
2532         struct ext4_map_blocks map;
2533         int ret = 0;
2534         sector_t invalid_block = ~((sector_t) 0xffff);
2535
2536         if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2537                 invalid_block = ~0;
2538
2539         BUG_ON(create == 0);
2540         BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
2541
2542         map.m_lblk = iblock;
2543         map.m_len = 1;
2544
2545         /*
2546          * First, we need to know whether the block is allocated already;
2547          * preallocated blocks are unmapped but should be treated
2548          * the same as allocated blocks.
2549          */
2550         ret = ext4_map_blocks(NULL, inode, &map, 0);
2551         if (ret < 0)
2552                 return ret;
2553         if (ret == 0) {
2554                 if (buffer_delay(bh))
2555                         return 0; /* Not sure this could or should happen */
2556                 /*
2557                  * XXX: __block_prepare_write() unmaps passed block,
2558                  * is it OK?
2559                  */
2560                 ret = ext4_da_reserve_space(inode, iblock);
2561                 if (ret)
2562                         /* not enough space to reserve */
2563                         return ret;
2564
2565                 map_bh(bh, inode->i_sb, invalid_block);
2566                 set_buffer_new(bh);
2567                 set_buffer_delay(bh);
2568                 return 0;
2569         }
2570
2571         map_bh(bh, inode->i_sb, map.m_pblk);
2572         bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
2573
2574         if (buffer_unwritten(bh)) {
2575                 /* A delayed write to unwritten bh should be marked
2576                  * new and mapped.  Mapped ensures that we don't do
2577                  * get_block multiple times when we write to the same
2578                  * offset and new ensures that we do proper zero out
2579                  * for partial write.
2580                  */
2581                 set_buffer_new(bh);
2582                 set_buffer_mapped(bh);
2583         }
2584         return 0;
2585 }
2586
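/*
 * Editor's sketch (not in the original source): the three buffer states
 * ext4_da_get_block_prep() can leave behind, per the comment above it.
 * example_classify_da_bh() is illustrative only.
 */
static void example_classify_da_bh(struct buffer_head *bh)
{
        if (buffer_delay(bh)) {
                /* only a reservation; b_blocknr holds the invalid sentinel */
        } else if (buffer_unwritten(bh)) {
                /* preallocated on disk, contents not yet initialized */
        } else if (buffer_mapped(bh)) {
                /* allocated and initialized; safe to write in place */
        }
}
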
2587 /*
2588  * This function is used as a standard get_block_t callback function
2589  * when there is no desire to allocate any blocks.  It is used as a
2590  * callback function for block_prepare_write() and block_write_full_page().
2591  * These functions should only try to map a single block at a time.
2592  *
2593  * Since this function doesn't do block allocations even if the caller
2594  * requests it by passing in create=1, it is critically important that
2595  * any caller checks to make sure that the buffer heads returned by
2596  * this function are either all already mapped or marked for
2597  * delayed allocation before calling block_write_full_page().  Otherwise,
2598  * b_blocknr could be left uninitialized, and the page write functions will
2599  * be taken by surprise.
2600  */
2601 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2602                                    struct buffer_head *bh_result, int create)
2603 {
2604         BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2605         return _ext4_get_block(inode, iblock, bh_result, 0);
2606 }
2607
2608 static int bget_one(handle_t *handle, struct buffer_head *bh)
2609 {
2610         get_bh(bh);
2611         return 0;
2612 }
2613
2614 static int bput_one(handle_t *handle, struct buffer_head *bh)
2615 {
2616         put_bh(bh);
2617         return 0;
2618 }
2619
2620 static int __ext4_journalled_writepage(struct page *page,
2621                                        unsigned int len)
2622 {
2623         struct address_space *mapping = page->mapping;
2624         struct inode *inode = mapping->host;
2625         struct buffer_head *page_bufs;
2626         handle_t *handle = NULL;
2627         int ret = 0;
2628         int err;
2629
2630         page_bufs = page_buffers(page);
2631         BUG_ON(!page_bufs);
2632         walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2633         /* As soon as we unlock the page, it can go away, but we have
2634          * references to buffers so we are safe */
2635         unlock_page(page);
2636
2637         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2638         if (IS_ERR(handle)) {
2639                 ret = PTR_ERR(handle);
2640                 goto out;
2641         }
2642
2643         ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2644                                 do_journal_get_write_access);
2645
2646         err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2647                                 write_end_fn);
2648         if (ret == 0)
2649                 ret = err;
2650         err = ext4_journal_stop(handle);
2651         if (!ret)
2652                 ret = err;
2653
2654         walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2655         ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2656 out:
2657         return ret;
2658 }
2659
2660 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
2661 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
2662
2663 /*
2664  * Note that we don't need to start a transaction unless we're journaling data
2665  * because we should have holes filled from ext4_page_mkwrite(). We even don't
2666  * need to file the inode to the transaction's list in ordered mode because if
2667  * we are writing back data added by write(), the inode is already there and if
2668  * we are writing back data modified via mmap(), no one guarantees in which
2669  * transaction the data will hit the disk. In case we are journaling data, we
2670  * cannot start transaction directly because transaction start ranks above page
2671  * lock so we have to do some magic.
2672  *
2673  * This function can get called via...
2674  *   - ext4_da_writepages after taking page lock (have journal handle)
2675  *   - journal_submit_inode_data_buffers (no journal handle)
2676  *   - shrink_page_list via pdflush (no journal handle)
2677  *   - grab_page_cache when doing write_begin (have journal handle)
2678  *
2679  * We don't do any block allocation in this function. If we have a page with
2680  * multiple blocks we need to write those buffer_heads that are mapped. This
2681  * is important for mmap-based writes. So if with a 1K block size we do
2682  * truncate(f, 1024);
2683  * a = mmap(f, 0, 4096);
2684  * a[0] = 'a';
2685  * truncate(f, 4096);
2686  * the page has its first buffer_head mapped via the page_mkwrite callback,
2687  * but the other buffer_heads are unmapped yet dirty (dirtied via
2688  * do_wp_page). So writepage should write just the first block. If we modify
2689  * the mmap area beyond 1024 we will again get a page fault and the
2690  * page_mkwrite callback will do the block allocation and mark the
2691  * buffer_heads mapped.
2692  *
2693  * We redirty the page if any of its buffer_heads is either delayed or
2694  * unwritten.
2695  *
2696  * We can get called recursively, as shown below.
2697  *
2698  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2699  *              ext4_writepage()
2700  *
2701  * But since we don't do any block allocation we should not deadlock.
2702  * The page also has its dirty flag cleared so we don't get a recursive page_lock.
2703  */
2704 static int ext4_writepage(struct page *page,
2705                           struct writeback_control *wbc)
2706 {
2707         int ret = 0, commit_write = 0;
2708         loff_t size;
2709         unsigned int len;
2710         struct buffer_head *page_bufs = NULL;
2711         struct inode *inode = page->mapping->host;
2712
2713         trace_ext4_writepage(inode, page);
2714         size = i_size_read(inode);
2715         if (page->index == size >> PAGE_CACHE_SHIFT)
2716                 len = size & ~PAGE_CACHE_MASK;
2717         else
2718                 len = PAGE_CACHE_SIZE;
2719
2720         /*
2721          * If the page does not have buffers (for whatever reason),
2722          * try to create them using block_prepare_write.  If this
2723          * fails, redirty the page and move on.
2724          */
2725         if (!page_has_buffers(page)) {
2726                 if (block_prepare_write(page, 0, len,
2727                                         noalloc_get_block_write)) {
2728                 redirty_page:
2729                         redirty_page_for_writepage(wbc, page);
2730                         unlock_page(page);
2731                         return 0;
2732                 }
2733                 commit_write = 1;
2734         }
2735         page_bufs = page_buffers(page);
2736         if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2737                               ext4_bh_delay_or_unwritten)) {
2738                 /*
2739                  * We don't want to do block allocation So redirty the
2740                  * page and return We may reach here when we do a
2741                  * journal commit via
2742                  * journal_submit_inode_data_buffers.  If we don't
2743                  * have mapping block we just ignore them. We can also
2744                  * reach here via shrink_page_list
2745                  */
2746                 goto redirty_page;
2747         }
2748         if (commit_write)
2749                 /* now mark the buffer_heads as dirty and uptodate */
2750                 block_commit_write(page, 0, len);
2751
2752         if (PageChecked(page) && ext4_should_journal_data(inode)) {
2753                 /*
2754                  * It's mmapped pagecache.  Add buffers and journal it.  There
2755                  * doesn't seem much point in redirtying the page here.
2756                  */
2757                 ClearPageChecked(page);
2758                 return __ext4_journalled_writepage(page, len);
2759         }
2760
2761         if (buffer_uninit(page_bufs)) {
2762                 ext4_set_bh_endio(page_bufs, inode);
2763                 ret = block_write_full_page_endio(page, noalloc_get_block_write,
2764                                             wbc, ext4_end_io_buffer_write);
2765         } else
2766                 ret = block_write_full_page(page, noalloc_get_block_write,
2767                                             wbc);
2768
2769         return ret;
2770 }
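/*
 * For reference, a user-space sketch of the mmap scenario described in
 * the big comment above ext4_writepage() (illustrative only; assumes a
 * 1K block size and an existing file open at fd):
 *
 *	char *a;
 *
 *	ftruncate(fd, 1024);			// one 1K block
 *	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED, fd, 0);
 *	a[0] = 'a';		// fault; page_mkwrite maps block 0 only
 *	ftruncate(fd, 4096);	// blocks 1-3 now within i_size, but their
 *				// buffer_heads remain unmapped (and dirty)
 *
 * At writepage time only the first buffer_head is mapped, so
 * ext4_writepage() must handle partially-mapped pages without doing
 * any block allocation of its own.
 */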
2771
2772 /*
2773  * This is called via ext4_da_writepages() to
2774  * calculate the total number of credits to reserve to fit
2775  * a single extent allocation into a single transaction;
2776  * ext4_da_writepages() will loop calling this before
2777  * the block allocation.
2778  */
2779
2780 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2781 {
2782         int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2783
2784         /*
2785          * With the non-extent format, the journal credits needed to
2786          * insert nrblocks contiguous blocks depend on the
2787          * number of contiguous blocks, so we limit the
2788          * number of contiguous blocks to a sane value.
2789          */
2790         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2791             (max_blocks > EXT4_MAX_TRANS_DATA))
2792                 max_blocks = EXT4_MAX_TRANS_DATA;
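        /* EXT4_MAX_TRANS_DATA (64 blocks, from ext4_jbd2.h) keeps the
         * data-block credits of a single handle from overflowing the
         * journal. */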
2793
2794         return ext4_chunk_trans_blocks(inode, max_blocks);
2795 }
2796
2797 /*
2798  * write_cache_pages_da - walk the list of dirty pages of the given
2799  * address space and call __mpage_da_writepage() on each of them
2800  * (which adds the pages to the extent being built for writeout).
2801  *
2802  * This is a forked version of write_cache_pages().  Differences:
2803  *      Range cyclic is ignored.
2804  *      no_nrwrite_index_update is always presumed true
2805  */
2806 static int write_cache_pages_da(struct address_space *mapping,
2807                                 struct writeback_control *wbc,
2808                                 struct mpage_da_data *mpd)
2809 {
2810         int ret = 0;
2811         int done = 0;
2812         struct pagevec pvec;
2813         int nr_pages;
2814         pgoff_t index;
2815         pgoff_t end;            /* Inclusive */
2816         long nr_to_write = wbc->nr_to_write;
2817
2818         pagevec_init(&pvec, 0);
2819         index = wbc->range_start >> PAGE_CACHE_SHIFT;
2820         end = wbc->range_end >> PAGE_CACHE_SHIFT;
2821
2822         while (!done && (index <= end)) {
2823                 int i;
2824
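                /*
                 * Grab at most PAGEVEC_SIZE dirty pages per batch; the
                 * min() clamps the request to the number of pages left
                 * in the range.  Pages beyond 'end' can still come back
                 * from the lookup and are filtered out below.
                 */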
2825                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2826                               PAGECACHE_TAG_DIRTY,
2827                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2828                 if (nr_pages == 0)
2829                         break;
2830
2831                 for (i = 0; i < nr_pages; i++) {
2832                         struct page *page = pvec.pages[i];
2833
2834                         /*
2835                          * At this point, the page may be truncated or
2836                          * invalidated (changing page->mapping to NULL), or
2837                          * even swizzled back from swapper_space to tmpfs file
2838                          * mapping. However, page->index will not change
2839                          * because we have a reference on the page.
2840                          */
2841                         if (page->index > end) {
2842                                 done = 1;
2843                                 break;
2844                         }
2845
2846                         lock_page(page);
2847
2848                         /*
2849                          * Page truncated or invalidated. We can freely skip it
2850                          * then, even for data integrity operations: the page
2851                          * has disappeared concurrently, so there could be no
2852                          * real expectation of this data integrity operation
2853                          * even if there is now a new, dirty page at the same
2854                          * pagecache address.
2855                          */
2856                         if (unlikely(page->mapping != mapping)) {
2857 continue_unlock:
2858                                 unlock_page(page);
2859                                 continue;
2860                         }
2861
2862                         if (!PageDirty(page)) {
2863                                 /* someone wrote it for us */
2864                                 goto continue_unlock;
2865                         }
2866
2867                         if (PageWriteback(page)) {
2868                                 if (wbc->sync_mode != WB_SYNC_NONE)
2869                                         wait_on_page_writeback(page);
2870                                 else
2871                                         goto continue_unlock;
2872                         }
2873
2874                         BUG_ON(PageWriteback(page));
2875                         if (!clear_page_dirty_for_io(page))
2876                                 goto continue_unlock;
2877
2878                         ret = __mpage_da_writepage(page, wbc, mpd);
2879                         if (unlikely(ret)) {
2880                                 if (ret == AOP_WRITEPAGE_ACTIVATE) {
2881                                         unlock_page(page);
2882                                         ret = 0;
2883                                 } else {
2884                                         done = 1;
2885                                         break;
2886                                 }
2887                         }
2888
2889                         if (nr_to_write > 0) {
2890                                 nr_to_write--;
2891                                 if (nr_to_write == 0 &&
2892                                     wbc->sync_mode == WB_SYNC_NONE) {
2893                                         /*
2894                                          * We stop writing back only if we are
2895                                          * not doing integrity sync. In case of
2896                                          * integrity sync we have to keep going
2897                                          * because someone may be concurrently
2898                                          * dirtying pages, and we might have
2899                                          * synced a lot of newly appeared dirty
2900                                          * pages, but have not synced all of the
2901                                          * old dirty pages.
2902                                          */
2903                                         done = 1;
2904                                         break;
2905                                 }
2906                         }
2907                 }
2908                 pagevec_release(&pvec);
2909                 cond_resched();
2910         }
2911         return ret;
2912 }
2913
2914
2915 static int ext4_da_writepages(struct address_space *mapping,
2916                               struct writeback_control *wbc)
2917 {
2918         pgoff_t index;
2919         int range_whole = 0;
2920         handle_t *handle = NULL;
2921         struct mpage_da_data mpd;
2922         struct inode *inode = mapping->host;
2923         int pages_written = 0;
2924         long pages_skipped;
2925         unsigned int max_pages;
2926         int range_cyclic, cycled = 1, io_done = 0;
2927         int needed_blocks, ret = 0;
2928         long desired_nr_to_write, nr_to_writebump = 0;
2929         loff_t range_start = wbc->range_start;
2930         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2931
2932         trace_ext4_da_writepages(inode, wbc);
2933
2934         /*
2935          * No pages to write? This is mainly a kludge to avoid starting
2936          * a transaction for special inodes like the journal inode on last
2937          * iput(), because that could violate lock ordering on umount.
2938          */
2939         if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2940                 return 0;
2941
2942         /*
2943          * If the filesystem has aborted, it is read-only, so return
2944          * right away instead of dumping stack traces later on that
2945          * will obscure the real source of the problem.  We test
2946          * EXT4_MF_FS_ABORTED instead of sb->s_flags' MS_RDONLY because
2947          * the latter could be true if the filesystem is mounted
2948          * read-only, and in that case, ext4_da_writepages should
2949          * *never* be called, so if that ever happens, we would want
2950          * the stack trace.
2951          */
2952         if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2953                 return -EROFS;
2954
2955         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2956                 range_whole = 1;
2957
2958         range_cyclic = wbc->range_cyclic;
2959         if (wbc->range_cyclic) {
2960                 index = mapping->writeback_index;
2961                 if (index)
2962                         cycled = 0;
2963                 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2964                 wbc->range_end  = LLONG_MAX;
2965                 wbc->range_cyclic = 0;
2966         } else
2967                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2968
2969         /*
2970          * This works around two forms of stupidity.  The first is in
2971          * the writeback code, which caps the maximum number of pages
2972          * written to be 1024 pages.  This is wrong on multiple
2973          * levels; different architectures have a different page size,
2974          * which changes the maximum amount of data which gets
2975          * written.  Secondly, 4 megabytes is way too small.  XFS
2976          * forces this value to be 16 megabytes by multiplying
2977          * nr_to_write parameter by four, and then relies on its
2978          * allocator to allocate larger extents to make them
2979          * contiguous.  Unfortunately this brings us to the second
2980          * stupidity, which is that ext4's mballoc code only allocates
2981          * at most 2048 blocks.  So we force contiguous writes up to
2982          * the number of dirty pages in the inode, or
2983          * sbi->s_max_writeback_mb_bump, whichever is smaller.
2984          */
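        /*
         * For example, with 4 KiB pages (PAGE_CACHE_SHIFT == 12) and the
         * default s_max_writeback_mb_bump of 128, this gives
         * max_pages = 128 << 8 = 32768 pages, i.e. 128 MiB per pass.
         */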
2985         max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2986         if (!range_cyclic && range_whole) {
2987                 if (wbc->nr_to_write == LONG_MAX)
2988                         desired_nr_to_write = wbc->nr_to_write;
2989                 else
2990                         desired_nr_to_write = wbc->nr_to_write * 8;
2991         } else
2992                 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2993                                                            max_pages);
2994         if (desired_nr_to_write > max_pages)
2995                 desired_nr_to_write = max_pages;
2996
2997         if (wbc->nr_to_write < desired_nr_to_write) {
2998                 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2999                 wbc->nr_to_write = desired_nr_to_write;
3000         }
3001
3002         mpd.wbc = wbc;
3003         mpd.inode = mapping->host;
3004
3005         pages_skipped = wbc->pages_skipped;
3006
3007 retry:
3008         while (!ret && wbc->nr_to_write > 0) {
3009
3010                 /*
3011                  * We insert one extent at a time, so we need
3012                  * the credits for a single extent allocation.
3013                  * Journalled mode is currently not supported
3014                  * by delalloc.
3015                  */
3016                 BUG_ON(ext4_should_journal_data(inode));
3017                 needed_blocks = ext4_da_writepages_trans_blocks(inode);
3018
3019                 /* start a new transaction*/
3020                 handle = ext4_journal_start(inode, needed_blocks);
3021                 if (IS_ERR(handle)) {
3022                         ret = PTR_ERR(handle);
3023                         ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
3024                                "%ld pages, ino %lu; err %d", __func__,
3025                                 wbc->nr_to_write, inode->i_ino, ret);
3026                         goto out_writepages;
3027                 }
3028
3029                 /*
3030                  * Now call write_cache_pages_da() to find the next
3031                  * contiguous region of logical blocks that need
3032                  * blocks to be allocated by ext4.  We don't actually
3033                  * submit the blocks for I/O here, even though
3034                  * write_cache_pages_da() thinks it will; it marks
3035                  * the pages clean for writing before calling
3036                  * __mpage_da_writepage().
3037                  */
3038                 mpd.b_size = 0;
3039                 mpd.b_state = 0;
3040                 mpd.b_blocknr = 0;
3041                 mpd.first_page = 0;
3042                 mpd.next_page = 0;
3043                 mpd.io_done = 0;
3044                 mpd.pages_written = 0;
3045                 mpd.retval = 0;
3046                 ret = write_cache_pages_da(mapping, wbc, &mpd);
3047                 /*
3048                  * If we have a contiguous extent of pages and we
3049                  * haven't done the I/O yet, map the blocks and submit
3050                  * them for I/O.
3051                  */
3052                 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
3053                         mpage_da_map_and_submit(&mpd);
3054                         ret = MPAGE_DA_EXTENT_TAIL;
3055                 }
3056                 trace_ext4_da_write_pages(inode, &mpd);
3057                 wbc->nr_to_write -= mpd.pages_written;
3058
3059                 ext4_journal_stop(handle);
3060
3061                 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
3062                         /* commit the transaction, which would
3063                          * free the blocks released in the transaction,
3064                          * and try again
3065                          */
3066                         jbd2_journal_force_commit_nested(sbi->s_journal);
3067                         wbc->pages_skipped = pages_skipped;
3068                         ret = 0;
3069                 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
3070                         /*
3071                          * got one extent; now try with the
3072                          * rest of the pages
3073                          */
3074                         pages_written += mpd.pages_written;
3075                         wbc->pages_skipped = pages_skipped;
3076                         ret = 0;
3077                         io_done = 1;
3078                 } else if (wbc->nr_to_write)
3079                         /*
3080                          * There is no more writeout needed,
3081                          * or we requested a non-blocking writeout
3082                          * and found the device congested
3083                          */
3084                         break;
3085         }
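        /*
         * A range_cyclic writeback started at writeback_index rather
         * than at offset 0.  If we ran off the end of the file without
         * doing any I/O, wrap around once and sweep the part of the
         * range we skipped, i.e. [0, writeback_index).
         */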
3086         if (!io_done && !cycled) {
3087                 cycled = 1;
3088                 index = 0;
3089                 wbc->range_start = index << PAGE_CACHE_SHIFT;
3090                 wbc->range_end  = mapping->writeback_index - 1;
3091                 goto retry;
3092         }
3093         if (pages_skipped != wbc->pages_skipped)
3094                 ext4_msg(inode->i_sb, KERN_CRIT,
3095                          "This should not happen leaving %s "
3096                          "with nr_to_write = %ld ret = %d",