/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *  David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>
/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;
	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
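
/*
 * Illustrative sketch (editor's example, not part of ext4): mark the unused
 * tail of a hypothetical 16-byte (128-bit) bitmap as in use.  With start_bit
 * 100 and end_bit 128, bits 100..103 are set atomically up to the byte
 * boundary at 104 ((100 + 7) & ~7UL), and the remaining three whole bytes
 * (bits 104..127) are covered by a single memset of 0xff.
 */
static inline void example_mark_bitmap_end(void)
{
	char bitmap[16];

	memset(bitmap, 0, sizeof(bitmap));	/* bits 0..127 initially free */
	ext4_mark_bitmap_end(100, 128, bitmap);	/* bits 100..127 now used */
}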
/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			     bh->b_data);
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);

	return EXT4_INODES_PER_GROUP(sb);
}
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit, and bh is uptodate, the
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

verify:
	ext4_lock_group(sb, block_group);
	if (!buffer_verified(bh) &&
	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8)) {
		ext4_unlock_group(sb, block_group);
		put_bh(bh);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, bitmap_blk);
		return NULL;
	}
	ext4_unlock_group(sb, block_group);
	set_buffer_verified(bh);
	return bh;
}
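
/*
 * Illustrative sketch (editor's example, not part of ext4): a typical
 * caller pattern for the helper above.  The returned buffer head carries a
 * reference that must be dropped with brelse().  A hypothetical "is the
 * relative inode number 'bit' of 'group' in use?" check would look like:
 */
static inline int example_inode_in_use(struct super_block *sb,
				       ext4_group_t group, int bit)
{
	struct buffer_head *bh = ext4_read_inode_bitmap(sb, group);
	int in_use;

	if (!bh)
		return -EIO;
	in_use = ext4_test_bit(bit, bh->b_data);
	brelse(bh);	/* drop the reference taken by the read helper */
	return in_use;
}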
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		ext4_mark_super_dirty(sb);
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
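
/*
 * Illustrative sketch (editor's example, not part of ext4): inode numbers
 * are 1-based, so with a hypothetical 8192 inodes per group, inode 8200
 * maps to block_group = (8200 - 1) / 8192 = 1 and bit = (8200 - 1) % 8192
 * = 7, i.e. the 8th bit of group 1's inode bitmap.
 */
static inline void example_ino_to_group_and_bit(struct super_block *sb,
						unsigned long ino,
						ext4_group_t *grp,
						unsigned long *bit)
{
	/* inverse of: ino = bit + 1 + grp * EXT4_INODES_PER_GROUP(sb) */
	*grp = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	*bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
}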
struct orlov_stats {
	__u32 free_inodes;
	__u32 free_clusters;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}
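
/*
 * Illustrative sketch (editor's example, not part of ext4): with flex_bg
 * enabled (flex_size > 1) the same call reads per-flex-group counters, so
 * a hypothetical "does this group/flex-bg hold any directories?" check is
 * independent of the on-disk layout:
 */
static inline int example_group_has_no_dirs(struct super_block *sb,
					    ext4_group_t g)
{
	struct orlov_stats stats;

	get_orlov_stats(sb, g, ext4_flex_bg_size(EXT4_SB(sb)), &stats);
	return stats.used_dirs == 0;
}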
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inode and free block counts
 * not worse than average, we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * For other directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 *
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups look good, we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}
		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really
		 * small filesystems the above test can fail to find any
		 * block groups.
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
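
/*
 * Illustrative sketch (editor's example, not part of ext4): the quadratic
 * probe above advances by successive powers of two, so from a hypothetical
 * start group g the candidates are g+1, g+3, g+7, g+15, ... (mod ngroups),
 * touching at most about log2(ngroups) groups before falling back to the
 * linear scan.  One probe step looks like:
 */
static inline ext4_group_t example_quadratic_probe(ext4_group_t g,
						   ext4_group_t stride,
						   ext4_group_t ngroups)
{
	/* add the current power-of-two stride, then wrap around */
	g += stride;
	if (g >= ngroups)
		g -= ngroups;
	return g;
}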
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
			     const struct qstr *qstr, __u32 goal, uid_t *owner)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
		brelse(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		if (err)
			goto fail;
	}

	BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err)
		goto fail;

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	}
	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
		ext4_unlock_group(sb, group);
	}

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	ext4_mark_super_dirty(sb);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}
	if (owner) {
		inode->i_mode = mode;
		inode->i_uid = owner[0];
		inode->i_gid = owner[1];
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		goto fail;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		__u32 csum;
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
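
/*
 * Illustrative sketch (editor's example, not part of ext4): callers such
 * as ext4_create() allocate an inode inside a running journal handle and
 * must check the result with IS_ERR().  "handle" and "dir" here are
 * assumed to come from the caller's transaction and parent lookup.
 */
static inline struct inode *
example_alloc_regular_inode(handle_t *handle, struct inode *dir,
			    const struct qstr *qstr)
{
	/* mode S_IFREG | 0644; no goal hint, no explicit owner */
	return ext4_new_inode(handle, dir, S_IFREG | 0644, qstr, 0, NULL);
}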
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;
		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}
/*
 * Zeroes the not-yet-zeroed inode table: just write zeroes through the
 * whole inode table.  Must be called without any spinlock held.  The only
 * place it is called from on an active filesystem is the ext4lazyinit
 * thread, so we do not need any special locks; however, we have to prevent
 * inode allocation from the current group, so we take the alloc_sem lock
 * to block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If the inode bitmap was already initialized there may be some
	 * used inodes, so we need to skip blocks with used inodes in
	 * the inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full.  But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * zeroing out.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}
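
/*
 * Illustrative sketch (editor's example, not part of ext4): with a
 * hypothetical 8192 inodes per group, 256-byte inodes and 4K blocks
 * (32 inodes per block), a group whose descriptor reports 8000 unused
 * inodes has used_blks = DIV_ROUND_UP(8192 - 8000, 32) = 6, so zeroing
 * starts at the 7th inode-table block and covers the remaining 250 of
 * the group's 256 itable blocks.
 */
static inline int example_used_itable_blocks(struct super_block *sb,
					     struct ext4_group_desc *gdp)
{
	/* same computation as above: round used inodes up to whole blocks */
	return DIV_ROUND_UP(EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp),
			    EXT4_SB(sb)->s_inodes_per_block);
}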