 * linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * BSD ufs-inspired inode and directory allocation by
 * Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
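/*
 * A rough worked example of that layout (illustrative numbers, not taken
 * from this file): with 4096-byte blocks, 8192 inodes per group and 256-byte
 * on-disk inodes, each group needs one block bitmap block, one inode bitmap
 * block and 8192 * 256 / 4096 = 512 inode-table blocks; the remaining blocks
 * of the group hold data.
 */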
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
        if (start_bit >= end_bit)

        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
                ext4_set_bit(i, bitmap);
        memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
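/*
 * A worked example of the byte alignment above (assumed values, for
 * illustration only): with start_bit = 100 and end_bit = 32768 (a 4096-byte
 * bitmap block), ((100 + 7) & ~7UL) = 104, so bits 100..103 are set one at a
 * time with ext4_set_bit(), and the remaining tail is filled with a single
 * memset(bitmap + 13, 0xff, (32768 - 104) >> 3), i.e. 4083 bytes.
 */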
/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
                                ext4_group_t block_group,
                                struct ext4_group_desc *gdp)
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If checksum is bad, mark all blocks and inodes in use to prevent
         * allocation, essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                ext4_error(sb, __func__, "Checksum bad for group %lu\n",
                           block_group);
                gdp->bg_free_blocks_count = 0;
                gdp->bg_free_inodes_count = 0;
                gdp->bg_itable_unused = 0;
                memset(bh->b_data, 0xff, sb->s_blocksize);

        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
        mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                        bh->b_data);

        return EXT4_INODES_PER_GROUP(sb);
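/*
 * A sketch of the sizes involved (assumed mkfs parameters, not taken from
 * this file): with 8192 inodes per group and a 4096-byte block, the memset
 * above clears (8192 + 7) / 8 = 1024 bytes (all inodes free), and
 * mark_bitmap_end(8192, 32768, ...) then forces bits 8192..32767 - the
 * unused tail of the bitmap block - to 1 so they can never be handed out.
 */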
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        struct ext4_group_desc *desc;
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        bitmap_blk = ext4_inode_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
                ext4_error(sb, __func__,
                           "Cannot read inode bitmap - "
                           "block_group = %lu, inode_bitmap = %llu",
                           block_group, bitmap_blk);

        if (bitmap_uptodate(bh))

        if (bitmap_uptodate(bh)) {

        spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));

        spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
        if (buffer_uptodate(bh)) {
                /*
                 * if the group is not uninit and bh is uptodate,
                 * the bitmap is also uptodate
                 */
                set_bitmap_uptodate(bh);

        /*
         * submit the buffer_head for read. We can
         * safely mark the bitmap as uptodate now.
         * We do it here so the bitmap uptodate bit
         * gets set with the buffer lock held.
         */
        set_bitmap_uptodate(bh);
        if (bh_submit_read(bh) < 0) {
                ext4_error(sb, __func__,
                           "Cannot read inode bitmap - "
                           "block_group = %lu, inode_bitmap = %llu",
                           block_group, bitmap_blk);
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *bh2;
        ext4_group_t block_group;
        struct ext4_group_desc *gdp;
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi;
        ext4_group_t flex_group;

        if (atomic_read(&inode->i_count) > 1) {
                printk("ext4_free_inode: inode has count=%d\n",
                       atomic_read(&inode->i_count));

        if (inode->i_nlink) {
                printk("ext4_free_inode: inode has nlink=%d\n",
                       inode->i_nlink);

                printk("ext4_free_inode: inode on nonexistent device\n");

        ext4_debug("freeing inode %lu\n", ino);

        /*
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
        ext4_xattr_delete_inode(handle, inode);
        DQUOT_FREE_INODE(inode);

        is_directory = S_ISDIR(inode->i_mode);

        /* Do this BEFORE marking the inode not in use or returning an error */

        es = EXT4_SB(sb)->s_es;
        if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
                ext4_error(sb, "ext4_free_inode",
                           "reserved or nonexistent inode %lu", ino);

        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
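        /*
         * For illustration (numbers assumed, not from this file): with 8192
         * inodes per group, inode 16390 maps to block_group =
         * (16390 - 1) / 8192 = 2 and bit = (16390 - 1) % 8192 = 5 in that
         * group's inode bitmap; ext4_new_inode() below performs the inverse
         * calculation when it assigns i_ino.
         */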
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);

        BUFFER_TRACE(bitmap_bh, "get_write_access");
        fatal = ext4_journal_get_write_access(handle, bitmap_bh);

        /* Ok, now we can actually update the inode bitmaps.. */
        if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                   bit, bitmap_bh->b_data))
                ext4_error(sb, "ext4_free_inode",
                           "bit already cleared for inode %lu", ino);

                gdp = ext4_get_group_desc(sb, block_group, &bh2);

                BUFFER_TRACE(bh2, "get_write_access");
                fatal = ext4_journal_get_write_access(handle, bh2);
                if (fatal) goto error_return;

                        spin_lock(sb_bgl_lock(sbi, block_group));
                        le16_add_cpu(&gdp->bg_free_inodes_count, 1);
                                le16_add_cpu(&gdp->bg_used_dirs_count, -1);
                        gdp->bg_checksum = ext4_group_desc_csum(sbi,
                                                        block_group, gdp);
                        spin_unlock(sb_bgl_lock(sbi, block_group));
                        percpu_counter_inc(&sbi->s_freeinodes_counter);
                                percpu_counter_dec(&sbi->s_dirs_counter);

                        if (sbi->s_log_groups_per_flex) {
                                flex_group = ext4_flex_group(sbi, block_group);
                                spin_lock(sb_bgl_lock(sbi, flex_group));
                                sbi->s_flex_groups[flex_group].free_inodes++;
                                spin_unlock(sb_bgl_lock(sbi, flex_group));

                BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
                err = ext4_journal_dirty_metadata(handle, bh2);
                if (!fatal) fatal = err;

        BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
        err = ext4_journal_dirty_metadata(handle, bitmap_bh);

        ext4_std_error(sb, fatal);
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
                          ext4_group_t *best_group)
        ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
        unsigned int freei, avefreei;
        struct ext4_group_desc *desc, *best_desc = NULL;

        freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
        avefreei = freei / ngroups;

        for (group = 0; group < ngroups; group++) {
                desc = ext4_get_group_desc(sb, group, NULL);
                if (!desc || !desc->bg_free_inodes_count)
                if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
                    (le16_to_cpu(desc->bg_free_blocks_count) >
                     le16_to_cpu(best_desc->bg_free_blocks_count))) {

#define free_block_ratio 10
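/*
 * A rough feel for this threshold (assumed geometry, for illustration only):
 * with 32768 blocks per group and 16 groups per flex group, a flex group
 * spans 524288 blocks, so find_group_flex() only treats it as roomy when
 * free_blocks * 100 / 524288 > free_block_ratio, i.e. when more than about
 * 52428 blocks (10%) are still free.
 */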
static int find_group_flex(struct super_block *sb, struct inode *parent,
                           ext4_group_t *best_group)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *desc;
        struct buffer_head *bh;
        struct flex_groups *flex_group = sbi->s_flex_groups;
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
        ext4_group_t ngroups = sbi->s_groups_count;
        int flex_size = ext4_flex_bg_size(sbi);
        ext4_group_t best_flex = parent_fbg_group;
        int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
        int flexbg_free_blocks;
        int flex_freeb_ratio;
        ext4_group_t n_fbg_groups;

        n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
                sbi->s_log_groups_per_flex;

find_close_to_parent:
        flexbg_free_blocks = flex_group[best_flex].free_blocks;
        flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
        if (flex_group[best_flex].free_inodes &&
            flex_freeb_ratio > free_block_ratio)

        if (best_flex && best_flex == parent_fbg_group) {
                goto find_close_to_parent;

        for (i = 0; i < n_fbg_groups; i++) {
                if (i == parent_fbg_group || i == parent_fbg_group - 1)

                flexbg_free_blocks = flex_group[i].free_blocks;
                flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

                if (flex_freeb_ratio > free_block_ratio &&
                    flex_group[i].free_inodes) {

                if (flex_group[best_flex].free_inodes == 0 ||
                    (flex_group[i].free_blocks >
                     flex_group[best_flex].free_blocks &&
                     flex_group[i].free_inodes))

        if (!flex_group[best_flex].free_inodes ||
            !flex_group[best_flex].free_blocks)

        for (i = best_flex * flex_size; i < ngroups &&
                     i < (best_flex + 1) * flex_size; i++) {
                desc = ext4_get_group_desc(sb, i, &bh);
                if (le16_to_cpu(desc->bg_free_inodes_count)) {
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return the one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it is already carrying too large a debt (max_debt).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256
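/*
 * To make the limits concrete (all numbers assumed, for illustration only):
 * on a filesystem with 64 groups, 8192 inodes and 32768 blocks per group,
 * 1000 directories and an average of 300 blocks per directory,
 * find_group_orlov() ends up with max_dirs = 1000/64 + 8192/16 = 527,
 * min_inodes = avefreei - 8192/4 and min_blocks = avefreeb - 32768/4, while
 * max_debt = 32768 / max(300, BLOCK_COST) = 109, which already satisfies
 * max_debt * INODE_COST <= 8192 and so is not clamped further.
 */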
static int find_group_orlov(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group)
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        ext4_group_t ngroups = sbi->s_groups_count;
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        ext4_fsblk_t freeb, avefreeb;
        ext4_fsblk_t blocks_per_dir;
        int max_debt, max_dirs, min_inodes;
        ext4_grpblk_t min_blocks;
        struct ext4_group_desc *desc;

        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
        avefreei = freei / ngroups;
        freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        do_div(avefreeb, ngroups);
        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

        if ((parent == sb->s_root->d_inode) ||
            (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
                int best_ndir = inodes_per_group;

                get_random_bytes(&grp, sizeof(grp));
                parent_group = (unsigned)grp % ngroups;
                for (i = 0; i < ngroups; i++) {
                        grp = (parent_group + i) % ngroups;
                        desc = ext4_get_group_desc(sb, grp, NULL);
                        if (!desc || !desc->bg_free_inodes_count)
                        if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
                        if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
                        if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
                        best_ndir = le16_to_cpu(desc->bg_used_dirs_count);

        blocks_per_dir = ext4_blocks_count(es) - freeb;
        do_div(blocks_per_dir, ndirs);

        max_dirs = ndirs / ngroups + inodes_per_group / 16;
        min_inodes = avefreei - inodes_per_group / 4;
        min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

        max_debt = EXT4_BLOCKS_PER_GROUP(sb);
        max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
        if (max_debt * INODE_COST > inodes_per_group)
                max_debt = inodes_per_group / INODE_COST;

        for (i = 0; i < ngroups; i++) {
                *group = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (!desc || !desc->bg_free_inodes_count)
                if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
                if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
                if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)

        for (i = 0; i < ngroups; i++) {
                *group = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && desc->bg_free_inodes_count &&
                        le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)

        /*
         * The free-inodes counter is approximate, and for really small
         * filesystems the above test can fail to find any blockgroups
         */
static int find_group_other(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group)
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
        struct ext4_group_desc *desc;

        /*
         * Try to place the inode in its parent directory
         */
        *group = parent_group;
        desc = ext4_get_group_desc(sb, *group, NULL);
        if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
            le16_to_cpu(desc->bg_free_blocks_count))

        /*
         * We're going to place this inode in a different blockgroup from its
         * parent.  We want to cause files in a common directory to all land in
         * the same blockgroup.  But we want files which are in a different
         * directory which shares a blockgroup with our parent to land in a
         * different blockgroup.
         *
         * So add our directory's i_ino into the starting point for the hash.
         */
        *group = (*group + parent->i_ino) % ngroups;
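        /*
         * A small worked example of the hash above (values invented): with
         * parent_group = 5, parent->i_ino = 1234 and ngroups = 100, the
         * search for this inode starts at group (5 + 1234) % 100 = 39 rather
         * than at the parent's own group.
         */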
        /*
         * Use a quadratic hash to find a group with a free inode and some free
         * blocks.
         */
        for (i = 1; i < ngroups; i <<= 1) {
                if (*group >= ngroups)
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
                    le16_to_cpu(desc->bg_free_blocks_count))

        /*
         * That failed: try linear search for a free inode, even if that group
         * has no free blocks.
         */
        *group = parent_group;
        for (i = 0; i < ngroups; i++) {
                if (++*group >= ngroups)
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && le16_to_cpu(desc->bg_free_inodes_count))
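        /*
         * In the quadratic hash above, i doubles on every pass (1, 2, 4, 8,
         * ...); in the full upstream version of this loop the candidate group
         * advances by i each time, so with ngroups = 100 (an assumed value)
         * at most seven probes 1, 3, 7, 15, 31, 63 and 127 groups past the
         * hashed starting point are examined before falling back to the
         * linear search below.
         */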
/*
 * claim the inode from the inode bitmap. If the group
 * is uninit we need to take the group's sb_bgl_lock
 * and clear the uninit flag. The inode bitmap update
 * and group desc uninit flag clear should be done
 * after holding sb_bgl_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode().
 */
static int ext4_claim_inode(struct super_block *sb,
                            struct buffer_head *inode_bitmap_bh,
                            unsigned long ino, ext4_group_t group, int mode)
        int free = 0, retval = 0;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

        spin_lock(sb_bgl_lock(sbi, group));
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */

        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
            ino > EXT4_INODES_PER_GROUP(sb)) {
                spin_unlock(sb_bgl_lock(sbi, group));
                ext4_error(sb, __func__,
                           "reserved inode or inode > inodes count - "
                           "block_group = %lu, inode=%lu", group,
                           ino + group * EXT4_INODES_PER_GROUP(sb));
        /* If we didn't allocate from within the initialized part of the inode
         * table then we need to initialize up to this inode. */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
                        /* When marking the block group with
                         * ~EXT4_BG_INODE_UNINIT we don't want to depend
                         * on the value of bg_itable_unused even though
                         * mke2fs could have initialized the same for us.
                         * Instead we calculate the value below
                         */
                        free = EXT4_INODES_PER_GROUP(sb) -
                                le16_to_cpu(gdp->bg_itable_unused);

                /*
                 * Check the relative inode number against the last used
                 * relative inode number in this group.  If it is greater
                 * we need to update the bg_itable_unused count
                 */
                        gdp->bg_itable_unused =
                                cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
        le16_add_cpu(&gdp->bg_free_inodes_count, -1);
                le16_add_cpu(&gdp->bg_used_dirs_count, 1);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
        spin_unlock(sb_bgl_lock(sbi, group));
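        /*
         * Worked example of the bg_itable_unused update (numbers assumed):
         * with 8192 inodes per group and bg_itable_unused = 8000, only the
         * first 8192 - 8000 = 192 inode-table slots are known to be
         * initialized; if the relative inode number just claimed is 193, it
         * lies beyond that region, so bg_itable_unused is lowered to
         * 8192 - 193 = 7999.
         */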
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
        struct super_block *sb;
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *bh2;
        ext4_group_t group = 0;
        unsigned long ino = 0;
        struct inode *inode;
        struct ext4_group_desc *gdp = NULL;
        struct ext4_super_block *es;
        struct ext4_inode_info *ei;
        struct ext4_sb_info *sbi;
        ext4_group_t flex_group;

        /* Cannot create files in a deleted directory */
        if (!dir || !dir->i_nlink)
                return ERR_PTR(-EPERM);

        inode = new_inode(sb);
                return ERR_PTR(-ENOMEM);

        if (sbi->s_log_groups_per_flex) {
                ret2 = find_group_flex(sb, dir, &group);
                        ret2 = find_group_other(sb, dir, &group);
                        if (ret2 == 0 && printk_ratelimit())
                                printk(KERN_NOTICE "ext4: find_group_flex "
                                       "failed, fallback succeeded dir %lu\n",
                                       dir->i_ino);

                if (test_opt(sb, OLDALLOC))
                        ret2 = find_group_dir(sb, dir, &group);
                        ret2 = find_group_orlov(sb, dir, &group);
                ret2 = find_group_other(sb, dir, &group);
        for (i = 0; i < sbi->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, group, &bh2);

                bitmap_bh = ext4_read_inode_bitmap(sb, group);

repeat_in_this_group:
                ino = ext4_find_next_zero_bit((unsigned long *)
                                bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
                if (ino < EXT4_INODES_PER_GROUP(sb)) {

                        BUFFER_TRACE(bitmap_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, bitmap_bh);

                        BUFFER_TRACE(bh2, "get_write_access");
                        err = ext4_journal_get_write_access(handle, bh2);

                        if (!ext4_claim_inode(sb, bitmap_bh,
                                              ino, group, mode)) {
                                BUFFER_TRACE(bitmap_bh,
                                        "call ext4_journal_dirty_metadata");
                                err = ext4_journal_dirty_metadata(handle,
                                                                  bitmap_bh);
                                /* zero bit is inode number 1 */

                        jbd2_journal_release_buffer(handle, bitmap_bh);
                        jbd2_journal_release_buffer(handle, bh2);

                        if (++ino < EXT4_INODES_PER_GROUP(sb))
                                goto repeat_in_this_group;

                /*
                 * This case is possible in a concurrent environment.  It is
                 * very rare.  We cannot repeat the find_group_xxx() call
                 * because that will simply return the same blockgroup, because
                 * the group descriptor metadata has not yet been updated.
                 * So we just go onto the next blockgroup.
                 */
                if (++group == sbi->s_groups_count)
        /* We may have to initialize the block bitmap if it isn't already */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                struct buffer_head *block_bh = ext4_read_block_bitmap(sb, group);

                BUFFER_TRACE(block_bh, "get block bitmap access");
                err = ext4_journal_get_write_access(handle, block_bh);

                spin_lock(sb_bgl_lock(sbi, group));
                /* recheck and clear flag under lock if we still need to */
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        free = ext4_free_blocks_after_init(sb, group, gdp);
                        gdp->bg_free_blocks_count = cpu_to_le16(free);
                        gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                                gdp);
                spin_unlock(sb_bgl_lock(sbi, group));

                /* Don't need to dirty bitmap block if we didn't change it */
                        BUFFER_TRACE(block_bh, "dirty block bitmap");
                        err = ext4_journal_dirty_metadata(handle, block_bh);

        BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
        err = ext4_journal_dirty_metadata(handle, bh2);

        percpu_counter_dec(&sbi->s_freeinodes_counter);
                percpu_counter_inc(&sbi->s_dirs_counter);

        if (sbi->s_log_groups_per_flex) {
                flex_group = ext4_flex_group(sbi, group);
                spin_lock(sb_bgl_lock(sbi, flex_group));
                sbi->s_flex_groups[flex_group].free_inodes--;
                spin_unlock(sb_bgl_lock(sbi, flex_group));

        inode->i_uid = current->fsuid;
        if (test_opt(sb, GRPID))
                inode->i_gid = dir->i_gid;
        else if (dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                inode->i_gid = current->fsgid;
        inode->i_mode = mode;

        inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
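        /*
         * Since the bitmap bit found above is zero-based ("zero bit is inode
         * number 1"), a claimed bit 5 in group 2 of a filesystem with 8192
         * inodes per group (assumed numbers) becomes
         * i_ino = 6 + 2 * 8192 = 16390 - the exact inverse of the
         * (ino - 1) / EXT4_INODES_PER_GROUP(sb) mapping used by
         * ext4_free_inode() and ext4_orphan_get().
         */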
        /* This is the optimal IO size (for stat), not the fs block size */
        inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
                                                       ext4_current_time(inode);

        memset(ei->i_data, 0, sizeof(ei->i_data));
        ei->i_dir_start_lookup = 0;

        /*
         * Don't inherit extent flag from directory. We set extent flag on
         * newly created directory and file only if -o extent mount option is
         * specified
         */
        ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
                ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
        /* dirsync only applies to directories */
                ei->i_flags &= ~EXT4_DIRSYNC_FL;
        ei->i_block_alloc_info = NULL;
        ei->i_block_group = group;

        ext4_set_inode_flags(inode);
        if (IS_DIRSYNC(inode))
        insert_inode_hash(inode);
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);

        ei->i_state = EXT4_STATE_NEW;

        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

        if (DQUOT_ALLOC_INODE(inode)) {

        err = ext4_init_acl(handle, inode, dir);

        err = ext4_init_security(handle, inode, dir);

        if (test_opt(sb, EXTENTS)) {
                /* set extent flag only for directory, file and normal symlink */
                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
                        EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
                        ext4_ext_tree_init(handle, inode);

        err = ext4_mark_inode_dirty(handle, inode);
                ext4_std_error(sb, err);

        ext4_debug("allocating inode %lu\n", inode->i_ino);

        ext4_std_error(sb, err);

        DQUOT_FREE_INODE(inode);

        inode->i_flags |= S_NOQUOTA;
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
        unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
        ext4_group_t block_group;
        struct buffer_head *bitmap_bh;
        struct inode *inode = NULL;

        /* Error cases - e2fsck has already cleaned up for us */
                ext4_warning(sb, __func__,
                             "bad orphan ino %lu! e2fsck was run?", ino);

        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
                ext4_warning(sb, __func__,
                             "inode bitmap error for orphan %lu", ino);

        /* Having the inode bit set should be a 100% indicator that this
         * is a valid orphan (no e2fsck run on fs).  Orphans also include
         * inodes that were being truncated, so we can't check i_nlink==0.
         */
        if (!ext4_test_bit(bit, bitmap_bh->b_data))

        inode = ext4_iget(sb, ino);

        /*
         * If the orphan has i_nlink > 0 then it should be able to be
         * truncated, otherwise it won't be removed from the orphan list
         * during processing and an infinite loop will result.
         */
        if (inode->i_nlink && !ext4_can_truncate(inode))

        if (NEXT_ORPHAN(inode) > max_ino)

        err = PTR_ERR(inode);

        ext4_warning(sb, __func__,
                     "bad orphan inode %lu! e2fsck was run?", ino);
        printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
               ext4_test_bit(bit, bitmap_bh->b_data));
        printk(KERN_NOTICE "inode=%p\n", inode);
                printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
                       is_bad_inode(inode));
                printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
                       NEXT_ORPHAN(inode));
                printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
                printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
                /* Avoid freeing blocks if we got a bad deleted inode */
                if (inode->i_nlink == 0)
                        inode->i_blocks = 0;

        return ERR_PTR(err);
unsigned long ext4_count_free_inodes(struct super_block *sb)
        unsigned long desc_count;
        struct ext4_group_desc *gdp;
        struct ext4_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
                bitmap_bh = ext4_read_inode_bitmap(sb, i);

                x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
                       i, le16_to_cpu(gdp->bg_free_inodes_count), x);

        printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);

        for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                desc_count += le16_to_cpu(gdp->bg_free_inodes_count);

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
        unsigned long count = 0;

        for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
                count += le16_to_cpu(gdp->bg_used_dirs_count);