ext4: don't call ext4_should_journal_data() on the journal inode
[pandora-kernel.git] / fs/ext4/mballoc.c
index 59a3607..7c03826 100644
@@ -1312,6 +1312,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
        void *buddy2;
        struct super_block *sb = e4b->bd_sb;
 
+       if (WARN_ON(count == 0))
+               return;
        BUG_ON(first + count > (sb->s_blocksize << 3));
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        mb_check_buddy(e4b);
@@ -2027,7 +2029,11 @@ repeat:
                group = ac->ac_g_ex.fe_group;
 
                for (i = 0; i < ngroups; group++, i++) {
-                       if (group == ngroups)
+                       /*
+                        * Artificially restricted ngroups for non-extent
+                        * files makes group > ngroups possible on first loop.
+                        */
+                       if (group >= ngroups)
                                group = 0;
 
                        /* This now checks without needing the buddy page */
@@ -2866,8 +2872,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi,
                                                          ac->ac_b_ex.fe_group);
-               atomic_sub(ac->ac_b_ex.fe_len,
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_sub(ac->ac_b_ex.fe_len,
+                            &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
        err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -3063,7 +3069,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
        }
        BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
                        start > ac->ac_o_ex.fe_logical);
-       BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
+       BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
 
        /* now prepare goal request */
 
@@ -3124,13 +3130,31 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
        struct ext4_prealloc_space *pa = ac->ac_pa;
-       int len;
+       struct ext4_buddy e4b;
+       int err;
 
-       if (pa && pa->pa_type == MB_INODE_PA) {
-               len = ac->ac_b_ex.fe_len;
-               pa->pa_free += len;
+       if (pa == NULL) {
+               if (ac->ac_f_ex.fe_len == 0)
+                       return;
+               err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+               if (err) {
+                       /*
+                        * This should never happen since we pin the
+                        * pages in the ext4_allocation_context so
+                        * ext4_mb_load_buddy() should never fail.
+                        */
+                       WARN(1, "mb_load_buddy failed (%d)", err);
+                       return;
+               }
+               ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+               mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+                              ac->ac_f_ex.fe_len);
+               ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+               ext4_mb_unload_buddy(&e4b);
+               return;
        }
-
+       if (pa->pa_type == MB_INODE_PA)
+               pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
 /*
@@ -3374,6 +3398,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
 {
        struct ext4_prealloc_space *pa;
        pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+
+       BUG_ON(atomic_read(&pa->pa_count));
+       BUG_ON(pa->pa_deleted == 0);
        kmem_cache_free(ext4_pspace_cachep, pa);
 }
 
@@ -3387,11 +3414,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
        ext4_group_t grp;
        ext4_fsblk_t grp_blk;
 
-       if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
-               return;
-
        /* in this short window concurrent discard can set pa_deleted */
        spin_lock(&pa->pa_lock);
+       if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
+               spin_unlock(&pa->pa_lock);
+               return;
+       }
+
        if (pa->pa_deleted == 1) {
                spin_unlock(&pa->pa_lock);
                return;
@@ -4058,7 +4087,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 
        /* set up allocation goals */
        memset(ac, 0, sizeof(struct ext4_allocation_context));
-       ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
+       ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
        ac->ac_status = AC_STATUS_CONTINUE;
        ac->ac_sb = sb;
        ac->ac_inode = ar->inode;
@@ -4596,7 +4625,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
         * blocks at the beginning or the end unless we are explicitly
         * requested to avoid doing so.
         */
-       overflow = block & (sbi->s_cluster_ratio - 1);
+       overflow = EXT4_PBLK_COFF(sbi, block);
        if (overflow) {
                if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
                        overflow = sbi->s_cluster_ratio - overflow;
@@ -4610,7 +4639,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
                        count += overflow;
                }
        }
-       overflow = count & (sbi->s_cluster_ratio - 1);
+       overflow = EXT4_LBLK_COFF(sbi, count);
        if (overflow) {
                if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
                        if (count > overflow)
@@ -4691,13 +4720,12 @@ do_more:
                /*
                 * blocks being freed are metadata. these blocks shouldn't
                 * be used until this transaction is committed
+                *
+                * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+                * to fail.
                 */
-               new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
-               if (!new_entry) {
-                       ext4_mb_unload_buddy(&e4b);
-                       err = -ENOMEM;
-                       goto error_return;
-               }
+               new_entry = kmem_cache_alloc(ext4_free_ext_cachep,
+                               GFP_NOFS|__GFP_NOFAIL);
                new_entry->start_cluster = bit;
                new_entry->group  = block_group;
                new_entry->count = count_clusters;
@@ -4724,8 +4752,8 @@ do_more:
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               atomic_add(count_clusters,
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_add(count_clusters,
+                            &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
        ext4_mb_unload_buddy(&e4b);
@@ -4869,8 +4897,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
+                            &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
        ext4_mb_unload_buddy(&e4b);
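
For reference, the EXT4_LBLK_CMASK, EXT4_PBLK_COFF and EXT4_LBLK_COFF helpers used in the hunks above replace the open-coded masking with sbi->s_cluster_ratio - 1. A sketch of their definitions, roughly as they appear in mainline fs/ext4/ext4.h (exact formatting and casts may differ in this tree):

	/* Round a logical block number down to its cluster boundary */
	#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))

	/* Offset of a physical or logical block within its cluster */
	#define EXT4_PBLK_COFF(s, pblk) ((pblk) & ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
	#define EXT4_LBLK_COFF(s, lblk) ((lblk) & ((ext4_lblk_t) (s)->s_cluster_ratio - 1))

On a file system without bigalloc, s_cluster_ratio is 1, so all three reduce to a no-op mask or a zero offset; the explicit typed casts are there so the complemented mask is not truncated to 32 bits before being applied to a 64-bit physical block number.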