[XFS] don't call xfs_freesb from xfs_unmountfs
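
With this change the superblock buffer is no longer read by xfs_mountfs() and no longer freed (nor are the buftargs closed) by xfs_unmountfs(); those steps move out to the Linux mount/unmount glue, which is outside this file. The fragment below is only a sketch of the calling sequence the new prototypes imply, with made-up helper names; it is not the actual fs/xfs/linux-2.6/xfs_super.c change.

    /* Illustration only: hypothetical callers showing the new split of duties. */
    static int
    example_fill_super(struct xfs_mount *mp)
    {
            int     error;

            error = xfs_readsb(mp, 0);      /* sb is now read before xfs_mountfs() */
            if (error)
                    return error;

            error = xfs_mountfs(mp);        /* no mfsi_flags argument any more */
            if (error)
                    xfs_freesb(mp);         /* caller, not xfs_mountfs, cleans up */
            return error;
    }

    static void
    example_put_super(struct xfs_mount *mp)
    {
            xfs_unmountfs(mp);              /* returns void now */
            xfs_freesb(mp);                 /* freed here rather than in xfs_unmountfs() */
            /* closing the buftargs (old xfs_unmountfs_close) is also the caller's job */
    }
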
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 8ed164e..4d41615 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
 #include "xfs_rw.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
+#include "xfs_utils.h"
 
-STATIC void    xfs_mount_log_sb(xfs_mount_t *, __int64_t);
+STATIC int     xfs_mount_log_sb(xfs_mount_t *, __int64_t);
 STATIC int     xfs_uuid_mount(xfs_mount_t *);
-STATIC void    xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void    xfs_unmountfs_wait(xfs_mount_t *);
 
 
 #ifdef HAVE_PERCPU_SB
-STATIC void    xfs_icsb_destroy_counters(xfs_mount_t *);
 STATIC void    xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
-                                               int, int);
-STATIC void    xfs_icsb_sync_counters(xfs_mount_t *);
+                                               int);
+STATIC void    xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
+                                               int);
 STATIC int     xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
                                                int64_t, int);
-STATIC int     xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
+STATIC void    xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 
 #else
 
-#define xfs_icsb_destroy_counters(mp)                  do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b, c)          do { } while (0)
-#define xfs_icsb_sync_counters(mp)                     do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b)             do { } while (0)
+#define xfs_icsb_balance_counter_locked(mp, a, b)      do { } while (0)
 #define xfs_icsb_modify_counters(mp, a, b, c)          do { } while (0)
 
 #endif
@@ -123,34 +122,12 @@ static const struct {
     { sizeof(xfs_sb_t),                         0 }
 };
 
-/*
- * Return a pointer to an initialized xfs_mount structure.
- */
-xfs_mount_t *
-xfs_mount_init(void)
-{
-       xfs_mount_t *mp;
-
-       mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
-
-       if (xfs_icsb_init_counters(mp)) {
-               mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
-       }
-
-       spin_lock_init(&mp->m_sb_lock);
-       mutex_init(&mp->m_ilock);
-       mutex_init(&mp->m_growlock);
-       atomic_set(&mp->m_active_trans, 0);
-
-       return mp;
-}
-
 /*
  * Free up the resources associated with a mount structure.  Assume that
  * the structure was initially zeroed, so we can tell which fields got
  * initialized.
  */
-void
+STATIC void
 xfs_mount_free(
        xfs_mount_t     *mp)
 {
@@ -159,11 +136,8 @@ xfs_mount_free(
 
                for (agno = 0; agno < mp->m_maxagi; agno++)
                        if (mp->m_perag[agno].pagb_list)
-                               kmem_free(mp->m_perag[agno].pagb_list,
-                                               sizeof(xfs_perag_busy_t) *
-                                                       XFS_PAGB_NUM_SLOTS);
-               kmem_free(mp->m_perag,
-                         sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
+                               kmem_free(mp->m_perag[agno].pagb_list);
+               kmem_free(mp->m_perag);
        }
 
        spinlock_destroy(&mp->m_ail_lock);
@@ -172,15 +146,6 @@ xfs_mount_free(
        mutex_destroy(&mp->m_growlock);
        if (mp->m_quotainfo)
                XFS_QM_DONE(mp);
-
-       if (mp->m_fsname != NULL)
-               kmem_free(mp->m_fsname, mp->m_fsname_len);
-       if (mp->m_rtname != NULL)
-               kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
-       if (mp->m_logname != NULL)
-               kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);
-
-       xfs_icsb_destroy_counters(mp);
 }
 
 /*
@@ -286,6 +251,19 @@ xfs_mount_validate_sb(
                return XFS_ERROR(EFSCORRUPTED);
        }
 
+       /*
+        * Until this is fixed only page-sized or smaller data blocks work.
+        */
+       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
+               xfs_fs_mount_cmn_err(flags,
+                       "file system with blocksize %d bytes",
+                       sbp->sb_blocksize);
+               xfs_fs_mount_cmn_err(flags,
+                       "only pagesize (%ld) or less will currently work.",
+                       PAGE_SIZE);
+               return XFS_ERROR(ENOSYS);
+       }
+
        if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
            xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
                xfs_fs_mount_cmn_err(flags,
@@ -307,19 +285,6 @@ xfs_mount_validate_sb(
                return XFS_ERROR(ENOSYS);
        }
 
-       /*
-        * Until this is fixed only page-sized or smaller data blocks work.
-        */
-       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
-               xfs_fs_mount_cmn_err(flags,
-                       "file system with blocksize %d bytes",
-                       sbp->sb_blocksize);
-               xfs_fs_mount_cmn_err(flags,
-                       "only pagesize (%ld) or less will currently work.",
-                       PAGE_SIZE);
-               return XFS_ERROR(ENOSYS);
-       }
-
        return 0;
 }
 
@@ -732,11 +697,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
  * Update alignment values based on mount options and sb values
  */
 STATIC int
-xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
+xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags)
 {
        xfs_sb_t        *sbp = &(mp->m_sb);
 
-       if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
+       if (mp->m_dalign) {
                /*
                 * If stripe unit and stripe width are not multiples
                 * of the fs blocksize turn off alignment.
@@ -892,7 +857,7 @@ xfs_set_inoalignment(xfs_mount_t *mp)
  * Check that the data (and log if separate) are an ok size.
  */
 STATIC int
-xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
+xfs_check_sizes(xfs_mount_t *mp)
 {
        xfs_buf_t       *bp;
        xfs_daddr_t     d;
@@ -915,8 +880,7 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
                return error;
        }
 
-       if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
-           mp->m_logdev_targp != mp->m_ddev_targp) {
+       if (mp->m_logdev_targp != mp->m_ddev_targp) {
                d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
                if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
                        cmn_err(CE_WARN, "XFS: size check 3 failed");
@@ -951,12 +915,10 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
  */
 int
 xfs_mountfs(
-       xfs_mount_t     *mp,
-       int             mfsi_flags)
+       xfs_mount_t     *mp)
 {
        xfs_sb_t        *sbp = &(mp->m_sb);
        xfs_inode_t     *rip;
-       bhv_vnode_t     *rvp = NULL;
        __uint64_t      resblks;
        __int64_t       update_flags = 0LL;
        uint            quotamount, quotaflags;
@@ -964,11 +926,6 @@ xfs_mountfs(
        int             uuid_mounted = 0;
        int             error = 0;
 
-       if (mp->m_sb_bp == NULL) {
-               error = xfs_readsb(mp, mfsi_flags);
-               if (error)
-                       return error;
-       }
        xfs_mount_common(mp, sbp);
 
        /*
@@ -998,9 +955,19 @@ xfs_mountfs(
                 * Re-check for ATTR2 in case it was found in bad_features2
                 * slot.
                 */
-               if (xfs_sb_version_hasattr2(&mp->m_sb))
+               if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+                  !(mp->m_flags & XFS_MOUNT_NOATTR2))
                        mp->m_flags |= XFS_MOUNT_ATTR2;
+       }
+
+       if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+          (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+               xfs_sb_version_removeattr2(&mp->m_sb);
+               update_flags |= XFS_SB_FEATURES2;
 
+               /* update sb_versionnum for the clearing of the morebits */
+               if (!sbp->sb_features2)
+                       update_flags |= XFS_SB_VERSIONNUM;
        }
 
        /*
@@ -1009,7 +976,7 @@ xfs_mountfs(
         * allocator alignment is within an ag, therefore ag has
         * to be aligned at stripe boundary.
         */
-       error = xfs_update_alignment(mp, mfsi_flags, &update_flags);
+       error = xfs_update_alignment(mp, &update_flags);
        if (error)
                goto error1;
 
@@ -1028,8 +995,7 @@ xfs_mountfs(
         * since a single partition filesystem is identical to a single
         * partition volume/filesystem.
         */
-       if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
-           (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
+       if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
                if (xfs_uuid_mount(mp)) {
                        error = XFS_ERROR(EINVAL);
                        goto error1;
@@ -1057,7 +1023,7 @@ xfs_mountfs(
        /*
         * Check that the data (and log if separate) are an ok size.
         */
-       error = xfs_check_sizes(mp, mfsi_flags);
+       error = xfs_check_sizes(mp);
        if (error)
                goto error1;
 
@@ -1070,13 +1036,6 @@ xfs_mountfs(
                goto error1;
        }
 
-       /*
-        * For client case we are done now
-        */
-       if (mfsi_flags & XFS_MFSI_CLIENT) {
-               return 0;
-       }
-
        /*
         *  Copies the low order bits of the timestamp and the randomly
         *  set "sequence" number out of a UUID.
@@ -1163,7 +1122,6 @@ xfs_mountfs(
        }
 
        ASSERT(rip != NULL);
-       rvp = XFS_ITOV(rip);
 
        if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
                cmn_err(CE_WARN, "XFS: corrupted root inode");
@@ -1195,8 +1153,13 @@ xfs_mountfs(
        /*
         * If fs is not mounted readonly, then update the superblock changes.
         */
-       if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY))
-               xfs_mount_log_sb(mp, update_flags);
+       if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               error = xfs_mount_log_sb(mp, update_flags);
+               if (error) {
+                       cmn_err(CE_WARN, "XFS: failed to write sb changes");
+                       goto error4;
+               }
+       }
 
        /*
         * Initialise the XFS quota management subsystem for this mount
@@ -1210,7 +1173,7 @@ xfs_mountfs(
         * delayed until after the root and real-time bitmap inodes
         * were consistently read in.
         */
-       error = xfs_log_mount_finish(mp, mfsi_flags);
+       error = xfs_log_mount_finish(mp);
        if (error) {
                cmn_err(CE_WARN, "XFS: log mount finish failed");
                goto error4;
@@ -1219,7 +1182,7 @@ xfs_mountfs(
        /*
         * Complete the quota initialisation, post-log-replay component.
         */
-       error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags);
+       error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
        if (error)
                goto error4;
 
@@ -1233,12 +1196,15 @@ xfs_mountfs(
         *
         * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
         * This may drive us straight to ENOSPC on mount, but that implies
-        * we were already there on the last unmount.
+        * we were already there on the last unmount. Warn if this occurs.
         */
        resblks = mp->m_sb.sb_dblocks;
        do_div(resblks, 20);
        resblks = min_t(__uint64_t, resblks, 1024);
-       xfs_reserve_blocks(mp, &resblks, NULL);
+       error = xfs_reserve_blocks(mp, &resblks, NULL);
+       if (error)
+               cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
+                               "Continuing without a reserve pool.");
 
        return 0;
 
@@ -1246,34 +1212,34 @@ xfs_mountfs(
        /*
         * Free up the root inode.
         */
-       VN_RELE(rvp);
+       IRELE(rip);
  error3:
        xfs_log_unmount_dealloc(mp);
  error2:
        for (agno = 0; agno < sbp->sb_agcount; agno++)
                if (mp->m_perag[agno].pagb_list)
-                       kmem_free(mp->m_perag[agno].pagb_list,
-                         sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
-       kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
+                       kmem_free(mp->m_perag[agno].pagb_list);
+       kmem_free(mp->m_perag);
        mp->m_perag = NULL;
        /* FALLTHROUGH */
  error1:
        if (uuid_mounted)
-               xfs_uuid_unmount(mp);
-       xfs_freesb(mp);
+               uuid_table_remove(&mp->m_sb.sb_uuid);
        return error;
 }
 
 /*
- * xfs_unmountfs
- *
  * This flushes out the inodes,dquots and the superblock, unmounts the
  * log and makes sure that incore structures are freed.
  */
-int
-xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
+void
+xfs_unmountfs(
+       struct xfs_mount        *mp)
 {
-       __uint64_t      resblks;
+       __uint64_t              resblks;
+       int                     error;
+
+       IRELE(mp->m_rootip);
 
        /*
         * We can potentially deadlock here if we have an inode cluster
@@ -1317,39 +1283,31 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
         * value does not matter....
         */
        resblks = 0;
-       xfs_reserve_blocks(mp, &resblks, NULL);
+       error = xfs_reserve_blocks(mp, &resblks, NULL);
+       if (error)
+               cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. "
+                               "Freespace may not be correct on next mount.");
 
-       xfs_log_sbcount(mp, 1);
+       error = xfs_log_sbcount(mp, 1);
+       if (error)
+               cmn_err(CE_WARN, "XFS: Unable to update superblock counters. "
+                               "Freespace may not be correct on next mount.");
        xfs_unmountfs_writesb(mp);
        xfs_unmountfs_wait(mp);                 /* wait for async bufs */
        xfs_log_unmount(mp);                    /* Done! No more fs ops. */
 
-       xfs_freesb(mp);
-
        /*
         * All inodes from this mount point should be freed.
         */
        ASSERT(mp->m_inodes == NULL);
 
-       xfs_unmountfs_close(mp, cr);
        if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
-               xfs_uuid_unmount(mp);
+               uuid_table_remove(&mp->m_sb.sb_uuid);
 
-#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
+#if defined(DEBUG)
        xfs_errortag_clearall(mp, 0);
 #endif
        xfs_mount_free(mp);
-       return 0;
-}
-
-void
-xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
-{
-       if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
-               xfs_free_buftarg(mp->m_logdev_targp, 1);
-       if (mp->m_rtdev_targp)
-               xfs_free_buftarg(mp->m_rtdev_targp, 1);
-       xfs_free_buftarg(mp->m_ddev_targp, 0);
 }
 
 STATIC void
@@ -1391,7 +1349,7 @@ xfs_log_sbcount(
        if (!xfs_fs_writable(mp))
                return 0;
 
-       xfs_icsb_sync_counters(mp);
+       xfs_icsb_sync_counters(mp, 0);
 
        /*
         * we don't need to do this if we are updating the superblock
@@ -1411,9 +1369,8 @@ xfs_log_sbcount(
        xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
        if (sync)
                xfs_trans_set_sync(tp);
-       xfs_trans_commit(tp, 0);
-
-       return 0;
+       error = xfs_trans_commit(tp, 0);
+       return error;
 }
 
 STATIC void
@@ -1462,7 +1419,6 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
                XFS_BUF_UNASYNC(sbp);
                ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
                xfsbdstrat(mp, sbp);
-               /* Nevermind errors we might get here. */
                error = xfs_iowait(sbp);
                if (error)
                        xfs_ioerror_alert("xfs_unmountfs_writesb",
@@ -1896,39 +1852,33 @@ xfs_uuid_mount(
        return 0;
 }
 
-/*
- * Remove filesystem from the UUID table.
- */
-STATIC void
-xfs_uuid_unmount(
-       xfs_mount_t     *mp)
-{
-       uuid_table_remove(&mp->m_sb.sb_uuid);
-}
-
 /*
  * Used to log changes to the superblock unit and width fields which could
  * be altered by the mount options, as well as any potential sb_features2
  * fixup. Only the first superblock is updated.
  */
-STATIC void
+STATIC int
 xfs_mount_log_sb(
        xfs_mount_t     *mp,
        __int64_t       fields)
 {
        xfs_trans_t     *tp;
+       int             error;
 
        ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
-                        XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2));
+                        XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
+                        XFS_SB_VERSIONNUM));
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
-       if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
-                               XFS_DEFAULT_LOG_COUNT)) {
+       error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+                               XFS_DEFAULT_LOG_COUNT);
+       if (error) {
                xfs_trans_cancel(tp, 0);
-               return;
+               return error;
        }
        xfs_mod_sb(tp, fields);
-       xfs_trans_commit(tp, 0);
+       error = xfs_trans_commit(tp, 0);
+       return error;
 }
 
 
@@ -2016,9 +1966,9 @@ xfs_icsb_cpu_notify(
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                xfs_icsb_lock(mp);
-               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
                xfs_icsb_unlock(mp);
                break;
        case CPU_DEAD:
@@ -2038,12 +1988,9 @@ xfs_icsb_cpu_notify(
 
                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 
-               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
-                                        XFS_ICSB_SB_LOCKED, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
-                                        XFS_ICSB_SB_LOCKED, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
-                                        XFS_ICSB_SB_LOCKED, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
                spin_unlock(&mp->m_sb_lock);
                xfs_icsb_unlock(mp);
                break;
@@ -2095,13 +2042,13 @@ xfs_icsb_reinit_counters(
         * initial balance kicks us off correctly
         */
        mp->m_icsb_counters = -1;
-       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
        xfs_icsb_unlock(mp);
 }
 
-STATIC void
+void
 xfs_icsb_destroy_counters(
        xfs_mount_t     *mp)
 {
@@ -2189,7 +2136,7 @@ xfs_icsb_counter_disabled(
        return test_bit(field, &mp->m_icsb_counters);
 }
 
-STATIC int
+STATIC void
 xfs_icsb_disable_counter(
        xfs_mount_t     *mp,
        xfs_sb_field_t  field)
@@ -2207,13 +2154,13 @@ xfs_icsb_disable_counter(
         * the m_icsb_mutex.
         */
        if (xfs_icsb_counter_disabled(mp, field))
-               return 0;
+               return;
 
        xfs_icsb_lock_all_counters(mp);
        if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
                /* drain back to superblock */
 
-               xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
+               xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
                switch(field) {
                case XFS_SBS_ICOUNT:
                        mp->m_sb.sb_icount = cnt.icsb_icount;
@@ -2230,8 +2177,6 @@ xfs_icsb_disable_counter(
        }
 
        xfs_icsb_unlock_all_counters(mp);
-
-       return 0;
 }
 
 STATIC void
@@ -2270,38 +2215,33 @@ xfs_icsb_enable_counter(
 }
 
 void
-xfs_icsb_sync_counters_flags(
+xfs_icsb_sync_counters_locked(
        xfs_mount_t     *mp,
        int             flags)
 {
        xfs_icsb_cnts_t cnt;
 
-       /* Pass 1: lock all counters */
-       if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-               spin_lock(&mp->m_sb_lock);
-
        xfs_icsb_count(mp, &cnt, flags);
 
-       /* Step 3: update mp->m_sb fields */
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
                mp->m_sb.sb_icount = cnt.icsb_icount;
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
                mp->m_sb.sb_ifree = cnt.icsb_ifree;
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
                mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-
-       if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-               spin_unlock(&mp->m_sb_lock);
 }
 
 /*
  * Accurate update of per-cpu counters to incore superblock
  */
-STATIC void
+void
 xfs_icsb_sync_counters(
-       xfs_mount_t     *mp)
+       xfs_mount_t     *mp,
+       int             flags)
 {
-       xfs_icsb_sync_counters_flags(mp, 0);
+       spin_lock(&mp->m_sb_lock);
+       xfs_icsb_sync_counters_locked(mp, flags);
+       spin_unlock(&mp->m_sb_lock);
 }
 
 /*
@@ -2324,19 +2264,15 @@ xfs_icsb_sync_counters(
 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
                (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
 STATIC void
-xfs_icsb_balance_counter(
+xfs_icsb_balance_counter_locked(
        xfs_mount_t     *mp,
        xfs_sb_field_t  field,
-       int             flags,
        int             min_per_cpu)
 {
        uint64_t        count, resid;
        int             weight = num_online_cpus();
        uint64_t        min = (uint64_t)min_per_cpu;
 
-       if (!(flags & XFS_ICSB_SB_LOCKED))
-               spin_lock(&mp->m_sb_lock);
-
        /* disable counter and sync counter */
        xfs_icsb_disable_counter(mp, field);
 
@@ -2346,19 +2282,19 @@ xfs_icsb_balance_counter(
                count = mp->m_sb.sb_icount;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-                       goto out;
+                       return;
                break;
        case XFS_SBS_IFREE:
                count = mp->m_sb.sb_ifree;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-                       goto out;
+                       return;
                break;
        case XFS_SBS_FDBLOCKS:
                count = mp->m_sb.sb_fdblocks;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
-                       goto out;
+                       return;
                break;
        default:
                BUG();
@@ -2367,9 +2303,17 @@ xfs_icsb_balance_counter(
        }
 
        xfs_icsb_enable_counter(mp, field, count, resid);
-out:
-       if (!(flags & XFS_ICSB_SB_LOCKED))
-               spin_unlock(&mp->m_sb_lock);
+}
+
+STATIC void
+xfs_icsb_balance_counter(
+       xfs_mount_t     *mp,
+       xfs_sb_field_t  fields,
+       int             min_per_cpu)
+{
+       spin_lock(&mp->m_sb_lock);
+       xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
+       spin_unlock(&mp->m_sb_lock);
 }
 
 STATIC int
@@ -2476,7 +2420,7 @@ slow_path:
         * we are done.
         */
        if (ret != ENOSPC)
-               xfs_icsb_balance_counter(mp, field, 0, 0);
+               xfs_icsb_balance_counter(mp, field, 0);
        xfs_icsb_unlock(mp);
        return ret;
 
@@ -2500,7 +2444,7 @@ balance_counter:
         * will either succeed through the fast path or slow path without
         * another balance operation being required.
         */
-       xfs_icsb_balance_counter(mp, field, 0, delta);
+       xfs_icsb_balance_counter(mp, field, delta);
        xfs_icsb_unlock(mp);
        goto again;
 }
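
The final hunks replace the XFS_ICSB_SB_LOCKED flag with explicit _locked variants of the per-cpu counter helpers. Below is a minimal sketch of the resulting calling convention, with a made-up wrapper name; the real callers are the CPU hotplug notifier and xfs_icsb_modify_counters paths shown above.

    /* Illustration only: which variant to use depends on whether m_sb_lock is held. */
    static void
    example_rebalance_fdblocks(xfs_mount_t *mp)
    {
            /* Caller not holding m_sb_lock (cf. CPU_ONLINE above): */
            xfs_icsb_lock(mp);
            xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);  /* takes m_sb_lock itself */
            xfs_icsb_unlock(mp);

            /* Caller already under m_sb_lock (cf. the CPU_DEAD notifier above): */
            xfs_icsb_lock(mp);
            spin_lock(&mp->m_sb_lock);
            xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
            spin_unlock(&mp->m_sb_lock);
            xfs_icsb_unlock(mp);
    }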