[XFS] Drop unneeded endian conversion in bulkstat and start readahead for
batches of inode cluster buffers at once, before any blocking reads are
issued.

author    Nathan Scott <nathans@sgi.com>
          Thu, 28 Sep 2006 01:02:03 +0000 (11:02 +1000)
committer Tim Shimmin <tes@sgi.com>
          Thu, 28 Sep 2006 01:02:03 +0000 (11:02 +1000)

SGI-PV: 944409
SGI-Modid: xfs-linux-melb:xfs-kern:26606a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
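
A note on the ordering this change introduces: readahead is now issued for
every inode cluster in a chunk before any blocking read, so the slow reads
overlap with I/O already in flight. Below is a minimal userspace sketch of
the same prefetch-then-read pattern, not the kernel code path — the file
name, cluster count and cluster size are made up, and
posix_fadvise(POSIX_FADV_WILLNEED) stands in for xfs_btree_reada_bufs():

/*
 * Sketch: hint the kernel about a whole batch of regions up front,
 * then do the blocking reads, by which time most of the data is
 * already being fetched (or is cached).
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NCLUSTERS	8
#define CLUSTERSIZE	(64 * 1024)

int main(void)
{
	int	fd = open("some.data", O_RDONLY);	/* hypothetical file */
	char	*buf = malloc(CLUSTERSIZE);
	off_t	off;
	int	i;

	if (fd < 0 || !buf)
		return 1;

	/* Pass 1: non-blocking readahead hints for every cluster. */
	for (i = 0; i < NCLUSTERS; i++)
		posix_fadvise(fd, (off_t)i * CLUSTERSIZE, CLUSTERSIZE,
			      POSIX_FADV_WILLNEED);

	/* Pass 2: blocking reads; the clusters were prefetched above. */
	for (i = 0; i < NCLUSTERS; i++) {
		off = (off_t)i * CLUSTERSIZE;
		if (pread(fd, buf, CLUSTERSIZE, off) < 0)
			perror("pread");
	}

	free(buf);
	close(fd);
	return 0;
}

On the endian side: the on-disk inobt record (xfs_inobt_rec_t) keeps its
fields big-endian, while the incore variant (xfs_inobt_rec_incore_t) holds
native CPU values. Since the bulkstat irec buffer now stores incore records,
the per-field cpu_to_be*()/be*_to_cpu() round trips in the old code are
redundant. A sketch of the distinction, with the structs abbreviated for
illustration and be32toh()/be64toh() standing in for the kernel's helpers:

#include <stdint.h>
#include <endian.h>

typedef struct xfs_inobt_rec {		/* on-disk layout: big-endian */
	uint32_t	ir_startino;
	uint32_t	ir_freecount;
	uint64_t	ir_free;
} xfs_inobt_rec_t;

typedef struct xfs_inobt_rec_incore {	/* incore copy: CPU byte order */
	uint32_t	ir_startino;
	uint32_t	ir_freecount;
	uint64_t	ir_free;
} xfs_inobt_rec_incore_t;

/*
 * Convert once at the boundary; everything that works on the incore
 * copy afterwards uses plain assignments and comparisons.
 */
void
inobt_rec_to_incore(const xfs_inobt_rec_t *disk, xfs_inobt_rec_incore_t *irec)
{
	irec->ir_startino = be32toh(disk->ir_startino);
	irec->ir_freecount = be32toh(disk->ir_freecount);
	irec->ir_free = be64toh(disk->ir_free);
}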
fs/xfs/xfs_itable.c

diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 305a9d0..e6dbe6b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -325,9 +325,9 @@ xfs_bulkstat(
        int                     i;      /* loop index */
        int                     icount; /* count of inodes good in irbuf */
        xfs_ino_t               ino;    /* inode number (filesystem) */
-       xfs_inobt_rec_t         *irbp;  /* current irec buffer pointer */
-       xfs_inobt_rec_t         *irbuf; /* start of irec buffer */
-       xfs_inobt_rec_t         *irbufend; /* end of good irec buffer entries */
+       xfs_inobt_rec_incore_t  *irbp;  /* current irec buffer pointer */
+       xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
+       xfs_inobt_rec_incore_t  *irbufend; /* end of good irec buffer entries */
        xfs_ino_t               lastino=0; /* last inode number returned */
        int                     nbcluster; /* # of blocks in a cluster */
        int                     nicluster; /* # of inodes in a cluster */
@@ -398,7 +398,7 @@ xfs_bulkstat(
                 * Allocate and initialize a btree cursor for ialloc btree.
                 */
                cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
-                       (xfs_inode_t *)0, 0);
+                                               (xfs_inode_t *)0, 0);
                irbp = irbuf;
                irbufend = irbuf + nirbuf;
                end_of_ag = 0;
@@ -435,9 +435,9 @@ xfs_bulkstat(
                                                gcnt++;
                                }
                                gfree |= XFS_INOBT_MASKN(0, chunkidx);
-                               irbp->ir_startino = cpu_to_be32(gino);
-                               irbp->ir_freecount = cpu_to_be32(gcnt);
-                               irbp->ir_free = cpu_to_be64(gfree);
+                               irbp->ir_startino = gino;
+                               irbp->ir_freecount = gcnt;
+                               irbp->ir_free = gfree;
                                irbp++;
                                agino = gino + XFS_INODES_PER_CHUNK;
                                icount = XFS_INODES_PER_CHUNK - gcnt;
@@ -491,11 +491,27 @@ xfs_bulkstat(
                        }
                        /*
                         * If this chunk has any allocated inodes, save it.
+                        * Also start read-ahead now for this chunk.
                         */
                        if (gcnt < XFS_INODES_PER_CHUNK) {
-                               irbp->ir_startino = cpu_to_be32(gino);
-                               irbp->ir_freecount = cpu_to_be32(gcnt);
-                               irbp->ir_free = cpu_to_be64(gfree);
+                               /*
+                                * Loop over all clusters in this chunk.
+                                * Do a readahead if there are any allocated
+                                * inodes in that cluster.
+                                */
+                               for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
+                                    chunkidx = 0;
+                                    chunkidx < XFS_INODES_PER_CHUNK;
+                                    chunkidx += nicluster,
+                                    agbno += nbcluster) {
+                                       if (XFS_INOBT_MASKN(chunkidx,
+                                                           nicluster) & ~gfree)
+                                               xfs_btree_reada_bufs(mp, agno,
+                                                       agbno, nbcluster);
+                               }
+                               irbp->ir_startino = gino;
+                               irbp->ir_freecount = gcnt;
+                               irbp->ir_free = gfree;
                                irbp++;
                                icount += XFS_INODES_PER_CHUNK - gcnt;
                        }
@@ -518,34 +534,12 @@ xfs_bulkstat(
                irbufend = irbp;
                for (irbp = irbuf;
                     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
-                       /*
-                        * Read-ahead the next chunk's worth of inodes.
-                        */
-                       if (&irbp[1] < irbufend) {
-                               /*
-                                * Loop over all clusters in the next chunk.
-                                * Do a readahead if there are any allocated
-                                * inodes in that cluster.
-                                */
-                               for (agbno = XFS_AGINO_TO_AGBNO(mp,
-                                                       be32_to_cpu(irbp[1].ir_startino)),
-                                    chunkidx = 0;
-                                    chunkidx < XFS_INODES_PER_CHUNK;
-                                    chunkidx += nicluster,
-                                    agbno += nbcluster) {
-                                       if (XFS_INOBT_MASKN(chunkidx,
-                                                           nicluster) &
-                                           ~(be64_to_cpu(irbp[1].ir_free)))
-                                               xfs_btree_reada_bufs(mp, agno,
-                                                       agbno, nbcluster);
-                               }
-                       }
                        /*
                         * Now process this chunk of inodes.
                         */
-                       for (agino = be32_to_cpu(irbp->ir_startino), chunkidx = 0, clustidx = 0;
+                       for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
                             ubleft > 0 &&
-                               be32_to_cpu(irbp->ir_freecount) < XFS_INODES_PER_CHUNK;
+                               irbp->ir_freecount < XFS_INODES_PER_CHUNK;
                             chunkidx++, clustidx++, agino++) {
                                ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
                                /*
@@ -565,7 +559,7 @@ xfs_bulkstat(
                                 */
                                if ((chunkidx & (nicluster - 1)) == 0) {
                                        agbno = XFS_AGINO_TO_AGBNO(mp,
-                                                       be32_to_cpu(irbp->ir_startino)) +
+                                                       irbp->ir_startino) +
                                                ((chunkidx & nimask) >>
                                                 mp->m_sb.sb_inopblog);
 
@@ -605,13 +599,13 @@ xfs_bulkstat(
                                /*
                                 * Skip if this inode is free.
                                 */
-                               if (XFS_INOBT_MASK(chunkidx) & be64_to_cpu(irbp->ir_free))
+                               if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
                                        continue;
                                /*
                                 * Count used inodes as free so we can tell
                                 * when the chunk is used up.
                                 */
-                               be32_add(&irbp->ir_freecount, 1);
+                               irbp->ir_freecount++;
                                ino = XFS_AGINO_TO_INO(mp, agno, agino);
                                bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
                                if (!xfs_bulkstat_use_dinode(mp, flags, bp,