fs/xfs/xfs_itable.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_trace.h"

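/*
 * Return true if this inode number refers to one of the filesystem's own
 * metadata inodes: the realtime bitmap or summary inode, or (on filesystems
 * with quota support) the user or group quota inode.  Bulkstat never
 * reports these.
 */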
STATIC int
xfs_internal_inum(
        xfs_mount_t     *mp,
        xfs_ino_t       ino)
{
        return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
                (xfs_sb_version_hasquota(&mp->m_sb) &&
                 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
        struct xfs_mount        *mp,            /* mount point for filesystem */
        xfs_ino_t               ino,            /* inode to get data for */
        void __user             *buffer,        /* buffer to place output in */
        int                     ubsize,         /* size of buffer */
        bulkstat_one_fmt_pf     formatter,      /* formatter, copy to user */
        int                     *ubused,        /* bytes used by me */
        int                     *stat)          /* BULKSTAT_RV_... */
{
        struct xfs_icdinode     *dic;           /* dinode core info pointer */
        struct xfs_inode        *ip;            /* incore inode pointer */
        struct inode            *inode;
        struct xfs_bstat        *buf;           /* return buffer */
        int                     error = 0;      /* error value */

        *stat = BULKSTAT_RV_NOTHING;

        if (!buffer || xfs_internal_inum(mp, ino))
                return XFS_ERROR(EINVAL);

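        /*
         * Allocate the stat buffer off the heap; KM_MAYFAIL lets the
         * allocation fail instead of retrying indefinitely, and we turn
         * a failure into ENOMEM.
         */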
        buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
        if (!buf)
                return XFS_ERROR(ENOMEM);

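        /*
         * The inode number came from userspace, so pass XFS_IGET_UNTRUSTED
         * to make xfs_iget validate it against the inode btree rather than
         * trusting it.
         */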
        error = xfs_iget(mp, NULL, ino,
                         XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
        if (error) {
                *stat = BULKSTAT_RV_NOTHING;
                goto out_free;
        }

        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);

        dic = &ip->i_d;
        inode = VFS_I(ip);

        /*
         * xfs_iget returns the following without needing
         * further change.
         */
        buf->bs_nlink = dic->di_nlink;
        buf->bs_projid_lo = dic->di_projid_lo;
        buf->bs_projid_hi = dic->di_projid_hi;
        buf->bs_ino = ino;
        buf->bs_mode = dic->di_mode;
        buf->bs_uid = dic->di_uid;
        buf->bs_gid = dic->di_gid;
        buf->bs_size = dic->di_size;

        /*
         * We need to read the timestamps from the Linux inode because
         * the VFS keeps writing directly into the inode structure instead
         * of telling us about the updates.
         */
        buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
        buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
        buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
        buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
        buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
        buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;

        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
        buf->bs_extents = dic->di_nextents;
        buf->bs_gen = dic->di_gen;
        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
        buf->bs_dmevmask = dic->di_dmevmask;
        buf->bs_dmstate = dic->di_dmstate;
        buf->bs_aextents = dic->di_anextents;
        buf->bs_forkoff = XFS_IFORK_BOFF(ip);

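        /*
         * The reported rdev, block size and block count depend on the data
         * fork format: device special inodes carry an rdev and no blocks,
         * inline (local) and uuid inodes have no blocks, and extent/btree
         * inodes report their allocated blocks plus any delayed allocations.
         */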
        switch (dic->di_format) {
        case XFS_DINODE_FMT_DEV:
                buf->bs_rdev = ip->i_df.if_u2.if_rdev;
                buf->bs_blksize = BLKDEV_IOSIZE;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_LOCAL:
        case XFS_DINODE_FMT_UUID:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
                break;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        IRELE(ip);

        error = formatter(buffer, ubsize, ubused, buf);

        if (!error)
                *stat = BULKSTAT_RV_DIDONE;

 out_free:
        kmem_free(buf);
        return error;
}

/* Return 0 on success or positive error */
STATIC int
xfs_bulkstat_one_fmt(
        void                    __user *ubuffer,
        int                     ubsize,
        int                     *ubused,
        const xfs_bstat_t       *buffer)
{
        if (ubsize < sizeof(*buffer))
                return XFS_ERROR(ENOMEM);
        if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
                return XFS_ERROR(EFAULT);
        if (ubused)
                *ubused = sizeof(*buffer);
        return 0;
}

int
xfs_bulkstat_one(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void            __user *buffer, /* buffer to place output in */
        int             ubsize,         /* size of buffer */
        int             *ubused,        /* bytes used by me */
        int             *stat)          /* BULKSTAT_RV_... */
{
        return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
                                    xfs_bulkstat_one_fmt, ubused, stat);
}

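/*
 * True while the user's buffer still has room for at least one more
 * stat structure.
 */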
#define XFS_BULKSTAT_UBLEFT(ubleft)     ((ubleft) >= statstruct_size)

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int                                     /* error status */
xfs_bulkstat(
        xfs_mount_t             *mp,    /* mount point for filesystem */
        xfs_ino_t               *lastinop, /* last inode returned */
        int                     *ubcountp, /* size of buffer/count returned */
        bulkstat_one_pf         formatter, /* func that'd fill a single buf */
        size_t                  statstruct_size, /* sizeof struct filling */
        char                    __user *ubuffer, /* buffer with inode stats */
        int                     *done)  /* 1 if there are more stats to get */
{
        xfs_agblock_t           agbno = 0; /* allocation group block number */
        xfs_buf_t               *agbp;  /* agi header buffer */
        xfs_agi_t               *agi;   /* agi header data */
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
        xfs_daddr_t             bno;    /* inode cluster start daddr */
        int                     chunkidx; /* current index into inode chunk */
        int                     clustidx; /* current index into inode cluster */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
        int                     end_of_ag; /* set if we've seen the ag end */
        int                     error;  /* error code */
        int                     fmterror; /* bulkstat formatter result */
        int                     i;      /* loop index */
        int                     icount; /* count of inodes good in irbuf */
        size_t                  irbsize; /* size of irec buffer in bytes */
        xfs_ino_t               ino;    /* inode number (filesystem) */
        xfs_inobt_rec_incore_t  *irbp;  /* current irec buffer pointer */
        xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
        xfs_inobt_rec_incore_t  *irbufend; /* end of good irec buffer entries */
        xfs_ino_t               lastino; /* last inode number returned */
        int                     nbcluster; /* # of blocks in a cluster */
        int                     nicluster; /* # of inodes in a cluster */
        int                     nimask; /* mask for inode clusters */
        int                     nirbuf; /* size of irbuf */
        int                     rval;   /* return value error code */
        int                     tmp;    /* result value from btree calls */
        int                     ubcount; /* size of user's buffer */
        int                     ubleft; /* bytes left in user's buffer */
        char                    __user *ubufp;  /* pointer into user's buffer */
        int                     ubelem; /* spaces used in user's buffer */
        int                     ubused; /* bytes used by formatter */
        xfs_buf_t               *bp;    /* ptr to on-disk inode cluster buf */

        /*
         * Get the last inode value, see if there's nothing to do.
         */
        ino = (xfs_ino_t)*lastinop;
        lastino = ino;
        agno = XFS_INO_TO_AGNO(mp, ino);
        agino = XFS_INO_TO_AGINO(mp, ino);
        if (agno >= mp->m_sb.sb_agcount ||
            ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
                *done = 1;
                *ubcountp = 0;
                return 0;
        }
        if (!ubcountp || *ubcountp <= 0) {
                return EINVAL;
        }
        ubcount = *ubcountp; /* statstruct's */
        ubleft = ubcount * statstruct_size; /* bytes */
        *ubcountp = ubelem = 0;
        *done = 0;
        fmterror = 0;
        ubufp = ubuffer;
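        /*
         * Work out the inode cluster geometry: nicluster is the number of
         * inodes per cluster buffer, nimask rounds an inode index down to
         * the start of its cluster, and nbcluster is the cluster size in
         * filesystem blocks (used for readahead below).
         */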
        nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
                mp->m_sb.sb_inopblock :
                (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
        nimask = ~(nicluster - 1);
        nbcluster = nicluster >> mp->m_sb.sb_inopblog;
        irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
        if (!irbuf)
                return ENOMEM;

        nirbuf = irbsize / sizeof(*irbuf);

        /*
         * Loop over the allocation groups, starting from the last
         * inode returned; 0 means start of the allocation group.
         */
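        /*
         * Each trip through the loop handles one allocation group in two
         * phases: first walk the inode btree and collect a batch of inode
         * chunk records into irbuf, issuing readahead for clusters that
         * contain allocated inodes, then drop the AGI and btree buffers
         * and format every allocated inode from the cached records into
         * the user's buffer.
         */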
        rval = 0;
        while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
                cond_resched();
                bp = NULL;
                error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                if (error) {
                        /*
                         * Skip this allocation group and go to the next one.
                         */
                        agno++;
                        agino = 0;
                        continue;
                }
                agi = XFS_BUF_TO_AGI(agbp);
                /*
                 * Allocate and initialize a btree cursor for ialloc btree.
                 */
                cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
                irbp = irbuf;
                irbufend = irbuf + nirbuf;
                end_of_ag = 0;
                /*
                 * If we're returning in the middle of an allocation group,
                 * we need to get the remainder of the chunk we're in.
                 */
                if (agino > 0) {
                        xfs_inobt_rec_incore_t r;

                        /*
                         * Lookup the inode chunk that this inode lives in.
                         */
                        error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
                                                 &tmp);
                        if (!error &&   /* no I/O error */
                            tmp &&      /* lookup succeeded */
                                        /* got the record, should always work */
                            !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
                            i == 1 &&
                                        /* this is the right chunk */
                            agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
                                        /* lastino was not last in chunk */
                            (chunkidx = agino - r.ir_startino + 1) <
                                    XFS_INODES_PER_CHUNK &&
                                        /* there are some left allocated */
                            xfs_inobt_maskn(chunkidx,
                                    XFS_INODES_PER_CHUNK - chunkidx) &
                                    ~r.ir_free) {
                                /*
                                 * Grab the chunk record.  Mark all the
                                 * uninteresting inodes (because they're
                                 * before our start point) free.
                                 */
                                for (i = 0; i < chunkidx; i++) {
                                        if (XFS_INOBT_MASK(i) & ~r.ir_free)
                                                r.ir_freecount++;
                                }
                                r.ir_free |= xfs_inobt_maskn(0, chunkidx);
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
                                agino = r.ir_startino + XFS_INODES_PER_CHUNK;
                                icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
                        } else {
                                /*
                                 * If any of those tests failed, bump the
                                 * inode number (just in case).
                                 */
                                agino++;
                                icount = 0;
                        }
                        /*
                         * In any case, increment to the next record.
                         */
                        if (!error)
                                error = xfs_btree_increment(cur, 0, &tmp);
                } else {
                        /*
                         * Start of ag.  Lookup the first inode chunk.
                         */
                        error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
                        icount = 0;
                }
                /*
                 * Loop through inode btree records in this ag,
                 * until we run out of inodes or space in the buffer.
                 */
                while (irbp < irbufend && icount < ubcount) {
                        xfs_inobt_rec_incore_t r;

                        /*
                         * Loop as long as we're unable to read the
                         * inode btree.
                         */
                        while (error) {
                                agino += XFS_INODES_PER_CHUNK;
                                if (XFS_AGINO_TO_AGBNO(mp, agino) >=
                                                be32_to_cpu(agi->agi_length))
                                        break;
                                error = xfs_inobt_lookup(cur, agino,
                                                         XFS_LOOKUP_GE, &tmp);
                                cond_resched();
                        }
                        /*
                         * If we ran off the end of the ag, either with an
                         * error or the normal way, set end_of_ag and stop
                         * collecting.
                         */
                        if (error) {
                                end_of_ag = 1;
                                break;
                        }

                        error = xfs_inobt_get_rec(cur, &r, &i);
                        if (error || i == 0) {
                                end_of_ag = 1;
                                break;
                        }

                        /*
                         * If this chunk has any allocated inodes, save it.
                         * Also start read-ahead now for this chunk.
                         */
                        if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
                                /*
                                 * Loop over all clusters in the next chunk.
                                 * Do a readahead if there are any allocated
                                 * inodes in that cluster.
                                 */
                                agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
                                for (chunkidx = 0;
                                     chunkidx < XFS_INODES_PER_CHUNK;
                                     chunkidx += nicluster,
                                     agbno += nbcluster) {
                                        if (xfs_inobt_maskn(chunkidx, nicluster)
                                                        & ~r.ir_free)
                                                xfs_btree_reada_bufs(mp, agno,
                                                        agbno, nbcluster);
                                }
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
                                icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
                        }
                        /*
                         * Set agino to after this chunk and bump the cursor.
                         */
                        agino = r.ir_startino + XFS_INODES_PER_CHUNK;
                        error = xfs_btree_increment(cur, 0, &tmp);
                        cond_resched();
                }
                /*
                 * Drop the btree buffers and the agi buffer.
                 * We can't hold any of the locks these represent
                 * when calling iget.
                 */
                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                xfs_buf_relse(agbp);
                /*
                 * Now format all the good inodes into the user's buffer.
                 */
                irbufend = irbp;
                for (irbp = irbuf;
                     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
                        /*
                         * Now process this chunk of inodes.
                         */
                        for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
                             XFS_BULKSTAT_UBLEFT(ubleft) &&
                                irbp->ir_freecount < XFS_INODES_PER_CHUNK;
                             chunkidx++, clustidx++, agino++) {
                                ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
                                /*
                                 * Recompute agbno if this is the
                                 * first inode of the cluster.
                                 *
                                 * Careful with clustidx.   There can be
                                 * multiple clusters per chunk, a single
                                 * cluster per chunk or a cluster that has
                                 * inodes represented from several different
                                 * chunks (if blocksize is large).
                                 *
                                 * Because of this, the starting clustidx is
                                 * initialized to zero in this loop but must
                                 * later be reset after reading in the cluster
                                 * buffer.
                                 */
                                if ((chunkidx & (nicluster - 1)) == 0) {
                                        agbno = XFS_AGINO_TO_AGBNO(mp,
                                                        irbp->ir_startino) +
                                                ((chunkidx & nimask) >>
                                                 mp->m_sb.sb_inopblog);
                                }
                                ino = XFS_AGINO_TO_INO(mp, agno, agino);
                                bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
                                /*
                                 * Skip if this inode is free.
                                 */
                                if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
                                        lastino = ino;
                                        continue;
                                }
                                /*
                                 * Count used inodes as free so we can tell
                                 * when the chunk is used up.
                                 */
                                irbp->ir_freecount++;

                                /*
                                 * Get the inode and fill in a single buffer.
                                 */
                                ubused = statstruct_size;
                                error = formatter(mp, ino, ubufp, ubleft,
                                                  &ubused, &fmterror);
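                                /*
                                 * The formatter reports its result in
                                 * fmterror: BULKSTAT_RV_NOTHING if nothing
                                 * was returned for this inode,
                                 * BULKSTAT_RV_GIVEUP if we should stop the
                                 * walk, or BULKSTAT_RV_DIDONE on success.
                                 */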
                                if (fmterror == BULKSTAT_RV_NOTHING) {
                                        if (error && error != ENOENT &&
                                                error != EINVAL) {
                                                ubleft = 0;
                                                rval = error;
                                                break;
                                        }
                                        lastino = ino;
                                        continue;
                                }
                                if (fmterror == BULKSTAT_RV_GIVEUP) {
                                        ubleft = 0;
                                        ASSERT(error);
                                        rval = error;
                                        break;
                                }
                                if (ubufp)
                                        ubufp += ubused;
                                ubleft -= ubused;
                                ubelem++;
                                lastino = ino;
                        }

                        cond_resched();
                }

                if (bp)
                        xfs_buf_relse(bp);

                /*
                 * Set up for the next loop iteration.
                 */
                if (XFS_BULKSTAT_UBLEFT(ubleft)) {
                        if (end_of_ag) {
                                agno++;
                                agino = 0;
                        } else
                                agino = XFS_INO_TO_AGINO(mp, lastino);
                } else
                        break;
        }
        /*
         * Done, we're either out of filesystem or space to put the data.
         */
        kmem_free_large(irbuf);
        *ubcountp = ubelem;
        /*
         * Found some inodes, return them now and return the error next time.
         */
        if (ubelem)
                rval = 0;
        if (agno >= mp->m_sb.sb_agcount) {
                /*
                 * If we ran out of filesystem, mark lastino as off
                 * the end of the filesystem, so the next call
                 * will return immediately.
                 */
                *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
                *done = 1;
        } else
                *lastinop = (xfs_ino_t)lastino;

        return rval;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 */
int                                     /* error status */
xfs_bulkstat_single(
        xfs_mount_t             *mp,    /* mount point for filesystem */
        xfs_ino_t               *lastinop, /* inode to return */
        char                    __user *buffer, /* buffer with inode stats */
        int                     *done)  /* 1 if there are more stats to get */
{
        int                     count;  /* count value for bulkstat call */
        int                     error;  /* return value */
        xfs_ino_t               ino;    /* filesystem inode number */
        int                     res;    /* result from bs1 */

        /*
         * note that requesting valid inode numbers which are not allocated
         * to inodes will most likely cause xfs_itobp to generate warning
         * messages about bad magic numbers. This is ok. The fact that
         * the inode isn't actually an inode is handled by the
         * error check below. Done this way to make the usual case faster
         * at the expense of the error case.
         */

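        /*
         * Fast path: try to stat the single requested inode directly.
         */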
        ino = (xfs_ino_t)*lastinop;
        error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
        if (error) {
                /*
                 * The special-case path failed; do it the "long" way
                 * to see if that works.
                 */
                (*lastinop)--;
                count = 1;
                if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
                                sizeof(xfs_bstat_t), buffer, done))
                        return error;
                if (count == 0 || (xfs_ino_t)*lastinop != ino)
                        return error == EFSCORRUPTED ?
                                XFS_ERROR(EINVAL) : error;
                else
                        return 0;
        }
        *done = 0;
        return 0;
}

int
xfs_inumbers_fmt(
        void                    __user *ubuffer, /* buffer to write to */
        const xfs_inogrp_t      *buffer,        /* buffer to read from */
        long                    count,          /* # of elements to read */
        long                    *written)       /* # of bytes written */
{
        if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
                return -EFAULT;
        *written = count * sizeof(*buffer);
        return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int                                     /* error status */
xfs_inumbers(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       *lastino,       /* last inode returned */
        int             *count,         /* size of buffer/count returned */
        void            __user *ubuffer,/* buffer with inode descriptions */
        inumbers_fmt_pf formatter)
{
        xfs_buf_t       *agbp;
        xfs_agino_t     agino;
        xfs_agnumber_t  agno;
        int             bcount;
        xfs_inogrp_t    *buffer;
        int             bufidx;
        xfs_btree_cur_t *cur;
        int             error;
        xfs_inobt_rec_incore_t r;
        int             i;
        xfs_ino_t       ino;
        int             left;
        int             tmp;

        ino = (xfs_ino_t)*lastino;
        agno = XFS_INO_TO_AGNO(mp, ino);
        agino = XFS_INO_TO_AGINO(mp, ino);
        left = *count;
        *count = 0;
        bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
        buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
        error = bufidx = 0;
        cur = NULL;
        agbp = NULL;
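        /*
         * Walk each allocation group's inode btree, emitting one xfs_inogrp
         * record per inode chunk, until the caller's count is exhausted or
         * we run out of allocation groups.
         */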
        while (left > 0 && agno < mp->m_sb.sb_agcount) {
                if (agbp == NULL) {
                        error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                        if (error) {
                                /*
                                 * If we can't read the AGI of this ag,
                                 * then just skip to the next one.
                                 */
                                ASSERT(cur == NULL);
                                agbp = NULL;
                                agno++;
                                agino = 0;
                                continue;
                        }
                        cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
                        error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
                                                 &tmp);
                        if (error) {
                                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                                cur = NULL;
                                xfs_buf_relse(agbp);
                                agbp = NULL;
                                /*
                                 * Move up the last inode in the current
                                 * chunk.  The lookup_ge will always get
                                 * us the first inode in the next chunk.
                                 */
                                agino += XFS_INODES_PER_CHUNK - 1;
                                continue;
                        }
                }
                error = xfs_inobt_get_rec(cur, &r, &i);
                if (error || i == 0) {
                        xfs_buf_relse(agbp);
                        agbp = NULL;
                        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                        cur = NULL;
                        agno++;
                        agino = 0;
                        continue;
                }
                agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
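                /*
                 * Fill in one xfs_inogrp entry for this chunk: its first
                 * inode number, how many inodes in it are allocated, and
                 * a bitmask of the allocated inodes.
                 */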
                buffer[bufidx].xi_startino =
                        XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
                buffer[bufidx].xi_alloccount =
                        XFS_INODES_PER_CHUNK - r.ir_freecount;
                buffer[bufidx].xi_allocmask = ~r.ir_free;
                bufidx++;
                left--;
                if (bufidx == bcount) {
                        long written;
                        if (formatter(ubuffer, buffer, bufidx, &written)) {
                                error = XFS_ERROR(EFAULT);
                                break;
                        }
                        ubuffer += written;
                        *count += bufidx;
                        bufidx = 0;
                }
                if (left) {
                        error = xfs_btree_increment(cur, 0, &tmp);
                        if (error) {
                                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                                cur = NULL;
                                xfs_buf_relse(agbp);
                                agbp = NULL;
                                /*
                                 * The agino value has already been bumped.
                                 * Just try to skip up to it.
                                 */
                                agino += XFS_INODES_PER_CHUNK;
                                continue;
                        }
                }
        }
        if (!error) {
                if (bufidx) {
                        long written;
                        if (formatter(ubuffer, buffer, bufidx, &written))
                                error = XFS_ERROR(EFAULT);
                        else
                                *count += bufidx;
                }
                *lastino = XFS_AGINO_TO_INO(mp, agno, agino);
        }
        kmem_free(buffer);
        if (cur)
                xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
                                           XFS_BTREE_NOERROR));
        if (agbp)
                xfs_buf_relse(agbp);
        return error;
}