1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_error.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_alloc.h"
36 #include "xfs_ialloc.h"
37 #include "xfs_log_priv.h"
38 #include "xfs_buf_item.h"
39 #include "xfs_log_recover.h"
40 #include "xfs_extfree_item.h"
41 #include "xfs_trans_priv.h"
42 #include "xfs_quota.h"
43 #include "xfs_rw.h"
44 #include "xfs_utils.h"
45 #include "xfs_trace.h"
46
47 STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
48 STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
49 #if defined(DEBUG)
50 STATIC void     xlog_recover_check_summary(xlog_t *);
51 #else
52 #define xlog_recover_check_summary(log)
53 #endif
54
55 /*
56  * Sector aligned buffer routines for buffer create/read/write/access
57  */
58
59 /*
60  * Verify that the given count of basic blocks is a valid number of blocks
61  * to specify for an operation involving the given XFS log buffer.
62  * Returns nonzero if the count is valid, 0 otherwise.
63  */
64
65 static inline int
66 xlog_buf_bbcount_valid(
67         xlog_t          *log,
68         int             bbcount)
69 {
70         return bbcount > 0 && bbcount <= log->l_logBBsize;
71 }
72
73 /*
74  * Allocate a buffer to hold log data.  The buffer needs to be able
75  * to map to a range of nbblks basic blocks at any valid (basic
76  * block) offset within the log.
77  */
78 STATIC xfs_buf_t *
79 xlog_get_bp(
80         xlog_t          *log,
81         int             nbblks)
82 {
83         if (!xlog_buf_bbcount_valid(log, nbblks)) {
84                 xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
85                         nbblks);
86                 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
87                 return NULL;
88         }
89
90         /*
91          * We do log I/O in units of log sectors (a power-of-2
92          * multiple of the basic block size), so we round up the
93          * requested size to accommodate the basic blocks required
94          * for complete log sectors.
95          *
96          * In addition, the buffer may be used for a non-sector-
97          * aligned block offset, in which case an I/O of the
98          * requested size could extend beyond the end of the
99          * buffer.  If the requested size is only 1 basic block it
100          * will never straddle a sector boundary, so this won't be
101          * an issue.  Nor will this be a problem if the log I/O is
102          * done in basic blocks (sector size 1).  But otherwise we
103          * extend the buffer by one extra log sector to ensure
104          * there's space to accommodate this possibility.
105          */
106         if (nbblks > 1 && log->l_sectBBsize > 1)
107                 nbblks += log->l_sectBBsize;
108         nbblks = round_up(nbblks, log->l_sectBBsize);
109
110         return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
111                                         BBTOB(nbblks), 0);
112 }
113
114 STATIC void
115 xlog_put_bp(
116         xfs_buf_t       *bp)
117 {
118         xfs_buf_free(bp);
119 }
120
121 /*
122  * Return the address of the start of the given block number's data
123  * in a log buffer.  The buffer covers a log sector-aligned region.
124  */
125 STATIC xfs_caddr_t
126 xlog_align(
127         xlog_t          *log,
128         xfs_daddr_t     blk_no,
129         int             nbblks,
130         xfs_buf_t       *bp)
131 {
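            /*
             * l_sectBBsize is a power of two, so the mask below gives blk_no's
             * offset, in basic blocks, within its (sector-aligned) log sector.
             */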
132         xfs_daddr_t     offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
133
134         ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
135         return XFS_BUF_PTR(bp) + BBTOB(offset);
136 }
137
138
139 /*
140  * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
141  */
142 STATIC int
143 xlog_bread_noalign(
144         xlog_t          *log,
145         xfs_daddr_t     blk_no,
146         int             nbblks,
147         xfs_buf_t       *bp)
148 {
149         int             error;
150
151         if (!xlog_buf_bbcount_valid(log, nbblks)) {
152                 xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
153                         nbblks);
154                 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
155                 return EFSCORRUPTED;
156         }
157
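            /*
             * Log I/O is done in whole log sectors, so round the start block
             * down and the block count up to sector boundaries; callers find
             * their data within the enlarged read via xlog_align().
             */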
158         blk_no = round_down(blk_no, log->l_sectBBsize);
159         nbblks = round_up(nbblks, log->l_sectBBsize);
160
161         ASSERT(nbblks > 0);
162         ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
163
164         XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
165         XFS_BUF_READ(bp);
166         XFS_BUF_BUSY(bp);
167         XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
168         XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
169
170         xfsbdstrat(log->l_mp, bp);
171         error = xfs_buf_iowait(bp);
172         if (error)
173                 xfs_ioerror_alert("xlog_bread", log->l_mp,
174                                   bp, XFS_BUF_ADDR(bp));
175         return error;
176 }
177
178 STATIC int
179 xlog_bread(
180         xlog_t          *log,
181         xfs_daddr_t     blk_no,
182         int             nbblks,
183         xfs_buf_t       *bp,
184         xfs_caddr_t     *offset)
185 {
186         int             error;
187
188         error = xlog_bread_noalign(log, blk_no, nbblks, bp);
189         if (error)
190                 return error;
191
192         *offset = xlog_align(log, blk_no, nbblks, bp);
193         return 0;
194 }
195
196 /*
197  * Write out the buffer at the given block for the given number of blocks.
198  * The buffer is kept locked across the write and is returned locked.
199  * This can only be used for synchronous log writes.
200  */
201 STATIC int
202 xlog_bwrite(
203         xlog_t          *log,
204         xfs_daddr_t     blk_no,
205         int             nbblks,
206         xfs_buf_t       *bp)
207 {
208         int             error;
209
210         if (!xlog_buf_bbcount_valid(log, nbblks)) {
211                 xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
212                         nbblks);
213                 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
214                 return EFSCORRUPTED;
215         }
216
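            /* As with reads, writes are rounded out to whole log sectors. */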
217         blk_no = round_down(blk_no, log->l_sectBBsize);
218         nbblks = round_up(nbblks, log->l_sectBBsize);
219
220         ASSERT(nbblks > 0);
221         ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
222
223         XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
224         XFS_BUF_ZEROFLAGS(bp);
225         XFS_BUF_BUSY(bp);
226         XFS_BUF_HOLD(bp);
227         XFS_BUF_PSEMA(bp, PRIBIO);
228         XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
229         XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
230
231         if ((error = xfs_bwrite(log->l_mp, bp)))
232                 xfs_ioerror_alert("xlog_bwrite", log->l_mp,
233                                   bp, XFS_BUF_ADDR(bp));
234         return error;
235 }
236
237 #ifdef DEBUG
238 /*
239  * dump debug superblock and log record information
240  */
241 STATIC void
242 xlog_header_check_dump(
243         xfs_mount_t             *mp,
244         xlog_rec_header_t       *head)
245 {
246         cmn_err(CE_DEBUG, "%s:  SB : uuid = %pU, fmt = %d\n",
247                 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
248         cmn_err(CE_DEBUG, "    log : uuid = %pU, fmt = %d\n",
249                 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
250 }
251 #else
252 #define xlog_header_check_dump(mp, head)
253 #endif
254
255 /*
256  * check log record header for recovery
257  */
258 STATIC int
259 xlog_header_check_recover(
260         xfs_mount_t             *mp,
261         xlog_rec_header_t       *head)
262 {
263         ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
264
265         /*
266          * IRIX doesn't write the h_fmt field and leaves it zeroed
267          * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
268          * a dirty log created in IRIX.
269          */
270         if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
271                 xlog_warn(
272         "XFS: dirty log written in incompatible format - can't recover");
273                 xlog_header_check_dump(mp, head);
274                 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
275                                  XFS_ERRLEVEL_HIGH, mp);
276                 return XFS_ERROR(EFSCORRUPTED);
277         } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
278                 xlog_warn(
279         "XFS: dirty log entry has mismatched uuid - can't recover");
280                 xlog_header_check_dump(mp, head);
281                 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
282                                  XFS_ERRLEVEL_HIGH, mp);
283                 return XFS_ERROR(EFSCORRUPTED);
284         }
285         return 0;
286 }
287
288 /*
289  * read the head block of the log and check the header
290  * check the log record header's uuid against the filesystem being mounted
291 STATIC int
292 xlog_header_check_mount(
293         xfs_mount_t             *mp,
294         xlog_rec_header_t       *head)
295 {
296         ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
297
298         if (uuid_is_nil(&head->h_fs_uuid)) {
299                 /*
300                  * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
301                  * h_fs_uuid is nil, we assume this log was last mounted
302                  * by IRIX and continue.
303                  */
304                 xlog_warn("XFS: nil uuid in log - IRIX style log");
305         } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
306                 xlog_warn("XFS: log has mismatched uuid - can't recover");
307                 xlog_header_check_dump(mp, head);
308                 XFS_ERROR_REPORT("xlog_header_check_mount",
309                                  XFS_ERRLEVEL_HIGH, mp);
310                 return XFS_ERROR(EFSCORRUPTED);
311         }
312         return 0;
313 }
314
315 STATIC void
316 xlog_recover_iodone(
317         struct xfs_buf  *bp)
318 {
319         if (XFS_BUF_GETERROR(bp)) {
320                 /*
321                  * We're not going to bother about retrying
322                  * this during recovery. One strike!
323                  */
324                 xfs_ioerror_alert("xlog_recover_iodone",
325                                         bp->b_target->bt_mount, bp,
326                                         XFS_BUF_ADDR(bp));
327                 xfs_force_shutdown(bp->b_target->bt_mount,
328                                         SHUTDOWN_META_IO_ERROR);
329         }
330         XFS_BUF_CLR_IODONE_FUNC(bp);
331         xfs_buf_ioend(bp, 0);
332 }
333
334 /*
335  * This routine finds (to an approximation) the first block in the physical
336  * log which contains the given cycle.  It uses a binary search algorithm.
337  * Note that the algorithm cannot be perfect because the disk will not
338  * necessarily be perfect.
339  */
340 STATIC int
341 xlog_find_cycle_start(
342         xlog_t          *log,
343         xfs_buf_t       *bp,
344         xfs_daddr_t     first_blk,
345         xfs_daddr_t     *last_blk,
346         uint            cycle)
347 {
348         xfs_caddr_t     offset;
349         xfs_daddr_t     mid_blk;
350         xfs_daddr_t     end_blk;
351         uint            mid_cycle;
352         int             error;
353
354         end_blk = *last_blk;
355         mid_blk = BLK_AVG(first_blk, end_blk);
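            /*
             * Binary search: end_blk always tracks a block known to contain
             * 'cycle' and first_blk one that does not, so the loop converges
             * on (approximately) the first block stamped with 'cycle'.
             */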
356         while (mid_blk != first_blk && mid_blk != end_blk) {
357                 error = xlog_bread(log, mid_blk, 1, bp, &offset);
358                 if (error)
359                         return error;
360                 mid_cycle = xlog_get_cycle(offset);
361                 if (mid_cycle == cycle)
362                         end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
363                 else
364                         first_blk = mid_blk; /* first_half_cycle == mid_cycle */
365                 mid_blk = BLK_AVG(first_blk, end_blk);
366         }
367         ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
368                (mid_blk == end_blk && mid_blk-1 == first_blk));
369
370         *last_blk = end_blk;
371
372         return 0;
373 }
374
375 /*
376  * Check that a range of blocks does not contain stop_on_cycle_no.
377  * Fill in *new_blk with the block offset where such a block is
378  * found, or with -1 (an invalid block number) if there is no such
379  * block in the range.  The scan needs to occur from front to back
380  * and the pointer into the region must be updated since a later
381  * routine will need to perform another test.
382  */
383 STATIC int
384 xlog_find_verify_cycle(
385         xlog_t          *log,
386         xfs_daddr_t     start_blk,
387         int             nbblks,
388         uint            stop_on_cycle_no,
389         xfs_daddr_t     *new_blk)
390 {
391         xfs_daddr_t     i, j;
392         uint            cycle;
393         xfs_buf_t       *bp;
394         xfs_daddr_t     bufblks;
395         xfs_caddr_t     buf = NULL;
396         int             error = 0;
397
398         /*
399          * Greedily allocate a buffer big enough to handle the full
400          * range of basic blocks we'll be examining.  If that fails,
401          * try a smaller size.  We need to be able to read at least
402          * a log sector, or we're out of luck.
403          */
404         bufblks = 1 << ffs(nbblks);
405         while (!(bp = xlog_get_bp(log, bufblks))) {
406                 bufblks >>= 1;
407                 if (bufblks < log->l_sectBBsize)
408                         return ENOMEM;
409         }
410
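            /*
             * Walk the range in bufblks-sized chunks, checking the cycle
             * number stamped in each basic block.
             */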
411         for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
412                 int     bcount;
413
414                 bcount = min(bufblks, (start_blk + nbblks - i));
415
416                 error = xlog_bread(log, i, bcount, bp, &buf);
417                 if (error)
418                         goto out;
419
420                 for (j = 0; j < bcount; j++) {
421                         cycle = xlog_get_cycle(buf);
422                         if (cycle == stop_on_cycle_no) {
423                                 *new_blk = i+j;
424                                 goto out;
425                         }
426
427                         buf += BBSIZE;
428                 }
429         }
430
431         *new_blk = -1;
432
433 out:
434         xlog_put_bp(bp);
435         return error;
436 }
437
438 /*
439  * Potentially backup over partial log record write.
440  *
441  * In the typical case, last_blk is the number of the block directly after
442  * a good log record.  Therefore, we subtract one to get the block number
443  * of the last block in the given buffer.  extra_bblks contains the number
444  * of blocks we would have read on a previous read.  This happens when the
445  * last log record is split over the end of the physical log.
446  *
447  * extra_bblks is the number of blocks potentially verified on a previous
448  * call to this routine.
449  */
450 STATIC int
451 xlog_find_verify_log_record(
452         xlog_t                  *log,
453         xfs_daddr_t             start_blk,
454         xfs_daddr_t             *last_blk,
455         int                     extra_bblks)
456 {
457         xfs_daddr_t             i;
458         xfs_buf_t               *bp;
459         xfs_caddr_t             offset = NULL;
460         xlog_rec_header_t       *head = NULL;
461         int                     error = 0;
462         int                     smallmem = 0;
463         int                     num_blks = *last_blk - start_blk;
464         int                     xhdrs;
465
466         ASSERT(start_blk != 0 || *last_blk != start_blk);
467
468         if (!(bp = xlog_get_bp(log, num_blks))) {
469                 if (!(bp = xlog_get_bp(log, 1)))
470                         return ENOMEM;
471                 smallmem = 1;
472         } else {
473                 error = xlog_bread(log, start_blk, num_blks, bp, &offset);
474                 if (error)
475                         goto out;
476                 offset += ((num_blks - 1) << BBSHIFT);
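                        /*
                         * offset now points at the last basic block read; the
                         * loop below walks it backwards one block at a time.
                         */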
477         }
478
479         for (i = (*last_blk) - 1; i >= 0; i--) {
480                 if (i < start_blk) {
481                         /* valid log record not found */
482                         xlog_warn(
483                 "XFS: Log inconsistent (didn't find previous header)");
484                         ASSERT(0);
485                         error = XFS_ERROR(EIO);
486                         goto out;
487                 }
488
489                 if (smallmem) {
490                         error = xlog_bread(log, i, 1, bp, &offset);
491                         if (error)
492                                 goto out;
493                 }
494
495                 head = (xlog_rec_header_t *)offset;
496
497                 if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
498                         break;
499
500                 if (!smallmem)
501                         offset -= BBSIZE;
502         }
503
504         /*
505          * We hit the beginning of the physical log & still no header.  Return
506          * to caller.  If caller can handle a return of -1, then this routine
507          * will be called again for the end of the physical log.
508          */
509         if (i == -1) {
510                 error = -1;
511                 goto out;
512         }
513
514         /*
515          * We have the final block of the good log (the first block
516          * of the log record _before_ the head).  So we check the uuid.
517          */
518         if ((error = xlog_header_check_mount(log->l_mp, head)))
519                 goto out;
520
521         /*
522          * We may have found a log record header before we expected one.
523          * last_blk will be the 1st block # with a given cycle #.  We may end
524          * up reading an entire log record.  In this case, we don't want to
525          * reset last_blk.  Only when last_blk points in the middle of a log
526          * record do we update last_blk.
527          */
528         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
529                 uint    h_size = be32_to_cpu(head->h_size);
530
531                 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
532                 if (h_size % XLOG_HEADER_CYCLE_SIZE)
533                         xhdrs++;
534         } else {
535                 xhdrs = 1;
536         }
537
538         if (*last_blk - i + extra_bblks !=
539             BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
540                 *last_blk = i;
541
542 out:
543         xlog_put_bp(bp);
544         return error;
545 }
546
547 /*
548  * Head is defined to be the point of the log where the next log write
549  * could go.  This means that incomplete LR writes at the end are
550  * eliminated when calculating the head.  We aren't guaranteed that previous
551  * LRs have complete transactions.  We only know that no block with a cycle
552  * number of (current cycle - 1) will be present in the log if we start
553  * writing from our current block number.
554  *
555  * last_blk contains the block number of the first block with a given
556  * cycle number.
557  *
558  * Return: zero if normal, non-zero if error.
559  */
560 STATIC int
561 xlog_find_head(
562         xlog_t          *log,
563         xfs_daddr_t     *return_head_blk)
564 {
565         xfs_buf_t       *bp;
566         xfs_caddr_t     offset;
567         xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
568         int             num_scan_bblks;
569         uint            first_half_cycle, last_half_cycle;
570         uint            stop_on_cycle;
571         int             error, log_bbnum = log->l_logBBsize;
572
573         /* Is the end of the log device zeroed? */
574         if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
575                 *return_head_blk = first_blk;
576
577                 /* Is the whole lot zeroed? */
578                 if (!first_blk) {
579                         /* Linux XFS shouldn't generate totally zeroed logs -
580                          * mkfs etc write a dummy unmount record to a fresh
581                          * log so we can store the uuid in there
582                          */
583                         xlog_warn("XFS: totally zeroed log");
584                 }
585
586                 return 0;
587         } else if (error) {
588                 xlog_warn("XFS: empty log check failed");
589                 return error;
590         }
591
592         first_blk = 0;                  /* get cycle # of 1st block */
593         bp = xlog_get_bp(log, 1);
594         if (!bp)
595                 return ENOMEM;
596
597         error = xlog_bread(log, 0, 1, bp, &offset);
598         if (error)
599                 goto bp_err;
600
601         first_half_cycle = xlog_get_cycle(offset);
602
603         last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
604         error = xlog_bread(log, last_blk, 1, bp, &offset);
605         if (error)
606                 goto bp_err;
607
608         last_half_cycle = xlog_get_cycle(offset);
609         ASSERT(last_half_cycle != 0);
610
611         /*
612          * If the 1st half cycle number is equal to the last half cycle number,
613          * then the entire log is stamped with the same cycle number.  In this
614          * case, head_blk can't be set to zero (which makes sense).  The below
615          * math doesn't work out properly with head_blk equal to zero.  Instead,
616          * we set it to log_bbnum which is an invalid block number, but this
617          * value makes the math correct.  If head_blk doesn't change through
618          * all the tests below, *head_blk is set to zero at the very end rather
619          * than log_bbnum.  In a sense, log_bbnum and zero are the same block
620          * in a circular file.
621          */
622         if (first_half_cycle == last_half_cycle) {
623                 /*
624                  * In this case we believe that the entire log should have
625                  * cycle number last_half_cycle.  We need to scan backwards
626                  * from the end verifying that there are no holes still
627                  * containing last_half_cycle - 1.  If we find such a hole,
628                  * then the start of that hole will be the new head.  The
629                  * simple case looks like
630                  *        x | x ... | x - 1 | x
631                  * Another case that fits this picture would be
632                  *        x | x + 1 | x ... | x
633                  * In this case the head really is somewhere at the end of the
634                  * log, as one of the latest writes at the beginning was
635                  * incomplete.
636                  * One more case is
637                  *        x | x + 1 | x ... | x - 1 | x
638                  * This is really the combination of the above two cases, and
639                  * the head has to end up at the start of the x-1 hole at the
640                  * end of the log.
641                  *
642                  * In the 256k log case, we will read from the beginning to the
643                  * end of the log and search for cycle numbers equal to x-1.
644                  * We don't worry about the x+1 blocks that we encounter,
645                  * because we know that they cannot be the head since the log
646                  * started with x.
647                  */
648                 head_blk = log_bbnum;
649                 stop_on_cycle = last_half_cycle - 1;
650         } else {
651                 /*
652                  * In this case we want to find the first block with cycle
653                  * number matching last_half_cycle.  We expect the log to be
654                  * some variation on
655                  *        x + 1 ... | x ... | x
656                  * The first block with cycle number x (last_half_cycle) will
657                  * be where the new head belongs.  First we do a binary search
658                  * for the first occurrence of last_half_cycle.  The binary
659                  * search may not be totally accurate, so then we scan back
660                  * from there looking for occurrences of last_half_cycle before
661                  * us.  If that backwards scan wraps around the beginning of
662                  * the log, then we look for occurrences of last_half_cycle - 1
663                  * at the end of the log.  The cases we're looking for look
664                  * like
665                  *                               v binary search stopped here
666                  *        x + 1 ... | x | x + 1 | x ... | x
667                  *                   ^ but we want to locate this spot
668                  * or
669                  *        <---------> less than scan distance
670                  *        x + 1 ... | x ... | x - 1 | x
671                  *                           ^ we want to locate this spot
672                  */
673                 stop_on_cycle = last_half_cycle;
674                 if ((error = xlog_find_cycle_start(log, bp, first_blk,
675                                                 &head_blk, last_half_cycle)))
676                         goto bp_err;
677         }
678
679         /*
680          * Now validate the answer.  Scan back some number of maximum possible
681          * blocks and make sure each one has the expected cycle number.  The
682          * maximum is determined by the total possible amount of buffering
683          * in the in-core log.  The following number can be made tighter if
684          * we actually look at the block size of the filesystem.
685          */
686         num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
687         if (head_blk >= num_scan_bblks) {
688                 /*
689                  * We are guaranteed that the entire check can be performed
690                  * in one buffer.
691                  */
692                 start_blk = head_blk - num_scan_bblks;
693                 if ((error = xlog_find_verify_cycle(log,
694                                                 start_blk, num_scan_bblks,
695                                                 stop_on_cycle, &new_blk)))
696                         goto bp_err;
697                 if (new_blk != -1)
698                         head_blk = new_blk;
699         } else {                /* need to read 2 parts of log */
700                 /*
701                  * We are going to scan backwards in the log in two parts.
702                  * First we scan the physical end of the log.  In this part
703                  * of the log, we are looking for blocks with cycle number
704                  * last_half_cycle - 1.
705                  * If we find one, then we know that the log starts there, as
706                  * we've found a hole that didn't get written in going around
707                  * the end of the physical log.  The simple case for this is
708                  *        x + 1 ... | x ... | x - 1 | x
709                  *        <---------> less than scan distance
710                  * If all of the blocks at the end of the log have cycle number
711                  * last_half_cycle, then we check the blocks at the start of
712                  * the log looking for occurrences of last_half_cycle.  If we
713                  * find one, then our current estimate for the location of the
714                  * first occurrence of last_half_cycle is wrong and we move
715                  * back to the hole we've found.  This case looks like
716                  *        x + 1 ... | x | x + 1 | x ...
717                  *                               ^ binary search stopped here
718                  * Another case we need to handle that only occurs in 256k
719                  * logs is
720                  *        x + 1 ... | x ... | x+1 | x ...
721                  *                   ^ binary search stops here
722                  * In a 256k log, the scan at the end of the log will see the
723                  * x + 1 blocks.  We need to skip past those since that is
724                  * certainly not the head of the log.  By searching for
725                  * last_half_cycle-1 we accomplish that.
726                  */
727                 ASSERT(head_blk <= INT_MAX &&
728                         (xfs_daddr_t) num_scan_bblks >= head_blk);
729                 start_blk = log_bbnum - (num_scan_bblks - head_blk);
730                 if ((error = xlog_find_verify_cycle(log, start_blk,
731                                         num_scan_bblks - (int)head_blk,
732                                         (stop_on_cycle - 1), &new_blk)))
733                         goto bp_err;
734                 if (new_blk != -1) {
735                         head_blk = new_blk;
736                         goto validate_head;
737                 }
738
739                 /*
740                  * Scan beginning of log now.  The last part of the physical
741                  * log is good.  This scan needs to verify that it doesn't find
742                  * the last_half_cycle.
743                  */
744                 start_blk = 0;
745                 ASSERT(head_blk <= INT_MAX);
746                 if ((error = xlog_find_verify_cycle(log,
747                                         start_blk, (int)head_blk,
748                                         stop_on_cycle, &new_blk)))
749                         goto bp_err;
750                 if (new_blk != -1)
751                         head_blk = new_blk;
752         }
753
754 validate_head:
755         /*
756          * Now we need to make sure head_blk is not pointing to a block in
757          * the middle of a log record.
758          */
759         num_scan_bblks = XLOG_REC_SHIFT(log);
760         if (head_blk >= num_scan_bblks) {
761                 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
762
763                 /* start ptr at last block ptr before head_blk */
764                 if ((error = xlog_find_verify_log_record(log, start_blk,
765                                                         &head_blk, 0)) == -1) {
766                         error = XFS_ERROR(EIO);
767                         goto bp_err;
768                 } else if (error)
769                         goto bp_err;
770         } else {
771                 start_blk = 0;
772                 ASSERT(head_blk <= INT_MAX);
773                 if ((error = xlog_find_verify_log_record(log, start_blk,
774                                                         &head_blk, 0)) == -1) {
775                         /* We hit the beginning of the log during our search */
776                         start_blk = log_bbnum - (num_scan_bblks - head_blk);
777                         new_blk = log_bbnum;
778                         ASSERT(start_blk <= INT_MAX &&
779                                 (xfs_daddr_t) log_bbnum-start_blk >= 0);
780                         ASSERT(head_blk <= INT_MAX);
781                         if ((error = xlog_find_verify_log_record(log,
782                                                         start_blk, &new_blk,
783                                                         (int)head_blk)) == -1) {
784                                 error = XFS_ERROR(EIO);
785                                 goto bp_err;
786                         } else if (error)
787                                 goto bp_err;
788                         if (new_blk != log_bbnum)
789                                 head_blk = new_blk;
790                 } else if (error)
791                         goto bp_err;
792         }
793
794         xlog_put_bp(bp);
795         if (head_blk == log_bbnum)
796                 *return_head_blk = 0;
797         else
798                 *return_head_blk = head_blk;
799         /*
800          * When returning here, we have a good block number.  Bad block
801          * means that during a previous crash, we didn't have a clean break
802          * from cycle number N to cycle number N-1.  In this case, we need
803          * to find the first block with cycle number N-1.
804          */
805         return 0;
806
807  bp_err:
808         xlog_put_bp(bp);
809
810         if (error)
811             xlog_warn("XFS: failed to find log head");
812         return error;
813 }
814
815 /*
816  * Find the sync block number or the tail of the log.
817  *
818  * This will be the block number of the last record to have its
819  * associated buffers synced to disk.  Every log record header has
820  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
821  * to get a sync block number.  The only concern is to figure out which
822  * log record header to believe.
823  *
824  * The following algorithm uses the log record header with the largest
825  * lsn.  The entire log record does not need to be valid.  We only care
826  * that the header is valid.
827  *
828  * We could speed up search by using current head_blk buffer, but it is not
829  * available.
830  */
831 STATIC int
832 xlog_find_tail(
833         xlog_t                  *log,
834         xfs_daddr_t             *head_blk,
835         xfs_daddr_t             *tail_blk)
836 {
837         xlog_rec_header_t       *rhead;
838         xlog_op_header_t        *op_head;
839         xfs_caddr_t             offset = NULL;
840         xfs_buf_t               *bp;
841         int                     error, i, found;
842         xfs_daddr_t             umount_data_blk;
843         xfs_daddr_t             after_umount_blk;
844         xfs_lsn_t               tail_lsn;
845         int                     hblks;
846
847         found = 0;
848
849         /*
850          * Find previous log record
851          */
852         if ((error = xlog_find_head(log, head_blk)))
853                 return error;
854
855         bp = xlog_get_bp(log, 1);
856         if (!bp)
857                 return ENOMEM;
858         if (*head_blk == 0) {                           /* special case */
859                 error = xlog_bread(log, 0, 1, bp, &offset);
860                 if (error)
861                         goto done;
862
863                 if (xlog_get_cycle(offset) == 0) {
864                         *tail_blk = 0;
865                         /* leave all other log inited values alone */
866                         goto done;
867                 }
868         }
869
870         /*
871          * Search backwards looking for log record header block
872          */
873         ASSERT(*head_blk < INT_MAX);
874         for (i = (int)(*head_blk) - 1; i >= 0; i--) {
875                 error = xlog_bread(log, i, 1, bp, &offset);
876                 if (error)
877                         goto done;
878
879                 if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
880                         found = 1;
881                         break;
882                 }
883         }
884         /*
885          * If we haven't found the log record header block, start looking
886          * again from the end of the physical log.  XXXmiken: There should be
887          * a check here to make sure we didn't search more than N blocks in
888          * the previous code.
889          */
890         if (!found) {
891                 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
892                         error = xlog_bread(log, i, 1, bp, &offset);
893                         if (error)
894                                 goto done;
895
896                         if (XLOG_HEADER_MAGIC_NUM ==
897                             be32_to_cpu(*(__be32 *)offset)) {
898                                 found = 2;
899                                 break;
900                         }
901                 }
902         }
903         if (!found) {
904                 xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
905                 ASSERT(0);
                    xlog_put_bp(bp);        /* don't leak the search buffer */
906                 return XFS_ERROR(EIO);
907         }
908
909         /* find blk_no of tail of log */
910         rhead = (xlog_rec_header_t *)offset;
911         *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
912
913         /*
914          * Reset log values according to the state of the log when we
915          * crashed.  In the case where head_blk == 0, we bump curr_cycle
916          * one because the next write starts a new cycle rather than
917          * continuing the cycle of the last good log record.  At this
918          * point we have guaranteed that all partial log records have been
919          * accounted for.  Therefore, we know that the last good log record
920          * written was complete and ended exactly on the end boundary
921          * of the physical log.
922          */
923         log->l_prev_block = i;
924         log->l_curr_block = (int)*head_blk;
925         log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
926         if (found == 2)
927                 log->l_curr_cycle++;
928         log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
929         log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
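            /* Both grant heads start out at the recovered head of the log. */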
930         log->l_grant_reserve_cycle = log->l_curr_cycle;
931         log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
932         log->l_grant_write_cycle = log->l_curr_cycle;
933         log->l_grant_write_bytes = BBTOB(log->l_curr_block);
934
935         /*
936          * Look for unmount record.  If we find it, then we know there
937          * was a clean unmount.  Since 'i' could be the last block in
938          * the physical log, we convert to a log block before comparing
939          * to the head_blk.
940          *
941          * Save the current tail lsn to use to pass to
942          * xlog_clear_stale_blocks() below.  We won't want to clear the
943          * unmount record if there is one, so we pass the lsn of the
944          * unmount record rather than the block after it.
945          */
946         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
947                 int     h_size = be32_to_cpu(rhead->h_size);
948                 int     h_version = be32_to_cpu(rhead->h_version);
949
950                 if ((h_version & XLOG_VERSION_2) &&
951                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
952                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
953                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
954                                 hblks++;
955                 } else {
956                         hblks = 1;
957                 }
958         } else {
959                 hblks = 1;
960         }
961         after_umount_blk = (i + hblks + (int)
962                 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
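            /*
             * after_umount_blk is the first block past this record (header
             * blocks plus payload), modulo the log size since the log wraps.
             * If the head sits exactly there and the record carries a single
             * log operation, it may be an unmount record; check below.
             */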
963         tail_lsn = log->l_tail_lsn;
964         if (*head_blk == after_umount_blk &&
965             be32_to_cpu(rhead->h_num_logops) == 1) {
966                 umount_data_blk = (i + hblks) % log->l_logBBsize;
967                 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
968                 if (error)
969                         goto done;
970
971                 op_head = (xlog_op_header_t *)offset;
972                 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
973                         /*
974                          * Set tail and last sync so that newly written
975                          * log records will point recovery to after the
976                          * current unmount record.
977                          */
978                         log->l_tail_lsn =
979                                 xlog_assign_lsn(log->l_curr_cycle,
980                                                 after_umount_blk);
981                         log->l_last_sync_lsn =
982                                 xlog_assign_lsn(log->l_curr_cycle,
983                                                 after_umount_blk);
984                         *tail_blk = after_umount_blk;
985
986                         /*
987                          * Note that the unmount was clean. If the unmount
988                          * was not clean, we need to know this to rebuild the
989                          * superblock counters from the perag headers if we
990                          * have a filesystem using non-persistent counters.
991                          */
992                         log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
993                 }
994         }
995
996         /*
997          * Make sure that there are no blocks in front of the head
998          * with the same cycle number as the head.  This can happen
999          * because we allow multiple outstanding log writes concurrently,
1000          * and the later writes might make it out before earlier ones.
1001          *
1002          * We use the lsn from before modifying it so that we'll never
1003          * overwrite the unmount record after a clean unmount.
1004          *
1005          * Do this only if we are going to recover the filesystem
1006          *
1007          * NOTE: This used to say "if (!readonly)"
1008          * However on Linux, we can & do recover a read-only filesystem.
1009          * We only skip recovery if NORECOVERY is specified on mount,
1010          * in which case we would not be here.
1011          *
1012          * But... if the -device- itself is readonly, just skip this.
1013          * We can't recover this device anyway, so it won't matter.
1014          */
1015         if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1016                 error = xlog_clear_stale_blocks(log, tail_lsn);
1017
1018 done:
1019         xlog_put_bp(bp);
1020
1021         if (error)
1022                 xlog_warn("XFS: failed to locate log tail");
1023         return error;
1024 }
1025
1026 /*
1027  * Is the log zeroed at all?
1028  *
1029  * The last binary search should be changed to perform an X block read
1030  * once X becomes small enough.  You can then search linearly through
1031  * the X blocks.  This will cut down on the number of reads we need to do.
1032  *
1033  * If the log is partially zeroed, this routine will pass back the blkno
1034  * of the first block with cycle number 0.  It won't have a complete LR
1035  * preceding it.
1036  *
1037  * Return:
1038  *      0  => the log is completely written to
1039  *      -1 => use *blk_no as the first block of the log
1040  *      >0 => error has occurred
1041  */
1042 STATIC int
1043 xlog_find_zeroed(
1044         xlog_t          *log,
1045         xfs_daddr_t     *blk_no)
1046 {
1047         xfs_buf_t       *bp;
1048         xfs_caddr_t     offset;
1049         uint            first_cycle, last_cycle;
1050         xfs_daddr_t     new_blk, last_blk, start_blk;
1051         xfs_daddr_t     num_scan_bblks;
1052         int             error, log_bbnum = log->l_logBBsize;
1053
1054         *blk_no = 0;
1055
1056         /* check totally zeroed log */
1057         bp = xlog_get_bp(log, 1);
1058         if (!bp)
1059                 return ENOMEM;
1060         error = xlog_bread(log, 0, 1, bp, &offset);
1061         if (error)
1062                 goto bp_err;
1063
1064         first_cycle = xlog_get_cycle(offset);
1065         if (first_cycle == 0) {         /* completely zeroed log */
1066                 *blk_no = 0;
1067                 xlog_put_bp(bp);
1068                 return -1;
1069         }
1070
1071         /* check partially zeroed log */
1072         error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1073         if (error)
1074                 goto bp_err;
1075
1076         last_cycle = xlog_get_cycle(offset);
1077         if (last_cycle != 0) {          /* log completely written to */
1078                 xlog_put_bp(bp);
1079                 return 0;
1080         } else if (first_cycle != 1) {
1081                 /*
1082                  * If the cycle of the last block is zero, the cycle of
1083                  * the first block must be 1. If it's not, maybe we're
1084                  * not looking at a log... Bail out.
1085                  */
1086                 xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
1087                 error = XFS_ERROR(EINVAL);
                     goto bp_err;
1088         }
1089
1090         /* we have a partially zeroed log */
1091         last_blk = log_bbnum-1;
1092         if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1093                 goto bp_err;
1094
1095         /*
1096          * Validate the answer.  Because there is no way to guarantee that
1097          * the entire log is made up of log records which are the same size,
1098          * we scan over the defined maximum blocks.  At this point, the maximum
1099          * is not chosen to mean anything special.   XXXmiken
1100          */
1101         num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1102         ASSERT(num_scan_bblks <= INT_MAX);
1103
1104         if (last_blk < num_scan_bblks)
1105                 num_scan_bblks = last_blk;
1106         start_blk = last_blk - num_scan_bblks;
1107
1108         /*
1109          * We search for any instances of cycle number 0 that occur before
1110          * our current estimate of the head.  What we're trying to detect is
1111          *        1 ... | 0 | 1 | 0...
1112          *                       ^ binary search ends here
1113          */
1114         if ((error = xlog_find_verify_cycle(log, start_blk,
1115                                          (int)num_scan_bblks, 0, &new_blk)))
1116                 goto bp_err;
1117         if (new_blk != -1)
1118                 last_blk = new_blk;
1119
1120         /*
1121          * Potentially backup over partial log record write.  We don't need
1122          * to search the end of the log because we know it is zero.
1123          */
1124         if ((error = xlog_find_verify_log_record(log, start_blk,
1125                                 &last_blk, 0)) == -1) {
1126             error = XFS_ERROR(EIO);
1127             goto bp_err;
1128         } else if (error)
1129             goto bp_err;
1130
1131         *blk_no = last_blk;
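            /*
             * Success: fall through and return -1 so the caller knows the log
             * is only partially zeroed and *blk_no is the first block with
             * cycle number 0.
             */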
1132 bp_err:
1133         xlog_put_bp(bp);
1134         if (error)
1135                 return error;
1136         return -1;
1137 }
1138
1139 /*
1140  * These are simple subroutines used by xlog_clear_stale_blocks() below
1141  * to initialize a buffer full of empty log record headers and write
1142  * them into the log.
1143  */
1144 STATIC void
1145 xlog_add_record(
1146         xlog_t                  *log,
1147         xfs_caddr_t             buf,
1148         int                     cycle,
1149         int                     block,
1150         int                     tail_cycle,
1151         int                     tail_block)
1152 {
1153         xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;
1154
1155         memset(buf, 0, BBSIZE);
1156         recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1157         recp->h_cycle = cpu_to_be32(cycle);
1158         recp->h_version = cpu_to_be32(
1159                         xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1160         recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1161         recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1162         recp->h_fmt = cpu_to_be32(XLOG_FMT);
1163         memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1164 }
1165
1166 STATIC int
1167 xlog_write_log_records(
1168         xlog_t          *log,
1169         int             cycle,
1170         int             start_block,
1171         int             blocks,
1172         int             tail_cycle,
1173         int             tail_block)
1174 {
1175         xfs_caddr_t     offset;
1176         xfs_buf_t       *bp;
1177         int             balign, ealign;
1178         int             sectbb = log->l_sectBBsize;
1179         int             end_block = start_block + blocks;
1180         int             bufblks;
1181         int             error = 0;
1182         int             i, j = 0;
1183
1184         /*
1185          * Greedily allocate a buffer big enough to handle the full
1186          * range of basic blocks to be written.  If that fails, try
1187          * a smaller size.  We need to be able to write at least a
1188          * log sector, or we're out of luck.
1189          */
1190         bufblks = 1 << ffs(blocks);
1191         while (!(bp = xlog_get_bp(log, bufblks))) {
1192                 bufblks >>= 1;
1193                 if (bufblks < sectbb)
1194                         return ENOMEM;
1195         }
1196
1197         /* We may need to do a read at the start to fill in part of
1198          * the buffer in the starting sector not covered by the first
1199          * write below.
1200          */
1201         balign = round_down(start_block, sectbb);
1202         if (balign != start_block) {
1203                 error = xlog_bread_noalign(log, start_block, 1, bp);
1204                 if (error)
1205                         goto out_put_bp;
1206
1207                 j = start_block - balign;
1208         }
1209
1210         for (i = start_block; i < end_block; i += bufblks) {
1211                 int             bcount, endcount;
1212
1213                 bcount = min(bufblks, end_block - start_block);
1214                 endcount = bcount - j;
1215
1216                 /* We may need to do a read at the end to fill in part of
1217                  * the buffer in the final sector not covered by the write.
1218                  * If this is the same sector as the above read, skip it.
1219                  */
1220                 ealign = round_down(end_block, sectbb);
1221                 if (j == 0 && (start_block + endcount > ealign)) {
1222                         offset = XFS_BUF_PTR(bp);
1223                         balign = BBTOB(ealign - start_block);
1224                         error = XFS_BUF_SET_PTR(bp, offset + balign,
1225                                                 BBTOB(sectbb));
1226                         if (error)
1227                                 break;
1228
1229                         error = xlog_bread_noalign(log, ealign, sectbb, bp);
1230                         if (error)
1231                                 break;
1232
1233                         error = XFS_BUF_SET_PTR(bp, offset, bufblks);
1234                         if (error)
1235                                 break;
1236                 }
1237
1238                 offset = xlog_align(log, start_block, endcount, bp);
1239                 for (; j < endcount; j++) {
1240                         xlog_add_record(log, offset, cycle, i+j,
1241                                         tail_cycle, tail_block);
1242                         offset += BBSIZE;
1243                 }
1244                 error = xlog_bwrite(log, start_block, endcount, bp);
1245                 if (error)
1246                         break;
1247                 start_block += endcount;
1248                 j = 0;
1249         }
1250
1251  out_put_bp:
1252         xlog_put_bp(bp);
1253         return error;
1254 }
1255
1256 /*
1257  * This routine is called to blow away any incomplete log writes out
1258  * in front of the log head.  We do this so that we won't become confused
1259  * if we come up, write only a little bit more, and then crash again.
1260  * If we leave the partial log records out there, this situation could
1261  * cause us to think those partial writes are valid blocks since they
1262  * have the current cycle number.  We get rid of them by overwriting them
1263  * with empty log records with the old cycle number rather than the
1264  * current one.
1265  *
1266  * The tail lsn is passed in rather than taken from
1267  * the log so that we will not write over the unmount record after a
1268  * clean unmount in a 512 block log.  Doing so would leave the log without
1269  * any valid log records in it until a new one was written.  If we crashed
1270  * during that time we would not be able to recover.
1271  */
1272 STATIC int
1273 xlog_clear_stale_blocks(
1274         xlog_t          *log,
1275         xfs_lsn_t       tail_lsn)
1276 {
1277         int             tail_cycle, head_cycle;
1278         int             tail_block, head_block;
1279         int             tail_distance, max_distance;
1280         int             distance;
1281         int             error;
1282
1283         tail_cycle = CYCLE_LSN(tail_lsn);
1284         tail_block = BLOCK_LSN(tail_lsn);
1285         head_cycle = log->l_curr_cycle;
1286         head_block = log->l_curr_block;
1287
1288         /*
1289          * Figure out the distance between the new head of the log
1290          * and the tail.  We want to write over any blocks beyond the
1291          * head that we may have written just before the crash, but
1292          * we don't want to overwrite the tail of the log.
1293          */
1294         if (head_cycle == tail_cycle) {
1295                 /*
1296                  * The tail is behind the head in the physical log,
1297                  * so the distance from the head to the tail is the
1298                  * distance from the head to the end of the log plus
1299                  * the distance from the beginning of the log to the
1300                  * tail.
1301                  */
1302                 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1303                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1304                                          XFS_ERRLEVEL_LOW, log->l_mp);
1305                         return XFS_ERROR(EFSCORRUPTED);
1306                 }
1307                 tail_distance = tail_block + (log->l_logBBsize - head_block);
1308         } else {
1309                 /*
1310                  * The head is behind the tail in the physical log,
1311                  * so the distance from the head to the tail is just
1312                  * the tail block minus the head block.
1313                  */
1314                 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1315                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1316                                          XFS_ERRLEVEL_LOW, log->l_mp);
1317                         return XFS_ERROR(EFSCORRUPTED);
1318                 }
1319                 tail_distance = tail_block - head_block;
1320         }
1321
1322         /*
1323          * If the head is right up against the tail, we can't clear
1324          * anything.
1325          */
1326         if (tail_distance <= 0) {
1327                 ASSERT(tail_distance == 0);
1328                 return 0;
1329         }
1330
1331         max_distance = XLOG_TOTAL_REC_SHIFT(log);
1332         /*
1333          * Take the smaller of the maximum amount of outstanding I/O
1334          * we could have and the distance to the tail to clear out.
1335          * We take the smaller so that we don't overwrite the tail and
1336          * we don't waste all day writing from the head to the tail
1337          * for no reason.
1338          */
1339         max_distance = MIN(max_distance, tail_distance);
1340
1341         if ((head_block + max_distance) <= log->l_logBBsize) {
1342                 /*
1343                  * We can stomp all the blocks we need to without
1344                  * wrapping around the end of the log.  Just do it
1345                  * in a single write.  Use the cycle number of the
1346                  * current cycle minus one so that the log will look like:
1347                  *     n ... | n - 1 ...
1348                  */
1349                 error = xlog_write_log_records(log, (head_cycle - 1),
1350                                 head_block, max_distance, tail_cycle,
1351                                 tail_block);
1352                 if (error)
1353                         return error;
1354         } else {
1355                 /*
1356                  * We need to wrap around the end of the physical log in
1357                  * order to clear all the blocks.  Do it in two separate
1358                  * I/Os.  The first write should be from the head to the
1359                  * end of the physical log, and it should use the current
1360                  * cycle number minus one just like above.
1361                  */
1362                 distance = log->l_logBBsize - head_block;
1363                 error = xlog_write_log_records(log, (head_cycle - 1),
1364                                 head_block, distance, tail_cycle,
1365                                 tail_block);
1366
1367                 if (error)
1368                         return error;
1369
1370                 /*
1371                  * Now write the blocks at the start of the physical log.
1372                  * This writes the remainder of the blocks we want to clear.
1373                  * It uses the current cycle number since we're now on the
1374                  * same cycle as the head so that we get:
1375                  *    n ... n ... | n - 1 ...
1376                  *    ^^^^^ blocks we're writing
1377                  */
1378                 distance = max_distance - (log->l_logBBsize - head_block);
1379                 error = xlog_write_log_records(log, head_cycle, 0, distance,
1380                                 tail_cycle, tail_block);
1381                 if (error)
1382                         return error;
1383         }
1384
1385         return 0;
1386 }
1387
1388 /******************************************************************************
1389  *
1390  *              Log recover routines
1391  *
1392  ******************************************************************************
1393  */
1394
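/*
 * Transactions being reconstructed from the log are tracked by their
 * transaction id (tid) on caller-supplied hlist buckets.  The two helpers
 * below look up the in-memory transaction for a given tid and allocate and
 * insert a new one when the first operation of a transaction is seen.
 */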
1395 STATIC xlog_recover_t *
1396 xlog_recover_find_tid(
1397         struct hlist_head       *head,
1398         xlog_tid_t              tid)
1399 {
1400         xlog_recover_t          *trans;
1401         struct hlist_node       *n;
1402
1403         hlist_for_each_entry(trans, n, head, r_list) {
1404                 if (trans->r_log_tid == tid)
1405                         return trans;
1406         }
1407         return NULL;
1408 }
1409
1410 STATIC void
1411 xlog_recover_new_tid(
1412         struct hlist_head       *head,
1413         xlog_tid_t              tid,
1414         xfs_lsn_t               lsn)
1415 {
1416         xlog_recover_t          *trans;
1417
1418         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1419         trans->r_log_tid   = tid;
1420         trans->r_lsn       = lsn;
1421         INIT_LIST_HEAD(&trans->r_itemq);
1422
1423         INIT_HLIST_NODE(&trans->r_list);
1424         hlist_add_head(&trans->r_list, head);
1425 }
1426
1427 STATIC void
1428 xlog_recover_add_item(
1429         struct list_head        *head)
1430 {
1431         xlog_recover_item_t     *item;
1432
1433         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1434         INIT_LIST_HEAD(&item->ri_list);
1435         list_add_tail(&item->ri_list, head);
1436 }
1437
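/*
 * Add the data of a continuation operation to the transaction being
 * reconstructed.  The bytes belong to the last region of the last item on
 * the item queue, so that region's buffer is reallocated and extended.  If
 * no items have been added yet, the bytes complete the partially copied
 * transaction header instead.
 */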
1438 STATIC int
1439 xlog_recover_add_to_cont_trans(
1440         struct log              *log,
1441         xlog_recover_t          *trans,
1442         xfs_caddr_t             dp,
1443         int                     len)
1444 {
1445         xlog_recover_item_t     *item;
1446         xfs_caddr_t             ptr, old_ptr;
1447         int                     old_len;
1448
1449         if (list_empty(&trans->r_itemq)) {
1450                 /* finish copying rest of trans header */
1451                 xlog_recover_add_item(&trans->r_itemq);
1452                 ptr = (xfs_caddr_t) &trans->r_theader +
1453                                 sizeof(xfs_trans_header_t) - len;
1454                 memcpy(ptr, dp, len); /* d, s, l */
1455                 return 0;
1456         }
1457         /* take the tail entry */
1458         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1459
1460         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1461         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1462
1463         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1464         memcpy(&ptr[old_len], dp, len); /* d, s, l */
1465         item->ri_buf[item->ri_cnt-1].i_len += len;
1466         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1467         trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1468         return 0;
1469 }
1470
1471 /*
1472  * The next region to add is the start of a new region.  It could be
1473  * a whole region or just the first part of one.  Because
1474  * of this, the assumption here is that the type and size fields of all
1475  * format structures fit into the first 32 bits of the structure.
1476  *
1477  * This works because all regions must be 32 bit aligned.  Therefore, we
1478  * either have both fields or we have neither field.  In the case we have
1479  * neither field, the data part of the region is zero length.  We only have
1480  * a log_op_header and can throw away the header since a new one will appear
1481  * later.  If we have at least 4 bytes, then we can determine how many regions
1482  * will appear in the current log item.
1483  */
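/*
 * For example, xfs_inode_log_format_t starts with a 16 bit ilf_type
 * followed by a 16 bit ilf_size, so the first four bytes of the region are
 * enough to tell how many regions (ri_total) the item will eventually
 * carry.
 */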
1484 STATIC int
1485 xlog_recover_add_to_trans(
1486         struct log              *log,
1487         xlog_recover_t          *trans,
1488         xfs_caddr_t             dp,
1489         int                     len)
1490 {
1491         xfs_inode_log_format_t  *in_f;                  /* any will do */
1492         xlog_recover_item_t     *item;
1493         xfs_caddr_t             ptr;
1494
1495         if (!len)
1496                 return 0;
1497         if (list_empty(&trans->r_itemq)) {
1498                 /* we need to catch log corruptions here */
1499                 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1500                         xlog_warn("XFS: xlog_recover_add_to_trans: "
1501                                   "bad header magic number");
1502                         ASSERT(0);
1503                         return XFS_ERROR(EIO);
1504                 }
1505                 if (len == sizeof(xfs_trans_header_t))
1506                         xlog_recover_add_item(&trans->r_itemq);
1507                 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1508                 return 0;
1509         }
1510
1511         ptr = kmem_alloc(len, KM_SLEEP);
1512         memcpy(ptr, dp, len);
1513         in_f = (xfs_inode_log_format_t *)ptr;
1514
1515         /* take the tail entry */
1516         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1517         if (item->ri_total != 0 &&
1518              item->ri_total == item->ri_cnt) {
1519                 /* tail item is in use, get a new one */
1520                 xlog_recover_add_item(&trans->r_itemq);
1521                 item = list_entry(trans->r_itemq.prev,
1522                                         xlog_recover_item_t, ri_list);
1523         }
1524
1525         if (item->ri_total == 0) {              /* first region to be added */
1526                 if (in_f->ilf_size == 0 ||
1527                     in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1528                         xlog_warn(
1529         "XFS: bad number of regions (%d) in inode log format",
1530                                   in_f->ilf_size);
1531                         ASSERT(0);
1532                         return XFS_ERROR(EIO);
1533                 }
1534
1535                 item->ri_total = in_f->ilf_size;
1536                 item->ri_buf =
1537                         kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1538                                     KM_SLEEP);
1539         }
1540         ASSERT(item->ri_total > item->ri_cnt);
1541         /* Description region is ri_buf[0] */
1542         item->ri_buf[item->ri_cnt].i_addr = ptr;
1543         item->ri_buf[item->ri_cnt].i_len  = len;
1544         item->ri_cnt++;
1545         trace_xfs_log_recover_item_add(log, trans, item, 0);
1546         return 0;
1547 }
1548
1549 /*
1550  * Sort the log items in the transaction. Cancelled buffers need
1551  * to be put first so they are processed before any items that might
1552  * modify the buffers. If they are cancelled, then the modifications
1553  * don't need to be replayed.
1554  */
1555 STATIC int
1556 xlog_recover_reorder_trans(
1557         struct log              *log,
1558         xlog_recover_t          *trans,
1559         int                     pass)
1560 {
1561         xlog_recover_item_t     *item, *n;
1562         LIST_HEAD(sort_list);
1563
1564         list_splice_init(&trans->r_itemq, &sort_list);
1565         list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1566                 xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
1567
1568                 switch (ITEM_TYPE(item)) {
1569                 case XFS_LI_BUF:
1570                         if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1571                                 trace_xfs_log_recover_item_reorder_head(log,
1572                                                         trans, item, pass);
1573                                 list_move(&item->ri_list, &trans->r_itemq);
1574                                 break;
1575                         }
1576                 case XFS_LI_INODE:
1577                 case XFS_LI_DQUOT:
1578                 case XFS_LI_QUOTAOFF:
1579                 case XFS_LI_EFD:
1580                 case XFS_LI_EFI:
1581                         trace_xfs_log_recover_item_reorder_tail(log,
1582                                                         trans, item, pass);
1583                         list_move_tail(&item->ri_list, &trans->r_itemq);
1584                         break;
1585                 default:
1586                         xlog_warn(
1587         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1588                         ASSERT(0);
1589                         return XFS_ERROR(EIO);
1590                 }
1591         }
1592         ASSERT(list_empty(&sort_list));
1593         return 0;
1594 }
1595
1596 /*
1597  * Build up the table of buf cancel records so that we don't replay
1598  * cancelled data in the second pass.  For buffer records that are
1599  * not cancel records, there is nothing to do here so we just return.
1600  *
1601  * If we get a cancel record which is already in the table, this indicates
1602  * that the buffer was cancelled multiple times.  In order to ensure
1603  * that during pass 2 we keep the record in the table until we reach its
1604  * last occurrence in the log, we keep a reference count in the cancel
1605  * record in the table to tell us how many times we expect to see this
1606  * record during the second pass.
1607  */
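/*
 * For example, a buffer that is cancelled three times in the log ends
 * pass 1 with a single table entry whose bc_refcount is 3.  Pass 2 drops
 * one reference each time it meets one of those cancel records and frees
 * the entry on the last one, so any later, non-cancelled reuse of the same
 * blocks is replayed normally.
 */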
1608 STATIC void
1609 xlog_recover_do_buffer_pass1(
1610         xlog_t                  *log,
1611         xfs_buf_log_format_t    *buf_f)
1612 {
1613         xfs_buf_cancel_t        *bcp;
1614         xfs_buf_cancel_t        *nextp;
1615         xfs_buf_cancel_t        *prevp;
1616         xfs_buf_cancel_t        **bucket;
1617         xfs_daddr_t             blkno = 0;
1618         uint                    len = 0;
1619         ushort                  flags = 0;
1620
1621         switch (buf_f->blf_type) {
1622         case XFS_LI_BUF:
1623                 blkno = buf_f->blf_blkno;
1624                 len = buf_f->blf_len;
1625                 flags = buf_f->blf_flags;
1626                 break;
1627         }
1628
1629         /*
1630          * If this isn't a cancel buffer item, then just return.
1631          */
1632         if (!(flags & XFS_BLF_CANCEL)) {
1633                 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1634                 return;
1635         }
1636
1637         /*
1638          * Insert an xfs_buf_cancel record into the hash table of
1639          * them.  If there is already an identical record, bump
1640          * its reference count.
1641          */
1642         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1643                                           XLOG_BC_TABLE_SIZE];
1644         /*
1645          * If the hash bucket is empty then just insert a new record into
1646          * the bucket.
1647          */
1648         if (*bucket == NULL) {
1649                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1650                                                      KM_SLEEP);
1651                 bcp->bc_blkno = blkno;
1652                 bcp->bc_len = len;
1653                 bcp->bc_refcount = 1;
1654                 bcp->bc_next = NULL;
1655                 *bucket = bcp;
1656                 return;
1657         }
1658
1659         /*
1660          * The hash bucket is not empty, so search for duplicates of our
1661          * record.  If we find one then just bump its refcount.  If not
1662          * then add us at the end of the list.
1663          */
1664         prevp = NULL;
1665         nextp = *bucket;
1666         while (nextp != NULL) {
1667                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1668                         nextp->bc_refcount++;
1669                         trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1670                         return;
1671                 }
1672                 prevp = nextp;
1673                 nextp = nextp->bc_next;
1674         }
1675         ASSERT(prevp != NULL);
1676         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1677                                              KM_SLEEP);
1678         bcp->bc_blkno = blkno;
1679         bcp->bc_len = len;
1680         bcp->bc_refcount = 1;
1681         bcp->bc_next = NULL;
1682         prevp->bc_next = bcp;
1683         trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1684 }
1685
1686 /*
1687  * Check to see whether the buffer being recovered has a corresponding
1688  * entry in the buffer cancel record table.  If it does then return 1
1689  * so that it will be cancelled, otherwise return 0.  If the buffer is
1690  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1691  * the refcount on the entry in the table and remove it from the table
1692  * if this is the last reference.
1693  *
1694  * We remove the cancel record from the table when we encounter its
1695  * last occurrence in the log so that if the same buffer is re-used
1696  * again after its last cancellation we actually replay the changes
1697  * made at that point.
1698  */
1699 STATIC int
1700 xlog_check_buffer_cancelled(
1701         xlog_t                  *log,
1702         xfs_daddr_t             blkno,
1703         uint                    len,
1704         ushort                  flags)
1705 {
1706         xfs_buf_cancel_t        *bcp;
1707         xfs_buf_cancel_t        *prevp;
1708         xfs_buf_cancel_t        **bucket;
1709
1710         if (log->l_buf_cancel_table == NULL) {
1711                 /*
1712                  * There is nothing in the table built in pass one,
1713                  * so this buffer must not be cancelled.
1714                  */
1715                 ASSERT(!(flags & XFS_BLF_CANCEL));
1716                 return 0;
1717         }
1718
1719         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1720                                           XLOG_BC_TABLE_SIZE];
1721         bcp = *bucket;
1722         if (bcp == NULL) {
1723                 /*
1724                  * There is no corresponding entry in the table built
1725                  * in pass one, so this buffer has not been cancelled.
1726                  */
1727                 ASSERT(!(flags & XFS_BLF_CANCEL));
1728                 return 0;
1729         }
1730
1731         /*
1732          * Search for an entry in the buffer cancel table that
1733          * matches our buffer.
1734          */
1735         prevp = NULL;
1736         while (bcp != NULL) {
1737                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1738                         /*
1739                          * We've got a match, so return 1 so that the
1740                          * recovery of this buffer is cancelled.
1741                          * If this buffer is actually a buffer cancel
1742                          * log item, then decrement the refcount on the
1743                          * one in the table and remove it if this is the
1744                          * last reference.
1745                          */
1746                         if (flags & XFS_BLF_CANCEL) {
1747                                 bcp->bc_refcount--;
1748                                 if (bcp->bc_refcount == 0) {
1749                                         if (prevp == NULL) {
1750                                                 *bucket = bcp->bc_next;
1751                                         } else {
1752                                                 prevp->bc_next = bcp->bc_next;
1753                                         }
1754                                         kmem_free(bcp);
1755                                 }
1756                         }
1757                         return 1;
1758                 }
1759                 prevp = bcp;
1760                 bcp = bcp->bc_next;
1761         }
1762         /*
1763          * We didn't find a corresponding entry in the table, so
1764          * return 0 so that the buffer is NOT cancelled.
1765          */
1766         ASSERT(!(flags & XFS_BLF_CANCEL));
1767         return 0;
1768 }
1769
1770 STATIC int
1771 xlog_recover_do_buffer_pass2(
1772         xlog_t                  *log,
1773         xfs_buf_log_format_t    *buf_f)
1774 {
1775         xfs_daddr_t             blkno = 0;
1776         ushort                  flags = 0;
1777         uint                    len = 0;
1778
1779         switch (buf_f->blf_type) {
1780         case XFS_LI_BUF:
1781                 blkno = buf_f->blf_blkno;
1782                 flags = buf_f->blf_flags;
1783                 len = buf_f->blf_len;
1784                 break;
1785         }
1786
1787         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1788 }
1789
1790 /*
1791  * Perform recovery for a buffer full of inodes.  In these buffers,
1792  * the only data which should be recovered is that which corresponds
1793  * to the di_next_unlinked pointers in the on disk inode structures.
1794  * The rest of the data for the inodes is always logged through the
1795  * inodes themselves rather than the inode buffer and is recovered
1796  * in xlog_recover_do_inode_trans().
1797  *
1798  * The only time when buffers full of inodes are fully recovered is
1799  * when the buffer is full of newly allocated inodes.  In this case
1800  * the buffer will not be marked as an inode buffer and so will be
1801  * sent to xlog_recover_do_reg_buffer() below during recovery.
1802  */
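/*
 * The walk below is driven by the buf log item's dirty bitmap: each bit in
 * blf_data_map covers one XFS_BLF_CHUNK sized piece of the buffer, and each
 * contiguous run of bits corresponds to one of the item's data regions
 * (ri_buf[1] onwards).  For every inode in the buffer we locate the logged
 * region, if any, that covers its di_next_unlinked word and copy just that
 * word across.
 */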
1803 STATIC int
1804 xlog_recover_do_inode_buffer(
1805         xfs_mount_t             *mp,
1806         xlog_recover_item_t     *item,
1807         xfs_buf_t               *bp,
1808         xfs_buf_log_format_t    *buf_f)
1809 {
1810         int                     i;
1811         int                     item_index;
1812         int                     bit;
1813         int                     nbits;
1814         int                     reg_buf_offset;
1815         int                     reg_buf_bytes;
1816         int                     next_unlinked_offset;
1817         int                     inodes_per_buf;
1818         xfs_agino_t             *logged_nextp;
1819         xfs_agino_t             *buffer_nextp;
1820         unsigned int            *data_map = NULL;
1821         unsigned int            map_size = 0;
1822
1823         trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1824
1825         switch (buf_f->blf_type) {
1826         case XFS_LI_BUF:
1827                 data_map = buf_f->blf_data_map;
1828                 map_size = buf_f->blf_map_size;
1829                 break;
1830         }
1831         /*
1832          * Set the variables corresponding to the current region to
1833          * 0 so that we'll initialize them on the first pass through
1834          * the loop.
1835          */
1836         reg_buf_offset = 0;
1837         reg_buf_bytes = 0;
1838         bit = 0;
1839         nbits = 0;
1840         item_index = 0;
1841         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1842         for (i = 0; i < inodes_per_buf; i++) {
1843                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1844                         offsetof(xfs_dinode_t, di_next_unlinked);
1845
1846                 while (next_unlinked_offset >=
1847                        (reg_buf_offset + reg_buf_bytes)) {
1848                         /*
1849                          * The next di_next_unlinked field is beyond
1850                          * the current logged region.  Find the next
1851                          * logged region that contains or is beyond
1852                          * the current di_next_unlinked field.
1853                          */
1854                         bit += nbits;
1855                         bit = xfs_next_bit(data_map, map_size, bit);
1856
1857                         /*
1858                          * If there are no more logged regions in the
1859                          * buffer, then we're done.
1860                          */
1861                         if (bit == -1) {
1862                                 return 0;
1863                         }
1864
1865                         nbits = xfs_contig_bits(data_map, map_size,
1866                                                          bit);
1867                         ASSERT(nbits > 0);
1868                         reg_buf_offset = bit << XFS_BLF_SHIFT;
1869                         reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1870                         item_index++;
1871                 }
1872
1873                 /*
1874                  * If the current logged region starts after the current
1875                  * di_next_unlinked field, then move on to the next
1876                  * di_next_unlinked field.
1877                  */
1878                 if (next_unlinked_offset < reg_buf_offset) {
1879                         continue;
1880                 }
1881
1882                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1883                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1884                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1885
1886                 /*
1887                  * The current logged region contains a copy of the
1888                  * current di_next_unlinked field.  Extract its value
1889                  * and copy it to the buffer copy.
1890                  */
1891                 logged_nextp = item->ri_buf[item_index].i_addr +
1892                                 next_unlinked_offset - reg_buf_offset;
1893                 if (unlikely(*logged_nextp == 0)) {
1894                         xfs_fs_cmn_err(CE_ALERT, mp,
1895                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1896                                 item, bp);
1897                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1898                                          XFS_ERRLEVEL_LOW, mp);
1899                         return XFS_ERROR(EFSCORRUPTED);
1900                 }
1901
1902                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1903                                               next_unlinked_offset);
1904                 *buffer_nextp = *logged_nextp;
1905         }
1906
1907         return 0;
1908 }
1909
1910 /*
1911  * Perform a 'normal' buffer recovery.  Each logged region of the
1912  * buffer should be copied over the corresponding region in the
1913  * given buffer.  The bitmap in the buf log format structure indicates
1914  * where to place the logged data.
1915  */
1916 /*ARGSUSED*/
1917 STATIC void
1918 xlog_recover_do_reg_buffer(
1919         struct xfs_mount        *mp,
1920         xlog_recover_item_t     *item,
1921         xfs_buf_t               *bp,
1922         xfs_buf_log_format_t    *buf_f)
1923 {
1924         int                     i;
1925         int                     bit;
1926         int                     nbits;
1927         unsigned int            *data_map = NULL;
1928         unsigned int            map_size = 0;
1929         int                     error;
1930
1931         trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1932
1933         switch (buf_f->blf_type) {
1934         case XFS_LI_BUF:
1935                 data_map = buf_f->blf_data_map;
1936                 map_size = buf_f->blf_map_size;
1937                 break;
1938         }
1939         bit = 0;
1940         i = 1;  /* 0 is the buf format structure */
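        /*
         * Each set bit b in the map marks an XFS_BLF_CHUNK sized chunk of
         * the buffer as logged; a run of nbits contiguous bits becomes one
         * memcpy of ri_buf[i] into the buffer at byte offset
         * b << XFS_BLF_SHIFT (illustrative: bit 3 with nbits 2 copies
         * 2 * XFS_BLF_CHUNK bytes to offset 3 * XFS_BLF_CHUNK).
         */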
1941         while (1) {
1942                 bit = xfs_next_bit(data_map, map_size, bit);
1943                 if (bit == -1)
1944                         break;
1945                 nbits = xfs_contig_bits(data_map, map_size, bit);
1946                 ASSERT(nbits > 0);
1947                 ASSERT(item->ri_buf[i].i_addr != NULL);
1948                 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1949                 ASSERT(XFS_BUF_COUNT(bp) >=
1950                        ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
1951
1952                 /*
1953                  * Do a sanity check if this is a dquot buffer. Just checking
1954                  * the first dquot in the buffer should do. XXXThis is
1955                  * the first dquot in the buffer should do. XXX This is
1956                  */
1957                 error = 0;
1958                 if (buf_f->blf_flags &
1959                    (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1960                         if (item->ri_buf[i].i_addr == NULL) {
1961                                 cmn_err(CE_ALERT,
1962                                         "XFS: NULL dquot in %s.", __func__);
1963                                 goto next;
1964                         }
1965                         if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1966                                 cmn_err(CE_ALERT,
1967                                         "XFS: dquot too small (%d) in %s.",
1968                                         item->ri_buf[i].i_len, __func__);
1969                                 goto next;
1970                         }
1971                         error = xfs_qm_dqcheck(item->ri_buf[i].i_addr,
1972                                                -1, 0, XFS_QMOPT_DOWARN,
1973                                                "dquot_buf_recover");
1974                         if (error)
1975                                 goto next;
1976                 }
1977
1978                 memcpy(xfs_buf_offset(bp,
1979                         (uint)bit << XFS_BLF_SHIFT),    /* dest */
1980                         item->ri_buf[i].i_addr,         /* source */
1981                         nbits<<XFS_BLF_SHIFT);          /* length */
1982  next:
1983                 i++;
1984                 bit += nbits;
1985         }
1986
1987         /* Shouldn't be any more regions */
1988         ASSERT(i == item->ri_total);
1989 }
1990
1991 /*
1992  * Do some primitive error checking on ondisk dquot data structures.
1993  */
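/*
 * The recovery callers in this file pass XFS_QMOPT_DOWARN without
 * XFS_QMOPT_DQREPAIR, so a bad dquot is only reported and the non-zero
 * return value makes them either skip that dquot or fail the replay; the
 * re-initialization path at the bottom is typically only taken by
 * quotacheck.
 */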
1994 int
1995 xfs_qm_dqcheck(
1996         xfs_disk_dquot_t *ddq,
1997         xfs_dqid_t       id,
1998         uint             type,    /* used only when IO_dorepair is true */
1999         uint             flags,
2000         char             *str)
2001 {
2002         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
2003         int             errs = 0;
2004
2005         /*
2006          * We can encounter an uninitialized dquot buffer for 2 reasons:
2007          * 1. If we crash while deleting the quotainode(s), and those blks got
2008          *    used for user data. This is because we take the path of regular
2009          *    file deletion; however, the size field of quotainodes is never
2010          *    updated, so all the tricks that we play in itruncate_finish
2011          *    don't quite matter.
2012          *
2013          * 2. We don't play the quota buffers when there's a quotaoff logitem.
2014          *    But the allocation will be replayed so we'll end up with an
2015          *    uninitialized quota block.
2016          *
2017          * This is all fine; things are still consistent, and we haven't lost
2018          * any quota information. Just don't complain about bad dquot blks.
2019          */
2020         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
2021                 if (flags & XFS_QMOPT_DOWARN)
2022                         cmn_err(CE_ALERT,
2023                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2024                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2025                 errs++;
2026         }
2027         if (ddq->d_version != XFS_DQUOT_VERSION) {
2028                 if (flags & XFS_QMOPT_DOWARN)
2029                         cmn_err(CE_ALERT,
2030                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2031                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
2032                 errs++;
2033         }
2034
2035         if (ddq->d_flags != XFS_DQ_USER &&
2036             ddq->d_flags != XFS_DQ_PROJ &&
2037             ddq->d_flags != XFS_DQ_GROUP) {
2038                 if (flags & XFS_QMOPT_DOWARN)
2039                         cmn_err(CE_ALERT,
2040                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2041                         str, id, ddq->d_flags);
2042                 errs++;
2043         }
2044
2045         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2046                 if (flags & XFS_QMOPT_DOWARN)
2047                         cmn_err(CE_ALERT,
2048                         "%s : ondisk-dquot 0x%p, ID mismatch: "
2049                         "0x%x expected, found id 0x%x",
2050                         str, ddq, id, be32_to_cpu(ddq->d_id));
2051                 errs++;
2052         }
2053
2054         if (!errs && ddq->d_id) {
2055                 if (ddq->d_blk_softlimit &&
2056                     be64_to_cpu(ddq->d_bcount) >=
2057                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2058                         if (!ddq->d_btimer) {
2059                                 if (flags & XFS_QMOPT_DOWARN)
2060                                         cmn_err(CE_ALERT,
2061                                         "%s : Dquot ID 0x%x (0x%p) "
2062                                         "BLK TIMER NOT STARTED",
2063                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2064                                 errs++;
2065                         }
2066                 }
2067                 if (ddq->d_ino_softlimit &&
2068                     be64_to_cpu(ddq->d_icount) >=
2069                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2070                         if (!ddq->d_itimer) {
2071                                 if (flags & XFS_QMOPT_DOWARN)
2072                                         cmn_err(CE_ALERT,
2073                                         "%s : Dquot ID 0x%x (0x%p) "
2074                                         "INODE TIMER NOT STARTED",
2075                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2076                                 errs++;
2077                         }
2078                 }
2079                 if (ddq->d_rtb_softlimit &&
2080                     be64_to_cpu(ddq->d_rtbcount) >=
2081                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2082                         if (!ddq->d_rtbtimer) {
2083                                 if (flags & XFS_QMOPT_DOWARN)
2084                                         cmn_err(CE_ALERT,
2085                                         "%s : Dquot ID 0x%x (0x%p) "
2086                                         "RTBLK TIMER NOT STARTED",
2087                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2088                                 errs++;
2089                         }
2090                 }
2091         }
2092
2093         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2094                 return errs;
2095
2096         if (flags & XFS_QMOPT_DOWARN)
2097                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2098
2099         /*
2100          * Typically, a repair is only requested by quotacheck.
2101          */
2102         ASSERT(id != -1);
2103         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2104         memset(d, 0, sizeof(xfs_dqblk_t));
2105
2106         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2107         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2108         d->dd_diskdq.d_flags = type;
2109         d->dd_diskdq.d_id = cpu_to_be32(id);
2110
2111         return errs;
2112 }
2113
2114 /*
2115  * Perform a dquot buffer recovery.
2116  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2117  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2118  * Else, treat it as a regular buffer and do recovery.
2119  */
2120 STATIC void
2121 xlog_recover_do_dquot_buffer(
2122         xfs_mount_t             *mp,
2123         xlog_t                  *log,
2124         xlog_recover_item_t     *item,
2125         xfs_buf_t               *bp,
2126         xfs_buf_log_format_t    *buf_f)
2127 {
2128         uint                    type;
2129
2130         trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2131
2132         /*
2133          * Filesystems are required to send in quota flags at mount time.
2134          */
2135         if (mp->m_qflags == 0) {
2136                 return;
2137         }
2138
2139         type = 0;
2140         if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2141                 type |= XFS_DQ_USER;
2142         if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2143                 type |= XFS_DQ_PROJ;
2144         if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2145                 type |= XFS_DQ_GROUP;
2146         /*
2147          * This type of quota was turned off, so ignore this buffer
2148          */
2149         if (log->l_quotaoffs_flag & type)
2150                 return;
2151
2152         xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2153 }
2154
2155 /*
2156  * This routine replays a modification made to a buffer at runtime.
2157  * There are actually two types of buffer, regular and inode, which
2158  * are handled differently.  Inode buffers are special in that we only
2159  * recover a specific set of data from them, namely
2160  * the inode di_next_unlinked fields.  This is because all other inode
2161  * data is actually logged via inode records and any data we replay
2162  * here which overlaps that may be stale.
2163  *
2164  * When meta-data buffers are freed at run time we log a buffer item
2165  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2166  * of the buffer in the log should not be replayed at recovery time.
2167  * This is so that if the blocks covered by the buffer are reused for
2168  * file data before we crash we don't end up replaying old, freed
2169  * meta-data into a user's file.
2170  *
2171  * To handle the cancellation of buffer log items, we make two passes
2172  * over the log during recovery.  During the first we build a table of
2173  * those buffers which have been cancelled, and during the second we
2174  * only replay those buffers which do not have corresponding cancel
2175  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2176  * for more details on the implementation of the table of cancel records.
2177  */
2178 STATIC int
2179 xlog_recover_do_buffer_trans(
2180         xlog_t                  *log,
2181         xlog_recover_item_t     *item,
2182         int                     pass)
2183 {
2184         xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
2185         xfs_mount_t             *mp;
2186         xfs_buf_t               *bp;
2187         int                     error;
2188         int                     cancel;
2189         xfs_daddr_t             blkno;
2190         int                     len;
2191         ushort                  flags;
2192         uint                    buf_flags;
2193
2194         if (pass == XLOG_RECOVER_PASS1) {
2195                 /*
2196                  * In this pass we're only looking for buf items
2197                  * with the XFS_BLF_CANCEL bit set.
2198                  */
2199                 xlog_recover_do_buffer_pass1(log, buf_f);
2200                 return 0;
2201         } else {
2202                 /*
2203                  * In this pass we want to recover all the buffers
2204                  * which have not been cancelled and are not
2205                  * cancellation buffers themselves.  The routine
2206                  * we call here will tell us whether or not to
2207                  * continue with the replay of this buffer.
2208                  */
2209                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2210                 if (cancel) {
2211                         trace_xfs_log_recover_buf_cancel(log, buf_f);
2212                         return 0;
2213                 }
2214         }
2215         trace_xfs_log_recover_buf_recover(log, buf_f);
2216         switch (buf_f->blf_type) {
2217         case XFS_LI_BUF:
2218                 blkno = buf_f->blf_blkno;
2219                 len = buf_f->blf_len;
2220                 flags = buf_f->blf_flags;
2221                 break;
2222         default:
2223                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2224                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2225                         buf_f->blf_type, log->l_mp->m_logname ?
2226                         log->l_mp->m_logname : "internal");
2227                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2228                                  XFS_ERRLEVEL_LOW, log->l_mp);
2229                 return XFS_ERROR(EFSCORRUPTED);
2230         }
2231
2232         mp = log->l_mp;
2233         buf_flags = XBF_LOCK;
2234         if (!(flags & XFS_BLF_INODE_BUF))
2235                 buf_flags |= XBF_MAPPED;
2236
2237         bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
2238         if (XFS_BUF_ISERROR(bp)) {
2239                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2240                                   bp, blkno);
2241                 error = XFS_BUF_GETERROR(bp);
2242                 xfs_buf_relse(bp);
2243                 return error;
2244         }
2245
2246         error = 0;
2247         if (flags & XFS_BLF_INODE_BUF) {
2248                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2249         } else if (flags &
2250                   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2251                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2252         } else {
2253                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2254         }
2255         if (error)
2256                 return XFS_ERROR(error);
2257
2258         /*
2259          * Perform delayed write on the buffer.  Asynchronous writes will be
2260          * slower when taking into account all the buffers to be flushed.
2261          *
2262          * Also make sure that only inode buffers with good sizes stay in
2263          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2264          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2265          * buffers in the log can be a different size if the log was generated
2266          * by an older kernel using unclustered inode buffers or a newer kernel
2267          * running with a different inode cluster size.  Regardless, if the
2268          * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2269          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2270          * the buffer out of the buffer cache so that the buffer won't
2271          * overlap with future reads of those inodes.
2272          */
2273         if (XFS_DINODE_MAGIC ==
2274             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2275             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2276                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2277                 XFS_BUF_STALE(bp);
2278                 error = xfs_bwrite(mp, bp);
2279         } else {
2280                 ASSERT(bp->b_target->bt_mount == mp);
2281                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2282                 xfs_bdwrite(mp, bp);
2283         }
2284
2285         return (error);
2286 }
2287
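/*
 * Replay an inode log item.  The logged copy of the inode core (and any
 * logged data/attr fork regions) is validated against the on-disk inode
 * and then written over it: both the on-disk and the logged magic numbers
 * are checked, the replay is skipped if the on-disk flush iteration count
 * shows the on-disk inode is newer, and the mode/format combination,
 * extent counts, fork offset and record length are sanity checked before
 * anything is copied.
 */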
2288 STATIC int
2289 xlog_recover_do_inode_trans(
2290         xlog_t                  *log,
2291         xlog_recover_item_t     *item,
2292         int                     pass)
2293 {
2294         xfs_inode_log_format_t  *in_f;
2295         xfs_mount_t             *mp;
2296         xfs_buf_t               *bp;
2297         xfs_dinode_t            *dip;
2298         xfs_ino_t               ino;
2299         int                     len;
2300         xfs_caddr_t             src;
2301         xfs_caddr_t             dest;
2302         int                     error;
2303         int                     attr_index;
2304         uint                    fields;
2305         xfs_icdinode_t          *dicp;
2306         int                     need_free = 0;
2307
2308         if (pass == XLOG_RECOVER_PASS1) {
2309                 return 0;
2310         }
2311
2312         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2313                 in_f = item->ri_buf[0].i_addr;
2314         } else {
2315                 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2316                 need_free = 1;
2317                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2318                 if (error)
2319                         goto error;
2320         }
2321         ino = in_f->ilf_ino;
2322         mp = log->l_mp;
2323
2324         /*
2325          * The buffer holding this inode may have been freed and
2326          * cancelled; if so, do not replay the inode.
2327          */
2328         if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2329                                         in_f->ilf_len, 0)) {
2330                 error = 0;
2331                 trace_xfs_log_recover_inode_cancel(log, in_f);
2332                 goto error;
2333         }
2334         trace_xfs_log_recover_inode_recover(log, in_f);
2335
2336         bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
2337                           XBF_LOCK);
2338         if (XFS_BUF_ISERROR(bp)) {
2339                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2340                                   bp, in_f->ilf_blkno);
2341                 error = XFS_BUF_GETERROR(bp);
2342                 xfs_buf_relse(bp);
2343                 goto error;
2344         }
2345         error = 0;
2346         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2347         dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2348
2349         /*
2350          * Make sure the place we're flushing out to really looks
2351          * like an inode!
2352          */
2353         if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2354                 xfs_buf_relse(bp);
2355                 xfs_fs_cmn_err(CE_ALERT, mp,
2356                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2357                         dip, bp, ino);
2358                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2359                                  XFS_ERRLEVEL_LOW, mp);
2360                 error = EFSCORRUPTED;
2361                 goto error;
2362         }
2363         dicp = item->ri_buf[1].i_addr;
2364         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2365                 xfs_buf_relse(bp);
2366                 xfs_fs_cmn_err(CE_ALERT, mp,
2367                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2368                         item, ino);
2369                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2370                                  XFS_ERRLEVEL_LOW, mp);
2371                 error = EFSCORRUPTED;
2372                 goto error;
2373         }
2374
2375         /* Skip replay when the on disk inode is newer than the log one */
2376         if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2377                 /*
2378                  * Deal with the wrap case: a count of DI_MAX_FLUSH
2379                  * on disk is treated as older than small, wrapped counts
2380                  */
2381                 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2382                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2383                         /* do nothing */
2384                 } else {
2385                         xfs_buf_relse(bp);
2386                         trace_xfs_log_recover_inode_skip(log, in_f);
2387                         error = 0;
2388                         goto error;
2389                 }
2390         }
2391         /* Take the opportunity to reset the flush iteration count */
2392         dicp->di_flushiter = 0;
2393
2394         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2395                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2396                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2397                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2398                                          XFS_ERRLEVEL_LOW, mp, dicp);
2399                         xfs_buf_relse(bp);
2400                         xfs_fs_cmn_err(CE_ALERT, mp,
2401                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2402                                 item, dip, bp, ino);
2403                         error = EFSCORRUPTED;
2404                         goto error;
2405                 }
2406         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2407                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2408                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2409                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2410                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2411                                              XFS_ERRLEVEL_LOW, mp, dicp);
2412                         xfs_buf_relse(bp);
2413                         xfs_fs_cmn_err(CE_ALERT, mp,
2414                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2415                                 item, dip, bp, ino);
2416                         error = EFSCORRUPTED;
2417                         goto error;
2418                 }
2419         }
2420         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2421                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2422                                      XFS_ERRLEVEL_LOW, mp, dicp);
2423                 xfs_buf_relse(bp);
2424                 xfs_fs_cmn_err(CE_ALERT, mp,
2425                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2426                         item, dip, bp, ino,
2427                         dicp->di_nextents + dicp->di_anextents,
2428                         dicp->di_nblocks);
2429                 error = EFSCORRUPTED;
2430                 goto error;
2431         }
2432         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2433                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2434                                      XFS_ERRLEVEL_LOW, mp, dicp);
2435                 xfs_buf_relse(bp);
2436                 xfs_fs_cmn_err(CE_ALERT, mp,
2437                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2438                         item, dip, bp, ino, dicp->di_forkoff);
2439                 error = EFSCORRUPTED;
2440                 goto error;
2441         }
2442         if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2443                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2444                                      XFS_ERRLEVEL_LOW, mp, dicp);
2445                 xfs_buf_relse(bp);
2446                 xfs_fs_cmn_err(CE_ALERT, mp,
2447                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2448                         item->ri_buf[1].i_len, item);
2449                 error = EFSCORRUPTED;
2450                 goto error;
2451         }
2452
2453         /* The core is in in-core format */
2454         xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2455
2456         /* the rest is in on-disk format */
2457         if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2458                 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2459                         item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2460                         item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2461         }
2462
2463         fields = in_f->ilf_fields;
2464         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2465         case XFS_ILOG_DEV:
2466                 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2467                 break;
2468         case XFS_ILOG_UUID:
2469                 memcpy(XFS_DFORK_DPTR(dip),
2470                        &in_f->ilf_u.ilfu_uuid,
2471                        sizeof(uuid_t));
2472                 break;
2473         }
2474
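        /*
         * ilf_size counts the regions logged with this inode: 2 means just
         * the log format and the inode core, while 3 or 4 add one or both
         * of the data and attribute forks, which live in ri_buf[2] (and
         * ri_buf[3] when both forks were logged).
         */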
2475         if (in_f->ilf_size == 2)
2476                 goto write_inode_buffer;
2477         len = item->ri_buf[2].i_len;
2478         src = item->ri_buf[2].i_addr;
2479         ASSERT(in_f->ilf_size <= 4);
2480         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2481         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2482                (len == in_f->ilf_dsize));
2483
2484         switch (fields & XFS_ILOG_DFORK) {
2485         case XFS_ILOG_DDATA:
2486         case XFS_ILOG_DEXT:
2487                 memcpy(XFS_DFORK_DPTR(dip), src, len);
2488                 break;
2489
2490         case XFS_ILOG_DBROOT:
2491                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2492                                  (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2493                                  XFS_DFORK_DSIZE(dip, mp));
2494                 break;
2495
2496         default:
2497                 /*
2498                  * There are no data fork flags set.
2499                  */
2500                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2501                 break;
2502         }
2503
2504         /*
2505          * If we logged any attribute data, recover it.  There may or
2506          * may not have been any other non-core data logged in this
2507          * transaction.
2508          */
2509         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2510                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2511                         attr_index = 3;
2512                 } else {
2513                         attr_index = 2;
2514                 }
2515                 len = item->ri_buf[attr_index].i_len;
2516                 src = item->ri_buf[attr_index].i_addr;
2517                 ASSERT(len == in_f->ilf_asize);
2518
2519                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2520                 case XFS_ILOG_ADATA:
2521                 case XFS_ILOG_AEXT:
2522                         dest = XFS_DFORK_APTR(dip);
2523                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2524                         memcpy(dest, src, len);
2525                         break;
2526
2527                 case XFS_ILOG_ABROOT:
2528                         dest = XFS_DFORK_APTR(dip);
2529                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2530                                          len, (xfs_bmdr_block_t*)dest,
2531                                          XFS_DFORK_ASIZE(dip, mp));
2532                         break;
2533
2534                 default:
2535                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2536                         ASSERT(0);
2537                         xfs_buf_relse(bp);
2538                         error = EIO;
2539                         goto error;
2540                 }
2541         }
2542
2543 write_inode_buffer:
2544         ASSERT(bp->b_target->bt_mount == mp);
2545         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2546         xfs_bdwrite(mp, bp);
2547 error:
2548         if (need_free)
2549                 kmem_free(in_f);
2550         return XFS_ERROR(error);
2551 }
2552
2553 /*
2554  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2555  * structure, so that we know not to do any dquot item or dquot buffer
2556  * recovery of that type.
2557  */
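/*
 * Note that the flag is recorded during pass 1, so it is already in place
 * before any dquot items or dquot buffers are replayed in pass 2.
 */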
2558 STATIC int
2559 xlog_recover_do_quotaoff_trans(
2560         xlog_t                  *log,
2561         xlog_recover_item_t     *item,
2562         int                     pass)
2563 {
2564         xfs_qoff_logformat_t    *qoff_f;
2565
2566         if (pass == XLOG_RECOVER_PASS2) {
2567                 return (0);
2568         }
2569
2570         qoff_f = item->ri_buf[0].i_addr;
2571         ASSERT(qoff_f);
2572
2573         /*
2574          * The logitem format's flag tells us if this was user quotaoff,
2575          * group/project quotaoff or both.
2576          */
2577         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2578                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2579         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2580                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2581         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2582                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2583
2584         return (0);
2585 }
2586
2587 /*
2588  * Recover a dquot record
2589  */
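/*
 * The logged dquot image (ri_buf[1]) is checked for basic sanity first,
 * then the on-disk dquot buffer described by the log format (ri_buf[0]) is
 * read in, checked, and overwritten with the logged copy via a delayed
 * write.  Quota types turned off by a quotaoff record seen in pass 1 are
 * ignored.
 */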
2590 STATIC int
2591 xlog_recover_do_dquot_trans(
2592         xlog_t                  *log,
2593         xlog_recover_item_t     *item,
2594         int                     pass)
2595 {
2596         xfs_mount_t             *mp;
2597         xfs_buf_t               *bp;
2598         struct xfs_disk_dquot   *ddq, *recddq;
2599         int                     error;
2600         xfs_dq_logformat_t      *dq_f;
2601         uint                    type;
2602
2603         if (pass == XLOG_RECOVER_PASS1) {
2604                 return 0;
2605         }
2606         mp = log->l_mp;
2607
2608         /*
2609          * Filesystems are required to send in quota flags at mount time.
2610          */
2611         if (mp->m_qflags == 0)
2612                 return (0);
2613
2614         recddq = item->ri_buf[1].i_addr;
2615         if (recddq == NULL) {
2616                 cmn_err(CE_ALERT,
2617                         "XFS: NULL dquot in %s.", __func__);
2618                 return XFS_ERROR(EIO);
2619         }
2620         if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2621                 cmn_err(CE_ALERT,
2622                         "XFS: dquot too small (%d) in %s.",
2623                         item->ri_buf[1].i_len, __func__);
2624                 return XFS_ERROR(EIO);
2625         }
2626
2627         /*
2628          * This type of quota was turned off, so ignore this record.
2629          */
2630         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2631         ASSERT(type);
2632         if (log->l_quotaoffs_flag & type)
2633                 return (0);
2634
2635         /*
2636          * At this point we know that quota was _not_ turned off.
2637          * Since the mount flags are not indicating to us otherwise, this
2638          * must mean that quota is on, and the dquot needs to be replayed.
2639          * Remember that we may not have fully recovered the superblock yet,
2640          * so we can't do the usual trick of looking at the SB quota bits.
2641          *
2642          * The other possibility, of course, is that the quota subsystem was
2643          * removed since the last mount - ENOSYS.
2644          */
2645         dq_f = item->ri_buf[0].i_addr;
2646         ASSERT(dq_f);
2647         if ((error = xfs_qm_dqcheck(recddq,
2648                            dq_f->qlf_id,
2649                            0, XFS_QMOPT_DOWARN,
2650                            "xlog_recover_do_dquot_trans (log copy)"))) {
2651                 return XFS_ERROR(EIO);
2652         }
2653         ASSERT(dq_f->qlf_len == 1);
2654
2655         error = xfs_read_buf(mp, mp->m_ddev_targp,
2656                              dq_f->qlf_blkno,
2657                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2658                              0, &bp);
2659         if (error) {
2660                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2661                                   bp, dq_f->qlf_blkno);
2662                 return error;
2663         }
2664         ASSERT(bp);
2665         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2666
2667         /*
2668          * At least the magic num portion should be on disk because this
2669          * was among a chunk of dquots created earlier, and we did some
2670          * minimal initialization then.
2671          */
2672         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2673                            "xlog_recover_do_dquot_trans")) {
2674                 xfs_buf_relse(bp);
2675                 return XFS_ERROR(EIO);
2676         }
2677
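             /*
              * The logged dquot image is newer than what is on disk, so copy
              * it over the on-disk copy and queue the buffer for writeback.
              */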
2678         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2679
2680         ASSERT(dq_f->qlf_size == 2);
2681         ASSERT(bp->b_target->bt_mount == mp);
2682         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2683         xfs_bdwrite(mp, bp);
2684
2685         return (0);
2686 }
2687
2688 /*
2689  * This routine is called to create an in-core extent free intent
2690  * item from the efi format structure which was logged on disk.
2691  * It allocates an in-core efi, copies the extents from the format
2692  * structure into it, and adds the efi to the AIL with the given
2693  * LSN.
2694  */
2695 STATIC int
2696 xlog_recover_do_efi_trans(
2697         xlog_t                  *log,
2698         xlog_recover_item_t     *item,
2699         xfs_lsn_t               lsn,
2700         int                     pass)
2701 {
2702         int                     error;
2703         xfs_mount_t             *mp;
2704         xfs_efi_log_item_t      *efip;
2705         xfs_efi_log_format_t    *efi_formatp;
2706
2707         if (pass == XLOG_RECOVER_PASS1) {
2708                 return 0;
2709         }
2710
2711         efi_formatp = item->ri_buf[0].i_addr;
2712
2713         mp = log->l_mp;
2714         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2715         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2716                                          &(efip->efi_format)))) {
2717                 xfs_efi_item_free(efip);
2718                 return error;
2719         }
2720         efip->efi_next_extent = efi_formatp->efi_nextents;
2721         efip->efi_flags |= XFS_EFI_COMMITTED;
2722
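             /*
              * The EFI came from a committed transaction, so add it to the
              * AIL at its original lsn.  It will either be cancelled by a
              * matching EFD during replay or processed by
              * xlog_recover_process_efis() once the log has been fully read.
              */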
2723         spin_lock(&log->l_ailp->xa_lock);
2724         /*
2725          * xfs_trans_ail_update() drops the AIL lock.
2726          */
2727         xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
2728         return 0;
2729 }
2730
2731
2732 /*
2733  * This routine is called when an efd format structure is found in
2734  * a committed transaction in the log.  Its purpose is to cancel
2735  * the corresponding efi if it was still in the log.  To do this
2736  * it searches the AIL for the efi with an id equal to that in the
2737  * efd format structure.  If we find it, we remove the efi from the
2738  * AIL and free it.
2739  */
2740 STATIC void
2741 xlog_recover_do_efd_trans(
2742         xlog_t                  *log,
2743         xlog_recover_item_t     *item,
2744         int                     pass)
2745 {
2746         xfs_efd_log_format_t    *efd_formatp;
2747         xfs_efi_log_item_t      *efip = NULL;
2748         xfs_log_item_t          *lip;
2749         __uint64_t              efi_id;
2750         struct xfs_ail_cursor   cur;
2751         struct xfs_ail          *ailp = log->l_ailp;
2752
2753         if (pass == XLOG_RECOVER_PASS1) {
2754                 return;
2755         }
2756
2757         efd_formatp = item->ri_buf[0].i_addr;
2758         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2759                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2760                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2761                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2762         efi_id = efd_formatp->efd_efi_id;
2763
2764         /*
2765          * Search for the efi with the id in the efd format structure
2766          * in the AIL.
2767          */
2768         spin_lock(&ailp->xa_lock);
2769         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2770         while (lip != NULL) {
2771                 if (lip->li_type == XFS_LI_EFI) {
2772                         efip = (xfs_efi_log_item_t *)lip;
2773                         if (efip->efi_format.efi_id == efi_id) {
2774                                 /*
2775                                  * xfs_trans_ail_delete() drops the
2776                                  * AIL lock.
2777                                  */
2778                                 xfs_trans_ail_delete(ailp, lip);
2779                                 xfs_efi_item_free(efip);
2780                                 spin_lock(&ailp->xa_lock);
2781                                 break;
2782                         }
2783                 }
2784                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2785         }
2786         xfs_trans_ail_cursor_done(ailp, &cur);
2787         spin_unlock(&ailp->xa_lock);
2788 }
2789
2790 /*
2791  * Perform the transaction
2792  *
2793  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2794  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2795  */
2796 STATIC int
2797 xlog_recover_do_trans(
2798         xlog_t                  *log,
2799         xlog_recover_t          *trans,
2800         int                     pass)
2801 {
2802         int                     error = 0;
2803         xlog_recover_item_t     *item;
2804
2805         error = xlog_recover_reorder_trans(log, trans, pass);
2806         if (error)
2807                 return error;
2808
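             /*
              * Replay each item in the (now reordered) transaction using its
              * type specific recovery routine.
              */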
2809         list_for_each_entry(item, &trans->r_itemq, ri_list) {
2810                 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2811                 switch (ITEM_TYPE(item)) {
2812                 case XFS_LI_BUF:
2813                         error = xlog_recover_do_buffer_trans(log, item, pass);
2814                         break;
2815                 case XFS_LI_INODE:
2816                         error = xlog_recover_do_inode_trans(log, item, pass);
2817                         break;
2818                 case XFS_LI_EFI:
2819                         error = xlog_recover_do_efi_trans(log, item,
2820                                                           trans->r_lsn, pass);
2821                         break;
2822                 case XFS_LI_EFD:
2823                         xlog_recover_do_efd_trans(log, item, pass);
2824                         error = 0;
2825                         break;
2826                 case XFS_LI_DQUOT:
2827                         error = xlog_recover_do_dquot_trans(log, item, pass);
2828                         break;
2829                 case XFS_LI_QUOTAOFF:
2830                         error = xlog_recover_do_quotaoff_trans(log, item,
2831                                                                pass);
2832                         break;
2833                 default:
2834                         xlog_warn(
2835         "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2836                         ASSERT(0);
2837                         error = XFS_ERROR(EIO);
2838                         break;
2839                 }
2840
2841                 if (error)
2842                         return error;
2843         }
2844
2845         return 0;
2846 }
2847
2848 /*
2849  * Free up any resources allocated by the transaction
2850  *
2851  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2852  */
2853 STATIC void
2854 xlog_recover_free_trans(
2855         xlog_recover_t          *trans)
2856 {
2857         xlog_recover_item_t     *item, *n;
2858         int                     i;
2859
2860         list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2861                 /* Free the regions in the item. */
2862                 list_del(&item->ri_list);
2863                 for (i = 0; i < item->ri_cnt; i++)
2864                         kmem_free(item->ri_buf[i].i_addr);
2865                 /* Free the region table and the item itself */
2866                 kmem_free(item->ri_buf);
2867                 kmem_free(item);
2868         }
2869         /* Free the transaction recover structure */
2870         kmem_free(trans);
2871 }
2872
2873 STATIC int
2874 xlog_recover_commit_trans(
2875         xlog_t                  *log,
2876         xlog_recover_t          *trans,
2877         int                     pass)
2878 {
2879         int                     error;
2880
2881         hlist_del(&trans->r_list);
2882         if ((error = xlog_recover_do_trans(log, trans, pass)))
2883                 return error;
2884         xlog_recover_free_trans(trans);                 /* no error */
2885         return 0;
2886 }
2887
2888 STATIC int
2889 xlog_recover_unmount_trans(
2890         xlog_recover_t          *trans)
2891 {
2892         /* Do nothing now */
2893         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2894         return 0;
2895 }
2896
2897 /*
2898  * There are two valid states of the r_state field.  0 indicates that the
2899  * transaction structure is in a normal state.  We have either seen the
2900  * start of the transaction or the last operation we added was not a partial
2901  * operation.  If the last operation we added to the transaction was a
2902  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2903  *
2904  * NOTE: skip LRs with 0 data length.
2905  */
2906 STATIC int
2907 xlog_recover_process_data(
2908         xlog_t                  *log,
2909         struct hlist_head       rhash[],
2910         xlog_rec_header_t       *rhead,
2911         xfs_caddr_t             dp,
2912         int                     pass)
2913 {
2914         xfs_caddr_t             lp;
2915         int                     num_logops;
2916         xlog_op_header_t        *ohead;
2917         xlog_recover_t          *trans;
2918         xlog_tid_t              tid;
2919         int                     error;
2920         unsigned long           hash;
2921         uint                    flags;
2922
2923         lp = dp + be32_to_cpu(rhead->h_len);
2924         num_logops = be32_to_cpu(rhead->h_num_logops);
2925
2926         /* check the log format matches our own - else we can't recover */
2927         if (xlog_header_check_recover(log->l_mp, rhead))
2928                 return (XFS_ERROR(EIO));
2929
2930         while ((dp < lp) && num_logops) {
2931                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2932                 ohead = (xlog_op_header_t *)dp;
2933                 dp += sizeof(xlog_op_header_t);
2934                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2935                     ohead->oh_clientid != XFS_LOG) {
2936                         xlog_warn(
2937                 "XFS: xlog_recover_process_data: bad clientid");
2938                         ASSERT(0);
2939                         return (XFS_ERROR(EIO));
2940                 }
2941                 tid = be32_to_cpu(ohead->oh_tid);
2942                 hash = XLOG_RHASH(tid);
2943                 trans = xlog_recover_find_tid(&rhash[hash], tid);
2944                 if (trans == NULL) {               /* not found; add new tid */
2945                         if (ohead->oh_flags & XLOG_START_TRANS)
2946                                 xlog_recover_new_tid(&rhash[hash], tid,
2947                                         be64_to_cpu(rhead->h_lsn));
2948                 } else {
2949                         if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2950                                 xlog_warn(
2951                         "XFS: xlog_recover_process_data: bad length");
2952                                 WARN_ON(1);
2953                                 return (XFS_ERROR(EIO));
2954                         }
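                             /*
                              * Strip XLOG_END_TRANS before dispatching below;
                              * a region that carries the tail of an operation
                              * split across log records (XLOG_WAS_CONT_TRANS)
                              * is appended to that operation even if it is
                              * itself continued further.
                              */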
2955                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2956                         if (flags & XLOG_WAS_CONT_TRANS)
2957                                 flags &= ~XLOG_CONTINUE_TRANS;
2958                         switch (flags) {
2959                         case XLOG_COMMIT_TRANS:
2960                                 error = xlog_recover_commit_trans(log,
2961                                                                 trans, pass);
2962                                 break;
2963                         case XLOG_UNMOUNT_TRANS:
2964                                 error = xlog_recover_unmount_trans(trans);
2965                                 break;
2966                         case XLOG_WAS_CONT_TRANS:
2967                                 error = xlog_recover_add_to_cont_trans(log,
2968                                                 trans, dp,
2969                                                 be32_to_cpu(ohead->oh_len));
2970                                 break;
2971                         case XLOG_START_TRANS:
2972                                 xlog_warn(
2973                         "XFS: xlog_recover_process_data: bad transaction");
2974                                 ASSERT(0);
2975                                 error = XFS_ERROR(EIO);
2976                                 break;
2977                         case 0:
2978                         case XLOG_CONTINUE_TRANS:
2979                                 error = xlog_recover_add_to_trans(log, trans,
2980                                                 dp, be32_to_cpu(ohead->oh_len));
2981                                 break;
2982                         default:
2983                                 xlog_warn(
2984                         "XFS: xlog_recover_process_data: bad flag");
2985                                 ASSERT(0);
2986                                 error = XFS_ERROR(EIO);
2987                                 break;
2988                         }
2989                         if (error)
2990                                 return error;
2991                 }
2992                 dp += be32_to_cpu(ohead->oh_len);
2993                 num_logops--;
2994         }
2995         return 0;
2996 }
2997
2998 /*
2999  * Process an extent free intent item that was recovered from
3000  * the log.  We need to free the extents that it describes.
3001  */
3002 STATIC int
3003 xlog_recover_process_efi(
3004         xfs_mount_t             *mp,
3005         xfs_efi_log_item_t      *efip)
3006 {
3007         xfs_efd_log_item_t      *efdp;
3008         xfs_trans_t             *tp;
3009         int                     i;
3010         int                     error = 0;
3011         xfs_extent_t            *extp;
3012         xfs_fsblock_t           startblock_fsb;
3013
3014         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3015
3016         /*
3017          * First check the validity of the extents described by the
3018          * EFI.  If any are bad, then assume that all are bad and
3019          * just toss the EFI.
3020          */
3021         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3022                 extp = &(efip->efi_format.efi_extents[i]);
3023                 startblock_fsb = XFS_BB_TO_FSB(mp,
3024                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
3025                 if ((startblock_fsb == 0) ||
3026                     (extp->ext_len == 0) ||
3027                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3028                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3029                         /*
3030                          * This will pull the EFI from the AIL and
3031                          * free the memory associated with it.
3032                          */
3033                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3034                         return XFS_ERROR(EIO);
3035                 }
3036         }
3037
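             /*
              * Free the extents in a single transaction and log a matching
              * EFD so that a subsequent crash and recovery will not redo
              * these frees.
              */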
3038         tp = xfs_trans_alloc(mp, 0);
3039         error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3040         if (error)
3041                 goto abort_error;
3042         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3043
3044         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3045                 extp = &(efip->efi_format.efi_extents[i]);
3046                 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3047                 if (error)
3048                         goto abort_error;
3049                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3050                                          extp->ext_len);
3051         }
3052
3053         efip->efi_flags |= XFS_EFI_RECOVERED;
3054         error = xfs_trans_commit(tp, 0);
3055         return error;
3056
3057 abort_error:
3058         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3059         return error;
3060 }
3061
3062 /*
3063  * When this is called, all of the EFIs which did not have
3064  * corresponding EFDs should be in the AIL.  What we do now
3065  * is free the extents associated with each one.
3066  *
3067  * Since we process the EFIs in normal transactions, they
3068  * will be removed at some point after the commit.  This prevents
3069  * us from just walking down the list processing each one.
3070  * We'll use a flag in the EFI to skip those that we've already
3071  * processed and use the AIL iteration mechanism's generation
3072  * count to try to speed this up at least a bit.
3073  *
3074  * When we start, we know that the EFIs are the only things in
3075  * the AIL.  As we process them, however, other items are added
3076  * to the AIL.  Since everything added to the AIL must come after
3077  * everything already in the AIL, we stop processing as soon as
3078  * we see something other than an EFI in the AIL.
3079  */
3080 STATIC int
3081 xlog_recover_process_efis(
3082         xlog_t                  *log)
3083 {
3084         xfs_log_item_t          *lip;
3085         xfs_efi_log_item_t      *efip;
3086         int                     error = 0;
3087         struct xfs_ail_cursor   cur;
3088         struct xfs_ail          *ailp;
3089
3090         ailp = log->l_ailp;
3091         spin_lock(&ailp->xa_lock);
3092         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3093         while (lip != NULL) {
3094                 /*
3095                  * We're done when we see something other than an EFI.
3096                  * There should be no EFIs left in the AIL now.
3097                  */
3098                 if (lip->li_type != XFS_LI_EFI) {
3099 #ifdef DEBUG
3100                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3101                                 ASSERT(lip->li_type != XFS_LI_EFI);
3102 #endif
3103                         break;
3104                 }
3105
3106                 /*
3107                  * Skip EFIs that we've already processed.
3108                  */
3109                 efip = (xfs_efi_log_item_t *)lip;
3110                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3111                         lip = xfs_trans_ail_cursor_next(ailp, &cur);
3112                         continue;
3113                 }
3114
3115                 spin_unlock(&ailp->xa_lock);
3116                 error = xlog_recover_process_efi(log->l_mp, efip);
3117                 spin_lock(&ailp->xa_lock);
3118                 if (error)
3119                         goto out;
3120                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3121         }
3122 out:
3123         xfs_trans_ail_cursor_done(ailp, &cur);
3124         spin_unlock(&ailp->xa_lock);
3125         return error;
3126 }
3127
3128 /*
3129  * This routine performs a transaction to null out a bad inode pointer
3130  * in an agi unlinked inode hash bucket.
3131  */
3132 STATIC void
3133 xlog_recover_clear_agi_bucket(
3134         xfs_mount_t     *mp,
3135         xfs_agnumber_t  agno,
3136         int             bucket)
3137 {
3138         xfs_trans_t     *tp;
3139         xfs_agi_t       *agi;
3140         xfs_buf_t       *agibp;
3141         int             offset;
3142         int             error;
3143
3144         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3145         error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3146                                   0, 0, 0);
3147         if (error)
3148                 goto out_abort;
3149
3150         error = xfs_read_agi(mp, tp, agno, &agibp);
3151         if (error)
3152                 goto out_abort;
3153
3154         agi = XFS_BUF_TO_AGI(agibp);
3155         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3156         offset = offsetof(xfs_agi_t, agi_unlinked) +
3157                  (sizeof(xfs_agino_t) * bucket);
3158         xfs_trans_log_buf(tp, agibp, offset,
3159                           (offset + sizeof(xfs_agino_t) - 1));
3160
3161         error = xfs_trans_commit(tp, 0);
3162         if (error)
3163                 goto out_error;
3164         return;
3165
3166 out_abort:
3167         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3168 out_error:
3169         xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3170                         "failed to clear agi %d. Continuing.", agno);
3171         return;
3172 }
3173
3174 STATIC xfs_agino_t
3175 xlog_recover_process_one_iunlink(
3176         struct xfs_mount                *mp,
3177         xfs_agnumber_t                  agno,
3178         xfs_agino_t                     agino,
3179         int                             bucket)
3180 {
3181         struct xfs_buf                  *ibp;
3182         struct xfs_dinode               *dip;
3183         struct xfs_inode                *ip;
3184         xfs_ino_t                       ino;
3185         int                             error;
3186
3187         ino = XFS_AGINO_TO_INO(mp, agno, agino);
3188         error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3189         if (error)
3190                 goto fail;
3191
3192         /*
3193          * Get the on disk inode to find the next inode in the bucket.
3194          */
3195         error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
3196         if (error)
3197                 goto fail_iput;
3198
3199         ASSERT(ip->i_d.di_nlink == 0);
3200         ASSERT(ip->i_d.di_mode != 0);
3201
3202         /* setup for the next pass */
3203         agino = be32_to_cpu(dip->di_next_unlinked);
3204         xfs_buf_relse(ibp);
3205
3206         /*
3207          * Prevent any DMAPI event from being sent when the reference on
3208          * the inode is dropped.
3209          */
3210         ip->i_d.di_dmevmask = 0;
3211
3212         IRELE(ip);
3213         return agino;
3214
3215  fail_iput:
3216         IRELE(ip);
3217  fail:
3218         /*
3219          * We can't read in the inode this bucket points to, or this inode
3220          * is messed up.  Just ditch this bucket of inodes.  We will lose
3221          * some inodes and space, but at least we won't hang.
3222          *
3223          * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3224          * clear the inode pointer in the bucket.
3225          */
3226         xlog_recover_clear_agi_bucket(mp, agno, bucket);
3227         return NULLAGINO;
3228 }
3229
3230 /*
3231  * xlog_recover_process_iunlinks
3232  *
3233  * This is called during recovery to process any inodes which
3234  * we unlinked but not freed when the system crashed.  These
3235  * inodes will be on the lists in the AGI blocks.  What we do
3236  * here is scan all the AGIs and fully truncate and free any
3237  * inodes found on the lists.  Each inode is removed from the
3238  * lists when it has been fully truncated and is freed.  The
3239  * freeing of the inode and its removal from the list must be
3240  * atomic.
3241  */
3242 STATIC void
3243 xlog_recover_process_iunlinks(
3244         xlog_t          *log)
3245 {
3246         xfs_mount_t     *mp;
3247         xfs_agnumber_t  agno;
3248         xfs_agi_t       *agi;
3249         xfs_buf_t       *agibp;
3250         xfs_agino_t     agino;
3251         int             bucket;
3252         int             error;
3253         uint            mp_dmevmask;
3254
3255         mp = log->l_mp;
3256
3257         /*
3258          * Prevent any DMAPI event from being sent while in this function.
3259          */
3260         mp_dmevmask = mp->m_dmevmask;
3261         mp->m_dmevmask = 0;
3262
3263         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3264                 /*
3265                  * Find the agi for this ag.
3266                  */
3267                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3268                 if (error) {
3269                         /*
3270                          * AGI is b0rked. Don't process it.
3271                          *
3272                          * We should probably mark the filesystem as corrupt
3273                          * after we've recovered all the ag's we can....
3274                          */
3275                         continue;
3276                 }
3277                 agi = XFS_BUF_TO_AGI(agibp);
3278
3279                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3280                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3281                         while (agino != NULLAGINO) {
3282                                 /*
3283                                  * Release the agi buffer so that it can
3284                                  * be acquired in the normal course of the
3285                                  * transaction to truncate and free the inode.
3286                                  */
3287                                 xfs_buf_relse(agibp);
3288
3289                                 agino = xlog_recover_process_one_iunlink(mp,
3290                                                         agno, agino, bucket);
3291
3292                                 /*
3293                                  * Reacquire the agi buffer and continue around
3294                                  * the loop. This should never fail as we know
3295                                  * the buffer was good earlier on.
3296                                  */
3297                                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3298                                 ASSERT(error == 0);
3299                                 agi = XFS_BUF_TO_AGI(agibp);
3300                         }
3301                 }
3302
3303                 /*
3304                  * Release the buffer for the current agi so we can
3305                  * go on to the next one.
3306                  */
3307                 xfs_buf_relse(agibp);
3308         }
3309
3310         mp->m_dmevmask = mp_dmevmask;
3311 }
3312
3313
3314 #ifdef DEBUG
3315 STATIC void
3316 xlog_pack_data_checksum(
3317         xlog_t          *log,
3318         xlog_in_core_t  *iclog,
3319         int             size)
3320 {
3321         int             i;
3322         __be32          *up;
3323         uint            chksum = 0;
3324
3325         up = (__be32 *)iclog->ic_datap;
3326         /* divide length by 4 to get # words */
3327         for (i = 0; i < (size >> 2); i++) {
3328                 chksum ^= be32_to_cpu(*up);
3329                 up++;
3330         }
3331         iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3332 }
3333 #else
3334 #define xlog_pack_data_checksum(log, iclog, size)
3335 #endif
3336
3337 /*
3338  * Stamp cycle number in every block
3339  */
3340 void
3341 xlog_pack_data(
3342         xlog_t                  *log,
3343         xlog_in_core_t          *iclog,
3344         int                     roundoff)
3345 {
3346         int                     i, j, k;
3347         int                     size = iclog->ic_offset + roundoff;
3348         __be32                  cycle_lsn;
3349         xfs_caddr_t             dp;
3350
3351         xlog_pack_data_checksum(log, iclog, size);
3352
3353         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3354
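             /*
              * Save the first word of each basic block in the record header
              * (or in the extended headers for v2 logs) and replace it with
              * the record's cycle number so torn writes can be detected at
              * recovery time.
              */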
3355         dp = iclog->ic_datap;
3356         for (i = 0; i < BTOBB(size) &&
3357                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3358                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3359                 *(__be32 *)dp = cycle_lsn;
3360                 dp += BBSIZE;
3361         }
3362
3363         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3364                 xlog_in_core_2_t *xhdr = iclog->ic_data;
3365
3366                 for ( ; i < BTOBB(size); i++) {
3367                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3368                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3369                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3370                         *(__be32 *)dp = cycle_lsn;
3371                         dp += BBSIZE;
3372                 }
3373
3374                 for (i = 1; i < log->l_iclog_heads; i++) {
3375                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3376                 }
3377         }
3378 }
3379
3380 STATIC void
3381 xlog_unpack_data(
3382         xlog_rec_header_t       *rhead,
3383         xfs_caddr_t             dp,
3384         xlog_t                  *log)
3385 {
3386         int                     i, j, k;
3387
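             /*
              * Restore the data words that xlog_pack_data() replaced with
              * the cycle number when the record was written.
              */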
3388         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3389                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3390                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3391                 dp += BBSIZE;
3392         }
3393
3394         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3395                 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3396                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3397                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3398                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3399                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3400                         dp += BBSIZE;
3401                 }
3402         }
3403 }
3404
3405 STATIC int
3406 xlog_valid_rec_header(
3407         xlog_t                  *log,
3408         xlog_rec_header_t       *rhead,
3409         xfs_daddr_t             blkno)
3410 {
3411         int                     hlen;
3412
3413         if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3414                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3415                                 XFS_ERRLEVEL_LOW, log->l_mp);
3416                 return XFS_ERROR(EFSCORRUPTED);
3417         }
3418         if (unlikely(
3419             (!rhead->h_version ||
3420             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3421                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3422                         __func__, be32_to_cpu(rhead->h_version));
3423                 return XFS_ERROR(EIO);
3424         }
3425
3426         /* LR body must have data or it wouldn't have been written */
3427         hlen = be32_to_cpu(rhead->h_len);
3428         if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3429                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3430                                 XFS_ERRLEVEL_LOW, log->l_mp);
3431                 return XFS_ERROR(EFSCORRUPTED);
3432         }
3433         if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3434                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3435                                 XFS_ERRLEVEL_LOW, log->l_mp);
3436                 return XFS_ERROR(EFSCORRUPTED);
3437         }
3438         return 0;
3439 }
3440
3441 /*
3442  * Read the log from tail to head and process the log records found.
3443  * Handle the two cases where the tail and head are in the same cycle
3444  * and where the active portion of the log wraps around the end of
3445  * the physical log separately.  The pass parameter is passed through
3446  * to the routines called to process the data and is not looked at
3447  * here.
3448  */
3449 STATIC int
3450 xlog_do_recovery_pass(
3451         xlog_t                  *log,
3452         xfs_daddr_t             head_blk,
3453         xfs_daddr_t             tail_blk,
3454         int                     pass)
3455 {
3456         xlog_rec_header_t       *rhead;
3457         xfs_daddr_t             blk_no;
3458         xfs_caddr_t             offset;
3459         xfs_buf_t               *hbp, *dbp;
3460         int                     error = 0, h_size;
3461         int                     bblks, split_bblks;
3462         int                     hblks, split_hblks, wrapped_hblks;
3463         struct hlist_head       rhash[XLOG_RHASH_SIZE];
3464
3465         ASSERT(head_blk != tail_blk);
3466
3467         /*
3468          * Read the header of the tail block and get the iclog buffer size from
3469          * h_size.  Use this to tell how many sectors make up the log header.
3470          */
3471         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3472                 /*
3473                  * When using variable length iclogs, read first sector of
3474                  * iclog header and extract the header size from it.  Get a
3475                  * new hbp that is the correct size.
3476                  */
3477                 hbp = xlog_get_bp(log, 1);
3478                 if (!hbp)
3479                         return ENOMEM;
3480
3481                 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3482                 if (error)
3483                         goto bread_err1;
3484
3485                 rhead = (xlog_rec_header_t *)offset;
3486                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3487                 if (error)
3488                         goto bread_err1;
3489                 h_size = be32_to_cpu(rhead->h_size);
3490                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3491                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3492                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3493                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3494                                 hblks++;
3495                         xlog_put_bp(hbp);
3496                         hbp = xlog_get_bp(log, hblks);
3497                 } else {
3498                         hblks = 1;
3499                 }
3500         } else {
3501                 ASSERT(log->l_sectBBsize == 1);
3502                 hblks = 1;
3503                 hbp = xlog_get_bp(log, 1);
3504                 h_size = XLOG_BIG_RECORD_BSIZE;
3505         }
3506
3507         if (!hbp)
3508                 return ENOMEM;
3509         dbp = xlog_get_bp(log, BTOBB(h_size));
3510         if (!dbp) {
3511                 xlog_put_bp(hbp);
3512                 return ENOMEM;
3513         }
3514
3515         memset(rhash, 0, sizeof(rhash));
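             /*
              * If the tail is physically before the head, the active log
              * does not wrap and a single sequential sweep from tail to head
              * is enough.  Otherwise recover the wrapped section first, then
              * the start of the physical log up to the head.
              */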
3516         if (tail_blk <= head_blk) {
3517                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3518                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3519                         if (error)
3520                                 goto bread_err2;
3521
3522                         rhead = (xlog_rec_header_t *)offset;
3523                         error = xlog_valid_rec_header(log, rhead, blk_no);
3524                         if (error)
3525                                 goto bread_err2;
3526
3527                         /* blocks in data section */
3528                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3529                         error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3530                                            &offset);
3531                         if (error)
3532                                 goto bread_err2;
3533
3534                         xlog_unpack_data(rhead, offset, log);
3535                         if ((error = xlog_recover_process_data(log,
3536                                                 rhash, rhead, offset, pass)))
3537                                 goto bread_err2;
3538                         blk_no += bblks + hblks;
3539                 }
3540         } else {
3541                 /*
3542                  * Perform recovery around the end of the physical log.
3543                  * When the head is not on the same cycle number as the tail,
3544                  * we can't do a sequential recovery as above.
3545                  */
3546                 blk_no = tail_blk;
3547                 while (blk_no < log->l_logBBsize) {
3548                         /*
3549                          * Check for header wrapping around physical end-of-log
3550                          */
3551                         offset = XFS_BUF_PTR(hbp);
3552                         split_hblks = 0;
3553                         wrapped_hblks = 0;
3554                         if (blk_no + hblks <= log->l_logBBsize) {
3555                                 /* Read header in one read */
3556                                 error = xlog_bread(log, blk_no, hblks, hbp,
3557                                                    &offset);
3558                                 if (error)
3559                                         goto bread_err2;
3560                         } else {
3561                                 /* This LR is split across physical log end */
3562                                 if (blk_no != log->l_logBBsize) {
3563                                         /* some data before physical log end */
3564                                         ASSERT(blk_no <= INT_MAX);
3565                                         split_hblks = log->l_logBBsize - (int)blk_no;
3566                                         ASSERT(split_hblks > 0);
3567                                         error = xlog_bread(log, blk_no,
3568                                                            split_hblks, hbp,
3569                                                            &offset);
3570                                         if (error)
3571                                                 goto bread_err2;
3572                                 }
3573
3574                                 /*
3575                                  * Note: this black magic still works with
3576                                  * large sector sizes (non-512) only because:
3577                                  * - we increased the buffer size originally
3578                                  *   by 1 sector giving us enough extra space
3579                                  *   for the second read;
3580                                  * - the log start is guaranteed to be sector
3581                                  *   aligned;
3582                                  * - we read the log end (LR header start)
3583                                  *   _first_, then the log start (LR header end)
3584                                  *   - order is important.
3585                                  */
3586                                 wrapped_hblks = hblks - split_hblks;
3587                                 error = XFS_BUF_SET_PTR(hbp,
3588                                                 offset + BBTOB(split_hblks),
3589                                                 BBTOB(hblks - split_hblks));
3590                                 if (error)
3591                                         goto bread_err2;
3592
3593                                 error = xlog_bread_noalign(log, 0,
3594                                                            wrapped_hblks, hbp);
3595                                 if (error)
3596                                         goto bread_err2;
3597
3598                                 error = XFS_BUF_SET_PTR(hbp, offset,
3599                                                         BBTOB(hblks));
3600                                 if (error)
3601                                         goto bread_err2;
3602                         }
3603                         rhead = (xlog_rec_header_t *)offset;
3604                         error = xlog_valid_rec_header(log, rhead,
3605                                                 split_hblks ? blk_no : 0);
3606                         if (error)
3607                                 goto bread_err2;
3608
3609                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3610                         blk_no += hblks;
3611
3612                         /* Read in data for log record */
3613                         if (blk_no + bblks <= log->l_logBBsize) {
3614                                 error = xlog_bread(log, blk_no, bblks, dbp,
3615                                                    &offset);
3616                                 if (error)
3617                                         goto bread_err2;
3618                         } else {
3619                                 /* This log record is split across the
3620                                  * physical end of log */
3621                                 offset = XFS_BUF_PTR(dbp);
3622                                 split_bblks = 0;
3623                                 if (blk_no != log->l_logBBsize) {
3624                                         /* some data is before the physical
3625                                          * end of log */
3626                                         ASSERT(!wrapped_hblks);
3627                                         ASSERT(blk_no <= INT_MAX);
3628                                         split_bblks =
3629                                                 log->l_logBBsize - (int)blk_no;
3630                                         ASSERT(split_bblks > 0);
3631                                         error = xlog_bread(log, blk_no,
3632                                                         split_bblks, dbp,
3633                                                         &offset);
3634                                         if (error)
3635                                                 goto bread_err2;
3636                                 }
3637
3638                                 /*
3639                                  * Note: this black magic still works with
3640                                  * large sector sizes (non-512) only because:
3641                                  * - we increased the buffer size originally
3642                                  *   by 1 sector giving us enough extra space
3643                                  *   for the second read;
3644                                  * - the log start is guaranteed to be sector
3645                                  *   aligned;
3646                                  * - we read the log end (LR header start)
3647                                  *   _first_, then the log start (LR header end)
3648                                  *   - order is important.
3649                                  */
3650                                 error = XFS_BUF_SET_PTR(dbp,
3651                                                 offset + BBTOB(split_bblks),
3652                                                 BBTOB(bblks - split_bblks));
3653                                 if (error)
3654                                         goto bread_err2;
3655
3656                                 error = xlog_bread_noalign(log, wrapped_hblks,
3657                                                 bblks - split_bblks,
3658                                                 dbp);
3659                                 if (error)
3660                                         goto bread_err2;
3661
3662                                 error = XFS_BUF_SET_PTR(dbp, offset, h_size);
3663                                 if (error)
3664                                         goto bread_err2;
3665                         }
3666                         xlog_unpack_data(rhead, offset, log);
3667                         if ((error = xlog_recover_process_data(log, rhash,
3668                                                         rhead, offset, pass)))
3669                                 goto bread_err2;
3670                         blk_no += bblks;
3671                 }
3672
3673                 ASSERT(blk_no >= log->l_logBBsize);
3674                 blk_no -= log->l_logBBsize;
3675
3676                 /* read first part of physical log */
3677                 while (blk_no < head_blk) {
3678                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3679                         if (error)
3680                                 goto bread_err2;
3681
3682                         rhead = (xlog_rec_header_t *)offset;
3683                         error = xlog_valid_rec_header(log, rhead, blk_no);
3684                         if (error)
3685                                 goto bread_err2;
3686
3687                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3688                         error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3689                                            &offset);
3690                         if (error)
3691                                 goto bread_err2;
3692
3693                         xlog_unpack_data(rhead, offset, log);
3694                         if ((error = xlog_recover_process_data(log, rhash,
3695                                                         rhead, offset, pass)))
3696                                 goto bread_err2;
3697                         blk_no += bblks + hblks;
3698                 }
3699         }
3700
3701  bread_err2:
3702         xlog_put_bp(dbp);
3703  bread_err1:
3704         xlog_put_bp(hbp);
3705         return error;
3706 }
3707
3708 /*
3709  * Do the recovery of the log.  We actually do this in two phases.
3710  * The two passes are necessary in order to implement the function
3711  * of cancelling a record written into the log.  The first pass
3712  * determines those things which have been cancelled, and the
3713  * second pass replays log items normally except for those which
3714  * have been cancelled.  The handling of the replay and cancellations
3715  * takes place in the log item type specific routines.
3716  *
3717  * The table of items which have cancel records in the log is allocated
3718  * and freed at this level, since only here do we know when all of
3719  * the log recovery has been completed.
3720  */
3721 STATIC int
3722 xlog_do_log_recovery(
3723         xlog_t          *log,
3724         xfs_daddr_t     head_blk,
3725         xfs_daddr_t     tail_blk)
3726 {
3727         int             error;
3728
3729         ASSERT(head_blk != tail_blk);
3730
3731         /*
3732          * First do a pass to find all of the cancelled buf log items.
3733          * Store them in the buf_cancel_table for use in the second pass.
3734          */
3735         log->l_buf_cancel_table =
3736                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3737                                                  sizeof(xfs_buf_cancel_t*),
3738                                                  KM_SLEEP);
3739         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3740                                       XLOG_RECOVER_PASS1);
3741         if (error != 0) {
3742                 kmem_free(log->l_buf_cancel_table);
3743                 log->l_buf_cancel_table = NULL;
3744                 return error;
3745         }
3746         /*
3747          * Then do a second pass to actually recover the items in the log.
3748          * When it is complete free the table of buf cancel items.
3749          */
3750         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3751                                       XLOG_RECOVER_PASS2);
3752 #ifdef DEBUG
3753         if (!error) {
3754                 int     i;
3755
3756                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3757                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3758         }
3759 #endif  /* DEBUG */
3760
3761         kmem_free(log->l_buf_cancel_table);
3762         log->l_buf_cancel_table = NULL;
3763
3764         return error;
3765 }
3766
3767 /*
3768  * Do the actual recovery
3769  */
3770 STATIC int
3771 xlog_do_recover(
3772         xlog_t          *log,
3773         xfs_daddr_t     head_blk,
3774         xfs_daddr_t     tail_blk)
3775 {
3776         int             error;
3777         xfs_buf_t       *bp;
3778         xfs_sb_t        *sbp;
3779
3780         /*
3781          * First replay the images in the log.
3782          */
3783         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3784         if (error) {
3785                 return error;
3786         }
3787
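             /*
              * Push out all the delayed write buffers queued up by replay so
              * that any I/O errors surface before we go any further.
              */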
3788         XFS_bflush(log->l_mp->m_ddev_targp);
3789
3790         /*
3791          * If IO errors happened during recovery, bail out.
3792          */
3793         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3794                 return (EIO);
3795         }
3796
3797         /*
3798          * We now update the tail_lsn since much of the recovery has completed
3799  * and there may be space available to use.  If there were no extent
3800  * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3801          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3802          * lsn of the last known good LR on disk.  If there are extent frees
3803          * or iunlinks they will have some entries in the AIL; so we look at
3804          * the AIL to determine how to set the tail_lsn.
3805          */
3806         xlog_assign_tail_lsn(log->l_mp);
3807
3808         /*
3809          * Now that we've finished replaying all buffer and inode
3810          * updates, re-read in the superblock.
3811          */
3812         bp = xfs_getsb(log->l_mp, 0);
3813         XFS_BUF_UNDONE(bp);
3814         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3815         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3816         XFS_BUF_READ(bp);
3817         XFS_BUF_UNASYNC(bp);
3818         xfsbdstrat(log->l_mp, bp);
3819         error = xfs_buf_iowait(bp);
3820         if (error) {
3821                 xfs_ioerror_alert("xlog_do_recover",
3822                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3823                 ASSERT(0);
3824                 xfs_buf_relse(bp);
3825                 return error;
3826         }
3827
3828         /* Convert superblock from on-disk format */
3829         sbp = &log->l_mp->m_sb;
3830         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3831         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3832         ASSERT(xfs_sb_good_version(sbp));
3833         xfs_buf_relse(bp);
3834
3835         /* We've re-read the superblock so re-initialize per-cpu counters */
3836         xfs_icsb_reinit_counters(log->l_mp);
3837
3838         xlog_recover_check_summary(log);
3839
3840         /* Normal transactions can now occur */
3841         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3842         return 0;
3843 }
3844
3845 /*
3846  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3847  *
3848  * Return error or zero.
3849  */
3850 int
3851 xlog_recover(
3852         xlog_t          *log)
3853 {
3854         xfs_daddr_t     head_blk, tail_blk;
3855         int             error;
3856
3857         /* find the tail of the log */
3858         if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3859                 return error;
3860
3861         if (tail_blk != head_blk) {
3862                 /* There used to be a comment here:
3863                  *
3864                  * disallow recovery on read-only mounts.  note -- mount
3865                  * checks for ENOSPC and turns it into an intelligent
3866                  * error message.
3867                  * ...but this is no longer true.  Now, unless you specify
3868                  * NORECOVERY (in which case this function would never be
3869                  * called), we just go ahead and recover.  We do this all
3870                  * under the vfs layer, so we can get away with it unless
3871                  * the device itself is read-only, in which case we fail.
3872                  */
3873                 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3874                         return error;
3875                 }
3876
3877                 cmn_err(CE_NOTE,
3878                         "Starting XFS recovery on filesystem: %s (logdev: %s)",
3879                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3880                         log->l_mp->m_logname : "internal");
3881
3882                 error = xlog_do_recover(log, head_blk, tail_blk);
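                     /*
                      * Note that the second stage of recovery (EFI
                      * processing and unlinked inode cleanup) still has to
                      * run from xlog_recover_finish().
                      */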
3883                 log->l_flags |= XLOG_RECOVERY_NEEDED;
3884         }
3885         return error;
3886 }
3887
3888 /*
3889  * In the first part of recovery we replay inodes and buffers and build
3890  * up the list of extent free items which need to be processed.  Here
3891  * we process the extent free items and clean up the on disk unlinked
3892  * inode lists.  This is separated from the first part of recovery so
3893  * that the root and real-time bitmap inodes can be read in from disk in
3894  * between the two stages.  This is necessary so that we can free space
3895  * in the real-time portion of the file system.
3896  */
3897 int
3898 xlog_recover_finish(
3899         xlog_t          *log)
3900 {
3901         /*
3902          * Now we're ready to do the transactions needed for the
3903          * rest of recovery.  Start with completing all the extent
3904          * free intent records and then process the unlinked inode
3905          * lists.  At this point, we essentially run in normal mode
3906          * except that we're still performing recovery actions
3907          * rather than accepting new requests.
3908          */
3909         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3910                 int     error;
3911                 error = xlog_recover_process_efis(log);
3912                 if (error) {
3913                         cmn_err(CE_ALERT,
3914                                 "Failed to recover EFIs on filesystem: %s",
3915                                 log->l_mp->m_fsname);
3916                         return error;
3917                 }
3918                 /*
3919                  * Sync the log to get all the EFIs out of the AIL.
3920                  * This isn't absolutely necessary, but it helps in
3921                  * case the unlink transactions would have problems
3922                  * pushing the EFIs out of the way.
3923                  */
3924                 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3925
3926                 xlog_recover_process_iunlinks(log);
3927
3928                 xlog_recover_check_summary(log);
3929
3930                 cmn_err(CE_NOTE,
3931                         "Ending XFS recovery on filesystem: %s (logdev: %s)",
3932                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3933                         log->l_mp->m_logname : "internal");
3934                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3935         } else {
3936                 cmn_err(CE_DEBUG,
3937                         "!Ending clean XFS mount for filesystem: %s\n",
3938                         log->l_mp->m_fsname);
3939         }
3940         return 0;
3941 }
3942
3943
3944 #if defined(DEBUG)
3945 /*
3946  * Read all of the agf and agi counters and check that they
3947  * are consistent with the superblock counters.
3948  */
3949 void
3950 xlog_recover_check_summary(
3951         xlog_t          *log)
3952 {
3953         xfs_mount_t     *mp;
3954         xfs_agf_t       *agfp;
3955         xfs_buf_t       *agfbp;
3956         xfs_buf_t       *agibp;
3957         xfs_agnumber_t  agno;
3958         __uint64_t      freeblks;
3959         __uint64_t      itotal;
3960         __uint64_t      ifree;
3961         int             error;
3962
3963         mp = log->l_mp;
3964
3965         freeblks = 0LL;
3966         itotal = 0LL;
3967         ifree = 0LL;
3968         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3969                 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3970                 if (error) {
3971                         xfs_fs_cmn_err(CE_ALERT, mp,
3972                                         "xlog_recover_check_summary(agf)"
3973                                         "agf read failed agno %d error %d",
3974                                                         agno, error);
3975                 } else {
3976                         agfp = XFS_BUF_TO_AGF(agfbp);
3977                         freeblks += be32_to_cpu(agfp->agf_freeblks) +
3978                                     be32_to_cpu(agfp->agf_flcount);
3979                         xfs_buf_relse(agfbp);
3980                 }
3981
3982                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3983                 if (!error) {
3984                         struct xfs_agi  *agi = XFS_BUF_TO_AGI(agibp);
3985
3986                         itotal += be32_to_cpu(agi->agi_count);
3987                         ifree += be32_to_cpu(agi->agi_freecount);
3988                         xfs_buf_relse(agibp);
3989                 }
3990         }
3991 }
3992 #endif /* DEBUG */