2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_btree.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_inode.h"
37 #include "xfs_dinode.h"
38 #include "xfs_error.h"
39 #include "xfs_mru_cache.h"
40 #include "xfs_filestream.h"
41 #include "xfs_vnodeops.h"
42 #include "xfs_utils.h"
43 #include "xfs_buf_item.h"
44 #include "xfs_inode_item.h"
47 #include <linux/kthread.h>
48 #include <linux/freezer.h>
51 * xfs_sync flushes any pending I/O to file system vfsp.
53 * This routine is called by vfs_sync() to make sure that things make it
54 * out to disk eventually, on sync() system calls to flush out everything,
55 * and when the file system is unmounted. For the vfs_sync() case, all
56 * we really need to do is sync out the log to make all of our meta-data
57 * updates permanent (except for timestamps). For calls from pflushd(),
58 * dirty pages are kept moving by calling pdflush() on the inodes
59 * containing them. We also flush the inodes that we can lock without
60 * sleeping and the superblock if we can lock it without sleeping from
61 * vfs_sync() so that items at the tail of the log are always moving out.
64 * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want
65 * to sleep if we can help it. All we really need
66 * to do is ensure that the log is synced at least
67 * periodically. We also push the inodes and
68 * superblock if we can lock them without sleeping
69 * and they are not pinned.
70 * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not
71 * set, then we really want to lock each inode and flush
73 * SYNC_WAIT - All the flushes that take place in this call should
75 * SYNC_DELWRI - This tells us to push dirty pages associated with
76 * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to
77 * determine if they should be flushed sync, async, or
79 * SYNC_CLOSE - This flag is passed when the system is being
80 * unmounted. We should sync and invalidate everything.
81 * SYNC_FSDATA - This indicates that the caller would like to make
82 * sure the superblock is safe on disk. We can ensure
83 * this by simply making sure the log gets flushed
84 * if SYNC_BDFLUSH is set, and by actually writing it
86 * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete
87 * before we return (including direct I/O). Forms the drain
88 * side of the write barrier needed to safely quiesce the
/*
 * NOTE(review): this file is a sparse extraction — each surviving line
 * carries its original line number and many interior lines (signatures,
 * braces, some statements) are missing.  Code below is left byte-identical;
 * only commentary is added.
 *
 * Fragment of xfs_sync(): first syncs quota state via XFS_QM_DQSYNC, bails
 * out early if the filesystem has been forcibly shut down, flushes
 * filestreams when SYNC_IOWAIT is requested, then delegates the remaining
 * sync work to xfs_syncsub().
 */
100 * Get the Quota Manager to flush the dquots.
102 * If XFS quota support is not enabled or this filesystem
103 * instance does not use quotas XFS_QM_DQSYNC will always
106 error = XFS_QM_DQSYNC(mp, flags);
109 * If we got an IO error, we will be shutting down.
110 * So, there's nothing more for us to do here.
112 ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
113 if (XFS_FORCED_SHUTDOWN(mp))
114 return XFS_ERROR(error);
/* NOTE(review): filestream flush only on the I/O-drain (freeze) path. */
117 if (flags & SYNC_IOWAIT)
118 xfs_filestream_flush(mp);
120 return xfs_syncsub(mp, flags, NULL);
/*
 * xfs_sync_inodes() (fragment): walks the mount's circular in-core inode
 * list and flushes each inode according to the SYNC_* flags.  Because an
 * individual flush may need to sleep, a marker record ("ipointer") is
 * spliced into the list before the mount lock is dropped, so the walk can
 * resume from the marker afterwards.
 *
 * NOTE(review): many interior lines are missing from this extraction
 * (the function signature, several branch bodies and closing braces);
 * code is preserved byte-identical, comments only.
 */
124 * xfs sync routine for internal use
126 * This routine supports all of the flags defined for the generic vfs_sync
127 * interface as explained above under xfs_sync.
136 xfs_inode_t *ip = NULL;
137 struct inode *vp = NULL;
142 uint base_lock_flags;
143 boolean_t mount_locked;
144 boolean_t vnode_refed;
146 xfs_iptr_t *ipointer;
148 boolean_t ipointer_in = B_FALSE;
/* Track whether our marker is currently spliced into the inode list. */
150 #define IPOINTER_SET ipointer_in = B_TRUE
151 #define IPOINTER_CLR ipointer_in = B_FALSE
158 /* Insert a marker record into the inode list after inode ip. The list
159 * must be locked when this is called. After the call the list will no
162 #define IPOINTER_INSERT(ip, mp) { \
163 ASSERT(ipointer_in == B_FALSE); \
164 ipointer->ip_mnext = ip->i_mnext; \
165 ipointer->ip_mprev = ip; \
166 ip->i_mnext = (xfs_inode_t *)ipointer; \
167 ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \
169 XFS_MOUNT_IUNLOCK(mp); \
170 mount_locked = B_FALSE; \
174 /* Remove the marker from the inode list. If the marker was the only item
175 * in the list then there are no remaining inodes and we should zero out
176 * the whole list. If we are the current head of the list then move the head
179 #define IPOINTER_REMOVE(ip, mp) { \
180 ASSERT(ipointer_in == B_TRUE); \
181 if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \
182 ip = ipointer->ip_mnext; \
183 ip->i_mprev = ipointer->ip_mprev; \
184 ipointer->ip_mprev->i_mnext = ip; \
185 if (mp->m_inodes == (xfs_inode_t *)ipointer) { \
189 ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \
190 mp->m_inodes = NULL; \
/* Yield the mount lock every (mask+1) iterations to avoid hogging it. */
196 #define XFS_PREEMPT_MASK 0x7f
198 ASSERT(!(flags & SYNC_BDFLUSH));
202 if (mp->m_flags & XFS_MOUNT_RDONLY)
208 /* Allocate a reference marker */
209 ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP);
211 fflag = XFS_B_ASYNC; /* default is don't wait */
212 if (flags & SYNC_DELWRI)
213 fflag = XFS_B_DELWRI;
214 if (flags & SYNC_WAIT)
215 fflag = 0; /* synchronous overrides all */
217 base_lock_flags = XFS_ILOCK_SHARED;
218 if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
220 * We need the I/O lock if we're going to call any of
221 * the flush/inval routines.
223 base_lock_flags |= XFS_IOLOCK_SHARED;
/* NOTE(review): loop body below — invariants re-checked each pass. */
230 mount_locked = B_TRUE;
231 vnode_refed = B_FALSE;
236 ASSERT(ipointer_in == B_FALSE);
237 ASSERT(vnode_refed == B_FALSE);
239 lock_flags = base_lock_flags;
242 * There were no inodes in the list, just break out
250 * We found another sync thread marker - skip it
252 if (ip->i_mount == NULL) {
260 * If the vnode is gone then this is being torn down,
261 * call reclaim if it is flushed, else let regular flush
262 * code deal with it later in the loop.
266 /* Skip ones already in reclaim */
267 if (ip->i_flags & XFS_IRECLAIM) {
271 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
273 } else if ((xfs_ipincount(ip) == 0) &&
274 xfs_iflock_nowait(ip)) {
275 IPOINTER_INSERT(ip, mp);
277 xfs_finish_reclaim(ip, 1,
278 XFS_IFLUSH_DELWRI_ELSE_ASYNC);
281 mount_locked = B_TRUE;
282 IPOINTER_REMOVE(ip, mp);
284 xfs_iunlock(ip, XFS_ILOCK_EXCL);
/* Abandon the walk on forced shutdown, except during unmount (SYNC_CLOSE). */
295 if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
296 XFS_MOUNT_IUNLOCK(mp);
302 * Try to lock without sleeping. We're out of order with
303 * the inode list lock here, so if we fail we need to drop
304 * the mount lock and try again. If we're called from
305 * bdflush() here, then don't bother.
307 * The inode lock here actually coordinates with the
308 * almost spurious inode lock in xfs_ireclaim() to prevent
309 * the vnode we handle here without a reference from
310 * being freed while we reference it. If we lock the inode
311 * while it's on the mount list here, then the spurious inode
312 * lock in xfs_ireclaim() after the inode is pulled from
313 * the mount list will sleep until we release it here.
314 * This keeps the vnode from being freed while we reference
317 if (xfs_ilock_nowait(ip, lock_flags) == 0) {
329 IPOINTER_INSERT(ip, mp);
330 xfs_ilock(ip, lock_flags);
332 ASSERT(vp == VFS_I(ip));
333 ASSERT(ip->i_mount == mp);
335 vnode_refed = B_TRUE;
338 /* From here on in the loop we may have a marker record
343 * If we have to flush data or wait for I/O completion
344 * we need to drop the ilock that we currently hold.
345 * If we need to drop the lock, insert a marker if we
346 * have not already done so.
348 if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) ||
349 ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) {
351 IPOINTER_INSERT(ip, mp);
353 xfs_iunlock(ip, XFS_ILOCK_SHARED);
355 if (flags & SYNC_CLOSE) {
356 /* Shutdown case. Flush and invalidate. */
357 if (XFS_FORCED_SHUTDOWN(mp))
358 xfs_tosspages(ip, 0, -1,
361 error = xfs_flushinval_pages(ip,
363 } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) {
364 error = xfs_flush_pages(ip, 0,
369 * When freezing, we need to wait to ensure all I/O (including direct
370 * I/O) is complete to ensure no further data modification can take
371 * place after this point
373 if (flags & SYNC_IOWAIT)
376 xfs_ilock(ip, XFS_ILOCK_SHARED);
/* Flush inode metadata if SYNC_ATTR and the in-core inode is dirty. */
379 if ((flags & SYNC_ATTR) &&
380 (ip->i_update_core ||
381 (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) {
383 IPOINTER_INSERT(ip, mp);
385 if (flags & SYNC_WAIT) {
387 error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
390 * If we can't acquire the flush lock, then the inode
391 * is already being flushed so don't bother waiting.
393 * If we can lock it then do a delwri flush so we can
394 * combine multiple inode flushes in each disk write.
396 } else if (xfs_iflock_nowait(ip)) {
397 error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
398 } else if (bypassed) {
403 if (lock_flags != 0) {
404 xfs_iunlock(ip, lock_flags);
409 * If we had to take a reference on the vnode
410 * above, then wait until after we've unlocked
411 * the inode to release the reference. This is
412 * because we can be already holding the inode
413 * lock when IRELE() calls xfs_inactive().
415 * Make sure to drop the mount lock before calling
416 * IRELE() so that we don't trip over ourselves if
417 * we have to go for the mount lock again in the
421 IPOINTER_INSERT(ip, mp);
426 vnode_refed = B_FALSE;
434 * bail out if the filesystem is corrupted.
436 if (error == EFSCORRUPTED) {
439 IPOINTER_REMOVE(ip, mp);
441 XFS_MOUNT_IUNLOCK(mp);
442 ASSERT(ipointer_in == B_FALSE);
444 return XFS_ERROR(error);
447 /* Let other threads have a chance at the mount lock
448 * if we have looped many times without dropping the
451 if ((++preempt & XFS_PREEMPT_MASK) == 0) {
453 IPOINTER_INSERT(ip, mp);
457 if (mount_locked == B_FALSE) {
459 mount_locked = B_TRUE;
460 IPOINTER_REMOVE(ip, mp);
464 ASSERT(ipointer_in == B_FALSE);
467 } while (ip != mp->m_inodes);
469 XFS_MOUNT_IUNLOCK(mp);
471 ASSERT(ipointer_in == B_FALSE);
474 return XFS_ERROR(last_error);
/*
 * xfs_syncsub() (fragment): the core sync worker.  Forces the log, flushes
 * inodes via xfs_sync_inodes() when SYNC_ATTR/SYNC_DELWRI are set, writes
 * the superblock when SYNC_FSDATA is set (trylock/unpinned-only on the
 * vfs_sync/SYNC_BDFLUSH path), issues a dummy "log cover" transaction when
 * needed, and on unmount (SYNC_CLOSE|SYNC_WAIT) flushes the data and
 * realtime device buffers.
 *
 * NOTE(review): sparse extraction — signature and several branch bodies
 * are missing; code preserved byte-identical, comments only.
 */
478 * xfs sync routine for internal use
480 * This routine supports all of the flags defined for the generic vfs_sync
481 * interface as explained above under xfs_sync.
492 uint log_flags = XFS_LOG_FORCE;
494 xfs_buf_log_item_t *bip;
497 * Sync out the log. This ensures that the log is periodically
498 * flushed even if there is not enough activity to fill it up.
500 if (flags & SYNC_WAIT)
501 log_flags |= XFS_LOG_SYNC;
503 xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
505 if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
506 if (flags & SYNC_BDFLUSH)
507 xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
509 error = xfs_sync_inodes(mp, flags, bypassed);
513 * Flushing out dirty data above probably generated more
514 * log activity, so if this isn't vfs_sync() then flush
517 if (flags & SYNC_DELWRI) {
518 xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
521 if (flags & SYNC_FSDATA) {
523 * If this is vfs_sync() then only sync the superblock
524 * if we can lock it without sleeping and it is not pinned.
526 if (flags & SYNC_BDFLUSH) {
527 bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
529 bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
531 xfs_buf_item_dirty(bip)) {
532 if (!(XFS_BUF_ISPINNED(bp))) {
534 error = xfs_bwrite(mp, bp);
/* Non-bdflush path: take the superblock buffer unconditionally (blocking). */
543 bp = xfs_getsb(mp, 0);
545 * If the buffer is pinned then push on the log so
546 * we won't get stuck waiting in the write for
547 * someone, maybe ourselves, to flush the log.
548 * Even though we just pushed the log above, we
549 * did not have the superblock buffer locked at
550 * that point so it can become pinned in between
553 if (XFS_BUF_ISPINNED(bp))
554 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
555 if (flags & SYNC_WAIT)
559 error = xfs_bwrite(mp, bp);
567 * Now check to see if the log needs a "dummy" transaction.
569 if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
574 * Put a dummy transaction in the log to tell
575 * recovery that all others are OK.
577 tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
578 if ((error = xfs_trans_reserve(tp, 0,
579 XFS_ICHANGE_LOG_RES(mp),
581 xfs_trans_cancel(tp, 0);
/* NOTE(review): `ip` here is presumably the root inode, grabbed on a
 * missing line above — confirm against the full source. */
586 xfs_ilock(ip, XFS_ILOCK_EXCL);
588 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
589 xfs_trans_ihold(tp, ip);
590 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
591 error = xfs_trans_commit(tp, 0);
592 xfs_iunlock(ip, XFS_ILOCK_EXCL);
593 xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
597 * When shutting down, we need to ensure that the AIL is pushed
598 * to disk or the filesystem can appear corrupt from the PROM.
600 if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) {
601 XFS_bflush(mp->m_ddev_targp);
602 if (mp->m_rtdev_targp) {
603 XFS_bflush(mp->m_rtdev_targp);
607 return XFS_ERROR(last_error);
/*
 * xfs_syncd_queue_work() (fragment): allocates a sync work item (KM_SLEEP,
 * so this can block but not fail), records the syncer callback, appends it
 * to the mount's m_sync_list under m_sync_lock, and wakes the xfssyncd
 * thread to process it.
 * NOTE(review): the lines storing the work's data pointer and the closing
 * brace are missing from this extraction.
 */
611 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
612 * Doing this has two advantages:
613 * - It saves on stack space, which is tight in certain situations
614 * - It can be used (with care) as a mechanism to avoid deadlocks.
615 * Flushing while allocating in a full filesystem requires both.
618 xfs_syncd_queue_work(
619 struct xfs_mount *mp,
621 void (*syncer)(struct xfs_mount *, void *))
623 struct bhv_vfs_sync_work *work;
625 work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
626 INIT_LIST_HEAD(&work->w_list);
627 work->w_syncer = syncer;
630 spin_lock(&mp->m_sync_lock);
631 list_add_tail(&work->w_list, &mp->m_sync_list);
632 spin_unlock(&mp->m_sync_lock);
633 wake_up_process(mp->m_sync_task);
/*
 * xfs_flush_inode_work() (fragment): xfssyncd callback that flushes one
 * inode's dirty pages via filemap_flush().  The arg is the VFS inode
 * whose mapping gets flushed.
 * NOTE(review): the `void *arg` parameter line and the trailing reference
 * drop / closing brace are missing from this extraction.
 */
637 * Flush delayed allocate data, attempting to free up reserved space
638 * from existing allocations. At this point a new allocation attempt
639 * has failed with ENOSPC and we are in the process of scratching our
640 * heads, looking about for more room...
643 xfs_flush_inode_work(
644 struct xfs_mount *mp,
647 struct inode *inode = arg;
648 filemap_flush(inode->i_mapping);
/*
 * xfs_flush_inode() (fragment): queues xfs_flush_inode_work for this inode
 * on the xfssyncd thread, then sleeps ~500ms to give the flush a chance to
 * make space before the caller retries its allocation.
 * NOTE(review): signature and reference-taking lines missing from this
 * extraction.
 */
656 struct inode *inode = VFS_I(ip);
659 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
660 delay(msecs_to_jiffies(500));
/*
 * xfs_flush_device_work() (fragment): xfssyncd callback that syncs the
 * whole block device backing the mount (sync_blockdev), the escalation
 * path when flushing a single inode did not free enough space.
 * NOTE(review): parameter line for `arg` and the trailing cleanup/closing
 * brace are missing from this extraction; `inode` presumably gets released
 * on a missing line — confirm against the full source.
 */
664 * This is the "bigger hammer" version of xfs_flush_inode_work...
665 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
668 xfs_flush_device_work(
669 struct xfs_mount *mp,
672 struct inode *inode = arg;
673 sync_blockdev(mp->m_super->s_bdev);
/*
 * xfs_flush_device() (fragment): queues xfs_flush_device_work on the
 * xfssyncd thread, sleeps ~500ms, then does a synchronous log force so log
 * space is also reclaimed before the caller retries.
 * NOTE(review): signature and reference-taking lines missing from this
 * extraction.
 */
681 struct inode *inode = VFS_I(ip);
684 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
685 delay(msecs_to_jiffies(500));
686 xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
/*
 * xfs_sync_worker() (fragment): the periodic work run by xfssyncd — does a
 * best-effort sync (FSDATA|BDFLUSH|ATTR) on read-write mounts, then wakes
 * anyone waiting in m_wait_single_sync_task.
 * NOTE(review): signature and surrounding braces missing from this
 * extraction.
 */
691 struct xfs_mount *mp,
696 if (!(mp->m_flags & XFS_MOUNT_RDONLY))
697 error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
699 wake_up(&mp->m_wait_single_sync_task);
/*
 * xfssyncd() (fragment): the per-mount sync kthread main loop.  Sleeps
 * interruptibly for xfs_syncd_centisecs; on timeout (or an empty-list
 * wakeup, e.g. laptop mode) it queues the built-in periodic m_sync_work,
 * then splices m_sync_list onto a private list under m_sync_lock and runs
 * each work item's w_syncer callback outside the lock, freeing queued
 * items (the embedded m_sync_work is skipped, not freed).
 * NOTE(review): signature, exit path and closing braces are missing from
 * this extraction; code preserved byte-identical.
 */
706 struct xfs_mount *mp = arg;
708 bhv_vfs_sync_work_t *work, *n;
712 timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
714 timeleft = schedule_timeout_interruptible(timeleft);
/* Only exit once the work list has drained after a stop request. */
717 if (kthread_should_stop() && list_empty(&mp->m_sync_list))
720 spin_lock(&mp->m_sync_lock);
722 * We can get woken by laptop mode, to do a sync -
723 * that's the (only!) case where the list would be
724 * empty with time remaining.
726 if (!timeleft || list_empty(&mp->m_sync_list)) {
728 timeleft = xfs_syncd_centisecs *
729 msecs_to_jiffies(10);
730 INIT_LIST_HEAD(&mp->m_sync_work.w_list);
731 list_add_tail(&mp->m_sync_work.w_list,
734 list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
735 list_move(&work->w_list, &tmp);
736 spin_unlock(&mp->m_sync_lock);
/* Run callbacks without holding m_sync_lock; they may sleep. */
738 list_for_each_entry_safe(work, n, &tmp, w_list) {
739 (*work->w_syncer)(mp, work->w_data);
740 list_del(&work->w_list);
741 if (work == &mp->m_sync_work)
/*
 * xfs_syncd_init() (fragment): initializes the mount's built-in periodic
 * work item and starts the xfssyncd kthread; returns a negated errno if
 * kthread_run() fails (PTR_ERR is positive-errno encoded here, hence the
 * negation).
 * NOTE(review): function signature line is missing from this extraction.
 */
753 struct xfs_mount *mp)
754 mp->m_sync_work.w_syncer = xfs_sync_worker;
755 mp->m_sync_work.w_mount = mp;
756 mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
757 if (IS_ERR(mp->m_sync_task))
758 return -PTR_ERR(mp->m_sync_task);
/*
 * xfs_syncd_stop() (fragment): stops the xfssyncd kthread; kthread_stop()
 * blocks until the thread exits.
 * NOTE(review): function signature line is missing from this extraction.
 */
764 struct xfs_mount *mp)
766 kthread_stop(mp->m_sync_task);