/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		return ENOENT;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	return 0;
}
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			error = 0;
	int			nr_found;

restart:
	skipped = 0;
	first_index = 0;
	do {
		struct xfs_inode *ip;

		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, first_index, 1);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode. (A worked example follows
		 * this function.)
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		if (xfs_inode_ag_walk_grab(ip)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);

		error = execute(ip, pag, flags);
		IRELE(ip);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (1);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
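/*
 * Worked example (illustrative, not part of the original file): the overflow
 * check above relies on the AG-relative inode number (agino) being the low
 * bits of the absolute inode number. Suppose, hypothetically, the current
 * inode is the last representable inode of its AG:
 *
 *	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);	// all agino bits set
 *	next  = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);	// carry wraps to 0
 *	if (next < agino)				// 0 < agino: wrapped
 *		break;					// done with this AG
 *
 * Without the check, first_index would wrap to 0 and the lookup would
 * restart from the beginning of the same AG indefinitely.
 */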
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}
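/*
 * Usage sketch (illustrative only; xfs_count_dirty and some_counter are
 * hypothetical, not part of XFS): any per-inode operation matching the
 * execute signature can be run across every cached inode in the filesystem:
 *
 *	STATIC int
 *	xfs_count_dirty(struct xfs_inode *ip, struct xfs_perag *pag, int flags)
 *	{
 *		if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
 *			atomic_inc(&some_counter);
 *		return 0;
 *	}
 *
 *	error = xfs_inode_ag_iterator(mp, xfs_count_dirty, 0);
 *
 * The iterator handles AG iteration, inode grabbing and error aggregation;
 * the callback only ever sees live, referenced inodes.
 */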
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	return error;
}
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}
/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}
/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}
/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
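/*
 * Ordering sketch (illustrative, based on the comment above; the freeze
 * sequence shown is a simplification of what the VFS super operations
 * actually drive):
 *
 *	xfs_quiesce_data(mp);	// phase 1: sync data, write superblock
 *	// ... VFS blocks all new transactions ...
 *	xfs_quiesce_attr(mp);	// phase 2: drain transactions, flush inodes
 *
 * Only after phase 2 completes is the on-disk image consistent without
 * log replay.
 */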
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}
/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}
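/*
 * Caller sketch (illustrative only): the allocation path uses this when a
 * delayed allocation fails for lack of space, hoping that flushing delalloc
 * data frees up reserved blocks. The retry loop and xfs_some_allocation
 * below are hypothetical:
 *
 *	error = xfs_some_allocation(ip, ...);
 *	if (error == ENOSPC) {
 *		xfs_flush_inodes(ip);		// blocks until flush is done
 *		error = xfs_some_allocation(ip, ...);
 *	}
 *
 * Because the flush runs in xfssyncd and the caller waits on a completion,
 * the caller's stack stays shallow and lock-order deadlocks are avoided,
 * which is exactly what the comment above xfs_syncd_queue_work() describes.
 */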
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp, 0);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
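/*
 * Lifecycle sketch (illustrative): a mount spawns its per-mount sync thread
 * once the mount structure is initialised and tears it down on unmount.
 * The unwind label is hypothetical and error handling is abbreviated:
 *
 *	error = xfs_syncd_init(mp);	// spawns "xfssyncd/<fsname>"
 *	if (error)
 *		goto out_free_mp;
 *	...
 *	xfs_syncd_stop(mp);		// kthread_stop() waits for thread exit
 */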
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}
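/*
 * Sketch of the two-level tag scheme these helpers maintain (illustrative):
 * tagging an inode in pag->pag_ici_root and, on the 0 -> 1 transition,
 * tagging the AG in mp->m_perag_tree lets a reclaim walk skip AGs with no
 * reclaimable inodes entirely:
 *
 *	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
 *		// only AGs whose perag-tree entry carries the tag are
 *		// returned, so AGs with nothing to reclaim cost nothing
 *		ag = pag->pag_agno + 1;
 *		...
 *		xfs_perag_put(pag);
 *	}
 */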
/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *	inode state	     	iflush ret	required action
 *	---------------      	----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri, otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		return 0;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	write_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk would leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;

		ag = pag->pag_agno + 1;

		do {
			struct xfs_inode *ip;
			int	nr_found;

			write_lock(&pag->pag_ici_lock);
			nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					(void **)&ip, first_index, 1,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				write_unlock(&pag->pag_ici_lock);
				break;
			}

			/*
			 * Update the index for the next lookup. Catch overflows
			 * into the next AG range which can occur if we have inodes
			 * in the last block of the AG and we are currently
			 * pointing to the last inode.
			 */
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;

			error = xfs_reclaim_inode(ip, pag, flags);
			if (error && last_error != EFSCORRUPTED)
				last_error = error;

		} while (!done && (*nr_to_scan)--);

		xfs_perag_put(pag);
	}
	return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
/*
 * Shrinker infrastructure.
 */
static int
xfs_reclaim_inode_shrink(
	struct shrinker	*shrink,
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, 0, &nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
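/*
 * Contract sketch (illustrative): the VM of this kernel era calls a
 * shrinker in two modes. With nr_to_scan == 0 it only wants a count of
 * reclaimable objects; with nr_to_scan > 0 it wants reclaim work done, and
 * a return of -1 tells it to back off (e.g. when the allocation context
 * forbids filesystem recursion). Roughly:
 *
 *	count = shrink->shrink(shrink, 0, GFP_KERNEL);	   // query only
 *	shrink->shrink(shrink, 128, GFP_KERNEL);	   // do reclaim
 *	shrink->shrink(shrink, 128, GFP_NOFS) == -1;	   // back off, no FS
 *
 * The signature shown matches the shrinker API used here, which has since
 * changed in later kernels.
 */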
void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}