Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
 fs/xfs/linux-2.6/xfs_sync.c | 91 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index b2b708254ae6..79038ea55b03 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -583,3 +583,94 @@ xfs_syncd_stop(
 	kthread_stop(mp->m_sync_task);
 }
 
+int
+xfs_finish_reclaim(
+	xfs_inode_t	*ip,
+	int		locked,
+	int		sync_mode)
+{
+	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);
+
+	/* The hash lock here protects a thread in xfs_iget_core from
+	 * racing with us on linking the inode back with a vnode.
+	 * Once we have the XFS_IRECLAIM flag set it will not touch
+	 * us.
+	 */
+	write_lock(&pag->pag_ici_lock);
+	spin_lock(&ip->i_flags_lock);
+	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
+	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
+		spin_unlock(&ip->i_flags_lock);
+		write_unlock(&pag->pag_ici_lock);
+		if (locked) {
+			xfs_ifunlock(ip);
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		}
+		return 1;
+	}
+	__xfs_iflags_set(ip, XFS_IRECLAIM);
+	spin_unlock(&ip->i_flags_lock);
+	write_unlock(&pag->pag_ici_lock);
+	xfs_put_perag(ip->i_mount, pag);
+
+	/*
+	 * If the inode is still dirty, then flush it out. If the inode
+	 * is not in the AIL, then it will be OK to flush it delwri as
+	 * long as xfs_iflush() does not keep any references to the inode.
+	 * We leave that decision up to xfs_iflush() since it has the
+	 * knowledge of whether it's OK to simply do a delwri flush of
+	 * the inode or whether we need to wait until the inode is
+	 * pulled from the AIL.
+	 * We get the flush lock regardless, though, just to make sure
+	 * we don't free it while it is being flushed.
+	 */
+	if (!locked) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_iflock(ip);
+	}
+
+	/*
+	 * In the case of a forced shutdown we rely on xfs_iflush() to
+	 * wait for the inode to be unpinned before returning an error.
+	 */
+	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
+		/* synchronize with xfs_iflush_done */
+		xfs_iflock(ip);
+		xfs_ifunlock(ip);
+	}
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	xfs_ireclaim(ip);
+	return 0;
+}
+
+int
+xfs_finish_reclaim_all(
+	xfs_mount_t	*mp,
+	int		noblock,
+	int		mode)
+{
+	xfs_inode_t	*ip, *n;
+
+restart:
+	XFS_MOUNT_ILOCK(mp);
+	list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
+		if (noblock) {
+			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
+				continue;
+			if (xfs_ipincount(ip) ||
+			    !xfs_iflock_nowait(ip)) {
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				continue;
+			}
+		}
+		XFS_MOUNT_IUNLOCK(mp);
+		if (xfs_finish_reclaim(ip, noblock, mode))
+			delay(1);
+		goto restart;
+	}
+	XFS_MOUNT_IUNLOCK(mp);
+	return 0;
+}
+
+
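A note on the locking pattern above: xfs_finish_reclaim() claims an inode by test-and-setting XFS_IRECLAIM while holding both pag_ici_lock and i_flags_lock, so at most one thread proceeds with the teardown; a thread that loses the race backs off, and the function returns 1 in that already-claimed case. The following is a minimal standalone userspace sketch of that test-and-set-under-lock claim step, using pthreads; every name in it (struct obj, FLAG_RECLAIMABLE, FLAG_RECLAIM, claim_for_reclaim) is hypothetical and not an XFS identifier.

#include <pthread.h>
#include <stdio.h>

#define FLAG_RECLAIMABLE	0x1	/* eligible for reclaim */
#define FLAG_RECLAIM		0x2	/* some thread has claimed it */

struct obj {
	pthread_mutex_t	flags_lock;
	unsigned int	flags;
};

/*
 * Mirror of the claim step: test both flags and set FLAG_RECLAIM under
 * the lock.  Returns 1 if the caller won and must do the teardown, 0 if
 * the object was already claimed or is not reclaimable.  (Note the
 * polarity differs from xfs_finish_reclaim(), which returns 1 when it
 * did NOT win the claim.)
 */
static int claim_for_reclaim(struct obj *o)
{
	int won = 0;

	pthread_mutex_lock(&o->flags_lock);
	if ((o->flags & FLAG_RECLAIMABLE) && !(o->flags & FLAG_RECLAIM)) {
		o->flags |= FLAG_RECLAIM;	/* racers now back off */
		won = 1;
	}
	pthread_mutex_unlock(&o->flags_lock);
	return won;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, FLAG_RECLAIMABLE };

	printf("first claim:  %d\n", claim_for_reclaim(&o));	/* prints 1 */
	printf("second claim: %d\n", claim_for_reclaim(&o));	/* prints 0 */
	return 0;
}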
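Similarly, xfs_finish_reclaim_all() never holds the mount's inode-list lock across the potentially blocking xfs_finish_reclaim() call: it drops the lock, reclaims one inode, and restarts the scan from the head, since the list may have changed while the lock was dropped; the delay(1) is a brief back-off when the inode was already being reclaimed by someone else. Below is a minimal sketch of that drop-lock-and-restart walk, again with purely hypothetical names (struct item, struct worklist, drain, process_one) rather than XFS ones.

#include <pthread.h>

struct item {
	struct item	*next;
	int		claimed;	/* set once a walker owns it */
};

struct worklist {
	pthread_mutex_t	lock;
	struct item	*head;
};

/* Stand-in for the blocking per-item work (xfs_finish_reclaim() above). */
static void process_one(struct item *ip)
{
	(void)ip;
}

/*
 * Walk the list, but never hold the list lock across process_one():
 * claim an item, drop the lock, do the work, then restart the scan
 * from the head because the list may have changed in the meantime.
 */
static void drain(struct worklist *wl)
{
	struct item *ip;

restart:
	pthread_mutex_lock(&wl->lock);
	for (ip = wl->head; ip != NULL; ip = ip->next) {
		if (ip->claimed)
			continue;	/* someone else owns this one */
		ip->claimed = 1;
		pthread_mutex_unlock(&wl->lock);
		process_one(ip);	/* may sleep; lock not held */
		goto restart;
	}
	pthread_mutex_unlock(&wl->lock);
}

int main(void)
{
	struct item a = { NULL, 0 };
	struct item b = { &a, 0 };
	struct worklist wl = { PTHREAD_MUTEX_INITIALIZER, &b };

	drain(&wl);	/* processes b, restarts the scan, then processes a */
	return 0;
}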