Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c   91
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h    3
-rw-r--r--  fs/xfs/xfs_inode.h             2
-rw-r--r--  fs/xfs/xfs_vnodeops.c         90
4 files changed, 94 insertions, 92 deletions
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index b2b708254ae6..79038ea55b03 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -583,3 +583,94 @@ xfs_syncd_stop(
 	kthread_stop(mp->m_sync_task);
 }
 
+int
+xfs_finish_reclaim(
+	xfs_inode_t	*ip,
+	int		locked,
+	int		sync_mode)
+{
+	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);
+
+	/* The hash lock here protects a thread in xfs_iget_core from
+	 * racing with us on linking the inode back with a vnode.
+	 * Once we have the XFS_IRECLAIM flag set it will not touch
+	 * us.
+	 */
+	write_lock(&pag->pag_ici_lock);
+	spin_lock(&ip->i_flags_lock);
+	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
+	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
+		spin_unlock(&ip->i_flags_lock);
+		write_unlock(&pag->pag_ici_lock);
+		if (locked) {
+			xfs_ifunlock(ip);
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		}
+		return 1;
+	}
+	__xfs_iflags_set(ip, XFS_IRECLAIM);
+	spin_unlock(&ip->i_flags_lock);
+	write_unlock(&pag->pag_ici_lock);
+	xfs_put_perag(ip->i_mount, pag);
+
+	/*
+	 * If the inode is still dirty, then flush it out.  If the inode
+	 * is not in the AIL, then it will be OK to flush it delwri as
+	 * long as xfs_iflush() does not keep any references to the inode.
+	 * We leave that decision up to xfs_iflush() since it has the
+	 * knowledge of whether it's OK to simply do a delwri flush of
+	 * the inode or whether we need to wait until the inode is
+	 * pulled from the AIL.
+	 * We get the flush lock regardless, though, just to make sure
+	 * we don't free it while it is being flushed.
+	 */
+	if (!locked) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_iflock(ip);
+	}
+
+	/*
+	 * In the case of a forced shutdown we rely on xfs_iflush() to
+	 * wait for the inode to be unpinned before returning an error.
+	 */
+	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
+		/* synchronize with xfs_iflush_done */
+		xfs_iflock(ip);
+		xfs_ifunlock(ip);
+	}
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	xfs_ireclaim(ip);
+	return 0;
+}
+
+int
+xfs_finish_reclaim_all(
+	xfs_mount_t	*mp,
+	int		noblock,
+	int		mode)
+{
+	xfs_inode_t	*ip, *n;
+
+restart:
+	XFS_MOUNT_ILOCK(mp);
+	list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
+		if (noblock) {
+			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
+				continue;
+			if (xfs_ipincount(ip) ||
+			    !xfs_iflock_nowait(ip)) {
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				continue;
+			}
+		}
+		XFS_MOUNT_IUNLOCK(mp);
+		if (xfs_finish_reclaim(ip, noblock, mode))
+			delay(1);
+		goto restart;
+	}
+	XFS_MOUNT_IUNLOCK(mp);
+	return 0;
+}
+
+
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 3b49aa3bb5fc..23117a17fdef 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -45,4 +45,7 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 void xfs_flush_inode(struct xfs_inode *ip);
 void xfs_flush_device(struct xfs_inode *ip);
 
+int xfs_finish_reclaim(struct xfs_inode *ip, int locked, int sync_mode);
+int xfs_finish_reclaim_all(struct xfs_mount *mp, int noblock, int mode);
+
 #endif
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 345b43a90eb5..64e50ff9ad23 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -496,8 +496,6 @@ int xfs_isilocked(xfs_inode_t *, uint);
 uint		xfs_ilock_map_shared(xfs_inode_t *);
 void		xfs_iunlock_map_shared(xfs_inode_t *, uint);
 void		xfs_ireclaim(xfs_inode_t *);
-int		xfs_finish_reclaim(xfs_inode_t *, int, int);
-int		xfs_finish_reclaim_all(struct xfs_mount *, int, int);
 
 /*
  * xfs_inode.c prototypes.
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 7fb577c9f9d8..cdcc835bc5a5 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2849,96 +2849,6 @@ xfs_reclaim(
 	return 0;
 }
 
-int
-xfs_finish_reclaim(
-	xfs_inode_t	*ip,
-	int		locked,
-	int		sync_mode)
-{
-	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);
-
-	/* The hash lock here protects a thread in xfs_iget_core from
-	 * racing with us on linking the inode back with a vnode.
-	 * Once we have the XFS_IRECLAIM flag set it will not touch
-	 * us.
-	 */
-	write_lock(&pag->pag_ici_lock);
-	spin_lock(&ip->i_flags_lock);
-	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
-	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
-		spin_unlock(&ip->i_flags_lock);
-		write_unlock(&pag->pag_ici_lock);
-		if (locked) {
-			xfs_ifunlock(ip);
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		}
-		return 1;
-	}
-	__xfs_iflags_set(ip, XFS_IRECLAIM);
-	spin_unlock(&ip->i_flags_lock);
-	write_unlock(&pag->pag_ici_lock);
-	xfs_put_perag(ip->i_mount, pag);
-
-	/*
-	 * If the inode is still dirty, then flush it out.  If the inode
-	 * is not in the AIL, then it will be OK to flush it delwri as
-	 * long as xfs_iflush() does not keep any references to the inode.
-	 * We leave that decision up to xfs_iflush() since it has the
-	 * knowledge of whether it's OK to simply do a delwri flush of
-	 * the inode or whether we need to wait until the inode is
-	 * pulled from the AIL.
-	 * We get the flush lock regardless, though, just to make sure
-	 * we don't free it while it is being flushed.
-	 */
-	if (!locked) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_iflock(ip);
-	}
-
-	/*
-	 * In the case of a forced shutdown we rely on xfs_iflush() to
-	 * wait for the inode to be unpinned before returning an error.
-	 */
-	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
-		/* synchronize with xfs_iflush_done */
-		xfs_iflock(ip);
-		xfs_ifunlock(ip);
-	}
-
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	xfs_ireclaim(ip);
-	return 0;
-}
-
-int
-xfs_finish_reclaim_all(
-	xfs_mount_t	*mp,
-	int		noblock,
-	int		mode)
-{
-	xfs_inode_t	*ip, *n;
-
-restart:
-	XFS_MOUNT_ILOCK(mp);
-	list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
-		if (noblock) {
-			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
-				continue;
-			if (xfs_ipincount(ip) ||
-			    !xfs_iflock_nowait(ip)) {
-				xfs_iunlock(ip, XFS_ILOCK_EXCL);
-				continue;
-			}
-		}
-		XFS_MOUNT_IUNLOCK(mp);
-		if (xfs_finish_reclaim(ip, noblock, mode))
-			delay(1);
-		goto restart;
-	}
-	XFS_MOUNT_IUNLOCK(mp);
-	return 0;
-}
-
 /*
  * xfs_alloc_file_space()
  *	This routine allocates disk space for the given file.
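
Note on usage: after this change the two helpers are declared in fs/xfs/linux-2.6/xfs_sync.h rather than fs/xfs/xfs_inode.h, so they are reached through the Linux-specific sync code. The fragment below is a minimal, hypothetical caller sketch, not part of this commit: the function name, the two-pass call pattern, and the XFS_IFLUSH_* flush modes passed in are assumptions for illustration only.

/*
 * Hypothetical caller sketch (illustration only, not from this commit):
 * drain every inode queued on mp->m_del_inodes, first opportunistically,
 * then synchronously. The flush modes are assumed from the XFS_IFLUSH_*
 * set used by xfs_iflush() in this era of the code.
 */
#include "xfs_inode.h"
#include "xfs_sync.h"

static void
example_drain_reclaim(
	struct xfs_mount	*mp)
{
	/*
	 * noblock == 1: trylock path; a pinned or already-flushing
	 * inode is skipped instead of stalling the walk.
	 */
	xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * noblock == 0: take the locks unconditionally and flush
	 * synchronously so nothing remains on the list afterwards.
	 */
	xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_SYNC);
}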