 fs/xfs/linux-2.6/xfs_sync.c | 110 ++++++++++++++++++++++++++++++++------------
 1 file changed, 77 insertions(+), 33 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 0ed3d0ae3c28..754bc591a247 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -631,6 +631,43 @@ __xfs_inode_clear_reclaim_tag(
 }
 
 /*
+ * Grab the inode for reclaim exclusively.
+ * Return 0 if we grabbed it, non-zero otherwise.
+ */
+STATIC int
+xfs_reclaim_inode_grab(
+	struct xfs_inode	*ip,
+	int			flags)
+{
+
+	/*
+	 * do some unlocked checks first to avoid unnecessary lock traffic.
+	 * The first is a flush lock check, the second is an already-in-reclaim
+	 * check. Only do these checks if we are not going to block on locks.
+	 */
+	if ((flags & SYNC_TRYLOCK) &&
+	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
+		return 1;
+	}
+
+	/*
+	 * The radix tree lock here protects a thread in xfs_iget from racing
+	 * with us starting reclaim on the inode. Once we have the
+	 * XFS_IRECLAIM flag set it will not touch us.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
+		/* ignore as it is already under reclaim */
+		spin_unlock(&ip->i_flags_lock);
+		return 1;
+	}
+	__xfs_iflags_set(ip, XFS_IRECLAIM);
+	spin_unlock(&ip->i_flags_lock);
+	return 0;
+}
+
+/*
  * Inodes in different states need to be treated differently, and the return
  * value of xfs_iflush is not sufficient to get this right. The following table
  * lists the inode states and the reclaim actions necessary for non-blocking
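
xfs_reclaim_inode_grab() above is a two-stage claim: a cheap unlocked rejection first (done only under SYNC_TRYLOCK, because the unlocked values are mere hints), then a test-and-set of XFS_IRECLAIM under i_flags_lock so that exactly one thread wins the inode. A minimal userspace sketch of that pattern, assuming pthreads; the names (fake_inode, try_grab, nonblocking) are invented for illustration and are not XFS code:

/*
 * Userspace analogue of the two-stage grab: unlocked hints first, then a
 * locked test-and-set of a "claimed" flag.  Hypothetical names throughout.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t	flags_lock;	/* stands in for ip->i_flags_lock */
	bool		flush_done;	/* stands in for ip->i_flush.done */
	bool		in_reclaim;	/* stands in for XFS_IRECLAIM */
};

/* Return 0 if we claimed the inode, non-zero otherwise. */
static int try_grab(struct fake_inode *ip, bool nonblocking)
{
	/* Unlocked pre-checks: cheap hints only, so re-check under the lock. */
	if (nonblocking && (!ip->flush_done || ip->in_reclaim))
		return 1;

	pthread_mutex_lock(&ip->flags_lock);
	if (ip->in_reclaim) {
		/* somebody else already claimed it */
		pthread_mutex_unlock(&ip->flags_lock);
		return 1;
	}
	ip->in_reclaim = true;
	pthread_mutex_unlock(&ip->flags_lock);
	return 0;
}

int main(void)
{
	struct fake_inode ip = { .flush_done = true };

	pthread_mutex_init(&ip.flags_lock, NULL);
	printf("first grab:  %d\n", try_grab(&ip, true));	/* 0: we claimed it */
	printf("second grab: %d\n", try_grab(&ip, true));	/* 1: already claimed */
	pthread_mutex_destroy(&ip.flags_lock);
	return 0;
}

A stale answer from the unlocked check is harmless: a wrong "skip" just defers the inode to a later scan, and a wrong "go ahead" is caught again by the re-check under the lock.
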
@@ -688,23 +725,6 @@ xfs_reclaim_inode(
 {
 	int	error = 0;
 
-	/*
-	 * The radix tree lock here protects a thread in xfs_iget from racing
-	 * with us starting reclaim on the inode. Once we have the
-	 * XFS_IRECLAIM flag set it will not touch us.
-	 */
-	spin_lock(&ip->i_flags_lock);
-	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
-		/* ignore as it is already under reclaim */
-		spin_unlock(&ip->i_flags_lock);
-		write_unlock(&pag->pag_ici_lock);
-		return 0;
-	}
-	__xfs_iflags_set(ip, XFS_IRECLAIM);
-	spin_unlock(&ip->i_flags_lock);
-	write_unlock(&pag->pag_ici_lock);
-
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	if (!xfs_iflock_nowait(ip)) {
 		if (!(sync_mode & SYNC_WAIT))
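
With the claim moved into xfs_reclaim_inode_grab(), xfs_reclaim_inode() is entered with XFS_IRECLAIM already set and begins directly at the ILOCK/flush-lock attempt shown in the trailing context, deferring the inode when it cannot get the flush lock in non-blocking mode. A rough userspace sketch of that trylock-or-defer behaviour, again assuming pthreads (reclaim_one and can_wait are hypothetical names, not the XFS API):

/* Trylock-or-defer sketch; hypothetical names, not the XFS API. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success, EAGAIN when non-blocking and the lock is busy. */
static int reclaim_one(int can_wait)
{
	if (pthread_mutex_trylock(&flush_lock) != 0) {
		if (!can_wait)
			return EAGAIN;	/* defer: revisit on a later scan */
		pthread_mutex_lock(&flush_lock);	/* blocking (SYNC_WAIT-like) path */
	}

	/* ... the actual flush/reclaim work would happen here ... */

	pthread_mutex_unlock(&flush_lock);
	return 0;
}

int main(void)
{
	printf("reclaim_one(0) -> %d\n", reclaim_one(0));	/* lock free: succeeds */
	return 0;
}
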
@@ -822,16 +842,19 @@ xfs_reclaim_inodes_ag(
 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
 		unsigned long	first_index = 0;
 		int		done = 0;
+		int		nr_found = 0;
 
 		ag = pag->pag_agno + 1;
 
 		do {
-			struct xfs_inode *ip;
-			int	nr_found;
+			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+			int	i;
 
 			write_lock(&pag->pag_ici_lock);
-			nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
-					(void **)&ip, first_index, 1,
+			nr_found = radix_tree_gang_lookup_tag(
+					&pag->pag_ici_root,
+					(void **)batch, first_index,
+					XFS_LOOKUP_BATCH,
 					XFS_ICI_RECLAIM_TAG);
 			if (!nr_found) {
 				write_unlock(&pag->pag_ici_lock);
@@ -839,20 +862,41 @@ xfs_reclaim_inodes_ag(
 			}
 
 			/*
-			 * Update the index for the next lookup. Catch overflows
-			 * into the next AG range which can occur if we have inodes
-			 * in the last block of the AG and we are currently
-			 * pointing to the last inode.
+			 * Grab the inodes before we drop the lock. If we found
+			 * nothing, nr_found == 0 and the loop will be skipped.
 			 */
-			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
-				done = 1;
+			for (i = 0; i < nr_found; i++) {
+				struct xfs_inode *ip = batch[i];
+
+				if (done || xfs_reclaim_inode_grab(ip, flags))
+					batch[i] = NULL;
+
+				/*
+				 * Update the index for the next lookup. Catch
+				 * overflows into the next AG range which can
+				 * occur if we have inodes in the last block of
+				 * the AG and we are currently pointing to the
+				 * last inode.
+				 */
+				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+					done = 1;
+			}
 
-			error = xfs_reclaim_inode(ip, pag, flags);
-			if (error && last_error != EFSCORRUPTED)
-				last_error = error;
+			/* unlock now we've grabbed the inodes. */
+			write_unlock(&pag->pag_ici_lock);
+
+			for (i = 0; i < nr_found; i++) {
+				if (!batch[i])
+					continue;
+				error = xfs_reclaim_inode(batch[i], pag, flags);
+				if (error && last_error != EFSCORRUPTED)
+					last_error = error;
+			}
+
+			*nr_to_scan -= XFS_LOOKUP_BATCH;
 
-		} while (!done && (*nr_to_scan)--);
+		} while (nr_found && !done && *nr_to_scan > 0);
 
 		xfs_perag_put(pag);
 	}
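
Taken together, these two hunks turn the reclaim walk into: gang-look-up a batch of tagged inodes under pag_ici_lock, claim each one and advance the lookup cursor while still holding the lock, drop the lock, then reclaim the claimed inodes one by one. A compact userspace sketch of that shape, assuming pthreads and with a plain sorted array standing in for the radix tree (BATCH, gang_lookup and struct obj are invented names):

/*
 * Userspace sketch of the batched walk: look a batch up under the lock,
 * claim each entry and advance the cursor, drop the lock, then process.
 * A sorted array stands in for the radix tree; all names are invented.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH	4
#define NOBJ	10

struct obj {
	unsigned int	index;
	int		claimed;
};

static struct obj objects[NOBJ];
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Return up to 'max' objects with index >= first, like a gang lookup. */
static int gang_lookup(struct obj **batch, unsigned int first, int max)
{
	int nr = 0;

	for (int i = 0; i < NOBJ && nr < max; i++)
		if (objects[i].index >= first)
			batch[nr++] = &objects[i];
	return nr;
}

int main(void)
{
	unsigned int first_index = 0;
	int nr_found;

	for (int i = 0; i < NOBJ; i++)
		objects[i].index = i;

	do {
		struct obj *batch[BATCH];

		pthread_rwlock_wrlock(&tree_lock);
		nr_found = gang_lookup(batch, first_index, BATCH);
		if (!nr_found) {
			pthread_rwlock_unlock(&tree_lock);
			break;
		}

		/* Claim the batch and advance the cursor before dropping the lock. */
		for (int i = 0; i < nr_found; i++) {
			batch[i]->claimed = 1;
			first_index = batch[i]->index + 1;
		}
		pthread_rwlock_unlock(&tree_lock);

		/* Process the claimed objects without holding the tree lock. */
		for (int i = 0; i < nr_found; i++)
			printf("reclaiming object %u\n", batch[i]->index);
	} while (nr_found);

	return 0;
}

The point of the split is the same as in the patch: the expensive per-object work happens after the tree lock is dropped, so lookups only hold the lock for one batch-sized pass at a time.
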
@@ -888,7 +932,7 @@ xfs_reclaim_inode_shrink(
 	if (!(gfp_mask & __GFP_FS))
 		return -1;
 
-	xfs_reclaim_inodes_ag(mp, 0, &nr_to_scan);
+	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
 	/* terminate if we don't exhaust the scan */
 	if (nr_to_scan > 0)
 		return -1;
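
The shrinker hunk ties the change together: the scan is now non-blocking (SYNC_TRYLOCK) and the caller judges progress from the remaining budget. Since the walk subtracts a full XFS_LOOKUP_BATCH from *nr_to_scan per batch, leftover budget means the tagged inodes ran out before the budget did, so the shrinker returns -1 to terminate. A small sketch of that accounting, with walk_once and the constants being illustrative rather than the kernel shrinker API:

/* Scan-budget sketch; walk_once and the constants are illustrative only. */
#include <stdio.h>

#define LOOKUP_BATCH	32

/*
 * Pretend one pass of the walk: it charges a full batch against the budget
 * (as the patch does with *nr_to_scan -= XFS_LOOKUP_BATCH) and reports how
 * many objects it actually found.
 */
static int walk_once(int *nr_to_scan, int remaining_objects)
{
	int found = remaining_objects < LOOKUP_BATCH ? remaining_objects : LOOKUP_BATCH;

	*nr_to_scan -= LOOKUP_BATCH;
	return found;
}

int main(void)
{
	int nr_to_scan = 128;
	int objects = 40;
	int found;

	do {
		found = walk_once(&nr_to_scan, objects);
		objects -= found;
	} while (found && nr_to_scan > 0);

	/* Leftover budget means the reclaimable set was exhausted first. */
	printf(nr_to_scan > 0 ? "terminate (-1)\n" : "keep scanning\n");
	return 0;
}
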