| author | Dave Chinner <dchinner@redhat.com> | 2010-07-19 18:07:02 -0400 |
|---|---|---|
| committer | Dave Chinner <david@fromorbit.com> | 2010-07-19 18:07:02 -0400 |
| commit | 70e60ce71516c3a9e882edb70a09f696a05961db (patch) | |
| tree | 324a400f2d21d071f6c462edbb6efa789125f6f1 /fs/xfs/linux-2.6/xfs_sync.c | |
| parent | 7f8275d0d660c146de6ee3017e1e2e594c49e820 (diff) | |
xfs: convert inode shrinker to per-filesystem contexts
Now that the shrinker passes us a context, wire up a shrinker context per
filesystem. This allows us to remove the global mount list and the
locking problems it introduced. It also means that a shrinker call
does not need to traverse clean filesystems before finding a
filesystem with reclaimable inodes. This significantly reduces
scanning overhead when many filesystems are present.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
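The core of the change is that each struct xfs_mount now embeds its own struct shrinker (m_inode_shrink), and the shrink callback recovers the owning mount with container_of() instead of walking a global mount list under a lock. The stand-alone C sketch below illustrates that embed-and-recover pattern with hypothetical names (mount_ctx, demo_shrink); it is only an illustration of the technique, not code from the patch or the kernel shrinker API.

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal user-space stand-ins for the kernel structures; names are illustrative only. */
struct shrinker {
	int (*shrink)(struct shrinker *shrink, int nr_to_scan);
	int seeks;
};

struct mount_ctx {
	const char	*name;		/* stands in for struct xfs_mount */
	int		reclaimable;
	struct shrinker	inode_shrink;	/* embedded, like m_inode_shrink */
};

/* Like the kernel's container_of(): recover the enclosing object from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int demo_shrink(struct shrinker *shrink, int nr_to_scan)
{
	/* No global list needed: the callback's context is the embedding mount. */
	struct mount_ctx *mp = container_of(shrink, struct mount_ctx, inode_shrink);

	printf("shrinking %s, nr_to_scan=%d\n", mp->name, nr_to_scan);
	return mp->reclaimable;
}

int main(void)
{
	struct mount_ctx mp = { .name = "fs0", .reclaimable = 42 };

	/* Per-filesystem registration, mirroring xfs_inode_shrinker_register(). */
	mp.inode_shrink.shrink = demo_shrink;
	mp.inode_shrink.seeks = 2;

	printf("reclaimable=%d\n", mp.inode_shrink.shrink(&mp.inode_shrink, 128));
	return 0;
}
```

Because the context travels with each registration, a reclaim pass only touches the filesystem whose shrinker fired, which is why the global xfs_mount_list and its rw_semaphore can be removed in the patch below.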
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
-rw-r--r-- | fs/xfs/linux-2.6/xfs_sync.c | 62
1 file changed, 14 insertions(+), 48 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index be375827af9..f433819611c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -828,14 +828,7 @@ xfs_reclaim_inodes(
 
 /*
  * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
 static int
 xfs_reclaim_inode_shrink(
 	struct shrinker	*shrink,
@@ -847,65 +840,38 @@ xfs_reclaim_inode_shrink(
 	xfs_agnumber_t	ag;
 	int		reclaimable = 0;
 
+	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
 	if (nr_to_scan) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 
-		down_read(&xfs_mount_list_lock);
-		list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-			xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+		xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
 					XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-			if (nr_to_scan <= 0)
-				break;
-		}
-		up_read(&xfs_mount_list_lock);
-	}
+		/* if we don't exhaust the scan, don't bother coming back */
+		if (nr_to_scan > 0)
+			return -1;
+	}
 
-	down_read(&xfs_mount_list_lock);
-	list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-		for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-			pag = xfs_perag_get(mp, ag);
-			reclaimable += pag->pag_ici_reclaimable;
-			xfs_perag_put(pag);
-		}
+	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+		pag = xfs_perag_get(mp, ag);
+		reclaimable += pag->pag_ici_reclaimable;
+		xfs_perag_put(pag);
 	}
-	up_read(&xfs_mount_list_lock);
 	return reclaimable;
 }
 
-static struct shrinker xfs_inode_shrinker = {
-	.shrink = xfs_reclaim_inode_shrink,
-	.seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
-	init_rwsem(&xfs_mount_list_lock);
-	register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
-	ASSERT(list_empty(&xfs_mount_list));
-	unregister_shrinker(&xfs_inode_shrinker);
-}
-
 void
 xfs_inode_shrinker_register(
 	struct xfs_mount	*mp)
 {
-	down_write(&xfs_mount_list_lock);
-	list_add_tail(&mp->m_mplist, &xfs_mount_list);
-	up_write(&xfs_mount_list_lock);
+	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+	register_shrinker(&mp->m_inode_shrink);
 }
 
 void
 xfs_inode_shrinker_unregister(
 	struct xfs_mount	*mp)
 {
-	down_write(&xfs_mount_list_lock);
-	list_del(&mp->m_mplist);
-	up_write(&xfs_mount_list_lock);
+	unregister_shrinker(&mp->m_inode_shrink);
 }