author		Dave Chinner <dchinner@redhat.com>	2011-07-08 00:14:46 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2011-07-20 20:47:42 -0400
commit		8daaa83145ef1f0a146680618328dbbd0fa76939 (patch)
tree		8ca49f03ed2fc4dada7283a6e109b13e3fb5501e	/fs/xfs/linux-2.6/xfs_sync.c
parent		8ab47664d51a69ea79fe70bb07ca80664f74f76b (diff)
xfs: make use of new shrinker callout for the inode cache
Convert the inode reclaim shrinker to use the new per-sb shrinker operations. This allows much bigger reclaim batches to be used, and allows the XFS inode cache to be shrunk in proportion with the VFS dentry and inode caches. This avoids the problem of the VFS caches being shrunk significantly before the XFS inode cache is shrunk, resulting in imbalances in the caches during reclaim.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
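The per-sb shrinker callouts referred to above are the nr_cached_objects/free_cached_objects hooks in struct super_operations; the xfs_super.c half of this commit, which is not shown in this diffstat-limited view, wires them to the two helpers introduced in this file. Below is a minimal sketch of that wiring, assuming the 3.1-era hook signatures, the XFS_M() superblock-to-mount accessor, and xfs_fs_* naming in line with the rest of xfs_super.c; it is an illustration, not a reproduction of the actual hunk.

static int
xfs_fs_nr_cached_objects(
	struct super_block	*sb)
{
	/* tell the VFS how many XFS inodes are currently reclaimable */
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static void
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	int			nr_to_scan)
{
	/* reclaim the batch of inodes the VFS asked this sb to free */
	xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	/* ... existing callouts ... */
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

With hooks of this shape, the VFS sizes a per-superblock scan from the count callout and then hands the XFS inode cache a reclaim batch proportional to the dentry and inode cache pressure, which is what lets the caches shrink in step.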
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.c	71
1 file changed, 27 insertions(+), 44 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 8ecad5ff9f9..9bd7e895a4e 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -179,6 +179,8 @@ restart:
 		if (error == EFSCORRUPTED)
 			break;
 
+		cond_resched();
+
 	} while (nr_found && !done);
 
 	if (skipped) {
@@ -986,6 +988,8 @@ restart:
 
 		*nr_to_scan -= XFS_LOOKUP_BATCH;
 
+		cond_resched();
+
 	} while (nr_found && !done && *nr_to_scan > 0);
 
 	if (trylock && !done)
@@ -1003,7 +1007,7 @@ restart:
 	 * ensure that when we get more reclaimers than AGs we block rather
 	 * than spin trying to execute reclaim.
 	 */
-	if (trylock && skipped && *nr_to_scan > 0) {
+	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
 		trylock = 0;
 		goto restart;
 	}
@@ -1021,44 +1025,38 @@ xfs_reclaim_inodes(
 }
 
 /*
- * Inode cache shrinker.
+ * Scan a certain number of inodes for reclaim.
  *
  * When called we make sure that there is a background (fast) inode reclaim in
- * progress, while we will throttle the speed of reclaim via doiing synchronous
+ * progress, while we will throttle the speed of reclaim via doing synchronous
  * reclaim of inodes. That means if we come across dirty inodes, we wait for
  * them to be cleaned, which we hope will not be very long due to the
  * background walker having already kicked the IO off on those dirty inodes.
  */
-static int
-xfs_reclaim_inode_shrink(
-	struct shrinker *shrink,
-	struct shrink_control *sc)
+void
+xfs_reclaim_inodes_nr(
+	struct xfs_mount *mp,
+	int nr_to_scan)
 {
-	struct xfs_mount *mp;
-	struct xfs_perag *pag;
-	xfs_agnumber_t ag;
-	int reclaimable;
-	int nr_to_scan = sc->nr_to_scan;
-	gfp_t gfp_mask = sc->gfp_mask;
-
-	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
-	if (nr_to_scan) {
-		/* kick background reclaimer and push the AIL */
-		xfs_syncd_queue_reclaim(mp);
-		xfs_ail_push_all(mp->m_ail);
+	/* kick background reclaimer and push the AIL */
+	xfs_syncd_queue_reclaim(mp);
+	xfs_ail_push_all(mp->m_ail);
 
-		if (!(gfp_mask & __GFP_FS))
-			return -1;
+	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
+}
 
-		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
-					&nr_to_scan);
-		/* terminate if we don't exhaust the scan */
-		if (nr_to_scan > 0)
-			return -1;
-	}
+/*
+ * Return the number of reclaimable inodes in the filesystem for
+ * the shrinker to determine how much to reclaim.
+ */
+int
+xfs_reclaim_inodes_count(
+	struct xfs_mount *mp)
+{
+	struct xfs_perag *pag;
+	xfs_agnumber_t ag = 0;
+	int reclaimable = 0;
 
-	reclaimable = 0;
-	ag = 0;
 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
 		ag = pag->pag_agno + 1;
 		reclaimable += pag->pag_ici_reclaimable;
@@ -1067,18 +1065,3 @@ xfs_reclaim_inode_shrink(
 	return reclaimable;
 }
 
-void
-xfs_inode_shrinker_register(
-	struct xfs_mount *mp)
-{
-	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
-	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
-	register_shrinker(&mp->m_inode_shrink);
-}
-
-void
-xfs_inode_shrinker_unregister(
-	struct xfs_mount *mp)
-{
-	unregister_shrinker(&mp->m_inode_shrink);
-}