author		Dave Chinner <dchinner@redhat.com>	2013-08-27 20:18:09 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:31 -0400
commit		1ab6c4997e04a00c50c6d786c2f046adc0d1f5de (patch)
tree		55561fc74c062a8ed0e03fe56f54d7db9cfd9e12
parent		35163417fb7a55a24b6b0ebb102e9991adf309aa (diff)
fs: convert fs shrinkers to new scan/count API
Convert the filesystem shrinkers to use the new API, and standardise some
of the behaviours of the shrinkers at the same time.  For example,
nr_to_scan means the number of objects to scan, not the number of objects
to free.

I refactored the CIFS idmap shrinker a little - it really needs to be
broken up into a shrinker per tree and keep an item count with the tree
root so that we don't need to walk the tree every time the shrinker needs
to count the number of objects in the tree (i.e. all the time under
memory pressure).

[glommer@openvz.org: fixes for ext4, ubifs, nfs, cifs and glock.  Fixes
 are needed mainly due to new code merged in the tree]
[assorted fixes folded in]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
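[Editor's note: for readers unfamiliar with the API change, the conversion
pattern applied across all of these filesystems looks roughly like the sketch
below. The foo_* names and the foo_nr_cached counter are illustrative
placeholders, not code from this patch; the real callbacks and the
SHRINK_STOP semantics are visible in the per-filesystem hunks that follow.]

/*
 * Old API: a single ->shrink() callback, overloaded by nr_to_scan.
 * nr_to_scan == 0 means "just report how many objects you have".
 */
static int foo_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;		/* magic "back off" value */
		foo_prune(sc->nr_to_scan);	/* freed count is discarded */
	}
	return atomic_read(&foo_nr_cached);	/* count query */
}

/*
 * New API: counting and scanning are separate callbacks.
 * ->scan_objects() returns the number of objects actually freed, or
 * SHRINK_STOP (instead of the old -1) when reclaim cannot make progress
 * in the current allocation context.
 */
static unsigned long foo_shrink_count(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	return atomic_read(&foo_nr_cached);
}

static unsigned long foo_shrink_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return foo_prune(sc->nr_to_scan);	/* objects freed, not remaining */
}

static struct shrinker foo_shrinker = {
	.count_objects	= foo_shrink_count,
	.scan_objects	= foo_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};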
-rw-r--r--	fs/ext4/extents_status.c	33
-rw-r--r--	fs/gfs2/glock.c	28
-rw-r--r--	fs/gfs2/main.c	3
-rw-r--r--	fs/gfs2/quota.c	16
-rw-r--r--	fs/gfs2/quota.h	6
-rw-r--r--	fs/mbcache.c	47
-rw-r--r--	fs/nfs/dir.c	14
-rw-r--r--	fs/nfs/internal.h	6
-rw-r--r--	fs/nfs/super.c	3
-rw-r--r--	fs/nfsd/nfscache.c	32
-rw-r--r--	fs/quota/dquot.c	29
-rw-r--r--	fs/ubifs/shrinker.c	29
-rw-r--r--	fs/ubifs/super.c	3
-rw-r--r--	fs/ubifs/ubifs.h	5
14 files changed, 158 insertions(+), 96 deletions(-)
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 2d1bdbe78c04..3981ff783950 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -931,13 +931,15 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 	struct ext4_inode_info *ei;
 	struct list_head *cur, *tmp;
 	LIST_HEAD(skipped);
-	int ret, nr_shrunk = 0;
+	int nr_shrunk = 0;
 	int retried = 0, skip_precached = 1, nr_skipped = 0;
 
 	spin_lock(&sbi->s_es_lru_lock);
 
 retry:
 	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
+		int shrunk;
+
 		/*
 		 * If we have already reclaimed all extents from extent
 		 * status tree, just stop the loop immediately.
@@ -964,13 +966,13 @@ retry:
 			continue;
 
 		write_lock(&ei->i_es_lock);
-		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
+		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
 		if (ei->i_es_lru_nr == 0)
 			list_del_init(&ei->i_es_lru);
 		write_unlock(&ei->i_es_lock);
 
-		nr_shrunk += ret;
-		nr_to_scan -= ret;
+		nr_shrunk += shrunk;
+		nr_to_scan -= shrunk;
 		if (nr_to_scan == 0)
 			break;
 	}
@@ -1007,7 +1009,20 @@ retry:
 	return nr_shrunk;
 }
 
-static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long ext4_es_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
+	unsigned long nr;
+	struct ext4_sb_info *sbi;
+
+	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
+	nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+	trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr);
+	return nr;
+}
+
+static unsigned long ext4_es_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct ext4_sb_info *sbi = container_of(shrink,
 				struct ext4_sb_info, s_es_shrinker);
@@ -1022,9 +1037,8 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 
 	nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
 
-	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
 	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
-	return ret;
+	return nr_shrunk;
 }
 
 void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
@@ -1032,7 +1046,8 @@ void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
 	INIT_LIST_HEAD(&sbi->s_es_lru);
 	spin_lock_init(&sbi->s_es_lru_lock);
 	sbi->s_es_last_sorted = 0;
-	sbi->s_es_shrinker.shrink = ext4_es_shrink;
+	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
+	sbi->s_es_shrinker.count_objects = ext4_es_count;
 	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&sbi->s_es_shrinker);
 }
@@ -1076,7 +1091,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
 	struct ext4_es_tree *tree = &ei->i_es_tree;
 	struct rb_node *node;
 	struct extent_status *es;
-	int nr_shrunk = 0;
+	unsigned long nr_shrunk = 0;
 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b782bb56085d..c2f41b4d00b9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1427,21 +1427,22 @@ __acquires(&lru_lock)
  * gfs2_dispose_glock_lru() above.
  */
 
-static void gfs2_scan_glock_lru(int nr)
+static long gfs2_scan_glock_lru(int nr)
 {
 	struct gfs2_glock *gl;
 	LIST_HEAD(skipped);
 	LIST_HEAD(dispose);
+	long freed = 0;
 
 	spin_lock(&lru_lock);
-	while(nr && !list_empty(&lru_list)) {
+	while ((nr-- >= 0) && !list_empty(&lru_list)) {
 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			list_move(&gl->gl_lru, &dispose);
 			atomic_dec(&lru_count);
-			nr--;
+			freed++;
 			continue;
 		}
 
@@ -1451,23 +1452,28 @@ static void gfs2_scan_glock_lru(int nr)
 	if (!list_empty(&dispose))
 		gfs2_dispose_glock_lru(&dispose);
 	spin_unlock(&lru_lock);
+
+	return freed;
 }
 
-static int gfs2_shrink_glock_memory(struct shrinker *shrink,
-				    struct shrink_control *sc)
+static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
+					    struct shrink_control *sc)
 {
-	if (sc->nr_to_scan) {
-		if (!(sc->gfp_mask & __GFP_FS))
-			return -1;
-		gfs2_scan_glock_lru(sc->nr_to_scan);
-	}
-
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
+	return gfs2_scan_glock_lru(sc->nr_to_scan);
+}
+
+static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
+					     struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_read(&lru_count));
 }
 
 static struct shrinker glock_shrinker = {
-	.shrink = gfs2_shrink_glock_memory,
 	.seeks = DEFAULT_SEEKS,
+	.count_objects = gfs2_glock_shrink_count,
+	.scan_objects = gfs2_glock_shrink_scan,
 };
 
 /**
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 7b0f5043cf24..351586e24e30 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -32,7 +32,8 @@
 struct workqueue_struct *gfs2_control_wq;
 
 static struct shrinker qd_shrinker = {
-	.shrink = gfs2_shrink_qd_memory,
+	.count_objects = gfs2_qd_shrink_count,
+	.scan_objects = gfs2_qd_shrink_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index d550a5d6a05f..db441359ee8c 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -75,17 +75,16 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
+unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
 	int nr_to_scan = sc->nr_to_scan;
-
-	if (nr_to_scan == 0)
-		goto out;
+	long freed = 0;
 
 	if (!(sc->gfp_mask & __GFP_FS))
-		return -1;
+		return SHRINK_STOP;
 
 	spin_lock(&qd_lru_lock);
 	while (nr_to_scan && !list_empty(&qd_lru_list)) {
@@ -110,10 +109,15 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 		spin_lock(&qd_lru_lock);
 		nr_to_scan--;
+		freed++;
 	}
 	spin_unlock(&qd_lru_lock);
+	return freed;
+}
 
-out:
+unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
 }
 
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 4f5e6e44ed83..0f64d9deb1b0 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -53,8 +53,10 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
-extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
-				 struct shrink_control *sc);
+extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+					  struct shrink_control *sc);
+extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+					 struct shrink_control *sc);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 5eb04767cb29..e519e45bf673 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -86,18 +86,6 @@ static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
-/*
- * What the mbcache registers as to get shrunk dynamically.
- */
-
-static int mb_cache_shrink_fn(struct shrinker *shrink,
-			      struct shrink_control *sc);
-
-static struct shrinker mb_cache_shrinker = {
-	.shrink = mb_cache_shrink_fn,
-	.seeks = DEFAULT_SEEKS,
-};
-
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 {
@@ -151,7 +139,7 @@ forget:
 
 
 /*
- * mb_cache_shrink_fn() memory pressure callback
+ * mb_cache_shrink_scan() memory pressure callback
  *
  * This function is called by the kernel memory management when memory
  * gets low.
@@ -159,17 +147,16 @@ forget:
  * @shrink: (ignored)
  * @sc: shrink_control passed from reclaim
  *
- * Returns the number of objects which are present in the cache.
+ * Returns the number of objects freed.
  */
-static int
-mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(free_list);
-	struct mb_cache *cache;
 	struct mb_cache_entry *entry, *tmp;
-	int count = 0;
 	int nr_to_scan = sc->nr_to_scan;
 	gfp_t gfp_mask = sc->gfp_mask;
+	unsigned long freed = 0;
 
 	mb_debug("trying to free %d entries", nr_to_scan);
 	spin_lock(&mb_cache_spinlock);
@@ -179,19 +166,37 @@ mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
 				struct mb_cache_entry, e_lru_list);
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
+		freed++;
+	}
+	spin_unlock(&mb_cache_spinlock);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
+	return freed;
+}
+
+static unsigned long
+mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct mb_cache *cache;
+	unsigned long count = 0;
+
+	spin_lock(&mb_cache_spinlock);
 	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
 		mb_debug("cache %s (%d)", cache->c_name,
 			 atomic_read(&cache->c_entry_count));
 		count += atomic_read(&cache->c_entry_count);
 	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
-		__mb_cache_entry_forget(entry, gfp_mask);
-	}
+
 	return vfs_pressure_ratio(count);
 }
 
+static struct shrinker mb_cache_shrinker = {
+	.count_objects = mb_cache_shrink_count,
+	.scan_objects = mb_cache_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
 
 /*
  * mb_cache_create()  create a new cache
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 813ef2571545..de434f309af0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2006,17 +2006,18 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-int nfs_access_cache_shrinker(struct shrinker *shrink,
-			      struct shrink_control *sc)
+unsigned long
+nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
 	int nr_to_scan = sc->nr_to_scan;
 	gfp_t gfp_mask = sc->gfp_mask;
+	long freed = 0;
 
 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
-		return (nr_to_scan == 0) ? 0 : -1;
+		return SHRINK_STOP;
 
 	spin_lock(&nfs_access_lru_lock);
 	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
@@ -2032,6 +2033,7 @@ int nfs_access_cache_shrinker(struct shrinker *shrink,
 				struct nfs_access_entry, lru);
 		list_move(&cache->lru, &head);
 		rb_erase(&cache->rb_node, &nfsi->access_cache);
+		freed++;
 		if (!list_empty(&nfsi->access_cache_entry_lru))
 			list_move_tail(&nfsi->access_cache_inode_lru,
 					&nfs_access_lru_list);
@@ -2046,6 +2048,12 @@ remove_lru_entry:
 	}
 	spin_unlock(&nfs_access_lru_lock);
 	nfs_access_free_list(&head);
+	return freed;
+}
+
+unsigned long
+nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
 }
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index d388302c005f..38da8c2b81ac 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -273,8 +273,10 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
 		const char *ip_addr);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(struct shrinker *shrink,
-					struct shrink_control *sc);
+extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
+					    struct shrink_control *sc);
+extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
+					   struct shrink_control *sc);
 struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
 int nfs_create(struct inode *, struct dentry *, umode_t, bool);
 int nfs_mkdir(struct inode *, struct dentry *, umode_t);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5793f24613c8..a03b9c6f9489 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -360,7 +360,8 @@ static void unregister_nfs4_fs(void)
 #endif
 
 static struct shrinker acl_shrinker = {
-	.shrink = nfs_access_cache_shrinker,
+	.count_objects = nfs_access_cache_count,
+	.scan_objects = nfs_access_cache_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index e76244edd748..9186c7ce0b14 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -59,11 +59,14 @@ static unsigned int longest_chain_cachesize;
 
 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 static void cache_cleaner_func(struct work_struct *unused);
-static int nfsd_reply_cache_shrink(struct shrinker *shrink,
-				   struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
+					    struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
+					   struct shrink_control *sc);
 
 static struct shrinker nfsd_reply_cache_shrinker = {
-	.shrink = nfsd_reply_cache_shrink,
+	.scan_objects = nfsd_reply_cache_scan,
+	.count_objects = nfsd_reply_cache_count,
 	.seeks = 1,
 };
 
@@ -232,16 +235,18 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
  * Also prune the oldest ones when the total exceeds the max number of entries.
  */
-static void
+static long
 prune_cache_entries(void)
 {
 	struct svc_cacherep *rp, *tmp;
+	long freed = 0;
 
 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
 		if (!nfsd_cache_entry_expired(rp) &&
 		    num_drc_entries <= max_drc_entries)
 			break;
 		nfsd_reply_cache_free_locked(rp);
+		freed++;
 	}
 
 	/*
@@ -254,6 +259,7 @@ prune_cache_entries(void)
 		cancel_delayed_work(&cache_cleaner);
 	else
 		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+	return freed;
 }
 
 static void
@@ -264,20 +270,28 @@ cache_cleaner_func(struct work_struct *unused)
 	spin_unlock(&cache_lock);
 }
 
-static int
-nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned int num;
+	unsigned long num;
 
 	spin_lock(&cache_lock);
-	if (sc->nr_to_scan)
-		prune_cache_entries();
 	num = num_drc_entries;
 	spin_unlock(&cache_lock);
 
 	return num;
 }
 
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long freed;
+
+	spin_lock(&cache_lock);
+	freed = prune_cache_entries();
+	spin_unlock(&cache_lock);
+	return freed;
+}
 /*
  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
  */
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 13eee847605c..831d49a4111f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -687,44 +687,37 @@ int dquot_quota_sync(struct super_block *sb, int type)
 }
 EXPORT_SYMBOL(dquot_quota_sync);
 
-/* Free unused dquots from cache */
-static void prune_dqcache(int count)
+static unsigned long
+dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct list_head *head;
 	struct dquot *dquot;
+	unsigned long freed = 0;
 
 	head = free_dquots.prev;
-	while (head != &free_dquots && count) {
+	while (head != &free_dquots && sc->nr_to_scan) {
 		dquot = list_entry(head, struct dquot, dq_free);
 		remove_dquot_hash(dquot);
 		remove_free_dquot(dquot);
 		remove_inuse(dquot);
 		do_destroy_dquot(dquot);
-		count--;
+		sc->nr_to_scan--;
+		freed++;
 		head = free_dquots.prev;
 	}
+	return freed;
 }
 
-/*
- * This is called from kswapd when we think we need some
- * more memory
- */
-static int shrink_dqcache_memory(struct shrinker *shrink,
-				 struct shrink_control *sc)
+static unsigned long
+dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	int nr = sc->nr_to_scan;
-
-	if (nr) {
-		spin_lock(&dq_list_lock);
-		prune_dqcache(nr);
-		spin_unlock(&dq_list_lock);
-	}
 	return vfs_pressure_ratio(
 	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
 }
 
 static struct shrinker dqcache_shrinker = {
-	.shrink = shrink_dqcache_memory,
+	.count_objects = dqcache_shrink_count,
+	.scan_objects = dqcache_shrink_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 9e1d05666fed..f35135e28e96 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -277,18 +277,25 @@ static int kick_a_thread(void)
 	return 0;
 }
 
-int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
+unsigned long ubifs_shrink_count(struct shrinker *shrink,
+				 struct shrink_control *sc)
 {
-	int nr = sc->nr_to_scan;
-	int freed, contention = 0;
 	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
 
-	if (nr == 0)
-		/*
-		 * Due to the way UBIFS updates the clean znode counter it may
-		 * temporarily be negative.
-		 */
-		return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
+	/*
+	 * Due to the way UBIFS updates the clean znode counter it may
+	 * temporarily be negative.
+	 */
+	return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
+}
+
+unsigned long ubifs_shrink_scan(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	unsigned long nr = sc->nr_to_scan;
+	int contention = 0;
+	unsigned long freed;
+	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
 
 	if (!clean_zn_cnt) {
 		/*
@@ -316,10 +323,10 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
 
 	if (!freed && contention) {
 		dbg_tnc("freed nothing, but contention");
-		return -1;
+		return SHRINK_STOP;
 	}
 
 out:
-	dbg_tnc("%d znodes were freed, requested %d", freed, nr);
+	dbg_tnc("%lu znodes were freed, requested %lu", freed, nr);
 	return freed;
 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 879b9976c12b..3e4aa7281e04 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -49,7 +49,8 @@ struct kmem_cache *ubifs_inode_slab;
 
 /* UBIFS TNC shrinker description */
 static struct shrinker ubifs_shrinker_info = {
-	.shrink = ubifs_shrinker,
+	.scan_objects = ubifs_shrink_scan,
+	.count_objects = ubifs_shrink_count,
 	.seeks = DEFAULT_SEEKS,
 };
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index b2babce4d70f..e8c8cfe1435c 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1624,7 +1624,10 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
 int ubifs_tnc_end_commit(struct ubifs_info *c);
 
 /* shrinker.c */
-int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc);
+unsigned long ubifs_shrink_scan(struct shrinker *shrink,
+				struct shrink_control *sc);
+unsigned long ubifs_shrink_count(struct shrinker *shrink,
+				 struct shrink_control *sc);
 
 /* commit.c */
 int ubifs_bg_thread(void *info);