author    | Dave Chinner <dchinner@redhat.com>  | 2013-08-27 20:18:09 -0400
committer | Al Viro <viro@zeniv.linux.org.uk>   | 2013-09-10 18:56:31 -0400
commit    | 1ab6c4997e04a00c50c6d786c2f046adc0d1f5de (patch)
tree      | 55561fc74c062a8ed0e03fe56f54d7db9cfd9e12 /fs/nfsd/nfscache.c
parent    | 35163417fb7a55a24b6b0ebb102e9991adf309aa (diff)
fs: convert fs shrinkers to new scan/count API
Convert the filesystem shrinkers to use the new API, and standardise some
of the behaviours of the shrinkers at the same time. For example,
nr_to_scan means the number of objects to scan, not the number of objects
to free.
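
For reference, a minimal sketch of the shape of the new API (the demo_* names and objects below are illustrative, not part of this patch): a shrinker now provides a count_objects callback that only reports how many objects are reclaimable, and a scan_objects callback that walks up to sc->nr_to_scan objects and returns how many it actually freed.

#include <linux/list.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct list_head lru;
};

static LIST_HEAD(demo_lru);
static unsigned long demo_nr_objects;	/* maintained at insert/remove time */
static DEFINE_SPINLOCK(demo_lock);

/* count_objects: report how many objects could be reclaimed, nothing else */
static unsigned long
demo_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return demo_nr_objects;
}

/* scan_objects: free up to sc->nr_to_scan objects, return the number freed */
static unsigned long
demo_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct demo_obj *obj, *tmp;
	unsigned long freed = 0;

	spin_lock(&demo_lock);
	list_for_each_entry_safe(obj, tmp, &demo_lru, lru) {
		if (freed >= sc->nr_to_scan)
			break;
		list_del(&obj->lru);
		demo_nr_objects--;
		kfree(obj);
		freed++;
	}
	spin_unlock(&demo_lock);
	return freed;	/* or SHRINK_STOP when no progress can be made */
}

/* registered at init time with register_shrinker(&demo_shrinker) */
static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

The old .shrink callback conflated both jobs (prune when nr_to_scan is non-zero, return a count otherwise), which is exactly the pattern the nfsd hunks below unpick.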
I refactored the CIFS idmap shrinker a little: it really needs to be broken up into one shrinker per tree, with an item count kept at the tree root, so that we don't have to walk the entire tree every time the shrinker needs to count the objects in it (i.e. all the time under memory pressure).
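
A sketch of that suggestion (the idmap_tree type and its fields are hypothetical, not CIFS's actual structures): keeping a counter next to the tree root makes count_objects O(1) instead of a full tree walk.

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

/* Hypothetical per-tree state; the real CIFS idmap caches look different. */
struct idmap_tree {
	struct rb_root	root;
	unsigned long	nr_items;	/* bumped on insert, dropped on erase */
	spinlock_t	lock;
	struct shrinker	shrinker;	/* one shrinker per tree */
};

static unsigned long
idmap_tree_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct idmap_tree *tree =
		container_of(shrink, struct idmap_tree, shrinker);

	/* O(1): no rb-tree walk just to answer "how many entries?" */
	return tree->nr_items;
}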
[glommer@openvz.org: fixes for ext4, ubifs, nfs, cifs and glock. Fixes are needed mainly due to new code merged in the tree]
[assorted fixes folded in]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/nfsd/nfscache.c')
-rw-r--r-- | fs/nfsd/nfscache.c | 32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index e76244edd748..9186c7ce0b14 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -59,11 +59,14 @@ static unsigned int longest_chain_cachesize;
 
 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 static void cache_cleaner_func(struct work_struct *unused);
-static int nfsd_reply_cache_shrink(struct shrinker *shrink,
-				struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
+				struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
+				struct shrink_control *sc);
 
 static struct shrinker nfsd_reply_cache_shrinker = {
-	.shrink = nfsd_reply_cache_shrink,
+	.scan_objects = nfsd_reply_cache_scan,
+	.count_objects = nfsd_reply_cache_count,
 	.seeks = 1,
 };
 
@@ -232,16 +235,18 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
  * Also prune the oldest ones when the total exceeds the max number of entries.
  */
-static void
+static long
 prune_cache_entries(void)
 {
 	struct svc_cacherep *rp, *tmp;
+	long freed = 0;
 
 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
 		if (!nfsd_cache_entry_expired(rp) &&
 		    num_drc_entries <= max_drc_entries)
 			break;
 		nfsd_reply_cache_free_locked(rp);
+		freed++;
 	}
 
 	/*
@@ -254,6 +259,7 @@ prune_cache_entries(void)
 		cancel_delayed_work(&cache_cleaner);
 	else
 		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+	return freed;
 }
 
 static void
@@ -264,20 +270,28 @@ cache_cleaner_func(struct work_struct *unused)
 	spin_unlock(&cache_lock);
 }
 
-static int
-nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned int num;
+	unsigned long num;
 
 	spin_lock(&cache_lock);
-	if (sc->nr_to_scan)
-		prune_cache_entries();
 	num = num_drc_entries;
 	spin_unlock(&cache_lock);
 
 	return num;
 }
 
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long freed;
+
+	spin_lock(&cache_lock);
+	freed = prune_cache_entries();
+	spin_unlock(&cache_lock);
+	return freed;
+}
 /*
  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
  */