aboutsummaryrefslogtreecommitdiffstats
path: root/fs/nfsd
diff options
context:
space:
mode:
authorJeff Layton <jlayton@poochiereds.net>2015-11-04 11:02:29 -0500
committerJ. Bruce Fields <bfields@redhat.com>2015-11-10 09:25:51 -0500
commit3e80dbcda7f3e1e349a779d7a14c0e08677c39fa (patch)
treec64a688e9642457c5a727cd63d42b698ffc720b7 /fs/nfsd
parentea833f5de3ab49a0aac79a16633fd510390b83a6 (diff)
nfsd: remove recurring workqueue job to clean DRC
We have a shrinker, we clean out the cache when nfsd is shut down, and prune the chains on each request. A recurring workqueue job seems like unnecessary overhead. Just remove it.

Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'fs/nfsd')
-rw-r--r--fs/nfsd/nfscache.c26
1 files changed, 0 insertions, 26 deletions
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 116940c739e1..54cde9a5864e 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -63,7 +63,6 @@ static unsigned int longest_chain;
 static unsigned int longest_chain_cachesize;
 
 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
-static void cache_cleaner_func(struct work_struct *unused);
 static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
 					    struct shrink_control *sc);
 static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
@@ -76,13 +75,6 @@ static struct shrinker nfsd_reply_cache_shrinker = {
 };
 
 /*
- * locking for the reply cache:
- * A cache entry is "single use" if c_state == RC_INPROG
- * Otherwise, it when accessing _prev or _next, the lock must be held.
- */
-static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
-
-/*
  * Put a cap on the size of the DRC based on the amount of available
  * low memory in the machine.
  *
@@ -203,7 +195,6 @@ void nfsd_reply_cache_shutdown(void)
 	unsigned int i;
 
 	unregister_shrinker(&nfsd_reply_cache_shrinker);
-	cancel_delayed_work_sync(&cache_cleaner);
 
 	for (i = 0; i < drc_hashsize; i++) {
 		struct list_head *head = &drc_hashtbl[i].lru_head;
@@ -230,7 +221,6 @@ lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
 	rp->c_timestamp = jiffies;
 	list_move_tail(&rp->c_lru, &b->lru_head);
-	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 static long
@@ -264,7 +254,6 @@ prune_cache_entries(void)
 {
 	unsigned int i;
 	long freed = 0;
-	bool cancel = true;
 
 	for (i = 0; i < drc_hashsize; i++) {
 		struct nfsd_drc_bucket *b = &drc_hashtbl[i];
@@ -273,26 +262,11 @@ prune_cache_entries(void)
 			continue;
 		spin_lock(&b->cache_lock);
 		freed += prune_bucket(b);
-		if (!list_empty(&b->lru_head))
-			cancel = false;
 		spin_unlock(&b->cache_lock);
 	}
-
-	/*
-	 * Conditionally rearm the job to run in RC_EXPIRE since we just
-	 * ran the pruner.
-	 */
-	if (!cancel)
-		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
 	return freed;
 }
 
-static void
-cache_cleaner_func(struct work_struct *unused)
-{
-	prune_cache_entries();
-}
-
 static unsigned long
 nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {