author      Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 18:01:38 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 18:01:38 -0400
commit      26935fb06ee88f1188789807687c03041f3c70d9 (patch)
tree        381c487716540b52348d78bee6555f8fa61d77ef /fs/nfsd
parent      3cc69b638e11bfda5d013c2b75b60934aa0e88a1 (diff)
parent      bf2ba3bc185269eca274b458aac46ba1ad7c1121 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 4 from Al Viro:
 "list_lru pile, mostly"

This came out of Andrew's pile, Al ended up doing the merge work so that
Andrew didn't have to.

Additionally, a few fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (42 commits)
  super: fix for destroy lrus
  list_lru: dynamically adjust node arrays
  shrinker: Kill old ->shrink API.
  shrinker: convert remaining shrinkers to count/scan API
  staging/lustre/libcfs: cleanup linux-mem.h
  staging/lustre/ptlrpc: convert to new shrinker API
  staging/lustre/obdclass: convert lu_object shrinker to count/scan API
  staging/lustre/ldlm: convert to shrinkers to count/scan API
  hugepage: convert huge zero page shrinker to new shrinker API
  i915: bail out earlier when shrinker cannot acquire mutex
  drivers: convert shrinkers to new count/scan API
  fs: convert fs shrinkers to new scan/count API
  xfs: fix dquot isolation hang
  xfs-convert-dquot-cache-lru-to-list_lru-fix
  xfs: convert dquot cache lru to list_lru
  xfs: rework buffer dispose list tracking
  xfs-convert-buftarg-lru-to-generic-code-fix
  xfs: convert buftarg LRU to generic code
  fs: convert inode and dentry shrinking to be node aware
  vmscan: per-node deferred work
  ...
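For context, the shrinker conversion this pile carries replaces the single ->shrink callback with a count_objects/scan_objects pair: count reports how many objects could be freed, scan actually frees up to sc->nr_to_scan of them. A minimal sketch of the new-style API, assuming the 3.12-era <linux/shrinker.h> definitions; the my_cache_* names, my_nr_objects, my_lock and my_prune() are illustrative only, not from this merge:

/*
 * Sketch of a count/scan shrinker; all my_* identifiers are hypothetical.
 */
static unsigned long
my_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/* Only report the number of freeable objects; do no freeing here. */
	return my_nr_objects;
}

static unsigned long
my_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed;

	/* Free up to sc->nr_to_scan objects and return how many went away. */
	if (!spin_trylock(&my_lock))
		return SHRINK_STOP;	/* tell the VM to back off for now */
	freed = my_prune(sc->nr_to_scan);
	spin_unlock(&my_lock);
	return freed;
}

static struct shrinker my_cache_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

The nfsd hunks below are one instance of exactly this split.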
Diffstat (limited to 'fs/nfsd')
-rw-r--r--	fs/nfsd/nfscache.c	32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index e76244edd748..9186c7ce0b14 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -59,11 +59,14 @@ static unsigned int	longest_chain_cachesize;
 
 static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 static void	cache_cleaner_func(struct work_struct *unused);
-static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
-				struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
+				struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
+				struct shrink_control *sc);
 
 static struct shrinker nfsd_reply_cache_shrinker = {
-	.shrink	= nfsd_reply_cache_shrink,
+	.scan_objects = nfsd_reply_cache_scan,
+	.count_objects = nfsd_reply_cache_count,
 	.seeks	= 1,
 };
 
@@ -232,16 +235,18 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
  * Also prune the oldest ones when the total exceeds the max number of entries.
  */
-static void
+static long
 prune_cache_entries(void)
 {
 	struct svc_cacherep	*rp, *tmp;
+	long freed = 0;
 
 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
 		if (!nfsd_cache_entry_expired(rp) &&
 		    num_drc_entries <= max_drc_entries)
 			break;
 		nfsd_reply_cache_free_locked(rp);
+		freed++;
 	}
 
 	/*
@@ -254,6 +259,7 @@ prune_cache_entries(void)
 		cancel_delayed_work(&cache_cleaner);
 	else
 		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+	return freed;
 }
 
 static void
@@ -264,20 +270,28 @@ cache_cleaner_func(struct work_struct *unused)
 	spin_unlock(&cache_lock);
 }
 
-static int
-nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned int	num;
+	unsigned long num;
 
 	spin_lock(&cache_lock);
-	if (sc->nr_to_scan)
-		prune_cache_entries();
 	num = num_drc_entries;
 	spin_unlock(&cache_lock);
 
 	return num;
 }
 
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long freed;
+
+	spin_lock(&cache_lock);
+	freed = prune_cache_entries();
+	spin_unlock(&cache_lock);
+	return freed;
+}
 /*
  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
  */
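The registration side of this shrinker is untouched by the patch: nfsd_reply_cache_init() in the same file hooks nfsd_reply_cache_shrinker into the VM, and with the count/scan pair above that call stays the same. A rough, abbreviated sketch assuming the 3.12-era register_shrinker() (the surrounding setup is elided here, not reproduced from the file):

/* Sketch of the unchanged registration path; setup details elided. */
int nfsd_reply_cache_init(void)
{
	register_shrinker(&nfsd_reply_cache_shrinker);
	/* ... hashtable and LRU initialisation omitted ... */
	return 0;
}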