Diffstat (limited to 'fs/nfsd/nfscache.c')
 fs/nfsd/nfscache.c | 214
 1 file changed, 113 insertions(+), 101 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index ff9567633245..122f69185ef5 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -27,8 +27,12 @@
  */
 #define TARGET_BUCKET_SIZE	64
 
-static struct hlist_head *	cache_hash;
-static struct list_head 	lru_head;
+struct nfsd_drc_bucket {
+	struct list_head lru_head;
+	spinlock_t cache_lock;
+};
+
+static struct nfsd_drc_bucket	*drc_hashtbl;
 static struct kmem_cache	*drc_slab;
 
 /* max number of entries allowed in the cache */
@@ -36,6 +40,7 @@ static unsigned int	max_drc_entries;
 
 /* number of significant bits in the hash value */
 static unsigned int	maskbits;
+static unsigned int	drc_hashsize;
 
 /*
  * Stats and other tracking of on the duplicate reply cache. All of these and
@@ -43,7 +48,7 @@ static unsigned int	maskbits;
  */
 
 /* total number of entries */
-static unsigned int	num_drc_entries;
+static atomic_t		num_drc_entries;
 
 /* cache misses due only to checksum comparison failures */
 static unsigned int	payload_misses;
@@ -75,7 +80,6 @@ static struct shrinker nfsd_reply_cache_shrinker = {
  * A cache entry is "single use" if c_state == RC_INPROG
  * Otherwise, it when accessing _prev or _next, the lock must be held.
  */
-static DEFINE_SPINLOCK(cache_lock);
 static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
 /*
@@ -116,6 +120,12 @@ nfsd_hashsize(unsigned int limit)
 	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
 }
 
+static u32
+nfsd_cache_hash(__be32 xid)
+{
+	return hash_32(be32_to_cpu(xid), maskbits);
+}
+
 static struct svc_cacherep *
 nfsd_reply_cache_alloc(void)
 {
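The nfsd_cache_hash() helper added above is what turns an RPC XID into a bucket index: convert the XID to host order, then keep the top maskbits bits of a 32-bit hash. A rough userspace sketch of that bucket selection follows; the multiplier is illustrative only, since the exact constant behind the kernel's hash_32() differs across kernel versions.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the kernel's hash_32(): multiply by a 32-bit constant
 * and keep the top 'bits' bits. The constant here is for illustration. */
static uint32_t toy_hash_32(uint32_t val, unsigned int bits)
{
	return (uint32_t)(val * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
	unsigned int maskbits = 6;      /* e.g. 64 buckets */
	uint32_t xid = 0x12345678;      /* XID already converted to host order */

	/* Same shape as nfsd_cache_hash(): the result indexes drc_hashtbl[]. */
	printf("bucket = %u\n", toy_hash_32(xid, maskbits));
	return 0;
}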
@@ -126,7 +136,6 @@ nfsd_reply_cache_alloc(void)
 		rp->c_state = RC_UNUSED;
 		rp->c_type = RC_NOCACHE;
 		INIT_LIST_HEAD(&rp->c_lru);
-		INIT_HLIST_NODE(&rp->c_hash);
 	}
 	return rp;
 }
@@ -138,29 +147,27 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 		drc_mem_usage -= rp->c_replvec.iov_len;
 		kfree(rp->c_replvec.iov_base);
 	}
-	if (!hlist_unhashed(&rp->c_hash))
-		hlist_del(&rp->c_hash);
 	list_del(&rp->c_lru);
-	--num_drc_entries;
+	atomic_dec(&num_drc_entries);
 	drc_mem_usage -= sizeof(*rp);
 	kmem_cache_free(drc_slab, rp);
 }
 
 static void
-nfsd_reply_cache_free(struct svc_cacherep *rp)
+nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
-	spin_lock(&cache_lock);
+	spin_lock(&b->cache_lock);
 	nfsd_reply_cache_free_locked(rp);
-	spin_unlock(&cache_lock);
+	spin_unlock(&b->cache_lock);
 }
 
 int nfsd_reply_cache_init(void)
 {
 	unsigned int hashsize;
+	unsigned int i;
 
-	INIT_LIST_HEAD(&lru_head);
 	max_drc_entries = nfsd_cache_size_limit();
-	num_drc_entries = 0;
+	atomic_set(&num_drc_entries, 0);
 	hashsize = nfsd_hashsize(max_drc_entries);
 	maskbits = ilog2(hashsize);
 
@@ -170,9 +177,14 @@ int nfsd_reply_cache_init(void)
 	if (!drc_slab)
 		goto out_nomem;
 
-	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
-	if (!cache_hash)
+	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
+	if (!drc_hashtbl)
 		goto out_nomem;
+	for (i = 0; i < hashsize; i++) {
+		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
+		spin_lock_init(&drc_hashtbl[i].cache_lock);
+	}
+	drc_hashsize = hashsize;
 
 	return 0;
 out_nomem:
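For a sense of the table size being allocated here: nfsd_hashsize() (shown earlier) computes roundup_pow_of_two(limit / TARGET_BUCKET_SIZE) and maskbits is ilog2(hashsize), so a hypothetical cache limit of 4096 entries would give 4096 / 64 = 64 buckets and maskbits = 6. A small worked example, with the limit assumed purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int limit = 4096;            /* assumed; really nfsd_cache_size_limit() */
	unsigned int target_bucket_size = 64; /* TARGET_BUCKET_SIZE */
	unsigned int hashsize = 1;
	unsigned int maskbits = 0;

	/* roundup_pow_of_two(limit / target_bucket_size) */
	while (hashsize < limit / target_bucket_size)
		hashsize <<= 1;

	/* ilog2(hashsize) */
	for (unsigned int v = hashsize; v > 1; v >>= 1)
		maskbits++;

	printf("hashsize=%u maskbits=%u\n", hashsize, maskbits); /* prints 64 and 6 */
	return 0;
}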
@@ -184,17 +196,22 @@ out_nomem:
 void nfsd_reply_cache_shutdown(void)
 {
 	struct svc_cacherep	*rp;
+	unsigned int i;
 
 	unregister_shrinker(&nfsd_reply_cache_shrinker);
 	cancel_delayed_work_sync(&cache_cleaner);
 
-	while (!list_empty(&lru_head)) {
-		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
-		nfsd_reply_cache_free_locked(rp);
+	for (i = 0; i < drc_hashsize; i++) {
+		struct list_head *head = &drc_hashtbl[i].lru_head;
+		while (!list_empty(head)) {
+			rp = list_first_entry(head, struct svc_cacherep, c_lru);
+			nfsd_reply_cache_free_locked(rp);
+		}
 	}
 
-	kfree (cache_hash);
-	cache_hash = NULL;
+	kfree (drc_hashtbl);
+	drc_hashtbl = NULL;
+	drc_hashsize = 0;
 
 	if (drc_slab) {
 		kmem_cache_destroy(drc_slab);
@@ -207,61 +224,63 @@ void nfsd_reply_cache_shutdown(void)
  * not already scheduled.
  */
 static void
-lru_put_end(struct svc_cacherep *rp)
+lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
 	rp->c_timestamp = jiffies;
-	list_move_tail(&rp->c_lru, &lru_head);
+	list_move_tail(&rp->c_lru, &b->lru_head);
 	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
-/*
- * Move a cache entry from one hash list to another
- */
-static void
-hash_refile(struct svc_cacherep *rp)
-{
-	hlist_del_init(&rp->c_hash);
-	/*
-	 * No point in byte swapping c_xid since we're just using it to pick
-	 * a hash bucket.
-	 */
-	hlist_add_head(&rp->c_hash, cache_hash +
-			hash_32((__force u32)rp->c_xid, maskbits));
-}
-
-/*
- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
- * Also prune the oldest ones when the total exceeds the max number of entries.
- */
 static long
-prune_cache_entries(void)
+prune_bucket(struct nfsd_drc_bucket *b)
 {
 	struct svc_cacherep *rp, *tmp;
 	long freed = 0;
 
-	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
 		/*
 		 * Don't free entries attached to calls that are still
 		 * in-progress, but do keep scanning the list.
 		 */
 		if (rp->c_state == RC_INPROG)
 			continue;
-		if (num_drc_entries <= max_drc_entries &&
+		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
 		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
 			break;
 		nfsd_reply_cache_free_locked(rp);
 		freed++;
 	}
+	return freed;
+}
+
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static long
+prune_cache_entries(void)
+{
+	unsigned int i;
+	long freed = 0;
+	bool cancel = true;
+
+	for (i = 0; i < drc_hashsize; i++) {
+		struct nfsd_drc_bucket *b = &drc_hashtbl[i];
+
+		if (list_empty(&b->lru_head))
+			continue;
+		spin_lock(&b->cache_lock);
+		freed += prune_bucket(b);
+		if (!list_empty(&b->lru_head))
+			cancel = false;
+		spin_unlock(&b->cache_lock);
+	}
 
 	/*
-	 * Conditionally rearm the job. If we cleaned out the list, then
-	 * cancel any pending run (since there won't be any work to do).
-	 * Otherwise, we rearm the job or modify the existing one to run in
-	 * RC_EXPIRE since we just ran the pruner.
+	 * Conditionally rearm the job to run in RC_EXPIRE since we just
+	 * ran the pruner.
 	 */
-	if (list_empty(&lru_head))
-		cancel_delayed_work(&cache_cleaner);
-	else
+	if (!cancel)
 		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
 	return freed;
 }
@@ -269,32 +288,19 @@ prune_cache_entries(void)
 static void
 cache_cleaner_func(struct work_struct *unused)
 {
-	spin_lock(&cache_lock);
 	prune_cache_entries();
-	spin_unlock(&cache_lock);
 }
 
 static unsigned long
 nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned long num;
-
-	spin_lock(&cache_lock);
-	num = num_drc_entries;
-	spin_unlock(&cache_lock);
-
-	return num;
+	return atomic_read(&num_drc_entries);
 }
 
 static unsigned long
 nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned long freed;
-
-	spin_lock(&cache_lock);
-	freed = prune_cache_entries();
-	spin_unlock(&cache_lock);
-	return freed;
+	return prune_cache_entries();
 }
 /*
  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
@@ -332,20 +338,24 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
 static bool
 nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
 {
-	/* Check RPC header info first */
-	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
-	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
-	    rqstp->rq_arg.len != rp->c_len ||
-	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
-	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
+	/* Check RPC XID first */
+	if (rqstp->rq_xid != rp->c_xid)
 		return false;
-
 	/* compare checksum of NFS data */
 	if (csum != rp->c_csum) {
 		++payload_misses;
 		return false;
 	}
 
+	/* Other discriminators */
+	if (rqstp->rq_proc != rp->c_proc ||
+	    rqstp->rq_prot != rp->c_prot ||
+	    rqstp->rq_vers != rp->c_vers ||
+	    rqstp->rq_arg.len != rp->c_len ||
+	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
+	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
+		return false;
+
 	return true;
 }
 
@@ -355,18 +365,14 @@ nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
  * NULL on failure.
  */
 static struct svc_cacherep *
-nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
+nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
+		__wsum csum)
 {
 	struct svc_cacherep	*rp, *ret = NULL;
-	struct hlist_head 	*rh;
+	struct list_head 	*rh = &b->lru_head;
 	unsigned int		entries = 0;
 
-	/*
-	 * No point in byte swapping rq_xid since we're just using it to pick
-	 * a hash bucket.
-	 */
-	rh = &cache_hash[hash_32((__force u32)rqstp->rq_xid, maskbits)];
-	hlist_for_each_entry(rp, rh, c_hash) {
+	list_for_each_entry(rp, rh, c_lru) {
 		++entries;
 		if (nfsd_cache_match(rqstp, csum, rp)) {
 			ret = rp;
@@ -377,11 +383,12 @@ nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
 	/* tally hash chain length stats */
 	if (entries > longest_chain) {
 		longest_chain = entries;
-		longest_chain_cachesize = num_drc_entries;
+		longest_chain_cachesize = atomic_read(&num_drc_entries);
 	} else if (entries == longest_chain) {
 		/* prefer to keep the smallest cachesize possible here */
-		longest_chain_cachesize = min(longest_chain_cachesize,
-						num_drc_entries);
+		longest_chain_cachesize = min_t(unsigned int,
+						longest_chain_cachesize,
+						atomic_read(&num_drc_entries));
 	}
 
 	return ret;
@@ -403,6 +410,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 				vers = rqstp->rq_vers,
 				proc = rqstp->rq_proc;
 	__wsum			csum;
+	u32 hash = nfsd_cache_hash(xid);
+	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
 	unsigned long		age;
 	int type = rqstp->rq_cachetype;
 	int rtn = RC_DOIT;
@@ -420,16 +429,16 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	 * preallocate an entry.
 	 */
 	rp = nfsd_reply_cache_alloc();
-	spin_lock(&cache_lock);
+	spin_lock(&b->cache_lock);
 	if (likely(rp)) {
-		++num_drc_entries;
+		atomic_inc(&num_drc_entries);
 		drc_mem_usage += sizeof(*rp);
 	}
 
 	/* go ahead and prune the cache */
-	prune_cache_entries();
+	prune_bucket(b);
 
-	found = nfsd_cache_search(rqstp, csum);
+	found = nfsd_cache_search(b, rqstp, csum);
 	if (found) {
 		if (likely(rp))
 			nfsd_reply_cache_free_locked(rp);
@@ -454,8 +463,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	rp->c_len = rqstp->rq_arg.len;
 	rp->c_csum = csum;
 
-	hash_refile(rp);
-	lru_put_end(rp);
+	lru_put_end(b, rp);
 
 	/* release any buffer */
 	if (rp->c_type == RC_REPLBUFF) {
@@ -465,14 +473,14 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	}
 	rp->c_type = RC_NOCACHE;
  out:
-	spin_unlock(&cache_lock);
+	spin_unlock(&b->cache_lock);
 	return rtn;
 
 found_entry:
 	nfsdstats.rchits++;
 	/* We found a matching entry which is either in progress or done. */
 	age = jiffies - rp->c_timestamp;
-	lru_put_end(rp);
+	lru_put_end(b, rp);
 
 	rtn = RC_DROPIT;
 	/* Request being processed or excessive rexmits */
@@ -527,18 +535,23 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 {
 	struct svc_cacherep *rp = rqstp->rq_cacherep;
 	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
+	u32		hash;
+	struct nfsd_drc_bucket *b;
 	int		len;
 	size_t		bufsize = 0;
 
 	if (!rp)
 		return;
 
+	hash = nfsd_cache_hash(rp->c_xid);
+	b = &drc_hashtbl[hash];
+
 	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
 	len >>= 2;
 
 	/* Don't cache excessive amounts of data and XDR failures */
 	if (!statp || len > (256 >> 2)) {
-		nfsd_reply_cache_free(rp);
+		nfsd_reply_cache_free(b, rp);
 		return;
 	}
 
@@ -553,23 +566,23 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 		bufsize = len << 2;
 		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
 		if (!cachv->iov_base) {
-			nfsd_reply_cache_free(rp);
+			nfsd_reply_cache_free(b, rp);
 			return;
 		}
 		cachv->iov_len = bufsize;
 		memcpy(cachv->iov_base, statp, bufsize);
 		break;
 	case RC_NOCACHE:
-		nfsd_reply_cache_free(rp);
+		nfsd_reply_cache_free(b, rp);
 		return;
 	}
-	spin_lock(&cache_lock);
+	spin_lock(&b->cache_lock);
 	drc_mem_usage += bufsize;
-	lru_put_end(rp);
+	lru_put_end(b, rp);
 	rp->c_secure = rqstp->rq_secure;
 	rp->c_type = cachetype;
 	rp->c_state = RC_DONE;
-	spin_unlock(&cache_lock);
+	spin_unlock(&b->cache_lock);
 	return;
 }
 
@@ -600,9 +613,9 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
  */
 static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
 {
-	spin_lock(&cache_lock);
 	seq_printf(m, "max entries: %u\n", max_drc_entries);
-	seq_printf(m, "num entries: %u\n", num_drc_entries);
+	seq_printf(m, "num entries: %u\n",
+			atomic_read(&num_drc_entries));
 	seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
 	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
 	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
@@ -611,7 +624,6 @@ static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "payload misses: %u\n", payload_misses);
 	seq_printf(m, "longest chain len: %u\n", longest_chain);
 	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
-	spin_unlock(&cache_lock);
 	return 0;
 }
 
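Taken as a whole, the diff replaces the single global cache_lock, the global hash table and the global LRU list with an array of nfsd_drc_bucket structures, each holding its own lru_head and cache_lock, and converts num_drc_entries to an atomic_t so it can be read without taking any lock. A compressed userspace model of the resulting lookup path is sketched below; the names, the pthread mutexes standing in for spinlocks, and the hash multiplier are all inventions for illustration, not the kernel API.

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical model of the per-bucket DRC layout this diff introduces. */
struct toy_rep {
	uint32_t xid;
	struct toy_rep *next;
};

struct toy_bucket {
	pthread_mutex_t lock;   /* plays the role of b->cache_lock */
	struct toy_rep *lru;    /* plays the role of b->lru_head */
};

static struct toy_bucket *tbl;      /* like drc_hashtbl, allocated at init */
static unsigned int maskbits = 6;   /* like maskbits, computed at init */

/* Multiplicative hash standing in for hash_32(); constant is illustrative. */
static unsigned int bucket_of(uint32_t xid)
{
	return (uint32_t)(xid * 0x61C88647u) >> (32 - maskbits);
}

/* A lookup touches exactly one bucket: hash the XID, lock only that bucket,
 * and scan its private list. Contention is limited to requests that happen
 * to hash to the same bucket rather than every request on the server. */
struct toy_rep *toy_lookup(uint32_t xid)
{
	struct toy_bucket *b = &tbl[bucket_of(xid)];
	struct toy_rep *rp, *found = NULL;

	pthread_mutex_lock(&b->lock);
	for (rp = b->lru; rp != NULL; rp = rp->next) {
		if (rp->xid == xid) {
			found = rp;
			break;
		}
	}
	pthread_mutex_unlock(&b->lock);
	return found;
}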