Diffstat (limited to 'fs/nfsd/nfscache.c')
 fs/nfsd/nfscache.c | 475 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 386 insertions(+), 89 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 2cbac34a55da..e76244edd748 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -9,34 +9,63 @@
  */
 
 #include <linux/slab.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/hash.h>
+#include <net/checksum.h>
 
 #include "nfsd.h"
 #include "cache.h"
 
-/* Size of reply cache. Common values are:
- * 4.3BSD: 128
- * 4.4BSD: 256
- * Solaris2: 1024
- * DEC Unix: 512-4096
+#define NFSDDBG_FACILITY        NFSDDBG_REPCACHE
+
+/*
+ * We use this value to determine the number of hash buckets from the max
+ * cache size, the idea being that when the cache is at its maximum number
+ * of entries, then this should be the average number of entries per bucket.
  */
-#define CACHESIZE       1024
-#define HASHSIZE        64
+#define TARGET_BUCKET_SIZE      64
 
 static struct hlist_head *      cache_hash;
 static struct list_head         lru_head;
-static int                      cache_disabled = 1;
+static struct kmem_cache        *drc_slab;
+
+/* max number of entries allowed in the cache */
+static unsigned int             max_drc_entries;
+
+/* number of significant bits in the hash value */
+static unsigned int             maskbits;
 
 /*
- * Calculate the hash index from an XID.
+ * Stats and other tracking on the duplicate reply cache. All of these and
+ * the "rc" fields in nfsdstats are protected by the cache_lock.
  */
-static inline u32 request_hash(u32 xid)
-{
-        u32 h = xid;
-        h ^= (xid >> 24);
-        return h & (HASHSIZE-1);
-}
+
+/* total number of entries */
+static unsigned int             num_drc_entries;
+
+/* cache misses due only to checksum comparison failures */
+static unsigned int             payload_misses;
+
+/* amount of memory (in bytes) currently consumed by the DRC */
+static unsigned int             drc_mem_usage;
+
+/* longest hash chain seen */
+static unsigned int             longest_chain;
+
+/* size of cache when we saw the longest hash chain */
+static unsigned int             longest_chain_cachesize;
 
 static int      nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void     cache_cleaner_func(struct work_struct *unused);
+static int      nfsd_reply_cache_shrink(struct shrinker *shrink,
+                                        struct shrink_control *sc);
+
+static struct shrinker nfsd_reply_cache_shrinker = {
+        .shrink = nfsd_reply_cache_shrink,
+        .seeks  = 1,
+};
 
 /*
  * locking for the reply cache:
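
An aside on TARGET_BUCKET_SIZE: the bucket count is derived later (nfsd_hashsize(), next hunk) by dividing the max cache size by this target and rounding up to a power of two. A minimal userspace sketch of that math, assuming the 8192-entry cap from the 64MB row of the table in the next hunk (roundup_pow2 here is an illustrative stand-in for the kernel's roundup_pow_of_two()):

    #include <stdio.h>

    #define TARGET_BUCKET_SIZE 64

    /* same rounding as the kernel's roundup_pow_of_two() */
    static unsigned int roundup_pow2(unsigned int v)
    {
            unsigned int r = 1;

            while (r < v)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned int max_entries = 8192;        /* the 64MB case */
            unsigned int hashsize = roundup_pow2(max_entries / TARGET_BUCKET_SIZE);

            /* 8192 / 64 = 128 buckets, so maskbits == 7 */
            printf("%u buckets\n", hashsize);
            return 0;
    }
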
@@ -44,30 +73,104 @@ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
  * Otherwise, when accessing _prev or _next, the lock must be held.
  */
 static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
-int nfsd_reply_cache_init(void)
+/*
+ * Put a cap on the size of the DRC based on the amount of available
+ * low memory in the machine.
+ *
+ *  64MB:    8192
+ * 128MB:   11585
+ * 256MB:   16384
+ * 512MB:   23170
+ *   1GB:   32768
+ *   2GB:   46340
+ *   4GB:   65536
+ *   8GB:   92681
+ *  16GB:  131072
+ *
+ * ...with a hard cap of 256k entries. In the worst case, each entry will be
+ * ~1k, so the above numbers should give a rough max of the amount of memory
+ * used in k.
+ */
+static unsigned int
+nfsd_cache_size_limit(void)
+{
+        unsigned int limit;
+        unsigned long low_pages = totalram_pages - totalhigh_pages;
+
+        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
+        return min_t(unsigned int, limit, 256*1024);
+}
+
+/*
+ * Compute the number of hash buckets we need. Divide the max cachesize by
+ * the "target" max bucket size, and round up to next power of two.
+ */
+static unsigned int
+nfsd_hashsize(unsigned int limit)
+{
+        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
+}
+
+static struct svc_cacherep *
+nfsd_reply_cache_alloc(void)
 {
         struct svc_cacherep     *rp;
-        int                     i;
 
-        INIT_LIST_HEAD(&lru_head);
-        i = CACHESIZE;
-        while (i) {
-                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
-                if (!rp)
-                        goto out_nomem;
-                list_add(&rp->c_lru, &lru_head);
+        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
+        if (rp) {
                 rp->c_state = RC_UNUSED;
                 rp->c_type = RC_NOCACHE;
+                INIT_LIST_HEAD(&rp->c_lru);
                 INIT_HLIST_NODE(&rp->c_hash);
-                i--;
         }
+        return rp;
+}
+
+static void
+nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+{
+        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+                drc_mem_usage -= rp->c_replvec.iov_len;
+                kfree(rp->c_replvec.iov_base);
+        }
+        if (!hlist_unhashed(&rp->c_hash))
+                hlist_del(&rp->c_hash);
+        list_del(&rp->c_lru);
+        --num_drc_entries;
+        drc_mem_usage -= sizeof(*rp);
+        kmem_cache_free(drc_slab, rp);
+}
 
-        cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+static void
+nfsd_reply_cache_free(struct svc_cacherep *rp)
+{
+        spin_lock(&cache_lock);
+        nfsd_reply_cache_free_locked(rp);
+        spin_unlock(&cache_lock);
+}
+
+int nfsd_reply_cache_init(void)
+{
+        unsigned int hashsize;
+
+        INIT_LIST_HEAD(&lru_head);
+        max_drc_entries = nfsd_cache_size_limit();
+        num_drc_entries = 0;
+        hashsize = nfsd_hashsize(max_drc_entries);
+        maskbits = ilog2(hashsize);
+
+        register_shrinker(&nfsd_reply_cache_shrinker);
+        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
+                                        0, 0, NULL);
+        if (!drc_slab)
+                goto out_nomem;
+
+        cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
         if (!cache_hash)
                 goto out_nomem;
 
-        cache_disabled = 0;
         return 0;
 out_nomem:
         printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
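
The table in nfsd_cache_size_limit() can be re-derived in userspace. A minimal sketch, assuming 4k pages (PAGE_SHIFT == 12) and all RAM counting as low memory; note that the kernel's int_sqrt() truncates, so computed values can land a unit or two under the table (11584 vs. 11585 for the 128MB row):

    /* build: cc drc_cap.c -lm  (filename illustrative) */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long mb;

            for (mb = 64; mb <= 16384; mb *= 2) {
                    unsigned long low_pages = mb << (20 - 12);  /* MB -> 4k pages */
                    unsigned int limit;

                    /* same formula as nfsd_cache_size_limit() */
                    limit = (16 * (unsigned int)sqrt(low_pages)) << (12 - 10);
                    if (limit > 256 * 1024)
                            limit = 256 * 1024;
                    printf("%5luMB: %6u\n", mb, limit);
            }
            return 0;
    }
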
@@ -79,27 +182,33 @@ void nfsd_reply_cache_shutdown(void)
 {
         struct svc_cacherep     *rp;
 
+        unregister_shrinker(&nfsd_reply_cache_shrinker);
+        cancel_delayed_work_sync(&cache_cleaner);
+
         while (!list_empty(&lru_head)) {
                 rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
-                if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
-                        kfree(rp->c_replvec.iov_base);
-                list_del(&rp->c_lru);
-                kfree(rp);
+                nfsd_reply_cache_free_locked(rp);
         }
 
-        cache_disabled = 1;
-
         kfree (cache_hash);
         cache_hash = NULL;
+
+        if (drc_slab) {
+                kmem_cache_destroy(drc_slab);
+                drc_slab = NULL;
+        }
 }
 
 /*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
  */
 static void
 lru_put_end(struct svc_cacherep *rp)
 {
+        rp->c_timestamp = jiffies;
         list_move_tail(&rp->c_lru, &lru_head);
+        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 /*
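
A note on the delayed-work pattern above: schedule_delayed_work() returns without re-queueing if the cleaner is already pending, so bumping an entry on every hit arms the RC_EXPIRE timer at most once per cycle and keeps lru_put_end() cheap. When the pruner itself wants to push the next run out, it uses mod_delayed_work() instead (see prune_cache_entries() below), which does reset the timer.
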
@@ -109,89 +218,247 @@ static void
 hash_refile(struct svc_cacherep *rp)
 {
         hlist_del_init(&rp->c_hash);
-        hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
+        hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
+}
+
+static inline bool
+nfsd_cache_entry_expired(struct svc_cacherep *rp)
+{
+        return rp->c_state != RC_INPROG &&
+               time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
+}
+
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+        struct svc_cacherep *rp, *tmp;
+
+        list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+                if (!nfsd_cache_entry_expired(rp) &&
+                    num_drc_entries <= max_drc_entries)
+                        break;
+                nfsd_reply_cache_free_locked(rp);
+        }
+
+        /*
+         * Conditionally rearm the job. If we cleaned out the list, then
+         * cancel any pending run (since there won't be any work to do).
+         * Otherwise, we rearm the job or modify the existing one to run in
+         * RC_EXPIRE since we just ran the pruner.
+         */
+        if (list_empty(&lru_head))
+                cancel_delayed_work(&cache_cleaner);
+        else
+                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+        spin_lock(&cache_lock);
+        prune_cache_entries();
+        spin_unlock(&cache_lock);
+}
+
+static int
+nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+        unsigned int num;
+
+        spin_lock(&cache_lock);
+        if (sc->nr_to_scan)
+                prune_cache_entries();
+        num = num_drc_entries;
+        spin_unlock(&cache_lock);
+
+        return num;
+}
+
+/*
+ * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+ */
+static __wsum
+nfsd_cache_csum(struct svc_rqst *rqstp)
+{
+        int idx;
+        unsigned int base;
+        __wsum csum;
+        struct xdr_buf *buf = &rqstp->rq_arg;
+        const unsigned char *p = buf->head[0].iov_base;
+        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+                                RC_CSUMLEN);
+        size_t len = min(buf->head[0].iov_len, csum_len);
+
+        /* rq_arg.head first */
+        csum = csum_partial(p, len, 0);
+        csum_len -= len;
+
+        /* Continue into page array */
+        idx = buf->page_base / PAGE_SIZE;
+        base = buf->page_base & ~PAGE_MASK;
+        while (csum_len) {
+                p = page_address(buf->pages[idx]) + base;
+                len = min_t(size_t, PAGE_SIZE - base, csum_len);
+                csum = csum_partial(p, len, csum);
+                csum_len -= len;
+                base = 0;
+                ++idx;
+        }
+        return csum;
+}
+
+static bool
+nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
+{
+        /* Check RPC header info first */
+        if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
+            rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
+            rqstp->rq_arg.len != rp->c_len ||
+            !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
+            rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
+                return false;
+
+        /* compare checksum of NFS data */
+        if (csum != rp->c_csum) {
+                ++payload_misses;
+                return false;
+        }
+
+        return true;
+}
+
+/*
+ * Search the request hash for an entry that matches the given rqstp.
+ * Must be called with cache_lock held. Returns the found entry or
+ * NULL on failure.
+ */
+static struct svc_cacherep *
+nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
+{
+        struct svc_cacherep     *rp, *ret = NULL;
+        struct hlist_head       *rh;
+        unsigned int            entries = 0;
+
+        rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
+        hlist_for_each_entry(rp, rh, c_hash) {
+                ++entries;
+                if (nfsd_cache_match(rqstp, csum, rp)) {
+                        ret = rp;
+                        break;
+                }
+        }
+
+        /* tally hash chain length stats */
+        if (entries > longest_chain) {
+                longest_chain = entries;
+                longest_chain_cachesize = num_drc_entries;
+        } else if (entries == longest_chain) {
+                /* prefer to keep the smallest cachesize possible here */
+                longest_chain_cachesize = min(longest_chain_cachesize,
+                                                num_drc_entries);
+        }
+
+        return ret;
 }
 
 /*
  * Try to find an entry matching the current call in the cache. When none
- * is found, we grab the oldest unlocked entry off the LRU list.
- * Note that no operation within the loop may sleep.
+ * is found, we try to grab the oldest expired entry off the LRU list. If
+ * a suitable one isn't there, then drop the cache_lock and allocate a
+ * new one, then search again in case one got inserted while this thread
+ * didn't hold the lock.
  */
 int
 nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
-        struct hlist_node       *hn;
-        struct hlist_head       *rh;
-        struct svc_cacherep     *rp;
+        struct svc_cacherep     *rp, *found;
         __be32                  xid = rqstp->rq_xid;
         u32                     proto = rqstp->rq_prot,
                                 vers = rqstp->rq_vers,
                                 proc = rqstp->rq_proc;
+        __wsum                  csum;
         unsigned long           age;
         int type = rqstp->rq_cachetype;
-        int rtn;
+        int rtn = RC_DOIT;
 
         rqstp->rq_cacherep = NULL;
-        if (cache_disabled || type == RC_NOCACHE) {
+        if (type == RC_NOCACHE) {
                 nfsdstats.rcnocache++;
-                return RC_DOIT;
+                return rtn;
         }
 
-        spin_lock(&cache_lock);
-        rtn = RC_DOIT;
+        csum = nfsd_cache_csum(rqstp);
 
-        rh = &cache_hash[request_hash(xid)];
-        hlist_for_each_entry(rp, hn, rh, c_hash) {
-                if (rp->c_state != RC_UNUSED &&
-                    xid == rp->c_xid && proc == rp->c_proc &&
-                    proto == rp->c_prot && vers == rp->c_vers &&
-                    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
-                    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
-                        nfsdstats.rchits++;
-                        goto found_entry;
+        /*
+         * Since the common case is a cache miss followed by an insert,
+         * preallocate an entry. First, try to reuse the first entry on the LRU
+         * if it works, then go ahead and prune the LRU list.
+         */
+        spin_lock(&cache_lock);
+        if (!list_empty(&lru_head)) {
+                rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
+                if (nfsd_cache_entry_expired(rp) ||
+                    num_drc_entries >= max_drc_entries) {
+                        lru_put_end(rp);
+                        prune_cache_entries();
+                        goto search_cache;
                 }
         }
-        nfsdstats.rcmisses++;
 
-        /* This loop shouldn't take more than a few iterations normally */
-        {
-                int safe = 0;
-                list_for_each_entry(rp, &lru_head, c_lru) {
-                        if (rp->c_state != RC_INPROG)
-                                break;
-                        if (safe++ > CACHESIZE) {
-                                printk("nfsd: loop in repcache LRU list\n");
-                                cache_disabled = 1;
-                                goto out;
-                        }
-                }
+        /* No expired ones available, allocate a new one. */
+        spin_unlock(&cache_lock);
+        rp = nfsd_reply_cache_alloc();
+        spin_lock(&cache_lock);
+        if (likely(rp)) {
+                ++num_drc_entries;
+                drc_mem_usage += sizeof(*rp);
         }
 
-        /* All entries on the LRU are in-progress. This should not happen */
-        if (&rp->c_lru == &lru_head) {
-                static int complaints;
+search_cache:
+        found = nfsd_cache_search(rqstp, csum);
+        if (found) {
+                if (likely(rp))
+                        nfsd_reply_cache_free_locked(rp);
+                rp = found;
+                goto found_entry;
+        }
 
-                printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
-                if (++complaints > 5) {
-                        printk(KERN_WARNING "nfsd: disabling repcache.\n");
-                        cache_disabled = 1;
-                }
+        if (!rp) {
+                dprintk("nfsd: unable to allocate DRC entry!\n");
                 goto out;
         }
 
+        /*
+         * We're keeping the one we just allocated. Are we now over the
+         * limit? Prune one off the tip of the LRU in trade for the one we
+         * just allocated if so.
+         */
+        if (num_drc_entries >= max_drc_entries)
+                nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
+                                                struct svc_cacherep, c_lru));
+
+        nfsdstats.rcmisses++;
         rqstp->rq_cacherep = rp;
         rp->c_state = RC_INPROG;
         rp->c_xid = xid;
         rp->c_proc = proc;
-        memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
+        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
+        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
         rp->c_prot = proto;
         rp->c_vers = vers;
-        rp->c_timestamp = jiffies;
+        rp->c_len = rqstp->rq_arg.len;
+        rp->c_csum = csum;
 
         hash_refile(rp);
+        lru_put_end(rp);
 
         /* release any buffer */
         if (rp->c_type == RC_REPLBUFF) {
+                drc_mem_usage -= rp->c_replvec.iov_len;
                 kfree(rp->c_replvec.iov_base);
                 rp->c_replvec.iov_base = NULL;
         }
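
The trickiest part of the hunk above is nfsd_cache_csum()'s walk across the split xdr_buf: checksum head[0] first, then continue into the page array starting at page_base, capping the total at RC_CSUMLEN. A userspace sketch of the same traversal, with flat buffers standing in for pages and a byte-sum standing in for csum_partial() (the names and the RC_CSUMLEN value here are illustrative, not kernel API):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define RC_CSUMLEN 256u         /* stands in for the kernel constant */

    struct kvec { void *iov_base; size_t iov_len; };

    /* trivial additive stand-in for csum_partial() */
    static unsigned int csum_step(const unsigned char *p, size_t len,
                                  unsigned int csum)
    {
            while (len--)
                    csum += *p++;
            return csum;
    }

    static unsigned int cache_csum(struct kvec *head, unsigned char **pages,
                                   size_t page_len, size_t page_base)
    {
            unsigned int csum;
            size_t csum_len = head->iov_len + page_len;
            size_t len, idx, base;

            if (csum_len > RC_CSUMLEN)
                    csum_len = RC_CSUMLEN;

            /* head first */
            len = head->iov_len < csum_len ? head->iov_len : csum_len;
            csum = csum_step(head->iov_base, len, 0);
            csum_len -= len;

            /* then the page array, starting at page_base */
            idx = page_base / PAGE_SIZE;
            base = page_base % PAGE_SIZE;
            while (csum_len) {
                    len = PAGE_SIZE - base;
                    if (len > csum_len)
                            len = csum_len;
                    csum = csum_step(pages[idx] + base, len, csum);
                    csum_len -= len;
                    base = 0;
                    ++idx;
            }
            return csum;
    }

    int main(void)
    {
            unsigned char page0[PAGE_SIZE] = "payload continues here";
            unsigned char *pages[] = { page0 };
            struct kvec head = { "rpc header bytes", 16 };

            printf("%u\n", cache_csum(&head, pages, 22, 0));
            return 0;
    }
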
@@ -201,9 +468,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
         return rtn;
 
 found_entry:
+        nfsdstats.rchits++;
         /* We found a matching entry which is either in progress or done. */
         age = jiffies - rp->c_timestamp;
-        rp->c_timestamp = jiffies;
         lru_put_end(rp);
 
         rtn = RC_DROPIT;
@@ -232,7 +499,7 @@ found_entry:
                 break;
         default:
                 printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
-                rp->c_state = RC_UNUSED;
+                nfsd_reply_cache_free_locked(rp);
         }
 
         goto out;
@@ -257,11 +524,12 @@ found_entry:
 void
 nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 {
-        struct svc_cacherep *rp;
+        struct svc_cacherep *rp = rqstp->rq_cacherep;
         struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
         int             len;
+        size_t          bufsize = 0;
 
-        if (!(rp = rqstp->rq_cacherep) || cache_disabled)
+        if (!rp)
                 return;
 
         len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
@@ -269,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 
         /* Don't cache excessive amounts of data and XDR failures */
         if (!statp || len > (256 >> 2)) {
-                rp->c_state = RC_UNUSED;
+                nfsd_reply_cache_free(rp);
                 return;
         }
 
@@ -281,23 +549,25 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
                 break;
         case RC_REPLBUFF:
                 cachv = &rp->c_replvec;
-                cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
+                bufsize = len << 2;
+                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                 if (!cachv->iov_base) {
-                        spin_lock(&cache_lock);
-                        rp->c_state = RC_UNUSED;
-                        spin_unlock(&cache_lock);
+                        nfsd_reply_cache_free(rp);
                         return;
                 }
-                cachv->iov_len = len << 2;
-                memcpy(cachv->iov_base, statp, len << 2);
+                cachv->iov_len = bufsize;
+                memcpy(cachv->iov_base, statp, bufsize);
                 break;
+        case RC_NOCACHE:
+                nfsd_reply_cache_free(rp);
+                return;
         }
         spin_lock(&cache_lock);
+        drc_mem_usage += bufsize;
         lru_put_end(rp);
         rp->c_secure = rqstp->rq_secure;
         rp->c_type = cachetype;
         rp->c_state = RC_DONE;
-        rp->c_timestamp = jiffies;
         spin_unlock(&cache_lock);
         return;
 }
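
Worth noting for the hunk above: len counts 32-bit XDR words, not bytes (the byte offset of statp into the reply is divided by four just after it's computed, in unchanged code between these hunks). The `len > (256 >> 2)` guard in the previous hunk therefore caps cacheable replies at 64 words (256 bytes), and `bufsize = len << 2` converts back to bytes: a 100-byte reply is 25 words and gets a 100-byte buffer kmalloc'd, memcpy'd, and charged to drc_mem_usage.
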
@@ -321,3 +591,30 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
         vec->iov_len += data->iov_len;
         return 1;
 }
+
+/*
+ * Note that fields may be added, removed or reordered in the future. Programs
+ * scraping this file for info should test the labels to ensure they're
+ * getting the correct field.
+ */
+static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
+{
+        spin_lock(&cache_lock);
+        seq_printf(m, "max entries:           %u\n", max_drc_entries);
+        seq_printf(m, "num entries:           %u\n", num_drc_entries);
+        seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
+        seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
+        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
+        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
+        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
+        seq_printf(m, "payload misses:        %u\n", payload_misses);
+        seq_printf(m, "longest chain len:     %u\n", longest_chain);
+        seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
+        spin_unlock(&cache_lock);
+        return 0;
+}
+
+int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, nfsd_reply_cache_stats_show, NULL);
+}
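
Once nfsd_reply_cache_stats_open() is wired up to an nfsctl entry (done outside this file), the counters read as plain text, one "label: value" pair per line; per the comment above, consumers should match on labels rather than line position. An illustrative reading, assuming the usual /proc/fs/nfsd mount point and made-up values:

    $ cat /proc/fs/nfsd/reply_cache_stats
    max entries:           92681
    num entries:           1357
    hash buckets:          2048
    mem usage:             1404928
    cache hits:            9515
    cache misses:          1357
    not cached:            42
    payload misses:        0
    longest chain len:     3
    cachesize at longest:  1298
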