path: root/fs/nfsd/nfscache.c
author     Jeff Layton <jlayton@redhat.com>          2013-02-04 08:18:04 -0500
committer  J. Bruce Fields <bfields@redhat.com>      2013-02-04 17:19:11 -0500
commit     2c6b691c05bf77c4bc7c9f1a9b6d93a160928421 (patch)
tree       298eb991a69be4a86aae6c3396dec27ff0122c52 /fs/nfsd/nfscache.c
parent     13cc8a78e89db0469e67ac9b3ae466b661af93fa (diff)
nfsd: when updating an entry with RC_NOCACHE, just free it
There's no need to keep entries around that we're declaring RC_NOCACHE.
Ditto if there's a problem with the entry.

With this change too, there's no need to test for RC_UNUSED in the search
function. If the entry's in the hash table then it's either INPROG or DONE.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
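For readability, the helper this patch introduces is shown below on its own,
with explanatory comments added here; the function body is exactly what the
first hunk of the diff adds, while the comments are an interpretation of the
patch rather than part of it.

/*
 * nfsd_reply_cache_free(): unlocked wrapper around
 * nfsd_reply_cache_free_locked().  Callers such as nfsd_cache_update()
 * that do not already hold cache_lock can drop an entry with one call
 * instead of leaving it in the hash table marked RC_UNUSED.
 */
static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
        spin_lock(&cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&cache_lock);
}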
Diffstat (limited to 'fs/nfsd/nfscache.c')
-rw-r--r--   fs/nfsd/nfscache.c   18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 69d29d4ea579..e8ea785e295d 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -98,6 +98,14 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
         kmem_cache_free(drc_slab, rp);
 }
 
+static void
+nfsd_reply_cache_free(struct svc_cacherep *rp)
+{
+        spin_lock(&cache_lock);
+        nfsd_reply_cache_free_locked(rp);
+        spin_unlock(&cache_lock);
+}
+
 int nfsd_reply_cache_init(void)
 {
         drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
@@ -182,8 +190,7 @@ nfsd_cache_search(struct svc_rqst *rqstp)
 
         rh = &cache_hash[request_hash(xid)];
         hlist_for_each_entry(rp, hn, rh, c_hash) {
-                if (rp->c_state != RC_UNUSED &&
-                    xid == rp->c_xid && proc == rp->c_proc &&
+                if (xid == rp->c_xid && proc == rp->c_proc &&
                     proto == rp->c_prot && vers == rp->c_vers &&
                     !nfsd_cache_entry_expired(rp) &&
                     rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
@@ -353,7 +360,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 
         /* Don't cache excessive amounts of data and XDR failures */
         if (!statp || len > (256 >> 2)) {
-                rp->c_state = RC_UNUSED;
+                nfsd_reply_cache_free(rp);
                 return;
         }
 
@@ -367,12 +374,15 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
                 cachv = &rp->c_replvec;
                 cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
                 if (!cachv->iov_base) {
-                        rp->c_state = RC_UNUSED;
+                        nfsd_reply_cache_free(rp);
                         return;
                 }
                 cachv->iov_len = len << 2;
                 memcpy(cachv->iov_base, statp, len << 2);
                 break;
+        case RC_NOCACHE:
+                nfsd_reply_cache_free(rp);
+                return;
         }
         spin_lock(&cache_lock);
         lru_put_end(rp);