 fs/nfsd/nfscache.c | 41
 1 file changed, 19 insertions, 22 deletions
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index ca05f6dc3544..c61391e8e09d 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -318,55 +318,53 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	__wsum			csum;
 	unsigned long		age;
 	int type = rqstp->rq_cachetype;
-	int rtn;
+	int rtn = RC_DOIT;
 
 	rqstp->rq_cacherep = NULL;
 	if (type == RC_NOCACHE) {
 		nfsdstats.rcnocache++;
-		return RC_DOIT;
+		return rtn;
 	}
 
 	csum = nfsd_cache_csum(rqstp);
 
+	/*
+	 * Since the common case is a cache miss followed by an insert,
+	 * preallocate an entry. First, try to reuse the first entry on the LRU
+	 * if it works, then go ahead and prune the LRU list.
+	 */
 	spin_lock(&cache_lock);
-	rtn = RC_DOIT;
-
-	rp = nfsd_cache_search(rqstp, csum);
-	if (rp)
-		goto found_entry;
-
-	/* Try to use the first entry on the LRU */
 	if (!list_empty(&lru_head)) {
 		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
 		if (nfsd_cache_entry_expired(rp) ||
 		    num_drc_entries >= max_drc_entries) {
 			lru_put_end(rp);
 			prune_cache_entries();
-			goto setup_entry;
+			goto search_cache;
 		}
 	}
 
-	/* Drop the lock and allocate a new entry */
+	/* No expired ones available, allocate a new one. */
 	spin_unlock(&cache_lock);
 	rp = nfsd_reply_cache_alloc();
-	if (!rp) {
-		dprintk("nfsd: unable to allocate DRC entry!\n");
-		return RC_DOIT;
-	}
 	spin_lock(&cache_lock);
-	++num_drc_entries;
+	if (likely(rp))
+		++num_drc_entries;
 
-	/*
-	 * Must search again just in case someone inserted one
-	 * after we dropped the lock above.
-	 */
+search_cache:
 	found = nfsd_cache_search(rqstp, csum);
 	if (found) {
-		nfsd_reply_cache_free_locked(rp);
+		if (likely(rp))
+			nfsd_reply_cache_free_locked(rp);
 		rp = found;
 		goto found_entry;
 	}
 
+	if (!rp) {
+		dprintk("nfsd: unable to allocate DRC entry!\n");
+		goto out;
+	}
+
 	/*
 	 * We're keeping the one we just allocated. Are we now over the
 	 * limit? Prune one off the tip of the LRU in trade for the one we
@@ -376,7 +374,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
 						struct svc_cacherep, c_lru));
 
-setup_entry:
 	nfsdstats.rcmisses++;
 	rqstp->rq_cacherep = rp;
 	rp->c_state = RC_INPROG;