diff options
author | J.Bruce Fields <bfields@fieldses.org> | 2006-12-13 03:35:26 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-13 12:05:54 -0500 |
commit | 01f3bd1f03599470e4695392b6ae055ed8506978 (patch) | |
tree | 4935c734d984b8e932b7555c70c6f45e8d342d6e /net | |
parent | e0bb89ef031f76dcb9c9d920d18b13948f1418da (diff) |
[PATCH] knfsd: svcrpc: remove another silent drop from deferral code
There's no point deferring something just to immediately fail the deferral,
especially now that we can do something more useful in the failure case by
returning an error.
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/cache.c | 20 |
1 file changed, 9 insertions, 11 deletions
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 824e8534e022..14274490f92e 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -530,6 +530,13 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
530 | struct cache_deferred_req *dreq; | 530 | struct cache_deferred_req *dreq; |
531 | int hash = DFR_HASH(item); | 531 | int hash = DFR_HASH(item); |
532 | 532 | ||
533 | if (cache_defer_cnt >= DFR_MAX) { | ||
534 | /* too much in the cache, randomly drop this one, | ||
535 | * or continue and drop the oldest below | ||
536 | */ | ||
537 | if (net_random()&1) | ||
538 | return -ETIMEDOUT; | ||
539 | } | ||
533 | dreq = req->defer(req); | 540 | dreq = req->defer(req); |
534 | if (dreq == NULL) | 541 | if (dreq == NULL) |
535 | return -ETIMEDOUT; | 542 | return -ETIMEDOUT; |
@@ -548,17 +555,8 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
548 | /* it is in, now maybe clean up */ | 555 | /* it is in, now maybe clean up */ |
549 | dreq = NULL; | 556 | dreq = NULL; |
550 | if (++cache_defer_cnt > DFR_MAX) { | 557 | if (++cache_defer_cnt > DFR_MAX) { |
551 | /* too much in the cache, randomly drop | 558 | dreq = list_entry(cache_defer_list.prev, |
552 | * first or last | 559 | struct cache_deferred_req, recent); |
553 | */ | ||
554 | if (net_random()&1) | ||
555 | dreq = list_entry(cache_defer_list.next, | ||
556 | struct cache_deferred_req, | ||
557 | recent); | ||
558 | else | ||
559 | dreq = list_entry(cache_defer_list.prev, | ||
560 | struct cache_deferred_req, | ||
561 | recent); | ||
562 | list_del(&dreq->recent); | 560 | list_del(&dreq->recent); |
563 | list_del(&dreq->hash); | 561 | list_del(&dreq->hash); |
564 | cache_defer_cnt--; | 562 | cache_defer_cnt--; |