Diffstat (limited to 'net/sunrpc')
 net/sunrpc/cache.c | 67 +++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 43 insertions(+), 24 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 49115b107fb..ba61d0fa4b8 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -513,22 +513,25 @@ static int cache_defer_cnt;
 
 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 {
-	list_del_init(&dreq->recent);
 	hlist_del_init(&dreq->hash);
-	cache_defer_cnt--;
+	if (!list_empty(&dreq->recent)) {
+		list_del_init(&dreq->recent);
+		cache_defer_cnt--;
+	}
 }
 
 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 {
 	int hash = DFR_HASH(item);
 
-	list_add(&dreq->recent, &cache_defer_list);
+	INIT_LIST_HEAD(&dreq->recent);
 	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 }
 
-static void setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
+static void setup_deferral(struct cache_deferred_req *dreq,
+			   struct cache_head *item,
+			   int count_me)
 {
-	struct cache_deferred_req *discard;
 
 	dreq->item = item;
 
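The first hunk builds on a <linux/list.h> idiom: a list_head initialised with INIT_LIST_HEAD() points at itself, so list_empty() on the node reports whether it is linked anywhere, and list_del_init() keeps that test valid after removal. That is what lets __unhash_deferred_req() decrement cache_defer_cnt only for requests that were actually counted. A minimal userspace sketch of the idiom (the helpers below imitate the kernel's; the scaffolding around them is hypothetical):

#include <assert.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

int main(void)
{
	struct list_head lru, node;

	INIT_LIST_HEAD(&lru);
	INIT_LIST_HEAD(&node);		/* as __hash_deferred_req() now does */
	assert(list_empty(&node));	/* not on any list, so not counted */

	list_add(&node, &lru);		/* as setup_deferral() does if count_me */
	assert(!list_empty(&node));	/* membership is now observable */

	list_del_init(&node);		/* as __unhash_deferred_req() does */
	assert(list_empty(&node));	/* test stays valid; double unhash is safe */

	printf("list_empty() on the node doubles as a membership flag\n");
	return 0;
}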
@@ -536,18 +539,13 @@ static void setup_deferral(struct cache_deferred_req *dreq, struct cache_head *i
 
 	__hash_deferred_req(dreq, item);
 
-	/* it is in, now maybe clean up */
-	discard = NULL;
-	if (++cache_defer_cnt > DFR_MAX) {
-		discard = list_entry(cache_defer_list.prev,
-				     struct cache_deferred_req, recent);
-		__unhash_deferred_req(discard);
+	if (count_me) {
+		cache_defer_cnt++;
+		list_add(&dreq->recent, &cache_defer_list);
 	}
+
 	spin_unlock(&cache_defer_lock);
 
-	if (discard)
-		/* there was one too many */
-		discard->revisit(discard, 1);
 }
 
 struct thread_deferred_req {
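With this hunk, setup_deferral() no longer culls; it only counts the deferral and puts it on cache_defer_list when the caller asks (count_me), which the next hunk's call site declines for sleeping threads. A compact model of that split, assuming a pthread mutex in place of cache_defer_lock and a plain flag in place of list membership (defer_node and both helper names are made up for the sketch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static int defer_cnt;		/* counted deferrals, as cache_defer_cnt */

struct defer_node {
	int counted;		/* stands in for !list_empty(&dreq->recent) */
};

static void setup_deferral(struct defer_node *n, int count_me)
{
	pthread_mutex_lock(&defer_lock);
	/* the kernel hashes the request for revisit at this point */
	n->counted = count_me;
	if (count_me)
		defer_cnt++;	/* only queued requests occupy a slot */
	pthread_mutex_unlock(&defer_lock);
}

static void unhash_deferral(struct defer_node *n)
{
	pthread_mutex_lock(&defer_lock);
	if (n->counted) {	/* the kernel tests list_empty() instead */
		n->counted = 0;
		defer_cnt--;	/* never underflows for uncounted waiters */
	}
	pthread_mutex_unlock(&defer_lock);
}

int main(void)
{
	struct defer_node waiter = { 0 };	/* cache_wait_req() passes 0 */
	struct defer_node queued = { 0 };	/* cache_defer_req() passes 1 */

	setup_deferral(&waiter, 0);
	setup_deferral(&queued, 1);
	printf("counted deferrals: %d\n", defer_cnt);	/* 1 */

	unhash_deferral(&waiter);	/* harmless: it was never counted */
	unhash_deferral(&queued);
	printf("counted deferrals: %d\n", defer_cnt);	/* 0 */
	return 0;
}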
@@ -570,7 +568,7 @@ static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
 	dreq->revisit = cache_restart_thread;
 
-	setup_deferral(dreq, item);
+	setup_deferral(dreq, item, 0);
 
 	if (!test_bit(CACHE_PENDING, &item->flags) ||
 	    wait_for_completion_interruptible_timeout(
@@ -594,17 +592,36 @@ static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 	}
 }
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+static void cache_limit_defers(void)
 {
-	struct cache_deferred_req *dreq;
+	/* Make sure we haven't exceed the limit of allowed deferred
+	 * requests.
+	 */
+	struct cache_deferred_req *discard = NULL;
 
-	if (cache_defer_cnt >= DFR_MAX)
-		/* too much in the cache, randomly drop this one,
-		 * or continue and drop the oldest
-		 */
-		if (net_random()&1)
-			return;
+	if (cache_defer_cnt <= DFR_MAX)
+		return;
+
+	spin_lock(&cache_defer_lock);
 
+	/* Consider removing either the first or the last */
+	if (cache_defer_cnt > DFR_MAX) {
+		if (net_random() & 1)
+			discard = list_entry(cache_defer_list.next,
+					     struct cache_deferred_req, recent);
+		else
+			discard = list_entry(cache_defer_list.prev,
+					     struct cache_deferred_req, recent);
+		__unhash_deferred_req(discard);
+	}
+	spin_unlock(&cache_defer_lock);
+	if (discard)
+		discard->revisit(discard, 1);
+}
+
+static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+	struct cache_deferred_req *dreq;
 
 	if (req->thread_wait) {
 		cache_wait_req(req, item);
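cache_limit_defers() now concentrates the culling: an unlocked fast-path test, a re-check under the lock, a coin flip between the newest and the oldest victim, and the victim's revisit callback only after the lock is dropped. A self-contained userspace model of that shape, with rand() standing in for net_random(), a pthread mutex for the spinlock, and a toy array for the kernel list:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DFR_MAX 3	/* toy limit; the kernel's is larger */

struct dreq {
	void (*revisit)(struct dreq *dreq, int too_many);
	int id;
};

static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dreq *queue[16];	/* queue[0] oldest, queue[cnt-1] newest */
static int defer_cnt;

static void drop_notice(struct dreq *dreq, int too_many)
{
	printf("dreq %d revisited, too_many=%d\n", dreq->id, too_many);
}

static void limit_defers(void)
{
	struct dreq *discard = NULL;
	int i;

	if (defer_cnt <= DFR_MAX)	/* unlocked fast path, as in the patch */
		return;

	pthread_mutex_lock(&defer_lock);
	if (defer_cnt > DFR_MAX) {	/* re-check under the lock */
		if (rand() & 1)
			i = defer_cnt - 1;	/* drop the newest... */
		else
			i = 0;			/* ...or the oldest */
		discard = queue[i];
		/* swap-remove while locked; this toy ignores LRU order */
		queue[i] = queue[--defer_cnt];
	}
	pthread_mutex_unlock(&defer_lock);

	if (discard)
		discard->revisit(discard, 1);	/* callback runs unlocked */
}

int main(void)
{
	static struct dreq reqs[5];
	int i;

	for (i = 0; i < 5; i++) {
		reqs[i] = (struct dreq){ .revisit = drop_notice, .id = i };
		pthread_mutex_lock(&defer_lock);
		queue[defer_cnt++] = &reqs[i];	/* setup_deferral(..., 1) */
		pthread_mutex_unlock(&defer_lock);
		limit_defers();			/* cull after each insert */
	}
	printf("counted deferrals: %d (limit %d)\n", defer_cnt, DFR_MAX);
	return 0;
}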
@@ -614,12 +631,14 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
 	dreq = req->defer(req);
 	if (dreq == NULL)
 		return;
-	setup_deferral(dreq, item);
+	setup_deferral(dreq, item, 1);
 	if (!test_bit(CACHE_PENDING, &item->flags))
 		/* Bit could have been cleared before we managed to
 		 * set up the deferral, so need to revisit just in case
 		 */
 		cache_revisit_request(item);
+
+	cache_limit_defers();
 }
 
 static void cache_revisit_request(struct cache_head *item)
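The re-check of CACHE_PENDING after setup_deferral(), kept by the last hunk, closes a lost-wakeup window: the upcall can complete between the decision to defer and the moment the deferral is hashed, in which case nobody would ever revisit the request. A single-threaded model of that ordering argument using C11 atomics (every name below is hypothetical, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cache_pending;	/* stands in for CACHE_PENDING */
static atomic_bool deferral_visible;	/* "the deferral is hashed" */
static int revisits;

static void revisit_request(void)	/* cache_revisit_request() stand-in */
{
	revisits++;
}

static void defer_request(void)
{
	atomic_store(&deferral_visible, true);	/* publish first... */
	/* ...then re-check: if the completer already cleared the bit, it
	 * may have looked before our deferral was visible, so revisit
	 * ourselves rather than wait for a wakeup that will never come. */
	if (!atomic_load(&cache_pending))
		revisit_request();
}

static void complete_upcall(void)
{
	atomic_store(&cache_pending, false);
	if (atomic_load(&deferral_visible))
		revisit_request();
}

int main(void)
{
	atomic_store(&cache_pending, true);
	complete_upcall();	/* the completer wins the race... */
	defer_request();	/* ...but the re-check still fires */
	printf("revisits: %d\n", revisits);	/* >= 1: no lost wakeup */
	return 0;
}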