 net/sunrpc/cache.c    | 44 ++++++++++++++++++++++--------------------
 net/sunrpc/svc_xprt.c |  2 +-
 2 files changed, 25 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2c5297f245b4..18e5e8e6f622 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -520,10 +520,26 @@ static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
 	complete(&dr->completion);
 }
 
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+	list_del_init(&dreq->recent);
+	list_del_init(&dreq->hash);
+	cache_defer_cnt--;
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
+{
+	int hash = DFR_HASH(item);
+
+	list_add(&dreq->recent, &cache_defer_list);
+	if (cache_defer_hash[hash].next == NULL)
+		INIT_LIST_HEAD(&cache_defer_hash[hash]);
+	list_add(&dreq->hash, &cache_defer_hash[hash]);
+}
+
 static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
 	struct cache_deferred_req *dreq, *discard;
-	int hash = DFR_HASH(item);
 	struct thread_deferred_req sleeper;
 
 	if (cache_defer_cnt >= DFR_MAX) {
@@ -549,20 +565,14 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 
 	spin_lock(&cache_defer_lock);
 
-	list_add(&dreq->recent, &cache_defer_list);
-
-	if (cache_defer_hash[hash].next == NULL)
-		INIT_LIST_HEAD(&cache_defer_hash[hash]);
-	list_add(&dreq->hash, &cache_defer_hash[hash]);
+	__hash_deferred_req(dreq, item);
 
 	/* it is in, now maybe clean up */
 	discard = NULL;
 	if (++cache_defer_cnt > DFR_MAX) {
 		discard = list_entry(cache_defer_list.prev,
 				     struct cache_deferred_req, recent);
-		list_del_init(&discard->recent);
-		list_del_init(&discard->hash);
-		cache_defer_cnt--;
+		__unhash_deferred_req(discard);
 	}
 	spin_unlock(&cache_defer_lock);
 
@@ -584,9 +594,7 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 	 */
 	spin_lock(&cache_defer_lock);
 	if (!list_empty(&sleeper.handle.hash)) {
-		list_del_init(&sleeper.handle.recent);
-		list_del_init(&sleeper.handle.hash);
-		cache_defer_cnt--;
+		__unhash_deferred_req(&sleeper.handle);
 		spin_unlock(&cache_defer_lock);
 	} else {
 		/* cache_revisit_request already removed
@@ -632,9 +640,8 @@ static void cache_revisit_request(struct cache_head *item)
 			dreq = list_entry(lp, struct cache_deferred_req, hash);
 			lp = lp->next;
 			if (dreq->item == item) {
-				list_del_init(&dreq->hash);
-				list_move(&dreq->recent, &pending);
-				cache_defer_cnt--;
+				__unhash_deferred_req(dreq);
+				list_add(&dreq->recent, &pending);
 			}
 		}
 	}
@@ -657,11 +664,8 @@ void cache_clean_deferred(void *owner)
 	spin_lock(&cache_defer_lock);
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
-		if (dreq->owner == owner) {
-			list_del_init(&dreq->hash);
-			list_move(&dreq->recent, &pending);
-			cache_defer_cnt--;
-		}
+		if (dreq->owner == owner)
+			__unhash_deferred_req(dreq);
 	}
 	spin_unlock(&cache_defer_lock);
 
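
The cache.c hunks above are a pure refactoring: five open-coded copies of the same list manipulation collapse into the new __hash_deferred_req()/__unhash_deferred_req() pair, so the cache_defer_cnt accounting lives in exactly one place, always under cache_defer_lock. One subtlety the helpers preserve: __unhash_deferred_req() uses list_del_init() rather than list_del(), because cache_defer_req() later tests !list_empty(&sleeper.handle.hash) to decide whether cache_revisit_request() already dequeued the sleeper, and that test is only well defined on a re-initialized node. A minimal sketch of the idiom in <linux/list.h> terms (the struct and function names here are illustrative, not from the patch):

#include <linux/list.h>
#include <linux/types.h>

struct pending_item {
	struct list_head link;	/* doubles as an "am I queued?" flag */
};

static void dequeue_item(struct pending_item *it)
{
	/* list_del_init() unlinks the node and resets it to point at
	 * itself; plain list_del() would leave the pointers poisoned,
	 * making any later list_empty() test undefined. */
	list_del_init(&it->link);
}

static bool still_queued(struct pending_item *it)
{
	/* Well defined only because dequeue_item() used list_del_init(). */
	return !list_empty(&it->link);
}
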
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 8ff6840866fa..95fc3e8c51d6 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -665,7 +665,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 
 		/* As there is a shortage of threads and this request
-		 * had to be queue, don't allow the thread to wait so
+		 * had to be queued, don't allow the thread to wait so
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;
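
On the svc_xprt.c hunk: thread_wait is a timeout in jiffies, so 1*HZ caps the thread's wait for a cache update at one second, shorter than the default the comment alludes to. Presumably cache_defer_req() consumes it as the timeout on the sleeper's completion, roughly along these lines (an illustrative sketch, not shown in this diff):

	/* bounded sleep: woken early by cache_restart_thread() via
	 * complete(&dr->completion), otherwise the wait ends after
	 * req->thread_wait jiffies */
	wait_for_completion_interruptible_timeout(&sleeper.completion,
						  req->thread_wait);
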