path: root/net/sunrpc/cache.c
author    J. Bruce Fields <bfields@redhat.com>    2010-08-26 16:56:23 -0400
committer J. Bruce Fields <bfields@redhat.com>    2010-09-07 20:20:31 -0400
commit    3211af1119174fbe8b676422b74870cdd51d7314 (patch)
tree      d5ad448787a9f32cb30aed6c43bc1bbbac77c600 /net/sunrpc/cache.c
parent    6610f720e9e8103c22d1f1ccf8fbb695550a571f (diff)
svcrpc: cache deferral cleanup
Attempt to make obvious the first-try-sleeping-then-try-deferral logic by putting that logic into a top-level function that calls helpers.

Signed-off-by: J. Bruce Fields <bfields@redhat.com>
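As an aside, the reordered flow is easier to read outside diff form. Below is a minimal, self-contained user-space sketch of the control flow this patch introduces, not the kernel code itself: the struct names, the *_sketch helpers, and the main() driver are illustrative stand-ins, and only the ordering (first let the thread sleep via a cache_wait_req-style helper, then fall back to a real deferral via a setup_deferral-style helper) mirrors the patch shown below.

/* Stand-alone sketch (illustrative only; not the sunrpc code). */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct req_sketch  { int thread_wait; };     /* stand-in for struct cache_req */
struct item_sketch { bool still_pending; };  /* stand-in for struct cache_head */

/* Models cache_wait_req(): sleep on the item, report how the wait ended. */
static int cache_wait_req_sketch(struct req_sketch *req, struct item_sketch *item)
{
	(void)req;                  /* the real code sleeps up to req->thread_wait */
	if (item->still_pending)
		return -ETIMEDOUT;  /* woke up without an answer: caller may defer */
	return -EEXIST;             /* an answer appeared while we slept */
}

/* Models setup_deferral(): hand the request off to be revisited later. */
static int setup_deferral_sketch(struct item_sketch *item)
{
	printf("deferring request until item %p is updated\n", (void *)item);
	return 0;
}

/* Mirrors the new top-level cache_defer_req(): try sleeping, then deferral. */
static int cache_defer_req_sketch(struct req_sketch *req, struct item_sketch *item)
{
	if (req->thread_wait) {
		int ret = cache_wait_req_sketch(req, item);
		if (ret != -ETIMEDOUT)
			return ret;  /* answered (or failed); no deferral needed */
	}
	return setup_deferral_sketch(item);  /* fall back to a real deferral */
}

int main(void)
{
	struct req_sketch  req  = { .thread_wait = 1 };
	struct item_sketch item = { .still_pending = true };

	printf("cache_defer_req_sketch() returned %d\n",
	       cache_defer_req_sketch(&req, &item));
	return 0;
}

Built with an ordinary C compiler, the sketch takes the fallback deferral path because the simulated item is still pending after the wait; flip still_pending to false to see the -EEXIST early return.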
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--	net/sunrpc/cache.c	143
1 file changed, 79 insertions, 64 deletions
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 18e5e8e6f62..da872f9fe1e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -509,17 +509,6 @@ static LIST_HEAD(cache_defer_list);
 static struct list_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
-struct thread_deferred_req {
-	struct cache_deferred_req handle;
-	struct completion completion;
-};
-static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
-{
-	struct thread_deferred_req *dr =
-		container_of(dreq, struct thread_deferred_req, handle);
-	complete(&dr->completion);
-}
-
 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 {
 	list_del_init(&dreq->recent);
@@ -537,29 +526,9 @@ static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_he
 	list_add(&dreq->hash, &cache_defer_hash[hash]);
 }
 
-static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
 {
-	struct cache_deferred_req *dreq, *discard;
-	struct thread_deferred_req sleeper;
-
-	if (cache_defer_cnt >= DFR_MAX) {
-		/* too much in the cache, randomly drop this one,
-		 * or continue and drop the oldest below
-		 */
-		if (net_random()&1)
-			return -ENOMEM;
-	}
-	if (req->thread_wait) {
-		dreq = &sleeper.handle;
-		sleeper.completion =
-			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
-		dreq->revisit = cache_restart_thread;
-	} else
-		dreq = req->defer(req);
-
- retry:
-	if (dreq == NULL)
-		return -ENOMEM;
+	struct cache_deferred_req *discard;
 
 	dreq->item = item;
 
@@ -585,42 +554,88 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		cache_revisit_request(item);
 		return -EAGAIN;
 	}
+	return 0;
+}
 
-	if (dreq == &sleeper.handle) {
-		if (wait_for_completion_interruptible_timeout(
-			&sleeper.completion, req->thread_wait) <= 0) {
-			/* The completion wasn't completed, so we need
-			 * to clean up
-			 */
-			spin_lock(&cache_defer_lock);
-			if (!list_empty(&sleeper.handle.hash)) {
-				__unhash_deferred_req(&sleeper.handle);
-				spin_unlock(&cache_defer_lock);
-			} else {
-				/* cache_revisit_request already removed
-				 * this from the hash table, but hasn't
-				 * called ->revisit yet. It will very soon
-				 * and we need to wait for it.
-				 */
-				spin_unlock(&cache_defer_lock);
-				wait_for_completion(&sleeper.completion);
-			}
-		}
-		if (test_bit(CACHE_PENDING, &item->flags)) {
-			/* item is still pending, try request
-			 * deferral
-			 */
-			dreq = req->defer(req);
-			goto retry;
-		}
-		/* only return success if we actually deferred the
-		 * request. In this case we waited until it was
-		 * answered so no deferral has happened - rather
-		 * an answer already exists.
-		 */
-		return -EEXIST;
-	}
-	return 0;
+struct thread_deferred_req {
+	struct cache_deferred_req handle;
+	struct completion completion;
+};
+
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+	struct thread_deferred_req *dr =
+		container_of(dreq, struct thread_deferred_req, handle);
+	complete(&dr->completion);
+}
+
+static int cache_wait_req(struct cache_req *req, struct cache_head *item)
+{
+	struct thread_deferred_req sleeper;
+	struct cache_deferred_req *dreq = &sleeper.handle;
+	int ret;
+
+	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+	dreq->revisit = cache_restart_thread;
+
+	ret = setup_deferral(dreq, item);
+	if (ret)
+		return ret;
+
+	if (wait_for_completion_interruptible_timeout(
+			&sleeper.completion, req->thread_wait) <= 0) {
+		/* The completion wasn't completed, so we need
+		 * to clean up
+		 */
+		spin_lock(&cache_defer_lock);
+		if (!list_empty(&sleeper.handle.hash)) {
+			__unhash_deferred_req(&sleeper.handle);
+			spin_unlock(&cache_defer_lock);
+		} else {
+			/* cache_revisit_request already removed
+			 * this from the hash table, but hasn't
+			 * called ->revisit yet. It will very soon
+			 * and we need to wait for it.
+			 */
+			spin_unlock(&cache_defer_lock);
+			wait_for_completion(&sleeper.completion);
+		}
+	}
+	if (test_bit(CACHE_PENDING, &item->flags)) {
+		/* item is still pending, try request
+		 * deferral
+		 */
+		return -ETIMEDOUT;
+	}
+	/* only return success if we actually deferred the
+	 * request. In this case we waited until it was
+	 * answered so no deferral has happened - rather
+	 * an answer already exists.
+	 */
+	return -EEXIST;
+}
+
+static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+	struct cache_deferred_req *dreq;
+	int ret;
+
+	if (cache_defer_cnt >= DFR_MAX) {
+		/* too much in the cache, randomly drop this one,
+		 * or continue and drop the oldest
+		 */
+		if (net_random()&1)
+			return -ENOMEM;
+	}
+	if (req->thread_wait) {
+		ret = cache_wait_req(req, item);
+		if (ret != -ETIMEDOUT)
+			return ret;
+	}
+	dreq = req->defer(req);
+	if (dreq == NULL)
+		return -ENOMEM;
+	return setup_deferral(dreq, item);
 }
 
 static void cache_revisit_request(struct cache_head *item)