diff options
author    Trond Myklebust <trond.myklebust@primarydata.com>  2014-08-03 13:03:11 -0400
committer J. Bruce Fields <bfields@redhat.com>               2014-08-17 12:00:11 -0400
commit    a4aa8054a60c545f100826271ac9f04c34bf828d (patch)
tree      9c9c190c54eecb09aa03e4645528663bc02383b3 /net/sunrpc/svc_xprt.c
parent    983c684466e02b21f83c025ea539deee6c0aeac0 (diff)
SUNRPC: Fix broken kthread_should_stop test in svc_get_next_xprt
We should definitely not be exiting svc_get_next_xprt() with the
thread enqueued. Fix this by ensuring that we fall through to
the dequeue.
Also move the test itself outside the spin lock protected section.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--  net/sunrpc/svc_xprt.c | 31 +++++-----------------------
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index faaf2b46273b..5eb6f32df3e5 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -632,7 +632,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) | |||
632 | { | 632 | { |
633 | struct svc_xprt *xprt; | 633 | struct svc_xprt *xprt; |
634 | struct svc_pool *pool = rqstp->rq_pool; | 634 | struct svc_pool *pool = rqstp->rq_pool; |
635 | long time_left; | 635 | long time_left = 0; |
636 | 636 | ||
637 | /* Normally we will wait up to 5 seconds for any required | 637 | /* Normally we will wait up to 5 seconds for any required |
638 | * cache information to be provided. | 638 | * cache information to be provided. |
@@ -665,30 +665,19 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) | |||
665 | 665 | ||
666 | /* No data pending. Go to sleep */ | 666 | /* No data pending. Go to sleep */ |
667 | svc_thread_enqueue(pool, rqstp); | 667 | svc_thread_enqueue(pool, rqstp); |
668 | |||
669 | /* | ||
670 | * checking kthread_should_stop() here allows us to avoid | ||
671 | * locking and signalling when stopping kthreads that call | ||
672 | * svc_recv. If the thread has already been woken up, then | ||
673 | * we can exit here without sleeping. If not, then it | ||
674 | * it'll be woken up quickly during the schedule_timeout | ||
675 | */ | ||
676 | if (kthread_should_stop()) { | ||
677 | set_current_state(TASK_RUNNING); | ||
678 | xprt = ERR_PTR(-EINTR); | ||
679 | goto out; | ||
680 | } | ||
681 | |||
682 | spin_unlock_bh(&pool->sp_lock); | 668 | spin_unlock_bh(&pool->sp_lock); |
683 | 669 | ||
684 | time_left = schedule_timeout(timeout); | 670 | if (!(signalled() || kthread_should_stop())) { |
685 | __set_current_state(TASK_RUNNING); | 671 | time_left = schedule_timeout(timeout); |
672 | __set_current_state(TASK_RUNNING); | ||
686 | 673 | ||
687 | try_to_freeze(); | 674 | try_to_freeze(); |
688 | 675 | ||
689 | xprt = rqstp->rq_xprt; | 676 | xprt = rqstp->rq_xprt; |
690 | if (xprt != NULL) | 677 | if (xprt != NULL) |
691 | return xprt; | 678 | return xprt; |
679 | } else | ||
680 | __set_current_state(TASK_RUNNING); | ||
692 | 681 | ||
693 | spin_lock_bh(&pool->sp_lock); | 682 | spin_lock_bh(&pool->sp_lock); |
694 | if (!time_left) | 683 | if (!time_left) |