author:    Jeff Layton <jlayton@redhat.com>    2008-02-07 16:34:54 -0500
committer: J. Bruce Fields <bfields@citi.umich.edu>    2008-04-23 16:13:36 -0400
commit:    7086721f9c8b59331e164e534f588e075cfd9d3f
tree:      593ddc28b30dba189069579b82ed81996d965df2 /net/sunrpc/svc_xprt.c
parent:    23d42ee278de1552d67daef5774ba59ff30925db
SUNRPC: have svc_recv() check kthread_should_stop()
When using kthreads that call into svc_recv, we want to make sure that
they do not block there for a long time when we're trying to take down
the kthread.
This patch changes svc_recv() to check kthread_should_stop() at the same
places where it checks signalled(), and also just before svc_recv() tries
to schedule(). By checking it just after setting the task state, we avoid
needing any locking or signalling to ensure the thread doesn't block for
a long time.
There's still a chance of a 500ms sleep if alloc_page() fails, but
that should be a rare occurrence and isn't a terribly long time in
the context of a kthread being taken down.
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
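[Editorial aside: the reason the stop check is safe without extra locking is the ordering guarantee of kthread_stop(): it sets the stop flag first and only then wakes the thread, so a check made after set_current_state(TASK_INTERRUPTIBLE) either sees the flag or is followed by a wakeup that makes the subsequent schedule_timeout() return immediately. Below is a minimal, self-contained sketch of that generic pattern; the module and function names (stopdemo, worker_fn) are illustrative only and are not part of this patch.]

```c
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static struct task_struct *worker;

/* Illustrative kthread body using the same stop-check ordering as the patch. */
static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/*
		 * Mark ourselves sleeping *before* testing the stop flag.
		 * kthread_stop() sets the flag and then wakes the task, so
		 * either the test below sees the flag, or the wakeup lands
		 * after set_current_state() and schedule_timeout() returns
		 * right away.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule_timeout(HZ);	/* sleep for up to one second */

		/* ... do one unit of work here ... */
	}
	return 0;
}

static int __init stopdemo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "stopdemo");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	return 0;
}

static void __exit stopdemo_exit(void)
{
	kthread_stop(worker);	/* sets the flag, wakes the thread, waits for exit */
}

module_init(stopdemo_init);
module_exit(stopdemo_exit);
MODULE_LICENSE("GPL");
```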
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
 net/sunrpc/svc_xprt.c | 24
 1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 332eb47539e1..2e5d43c39142 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -587,6 +588,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			struct page *p = alloc_page(GFP_KERNEL);
 			if (!p) {
 				int j = msecs_to_jiffies(500);
+				if (kthread_should_stop())
+					return -EINTR;
 				schedule_timeout_uninterruptible(j);
 			}
 			rqstp->rq_pages[i] = p;
@@ -607,7 +610,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
 	try_to_freeze();
 	cond_resched();
-	if (signalled())
+	if (signalled() || kthread_should_stop())
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
@@ -626,6 +629,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		 * to bring down the daemons ...
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+
+		/*
+		 * checking kthread_should_stop() here allows us to avoid
+		 * locking and signalling when stopping kthreads that call
+		 * svc_recv. If the thread has already been woken up, then
+		 * we can exit here without sleeping. If not, then it'll
+		 * be woken up quickly during the schedule_timeout
+		 */
+		if (kthread_should_stop()) {
+			set_current_state(TASK_RUNNING);
+			spin_unlock_bh(&pool->sp_lock);
+			return -EINTR;
+		}
+
 		add_wait_queue(&rqstp->rq_wait, &wait);
 		spin_unlock_bh(&pool->sp_lock);
 
@@ -641,7 +658,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			svc_thread_dequeue(pool, rqstp);
 			spin_unlock_bh(&pool->sp_lock);
 			dprintk("svc: server %p, no data yet\n", rqstp);
-			return signalled()? -EINTR : -EAGAIN;
+			if (signalled() || kthread_should_stop())
+				return -EINTR;
+			else
+				return -EAGAIN;
 		}
 	}
 	spin_unlock_bh(&pool->sp_lock);
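[Editorial aside: on the caller side, the new -EINTR return is what lets a kthread-based service loop exit promptly once kthread_stop() has been called. The loop below is a hedged illustration only; the function name my_svc_thread, the one-hour timeout, and the dispatch comment are hypothetical and not taken from this series. svc_recv()'s prototype is copied from the hunk headers above.]

```c
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sunrpc/svc.h>

/* Prototype as shown in the hunk headers above; normally pulled in
 * from the sunrpc headers. */
int svc_recv(struct svc_rqst *rqstp, long timeout);

/* Hypothetical kthread body for one RPC service thread. */
static int my_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;
	int err;

	while (!kthread_should_stop()) {
		/*
		 * With this patch, svc_recv() returns -EINTR promptly once
		 * kthread_stop() has been called, instead of sleeping for
		 * up to the full timeout.
		 */
		err = svc_recv(rqstp, 60 * 60 * HZ);
		if (err == -EINTR)
			break;
		if (err == -EAGAIN)
			continue;

		/* ... dispatch the received request, e.g. via svc_process() ... */
	}
	return 0;
}
```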