aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/sunrpc/svc_xprt.c24
1 files changed, 22 insertions, 2 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 332eb47539e1..2e5d43c39142 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/file.h> 19#include <linux/file.h>
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/kthread.h>
21#include <net/sock.h> 22#include <net/sock.h>
22#include <net/checksum.h> 23#include <net/checksum.h>
23#include <net/ip.h> 24#include <net/ip.h>
@@ -587,6 +588,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
587 struct page *p = alloc_page(GFP_KERNEL); 588 struct page *p = alloc_page(GFP_KERNEL);
588 if (!p) { 589 if (!p) {
589 int j = msecs_to_jiffies(500); 590 int j = msecs_to_jiffies(500);
591 if (kthread_should_stop())
592 return -EINTR;
590 schedule_timeout_uninterruptible(j); 593 schedule_timeout_uninterruptible(j);
591 } 594 }
592 rqstp->rq_pages[i] = p; 595 rqstp->rq_pages[i] = p;
@@ -607,7 +610,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
607 610
608 try_to_freeze(); 611 try_to_freeze();
609 cond_resched(); 612 cond_resched();
610 if (signalled()) 613 if (signalled() || kthread_should_stop())
611 return -EINTR; 614 return -EINTR;
612 615
613 spin_lock_bh(&pool->sp_lock); 616 spin_lock_bh(&pool->sp_lock);
@@ -626,6 +629,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
626 * to bring down the daemons ... 629 * to bring down the daemons ...
627 */ 630 */
628 set_current_state(TASK_INTERRUPTIBLE); 631 set_current_state(TASK_INTERRUPTIBLE);
632
633 /*
634 * checking kthread_should_stop() here allows us to avoid
635 * locking and signalling when stopping kthreads that call
636 * svc_recv. If the thread has already been woken up, then
637 * we can exit here without sleeping. If not, then
638 * it'll be woken up quickly during the schedule_timeout
639 */
640 if (kthread_should_stop()) {
641 set_current_state(TASK_RUNNING);
642 spin_unlock_bh(&pool->sp_lock);
643 return -EINTR;
644 }
645
629 add_wait_queue(&rqstp->rq_wait, &wait); 646 add_wait_queue(&rqstp->rq_wait, &wait);
630 spin_unlock_bh(&pool->sp_lock); 647 spin_unlock_bh(&pool->sp_lock);
631 648
@@ -641,7 +658,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
641 svc_thread_dequeue(pool, rqstp); 658 svc_thread_dequeue(pool, rqstp);
642 spin_unlock_bh(&pool->sp_lock); 659 spin_unlock_bh(&pool->sp_lock);
643 dprintk("svc: server %p, no data yet\n", rqstp); 660 dprintk("svc: server %p, no data yet\n", rqstp);
644 return signalled()? -EINTR : -EAGAIN; 661 if (signalled() || kthread_should_stop())
662 return -EINTR;
663 else
664 return -EAGAIN;
645 } 665 }
646 } 666 }
647 spin_unlock_bh(&pool->sp_lock); 667 spin_unlock_bh(&pool->sp_lock);