Diffstat (limited to 'net/sunrpc/svc_xprt.c')
 net/sunrpc/svc_xprt.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 332eb47539e1..d8e8d79a8451 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -586,8 +587,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
 			if (!p) {
-				int j = msecs_to_jiffies(500);
-				schedule_timeout_uninterruptible(j);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (signalled() || kthread_should_stop()) {
+					set_current_state(TASK_RUNNING);
+					return -EINTR;
+				}
+				schedule_timeout(msecs_to_jiffies(500));
 			}
 			rqstp->rq_pages[i] = p;
 		}
@@ -607,7 +612,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
 	try_to_freeze();
 	cond_resched();
-	if (signalled())
+	if (signalled() || kthread_should_stop())
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
@@ -626,6 +631,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		 * to bring down the daemons ...
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+
+		/*
+		 * checking kthread_should_stop() here allows us to avoid
+		 * locking and signalling when stopping kthreads that call
+		 * svc_recv. If the thread has already been woken up, then
+		 * we can exit here without sleeping. If not, then
+		 * it'll be woken up quickly during the schedule_timeout
+		 */
+		if (kthread_should_stop()) {
+			set_current_state(TASK_RUNNING);
+			spin_unlock_bh(&pool->sp_lock);
+			return -EINTR;
+		}
+
 		add_wait_queue(&rqstp->rq_wait, &wait);
 		spin_unlock_bh(&pool->sp_lock);
 
@@ -641,7 +660,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			svc_thread_dequeue(pool, rqstp);
 			spin_unlock_bh(&pool->sp_lock);
 			dprintk("svc: server %p, no data yet\n", rqstp);
-			return signalled()? -EINTR : -EAGAIN;
+			if (signalled() || kthread_should_stop())
+				return -EINTR;
+			else
+				return -EAGAIN;
 		}
 	}
 	spin_unlock_bh(&pool->sp_lock);
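
The pattern the patch adopts is the standard stop-aware sleep idiom for
kthreads: set the task state to TASK_INTERRUPTIBLE first, then test
kthread_should_stop(), and only then call schedule_timeout(). With that
ordering, a kthread_stop() that races with the check still wakes the
thread promptly instead of the wakeup being lost. Below is a minimal,
self-contained sketch of the same idiom in a module context; it is
illustrative only, and the example_* names are hypothetical, not part of
this patch.

/*
 * Illustrative sketch -- not part of the patch above.
 */
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *example_task;	/* hypothetical */

static int example_fn(void *data)
{
	for (;;) {
		/* ... do one unit of work ... */

		/*
		 * Mark the task sleeping *before* the stop check, so a
		 * kthread_stop() racing with the check still terminates
		 * the schedule_timeout() promptly.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		/* sleep up to 500ms, or until woken by kthread_stop() */
		schedule_timeout(msecs_to_jiffies(500));
	}
	return 0;
}

static int __init example_init(void)
{
	example_task = kthread_run(example_fn, NULL, "example_kthread");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void __exit example_exit(void)
{
	/* wakes example_fn and waits for it to return */
	kthread_stop(example_task);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

This is the same reasoning as the comment added in svc_recv(): because
the task state is set before kthread_should_stop() is tested, a stopping
thread can bail out without sleeping if it has already been woken, and no
extra locking or signalling is needed to bring the daemons down.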