Diffstat (limited to 'net/sunrpc/svc_xprt.c')
 -rw-r--r--  net/sunrpc/svc_xprt.c  | 32
 1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 08e49d1e17b3..faaf2b46273b 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -348,8 +348,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 
 	cpu = get_cpu();
 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-	put_cpu();
-
 	spin_lock_bh(&pool->sp_lock);
 
 	if (!list_empty(&pool->sp_threads) &&
@@ -382,10 +380,15 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 			printk(KERN_ERR
 				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
 				rqstp, rqstp->rq_xprt);
-		rqstp->rq_xprt = xprt;
+		/* Note the order of the following 3 lines:
+		 * We want to assign xprt to rqstp->rq_xprt only _after_
+		 * we've woken up the process, so that we don't race with
+		 * the lockless check in svc_get_next_xprt().
+		 */
 		svc_xprt_get(xprt);
+		wake_up_process(rqstp->rq_task);
+		rqstp->rq_xprt = xprt;
 		pool->sp_stats.threads_woken++;
-		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
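
For reference, the wake-up branch of svc_xprt_do_enqueue() as it reads after this hunk, condensed from the new-side lines above (a sketch of just this branch, not the full function). As the new comment says, the transport reference is taken and the thread is woken before rqstp->rq_xprt is assigned, so the assignment is published only after wake_up_process() rather than before it:

		/* wake-up branch, post-patch (condensed sketch) */
		svc_xprt_get(xprt);                 /* pin the transport for the woken thread */
		wake_up_process(rqstp->rq_task);    /* wake the server thread's task directly */
		rqstp->rq_xprt = xprt;              /* published only after the wake-up */
		pool->sp_stats.threads_woken++;
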
@@ -394,6 +397,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 
 out_unlock:
 	spin_unlock_bh(&pool->sp_lock);
+	put_cpu();
 }
 
 /*
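
Taken together with the first hunk, the get_cpu()/put_cpu() pair now brackets the whole enqueue path instead of just the pool lookup, so preemption stays disabled until the transport has been handed to a thread or queued. A condensed skeleton of svc_xprt_do_enqueue() after both hunks (a sketch assembled from the context lines above; the body of the if/else is elided):

	cpu = get_cpu();	/* disables preemption */
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	spin_lock_bh(&pool->sp_lock);
	/* ... wake an idle thread, or list_add_tail() the transport ... */
out_unlock:
	spin_unlock_bh(&pool->sp_lock);
	put_cpu();		/* re-enables preemption */
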
@@ -509,7 +513,7 @@ void svc_wake_up(struct svc_serv *serv)
 		svc_thread_dequeue(pool, rqstp);
 		rqstp->rq_xprt = NULL;
 		 */
-		wake_up(&rqstp->rq_wait);
+		wake_up_process(rqstp->rq_task);
 	} else
 		pool->sp_task_pending = 1;
 	spin_unlock_bh(&pool->sp_lock);
@@ -628,7 +632,6 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
 	struct svc_xprt *xprt;
 	struct svc_pool *pool = rqstp->rq_pool;
-	DECLARE_WAITQUEUE(wait, current);
 	long time_left;
 
 	/* Normally we will wait up to 5 seconds for any required
@@ -654,15 +657,15 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		xprt = ERR_PTR(-EAGAIN);
 		goto out;
 	}
-	/* No data pending. Go to sleep */
-	svc_thread_enqueue(pool, rqstp);
-
 	/*
 	 * We have to be able to interrupt this wait
 	 * to bring down the daemons ...
 	 */
 	set_current_state(TASK_INTERRUPTIBLE);
 
+	/* No data pending. Go to sleep */
+	svc_thread_enqueue(pool, rqstp);
+
 	/*
 	 * checking kthread_should_stop() here allows us to avoid
 	 * locking and signalling when stopping kthreads that call
@@ -676,14 +679,13 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		goto out;
 	}
 
-	add_wait_queue(&rqstp->rq_wait, &wait);
 	spin_unlock_bh(&pool->sp_lock);
 
 	time_left = schedule_timeout(timeout);
+	__set_current_state(TASK_RUNNING);
 
 	try_to_freeze();
 
-	remove_wait_queue(&rqstp->rq_wait, &wait);
 	xprt = rqstp->rq_xprt;
 	if (xprt != NULL)
 		return xprt;
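
The three hunks above form the sleep side of the same handshake. Condensed from their new-side lines, svc_get_next_xprt() now sets its task state and queues itself on the pool before dropping the lock, sleeps via schedule_timeout() instead of a wait queue, and then performs the lockless rq_xprt read that the enqueue-side comment refers to (a sketch only; the kthread_should_stop() handling and error paths in between are elided):

	set_current_state(TASK_INTERRUPTIBLE);

	/* No data pending. Go to sleep */
	svc_thread_enqueue(pool, rqstp);

	spin_unlock_bh(&pool->sp_lock);

	time_left = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	try_to_freeze();

	/* lockless check: set by svc_xprt_do_enqueue() only after wake_up_process() */
	xprt = rqstp->rq_xprt;
	if (xprt != NULL)
		return xprt;
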
@@ -786,10 +788,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		printk(KERN_ERR
 			"svc_recv: service %p, transport not NULL!\n",
 			rqstp);
-	if (waitqueue_active(&rqstp->rq_wait))
-		printk(KERN_ERR
-			"svc_recv: service %p, wait queue active!\n",
-			 rqstp);
+
+	/* Make sure the task pointer is set! */
+	if (WARN_ON_ONCE(!rqstp->rq_task))
+		rqstp->rq_task = current_task;
 
 	err = svc_alloc_arg(rqstp);
 	if (err)
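
With the wait queue gone, rqstp->rq_task is the only handle the enqueue and wake-up paths above have left for waking a thread, so the old waitqueue_active() sanity check in svc_recv() is replaced by one that makes sure the task pointer is populated, falling back under WARN_ON_ONCE() to the current task if it is not (the pointer is presumably filled in when the server thread is set up; that code is not shown in this diff).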