Diffstat (limited to 'net')

 -rw-r--r--   net/sunrpc/svc_xprt.c   87

 1 file changed, 41 insertions, 46 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c5559ca1a..5a75d23645c8 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -303,6 +303,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
         list_del(&rqstp->rq_list);
 }
 
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+        if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+                return true;
+        if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+                return xprt->xpt_ops->xpo_has_wspace(xprt);
+        return false;
+}
+
 /*
  * Queue up a transport with data pending. If there are idle nfsd
  * processes, wake 'em up.
@@ -315,8 +324,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
         struct svc_rqst *rqstp;
         int cpu;
 
-        if (!(xprt->xpt_flags &
-              ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+        if (!svc_xprt_has_something_to_do(xprt))
                 return;
 
         cpu = get_cpu();
@@ -346,25 +354,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
         BUG_ON(xprt->xpt_pool != NULL);
         xprt->xpt_pool = pool;
 
-        /* Handle pending connection */
-        if (test_bit(XPT_CONN, &xprt->xpt_flags))
-                goto process;
-
-        /* Handle close in-progress */
-        if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
-                goto process;
-
-        /* Check if we have space to reply to a request */
-        if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
-                /* Don't enqueue while not enough space for reply */
-                dprintk("svc: no write space, transport %p not enqueued\n",
-                        xprt);
-                xprt->xpt_pool = NULL;
-                clear_bit(XPT_BUSY, &xprt->xpt_flags);
-                goto out_unlock;
-        }
-
- process:
         if (!list_empty(&pool->sp_threads)) {
                 rqstp = list_entry(pool->sp_threads.next,
                                    struct svc_rqst,
@@ -722,7 +711,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
         if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                 dprintk("svc_recv: found XPT_CLOSE\n");
                 svc_delete_xprt(xprt);
-        } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+                /* Leave XPT_BUSY set on the dead xprt: */
+                goto out;
+        }
+        if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
                 struct svc_xprt *newxpt;
                 newxpt = xprt->xpt_ops->xpo_accept(xprt);
                 if (newxpt) {
@@ -747,28 +739,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                         spin_unlock_bh(&serv->sv_lock);
                         svc_xprt_received(newxpt);
                 }
-                svc_xprt_received(xprt);
-        } else {
+        } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
                 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
                         rqstp, pool->sp_id, xprt,
                         atomic_read(&xprt->xpt_ref.refcount));
                 rqstp->rq_deferred = svc_deferred_dequeue(xprt);
-                if (rqstp->rq_deferred) {
-                        svc_xprt_received(xprt);
+                if (rqstp->rq_deferred)
                         len = svc_deferred_recv(rqstp);
-                } else {
+                else
                         len = xprt->xpt_ops->xpo_recvfrom(rqstp);
-                        svc_xprt_received(xprt);
-                }
                 dprintk("svc: got len=%d\n", len);
         }
+        svc_xprt_received(xprt);
 
         /* No data, incomplete (TCP) read, or accept() */
-        if (len == 0 || len == -EAGAIN) {
-                rqstp->rq_res.len = 0;
-                svc_xprt_release(rqstp);
-                return -EAGAIN;
-        }
+        if (len == 0 || len == -EAGAIN)
+                goto out;
+
         clear_bit(XPT_OLD, &xprt->xpt_flags);
 
         rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -777,6 +764,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
         if (serv->sv_stats)
                 serv->sv_stats->netcnt++;
         return len;
+out:
+        rqstp->rq_res.len = 0;
+        svc_xprt_release(rqstp);
+        return -EAGAIN;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -935,7 +926,12 @@ void svc_close_xprt(struct svc_xprt *xprt)
         if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                 /* someone else will have to effect the close */
                 return;
-
+        /*
+         * We expect svc_close_xprt() to work even when no threads are
+         * running (e.g., while configuring the server before starting
+         * any threads), so if the transport isn't busy, we delete
+         * it ourself:
+         */
         svc_delete_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -945,16 +941,16 @@ void svc_close_all(struct list_head *xprt_list)
         struct svc_xprt *xprt;
         struct svc_xprt *tmp;
 
+        /*
+         * The server is shutting down, and no more threads are running.
+         * svc_xprt_enqueue() might still be running, but at worst it
+         * will re-add the xprt to sp_sockets, which will soon get
+         * freed. So we don't bother with any more locking, and don't
+         * leave the close to the (nonexistent) server threads:
+         */
         list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
                 set_bit(XPT_CLOSE, &xprt->xpt_flags);
-                if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
-                        /* Waiting to be processed, but no threads left,
-                         * So just remove it from the waiting list
-                         */
-                        list_del_init(&xprt->xpt_ready);
-                        clear_bit(XPT_BUSY, &xprt->xpt_flags);
-                }
-                svc_close_xprt(xprt);
+                svc_delete_xprt(xprt);
         }
 }
 
@@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
         if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
                 return NULL;
         spin_lock(&xprt->xpt_lock);
-        clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
         if (!list_empty(&xprt->xpt_deferred)) {
                 dr = list_entry(xprt->xpt_deferred.next,
                                 struct svc_deferred_req,
                                 handle.recent);
                 list_del_init(&dr->handle.recent);
-                set_bit(XPT_DEFERRED, &xprt->xpt_flags);
-        }
+        } else
+                clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
         spin_unlock(&xprt->xpt_lock);
         return dr;
 }
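
The behavioural core of the svc_xprt_enqueue() change above is that a transport is only worth waking a server thread for when it has a connection or close event pending, or when it has data or deferred work *and* there is write space for a reply. Below is a minimal standalone sketch of that readiness check, assuming simplified flag values and a stubbed write-space callback in place of the real struct svc_xprt and xpo_has_wspace machinery; it is illustrative only, not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's XPT_* bit numbers (illustrative only). */
enum { XPT_CONN = 1, XPT_DATA = 2, XPT_CLOSE = 3, XPT_DEFERRED = 4 };

struct fake_xprt {
        unsigned long flags;
        bool (*has_wspace)(struct fake_xprt *xprt);
};

/*
 * Mirrors the logic of the new svc_xprt_has_something_to_do() helper:
 * connection and close events always justify a wakeup, while data or
 * deferred work only matters if a reply could actually be written.
 */
static bool has_something_to_do(struct fake_xprt *xprt)
{
        if (xprt->flags & ((1UL << XPT_CONN) | (1UL << XPT_CLOSE)))
                return true;
        if (xprt->flags & ((1UL << XPT_DATA) | (1UL << XPT_DEFERRED)))
                return xprt->has_wspace(xprt);
        return false;
}

/* Stub callback simulating a transport whose send buffer is full. */
static bool no_wspace(struct fake_xprt *xprt)
{
        (void)xprt;
        return false;
}

int main(void)
{
        struct fake_xprt xprt = { .flags = 1UL << XPT_DATA, .has_wspace = no_wspace };

        /* Data pending but no write space: not worth waking a thread. */
        printf("data, no wspace: %d\n", has_something_to_do(&xprt));

        /* A close request always qualifies, regardless of write space. */
        xprt.flags |= 1UL << XPT_CLOSE;
        printf("close pending:   %d\n", has_something_to_do(&xprt));
        return 0;
}

Run standalone, the first call prints 0 and the second prints 1, mirroring why the XPT_CONN/XPT_CLOSE cases in the new helper bypass the xpo_has_wspace() check while the XPT_DATA/XPT_DEFERRED cases depend on it.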