diff options
author | Trond Myklebust <trond.myklebust@hammerspace.com> | 2019-05-02 11:21:08 -0400 |
---|---|---|
committer | Trond Myklebust <trond.myklebust@hammerspace.com> | 2019-07-06 14:54:48 -0400 |
commit | b5e924191f87239e555f3ef3b8d8e697bb95e7dc (patch) | |
tree | 1e7a2207cc4b5c6f794dba99d2618e0803d1a31f | |
parent | 4f8943f8088348ec01456b075d44ad19dce3d698 (diff) |
SUNRPC: Remove the bh-safe lock requirement on xprt->transport_lock
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
-rw-r--r-- | net/sunrpc/xprt.c | 61 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/rpc_rdma.c | 4 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 4 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 8 | ||||
-rw-r--r-- | net/sunrpc/xprtsock.c | 23 |
5 files changed, 47 insertions(+), 53 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f6c82b1651e7..8d41fcf25650 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -302,9 +302,9 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | |||
302 | 302 | ||
303 | if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task) | 303 | if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task) |
304 | return 1; | 304 | return 1; |
305 | spin_lock_bh(&xprt->transport_lock); | 305 | spin_lock(&xprt->transport_lock); |
306 | retval = xprt->ops->reserve_xprt(xprt, task); | 306 | retval = xprt->ops->reserve_xprt(xprt, task); |
307 | spin_unlock_bh(&xprt->transport_lock); | 307 | spin_unlock(&xprt->transport_lock); |
308 | return retval; | 308 | return retval; |
309 | } | 309 | } |
310 | 310 | ||
@@ -381,9 +381,9 @@ static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *ta | |||
381 | { | 381 | { |
382 | if (xprt->snd_task != task) | 382 | if (xprt->snd_task != task) |
383 | return; | 383 | return; |
384 | spin_lock_bh(&xprt->transport_lock); | 384 | spin_lock(&xprt->transport_lock); |
385 | xprt->ops->release_xprt(xprt, task); | 385 | xprt->ops->release_xprt(xprt, task); |
386 | spin_unlock_bh(&xprt->transport_lock); | 386 | spin_unlock(&xprt->transport_lock); |
387 | } | 387 | } |
388 | 388 | ||
389 | /* | 389 | /* |
@@ -435,9 +435,9 @@ xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) | |||
435 | 435 | ||
436 | if (req->rq_cong) | 436 | if (req->rq_cong) |
437 | return true; | 437 | return true; |
438 | spin_lock_bh(&xprt->transport_lock); | 438 | spin_lock(&xprt->transport_lock); |
439 | ret = __xprt_get_cong(xprt, req) != 0; | 439 | ret = __xprt_get_cong(xprt, req) != 0; |
440 | spin_unlock_bh(&xprt->transport_lock); | 440 | spin_unlock(&xprt->transport_lock); |
441 | return ret; | 441 | return ret; |
442 | } | 442 | } |
443 | EXPORT_SYMBOL_GPL(xprt_request_get_cong); | 443 | EXPORT_SYMBOL_GPL(xprt_request_get_cong); |
@@ -464,9 +464,9 @@ static void | |||
464 | xprt_clear_congestion_window_wait(struct rpc_xprt *xprt) | 464 | xprt_clear_congestion_window_wait(struct rpc_xprt *xprt) |
465 | { | 465 | { |
466 | if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) { | 466 | if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) { |
467 | spin_lock_bh(&xprt->transport_lock); | 467 | spin_lock(&xprt->transport_lock); |
468 | __xprt_lock_write_next_cong(xprt); | 468 | __xprt_lock_write_next_cong(xprt); |
469 | spin_unlock_bh(&xprt->transport_lock); | 469 | spin_unlock(&xprt->transport_lock); |
470 | } | 470 | } |
471 | } | 471 | } |
472 | 472 | ||
@@ -563,9 +563,9 @@ bool xprt_write_space(struct rpc_xprt *xprt) | |||
563 | 563 | ||
564 | if (!test_bit(XPRT_WRITE_SPACE, &xprt->state)) | 564 | if (!test_bit(XPRT_WRITE_SPACE, &xprt->state)) |
565 | return false; | 565 | return false; |
566 | spin_lock_bh(&xprt->transport_lock); | 566 | spin_lock(&xprt->transport_lock); |
567 | ret = xprt_clear_write_space_locked(xprt); | 567 | ret = xprt_clear_write_space_locked(xprt); |
568 | spin_unlock_bh(&xprt->transport_lock); | 568 | spin_unlock(&xprt->transport_lock); |
569 | return ret; | 569 | return ret; |
570 | } | 570 | } |
571 | EXPORT_SYMBOL_GPL(xprt_write_space); | 571 | EXPORT_SYMBOL_GPL(xprt_write_space); |
@@ -634,9 +634,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
634 | req->rq_retries = 0; | 634 | req->rq_retries = 0; |
635 | xprt_reset_majortimeo(req); | 635 | xprt_reset_majortimeo(req); |
636 | /* Reset the RTT counters == "slow start" */ | 636 | /* Reset the RTT counters == "slow start" */ |
637 | spin_lock_bh(&xprt->transport_lock); | 637 | spin_lock(&xprt->transport_lock); |
638 | rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); | 638 | rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); |
639 | spin_unlock_bh(&xprt->transport_lock); | 639 | spin_unlock(&xprt->transport_lock); |
640 | status = -ETIMEDOUT; | 640 | status = -ETIMEDOUT; |
641 | } | 641 | } |
642 | 642 | ||
@@ -668,11 +668,11 @@ static void xprt_autoclose(struct work_struct *work) | |||
668 | void xprt_disconnect_done(struct rpc_xprt *xprt) | 668 | void xprt_disconnect_done(struct rpc_xprt *xprt) |
669 | { | 669 | { |
670 | dprintk("RPC: disconnected transport %p\n", xprt); | 670 | dprintk("RPC: disconnected transport %p\n", xprt); |
671 | spin_lock_bh(&xprt->transport_lock); | 671 | spin_lock(&xprt->transport_lock); |
672 | xprt_clear_connected(xprt); | 672 | xprt_clear_connected(xprt); |
673 | xprt_clear_write_space_locked(xprt); | 673 | xprt_clear_write_space_locked(xprt); |
674 | xprt_wake_pending_tasks(xprt, -ENOTCONN); | 674 | xprt_wake_pending_tasks(xprt, -ENOTCONN); |
675 | spin_unlock_bh(&xprt->transport_lock); | 675 | spin_unlock(&xprt->transport_lock); |
676 | } | 676 | } |
677 | EXPORT_SYMBOL_GPL(xprt_disconnect_done); | 677 | EXPORT_SYMBOL_GPL(xprt_disconnect_done); |
678 | 678 | ||
@@ -684,7 +684,7 @@ EXPORT_SYMBOL_GPL(xprt_disconnect_done); | |||
684 | void xprt_force_disconnect(struct rpc_xprt *xprt) | 684 | void xprt_force_disconnect(struct rpc_xprt *xprt) |
685 | { | 685 | { |
686 | /* Don't race with the test_bit() in xprt_clear_locked() */ | 686 | /* Don't race with the test_bit() in xprt_clear_locked() */ |
687 | spin_lock_bh(&xprt->transport_lock); | 687 | spin_lock(&xprt->transport_lock); |
688 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); | 688 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); |
689 | /* Try to schedule an autoclose RPC call */ | 689 | /* Try to schedule an autoclose RPC call */ |
690 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) | 690 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) |
@@ -692,7 +692,7 @@ void xprt_force_disconnect(struct rpc_xprt *xprt) | |||
692 | else if (xprt->snd_task) | 692 | else if (xprt->snd_task) |
693 | rpc_wake_up_queued_task_set_status(&xprt->pending, | 693 | rpc_wake_up_queued_task_set_status(&xprt->pending, |
694 | xprt->snd_task, -ENOTCONN); | 694 | xprt->snd_task, -ENOTCONN); |
695 | spin_unlock_bh(&xprt->transport_lock); | 695 | spin_unlock(&xprt->transport_lock); |
696 | } | 696 | } |
697 | EXPORT_SYMBOL_GPL(xprt_force_disconnect); | 697 | EXPORT_SYMBOL_GPL(xprt_force_disconnect); |
698 | 698 | ||
@@ -726,7 +726,7 @@ xprt_request_retransmit_after_disconnect(struct rpc_task *task) | |||
726 | void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) | 726 | void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) |
727 | { | 727 | { |
728 | /* Don't race with the test_bit() in xprt_clear_locked() */ | 728 | /* Don't race with the test_bit() in xprt_clear_locked() */ |
729 | spin_lock_bh(&xprt->transport_lock); | 729 | spin_lock(&xprt->transport_lock); |
730 | if (cookie != xprt->connect_cookie) | 730 | if (cookie != xprt->connect_cookie) |
731 | goto out; | 731 | goto out; |
732 | if (test_bit(XPRT_CLOSING, &xprt->state)) | 732 | if (test_bit(XPRT_CLOSING, &xprt->state)) |
@@ -737,7 +737,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) | |||
737 | queue_work(xprtiod_workqueue, &xprt->task_cleanup); | 737 | queue_work(xprtiod_workqueue, &xprt->task_cleanup); |
738 | xprt_wake_pending_tasks(xprt, -EAGAIN); | 738 | xprt_wake_pending_tasks(xprt, -EAGAIN); |
739 | out: | 739 | out: |
740 | spin_unlock_bh(&xprt->transport_lock); | 740 | spin_unlock(&xprt->transport_lock); |
741 | } | 741 | } |
742 | 742 | ||
743 | static bool | 743 | static bool |
@@ -759,18 +759,13 @@ xprt_init_autodisconnect(struct timer_list *t) | |||
759 | { | 759 | { |
760 | struct rpc_xprt *xprt = from_timer(xprt, t, timer); | 760 | struct rpc_xprt *xprt = from_timer(xprt, t, timer); |
761 | 761 | ||
762 | spin_lock(&xprt->transport_lock); | ||
763 | if (!RB_EMPTY_ROOT(&xprt->recv_queue)) | 762 | if (!RB_EMPTY_ROOT(&xprt->recv_queue)) |
764 | goto out_abort; | 763 | return; |
765 | /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ | 764 | /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ |
766 | xprt->last_used = jiffies; | 765 | xprt->last_used = jiffies; |
767 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 766 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
768 | goto out_abort; | 767 | return; |
769 | spin_unlock(&xprt->transport_lock); | ||
770 | queue_work(xprtiod_workqueue, &xprt->task_cleanup); | 768 | queue_work(xprtiod_workqueue, &xprt->task_cleanup); |
771 | return; | ||
772 | out_abort: | ||
773 | spin_unlock(&xprt->transport_lock); | ||
774 | } | 769 | } |
775 | 770 | ||
776 | bool xprt_lock_connect(struct rpc_xprt *xprt, | 771 | bool xprt_lock_connect(struct rpc_xprt *xprt, |
@@ -779,7 +774,7 @@ bool xprt_lock_connect(struct rpc_xprt *xprt, | |||
779 | { | 774 | { |
780 | bool ret = false; | 775 | bool ret = false; |
781 | 776 | ||
782 | spin_lock_bh(&xprt->transport_lock); | 777 | spin_lock(&xprt->transport_lock); |
783 | if (!test_bit(XPRT_LOCKED, &xprt->state)) | 778 | if (!test_bit(XPRT_LOCKED, &xprt->state)) |
784 | goto out; | 779 | goto out; |
785 | if (xprt->snd_task != task) | 780 | if (xprt->snd_task != task) |
@@ -787,13 +782,13 @@ bool xprt_lock_connect(struct rpc_xprt *xprt, | |||
787 | xprt->snd_task = cookie; | 782 | xprt->snd_task = cookie; |
788 | ret = true; | 783 | ret = true; |
789 | out: | 784 | out: |
790 | spin_unlock_bh(&xprt->transport_lock); | 785 | spin_unlock(&xprt->transport_lock); |
791 | return ret; | 786 | return ret; |
792 | } | 787 | } |
793 | 788 | ||
794 | void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) | 789 | void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) |
795 | { | 790 | { |
796 | spin_lock_bh(&xprt->transport_lock); | 791 | spin_lock(&xprt->transport_lock); |
797 | if (xprt->snd_task != cookie) | 792 | if (xprt->snd_task != cookie) |
798 | goto out; | 793 | goto out; |
799 | if (!test_bit(XPRT_LOCKED, &xprt->state)) | 794 | if (!test_bit(XPRT_LOCKED, &xprt->state)) |
@@ -802,7 +797,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) | |||
802 | xprt->ops->release_xprt(xprt, NULL); | 797 | xprt->ops->release_xprt(xprt, NULL); |
803 | xprt_schedule_autodisconnect(xprt); | 798 | xprt_schedule_autodisconnect(xprt); |
804 | out: | 799 | out: |
805 | spin_unlock_bh(&xprt->transport_lock); | 800 | spin_unlock(&xprt->transport_lock); |
806 | wake_up_bit(&xprt->state, XPRT_LOCKED); | 801 | wake_up_bit(&xprt->state, XPRT_LOCKED); |
807 | } | 802 | } |
808 | 803 | ||
@@ -1412,14 +1407,14 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task) | |||
1412 | xprt_inject_disconnect(xprt); | 1407 | xprt_inject_disconnect(xprt); |
1413 | 1408 | ||
1414 | task->tk_flags |= RPC_TASK_SENT; | 1409 | task->tk_flags |= RPC_TASK_SENT; |
1415 | spin_lock_bh(&xprt->transport_lock); | 1410 | spin_lock(&xprt->transport_lock); |
1416 | 1411 | ||
1417 | xprt->stat.sends++; | 1412 | xprt->stat.sends++; |
1418 | xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; | 1413 | xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; |
1419 | xprt->stat.bklog_u += xprt->backlog.qlen; | 1414 | xprt->stat.bklog_u += xprt->backlog.qlen; |
1420 | xprt->stat.sending_u += xprt->sending.qlen; | 1415 | xprt->stat.sending_u += xprt->sending.qlen; |
1421 | xprt->stat.pending_u += xprt->pending.qlen; | 1416 | xprt->stat.pending_u += xprt->pending.qlen; |
1422 | spin_unlock_bh(&xprt->transport_lock); | 1417 | spin_unlock(&xprt->transport_lock); |
1423 | 1418 | ||
1424 | req->rq_connect_cookie = connect_cookie; | 1419 | req->rq_connect_cookie = connect_cookie; |
1425 | out_dequeue: | 1420 | out_dequeue: |
@@ -1770,13 +1765,13 @@ void xprt_release(struct rpc_task *task) | |||
1770 | else if (task->tk_client) | 1765 | else if (task->tk_client) |
1771 | rpc_count_iostats(task, task->tk_client->cl_metrics); | 1766 | rpc_count_iostats(task, task->tk_client->cl_metrics); |
1772 | xprt_request_dequeue_all(task, req); | 1767 | xprt_request_dequeue_all(task, req); |
1773 | spin_lock_bh(&xprt->transport_lock); | 1768 | spin_lock(&xprt->transport_lock); |
1774 | xprt->ops->release_xprt(xprt, task); | 1769 | xprt->ops->release_xprt(xprt, task); |
1775 | if (xprt->ops->release_request) | 1770 | if (xprt->ops->release_request) |
1776 | xprt->ops->release_request(task); | 1771 | xprt->ops->release_request(task); |
1777 | xprt->last_used = jiffies; | 1772 | xprt->last_used = jiffies; |
1778 | xprt_schedule_autodisconnect(xprt); | 1773 | xprt_schedule_autodisconnect(xprt); |
1779 | spin_unlock_bh(&xprt->transport_lock); | 1774 | spin_unlock(&xprt->transport_lock); |
1780 | if (req->rq_buffer) | 1775 | if (req->rq_buffer) |
1781 | xprt->ops->buf_free(task); | 1776 | xprt->ops->buf_free(task); |
1782 | xprt_inject_disconnect(xprt); | 1777 | xprt_inject_disconnect(xprt); |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 85115a2e2639..7dc62e55f526 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -1360,10 +1360,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) | |||
1360 | else if (credits > buf->rb_max_requests) | 1360 | else if (credits > buf->rb_max_requests) |
1361 | credits = buf->rb_max_requests; | 1361 | credits = buf->rb_max_requests; |
1362 | if (buf->rb_credits != credits) { | 1362 | if (buf->rb_credits != credits) { |
1363 | spin_lock_bh(&xprt->transport_lock); | 1363 | spin_lock(&xprt->transport_lock); |
1364 | buf->rb_credits = credits; | 1364 | buf->rb_credits = credits; |
1365 | xprt->cwnd = credits << RPC_CWNDSHIFT; | 1365 | xprt->cwnd = credits << RPC_CWNDSHIFT; |
1366 | spin_unlock_bh(&xprt->transport_lock); | 1366 | spin_unlock(&xprt->transport_lock); |
1367 | } | 1367 | } |
1368 | 1368 | ||
1369 | req = rpcr_to_rdmar(rqst); | 1369 | req = rpcr_to_rdmar(rqst); |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index bed57d8b5c19..d1fcc41d5eb5 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c | |||
@@ -72,9 +72,9 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, | |||
72 | else if (credits > r_xprt->rx_buf.rb_bc_max_requests) | 72 | else if (credits > r_xprt->rx_buf.rb_bc_max_requests) |
73 | credits = r_xprt->rx_buf.rb_bc_max_requests; | 73 | credits = r_xprt->rx_buf.rb_bc_max_requests; |
74 | 74 | ||
75 | spin_lock_bh(&xprt->transport_lock); | 75 | spin_lock(&xprt->transport_lock); |
76 | xprt->cwnd = credits << RPC_CWNDSHIFT; | 76 | xprt->cwnd = credits << RPC_CWNDSHIFT; |
77 | spin_unlock_bh(&xprt->transport_lock); | 77 | spin_unlock(&xprt->transport_lock); |
78 | 78 | ||
79 | spin_lock(&xprt->queue_lock); | 79 | spin_lock(&xprt->queue_lock); |
80 | ret = 0; | 80 | ret = 0; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 0004535c0188..3fe665152d95 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -226,9 +226,9 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, | |||
226 | * Enqueue the new transport on the accept queue of the listening | 226 | * Enqueue the new transport on the accept queue of the listening |
227 | * transport | 227 | * transport |
228 | */ | 228 | */ |
229 | spin_lock_bh(&listen_xprt->sc_lock); | 229 | spin_lock(&listen_xprt->sc_lock); |
230 | list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); | 230 | list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); |
231 | spin_unlock_bh(&listen_xprt->sc_lock); | 231 | spin_unlock(&listen_xprt->sc_lock); |
232 | 232 | ||
233 | set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); | 233 | set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); |
234 | svc_xprt_enqueue(&listen_xprt->sc_xprt); | 234 | svc_xprt_enqueue(&listen_xprt->sc_xprt); |
@@ -401,7 +401,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
401 | listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); | 401 | listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); |
402 | clear_bit(XPT_CONN, &xprt->xpt_flags); | 402 | clear_bit(XPT_CONN, &xprt->xpt_flags); |
403 | /* Get the next entry off the accept list */ | 403 | /* Get the next entry off the accept list */ |
404 | spin_lock_bh(&listen_rdma->sc_lock); | 404 | spin_lock(&listen_rdma->sc_lock); |
405 | if (!list_empty(&listen_rdma->sc_accept_q)) { | 405 | if (!list_empty(&listen_rdma->sc_accept_q)) { |
406 | newxprt = list_entry(listen_rdma->sc_accept_q.next, | 406 | newxprt = list_entry(listen_rdma->sc_accept_q.next, |
407 | struct svcxprt_rdma, sc_accept_q); | 407 | struct svcxprt_rdma, sc_accept_q); |
@@ -409,7 +409,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
409 | } | 409 | } |
410 | if (!list_empty(&listen_rdma->sc_accept_q)) | 410 | if (!list_empty(&listen_rdma->sc_accept_q)) |
411 | set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags); | 411 | set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags); |
412 | spin_unlock_bh(&listen_rdma->sc_lock); | 412 | spin_unlock(&listen_rdma->sc_lock); |
413 | if (!newxprt) | 413 | if (!newxprt) |
414 | return NULL; | 414 | return NULL; |
415 | 415 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 92af57019b96..97c15d47f343 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -880,7 +880,7 @@ static int xs_nospace(struct rpc_rqst *req) | |||
880 | req->rq_slen); | 880 | req->rq_slen); |
881 | 881 | ||
882 | /* Protect against races with write_space */ | 882 | /* Protect against races with write_space */ |
883 | spin_lock_bh(&xprt->transport_lock); | 883 | spin_lock(&xprt->transport_lock); |
884 | 884 | ||
885 | /* Don't race with disconnect */ | 885 | /* Don't race with disconnect */ |
886 | if (xprt_connected(xprt)) { | 886 | if (xprt_connected(xprt)) { |
@@ -890,7 +890,7 @@ static int xs_nospace(struct rpc_rqst *req) | |||
890 | } else | 890 | } else |
891 | ret = -ENOTCONN; | 891 | ret = -ENOTCONN; |
892 | 892 | ||
893 | spin_unlock_bh(&xprt->transport_lock); | 893 | spin_unlock(&xprt->transport_lock); |
894 | 894 | ||
895 | /* Race breaker in case memory is freed before above code is called */ | 895 | /* Race breaker in case memory is freed before above code is called */ |
896 | if (ret == -EAGAIN) { | 896 | if (ret == -EAGAIN) { |
@@ -1344,6 +1344,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
1344 | cancel_delayed_work_sync(&transport->connect_worker); | 1344 | cancel_delayed_work_sync(&transport->connect_worker); |
1345 | xs_close(xprt); | 1345 | xs_close(xprt); |
1346 | cancel_work_sync(&transport->recv_worker); | 1346 | cancel_work_sync(&transport->recv_worker); |
1347 | cancel_work_sync(&transport->error_worker); | ||
1347 | xs_xprt_free(xprt); | 1348 | xs_xprt_free(xprt); |
1348 | module_put(THIS_MODULE); | 1349 | module_put(THIS_MODULE); |
1349 | } | 1350 | } |
@@ -1397,9 +1398,9 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt, | |||
1397 | } | 1398 | } |
1398 | 1399 | ||
1399 | 1400 | ||
1400 | spin_lock_bh(&xprt->transport_lock); | 1401 | spin_lock(&xprt->transport_lock); |
1401 | xprt_adjust_cwnd(xprt, task, copied); | 1402 | xprt_adjust_cwnd(xprt, task, copied); |
1402 | spin_unlock_bh(&xprt->transport_lock); | 1403 | spin_unlock(&xprt->transport_lock); |
1403 | spin_lock(&xprt->queue_lock); | 1404 | spin_lock(&xprt->queue_lock); |
1404 | xprt_complete_rqst(task, copied); | 1405 | xprt_complete_rqst(task, copied); |
1405 | __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); | 1406 | __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); |
@@ -1509,7 +1510,6 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1509 | trace_rpc_socket_state_change(xprt, sk->sk_socket); | 1510 | trace_rpc_socket_state_change(xprt, sk->sk_socket); |
1510 | switch (sk->sk_state) { | 1511 | switch (sk->sk_state) { |
1511 | case TCP_ESTABLISHED: | 1512 | case TCP_ESTABLISHED: |
1512 | spin_lock(&xprt->transport_lock); | ||
1513 | if (!xprt_test_and_set_connected(xprt)) { | 1513 | if (!xprt_test_and_set_connected(xprt)) { |
1514 | xprt->connect_cookie++; | 1514 | xprt->connect_cookie++; |
1515 | clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); | 1515 | clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); |
@@ -1520,7 +1520,6 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1520 | xprt->stat.connect_start; | 1520 | xprt->stat.connect_start; |
1521 | xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING); | 1521 | xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING); |
1522 | } | 1522 | } |
1523 | spin_unlock(&xprt->transport_lock); | ||
1524 | break; | 1523 | break; |
1525 | case TCP_FIN_WAIT1: | 1524 | case TCP_FIN_WAIT1: |
1526 | /* The client initiated a shutdown of the socket */ | 1525 | /* The client initiated a shutdown of the socket */ |
@@ -1677,9 +1676,9 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t | |||
1677 | */ | 1676 | */ |
1678 | static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) | 1677 | static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) |
1679 | { | 1678 | { |
1680 | spin_lock_bh(&xprt->transport_lock); | 1679 | spin_lock(&xprt->transport_lock); |
1681 | xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); | 1680 | xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); |
1682 | spin_unlock_bh(&xprt->transport_lock); | 1681 | spin_unlock(&xprt->transport_lock); |
1683 | } | 1682 | } |
1684 | 1683 | ||
1685 | static int xs_get_random_port(void) | 1684 | static int xs_get_random_port(void) |
@@ -2214,13 +2213,13 @@ static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, | |||
2214 | unsigned int opt_on = 1; | 2213 | unsigned int opt_on = 1; |
2215 | unsigned int timeo; | 2214 | unsigned int timeo; |
2216 | 2215 | ||
2217 | spin_lock_bh(&xprt->transport_lock); | 2216 | spin_lock(&xprt->transport_lock); |
2218 | keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); | 2217 | keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); |
2219 | keepcnt = xprt->timeout->to_retries + 1; | 2218 | keepcnt = xprt->timeout->to_retries + 1; |
2220 | timeo = jiffies_to_msecs(xprt->timeout->to_initval) * | 2219 | timeo = jiffies_to_msecs(xprt->timeout->to_initval) * |
2221 | (xprt->timeout->to_retries + 1); | 2220 | (xprt->timeout->to_retries + 1); |
2222 | clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); | 2221 | clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); |
2223 | spin_unlock_bh(&xprt->transport_lock); | 2222 | spin_unlock(&xprt->transport_lock); |
2224 | 2223 | ||
2225 | /* TCP Keepalive options */ | 2224 | /* TCP Keepalive options */ |
2226 | kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, | 2225 | kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, |
@@ -2245,7 +2244,7 @@ static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, | |||
2245 | struct rpc_timeout to; | 2244 | struct rpc_timeout to; |
2246 | unsigned long initval; | 2245 | unsigned long initval; |
2247 | 2246 | ||
2248 | spin_lock_bh(&xprt->transport_lock); | 2247 | spin_lock(&xprt->transport_lock); |
2249 | if (reconnect_timeout < xprt->max_reconnect_timeout) | 2248 | if (reconnect_timeout < xprt->max_reconnect_timeout) |
2250 | xprt->max_reconnect_timeout = reconnect_timeout; | 2249 | xprt->max_reconnect_timeout = reconnect_timeout; |
2251 | if (connect_timeout < xprt->connect_timeout) { | 2250 | if (connect_timeout < xprt->connect_timeout) { |
@@ -2262,7 +2261,7 @@ static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, | |||
2262 | xprt->connect_timeout = connect_timeout; | 2261 | xprt->connect_timeout = connect_timeout; |
2263 | } | 2262 | } |
2264 | set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); | 2263 | set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); |
2265 | spin_unlock_bh(&xprt->transport_lock); | 2264 | spin_unlock(&xprt->transport_lock); |
2266 | } | 2265 | } |
2267 | 2266 | ||
2268 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | 2267 | static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) |