author     Chuck Lever <chuck.lever@oracle.com>        2015-12-16 17:22:06 -0500
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2015-12-18 15:34:33 -0500
commit     9b06688bc3b9f13f8de90f832c455fddec3d4e8a (patch)
tree       04ef60893cb7168f585e023aba28c064d8a738f7
parent     abfb689711aaebd14d893236c6ea4bcdfb61e74c (diff)
xprtrdma: Fix additional uses of spin_lock_irqsave(rb_lock)
Clean up.
The rb_lock critical sections added in rpcrdma_ep_post_extra_recv()
should have been converted to plain spin_lock in the first place,
since the reply handler now runs from a work queue.
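
As a minimal, hypothetical illustration of why plain spin_lock is enough
(the demo_* names below are invented and are not xprtrdma symbols): once
every acquirer of a lock runs in process context, for example from a
workqueue item, no hard-IRQ path can contend for it, so the
irqsave/irqrestore variants disable interrupts for no benefit.

/*
 * Sketch only: demo_* names are hypothetical, not kernel or xprtrdma
 * symbols. Both functions serialize on the same lock; the second may
 * use plain spin_lock() because a work item always runs in process
 * context, so no hard-IRQ acquirer exists to deadlock against.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

/* Old pattern: needed only while some path took the lock in hard-IRQ context. */
static void demo_touch_from_irq_era(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_counter++;
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* New pattern: the handler is a work item, so process context is guaranteed. */
static void demo_work_fn(struct work_struct *work)
{
        spin_lock(&demo_lock);
        demo_counter++;
        spin_unlock(&demo_lock);
}
static DECLARE_WORK(demo_work, demo_work_fn);
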
The backchannel setup code should use the appropriate helper instead
of open-coding an rb_recv_bufs list add.
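
In the patch below that helper is rpcrdma_recv_buffer_put(). As a rough,
hypothetical sketch of the pattern (the demo_* names are invented, not
the real rpcrdma structures): the helper owns both the lock and the list
manipulation, so callers never open-code the locked list_add themselves.

/*
 * Sketch only: demo_* names are hypothetical. The caller is assumed to
 * have initialized buf->lock with spin_lock_init() and buf->free_reps
 * with INIT_LIST_HEAD() before the first put.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_buffer {
        spinlock_t              lock;
        struct list_head        free_reps;
};

struct demo_rep {
        struct list_head        list;
};

/* Return a receive buffer to the free list; locking lives here, not in callers. */
static void demo_recv_buffer_put(struct demo_buffer *buf, struct demo_rep *rep)
{
        spin_lock(&buf->lock);
        list_add(&rep->list, &buf->free_reps);
        spin_unlock(&buf->lock);
}
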
Problem introduced by glib patch re-ordering on my part.
Fixes: f531a5dbc451 ('xprtrdma: Pre-allocate backward rpc_rqst')
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c  |  6
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c        |  7
2 files changed, 4 insertions(+), 9 deletions(-)
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 97554ca68191..40f48c62f9b1 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -84,9 +84,7 @@ out_fail:
 static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
                                  unsigned int count)
 {
-        struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
         struct rpcrdma_rep *rep;
-        unsigned long flags;
         int rc = 0;
 
         while (count--) {
@@ -98,9 +96,7 @@ static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
                         break;
                 }
 
-                spin_lock_irqsave(&buffers->rb_lock, flags);
-                list_add(&rep->rr_list, &buffers->rb_recv_bufs);
-                spin_unlock_irqrestore(&buffers->rb_lock, flags);
+                rpcrdma_recv_buffer_put(rep);
         }
 
         return rc;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 2cc101410a76..003630733ef3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1338,15 +1338,14 @@ rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
         struct rpcrdma_ep *ep = &r_xprt->rx_ep;
         struct rpcrdma_rep *rep;
-        unsigned long flags;
         int rc;
 
         while (count--) {
-                spin_lock_irqsave(&buffers->rb_lock, flags);
+                spin_lock(&buffers->rb_lock);
                 if (list_empty(&buffers->rb_recv_bufs))
                         goto out_reqbuf;
                 rep = rpcrdma_buffer_get_rep_locked(buffers);
-                spin_unlock_irqrestore(&buffers->rb_lock, flags);
+                spin_unlock(&buffers->rb_lock);
 
                 rc = rpcrdma_ep_post_recv(ia, ep, rep);
                 if (rc)
@@ -1356,7 +1355,7 @@ rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
         return 0;
 
 out_reqbuf:
-        spin_unlock_irqrestore(&buffers->rb_lock, flags);
+        spin_unlock(&buffers->rb_lock);
         pr_warn("%s: no extra receive buffers\n", __func__);
         return -ENOMEM;
 