aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2018-05-07 15:28:20 -0400
committerJ. Bruce Fields <bfields@redhat.com>2018-05-11 15:48:57 -0400
commit3abb03facee06ea052be6e3a435f6dbb4e54fc04 (patch)
tree652702539ef971300d2b4fe21702b1c58ddc1ab9
parent986b78894b268f605e9ea055b99959bdce0e5945 (diff)
svcrdma: Simplify svc_rdma_send()
Clean up: No current caller of svc_rdma_send() passes in a chained WR. The logic that counts the chain length can be replaced with a constant (1). Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c30
1 file changed, 15 insertions, 15 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 0ebdc0c76483..edfeca45ac1c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -253,41 +253,41 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
253 svc_xprt_put(&rdma->sc_xprt); 253 svc_xprt_put(&rdma->sc_xprt);
254} 254}
255 255
256/**
257 * svc_rdma_send - Post a single Send WR
258 * @rdma: transport on which to post the WR
259 * @wr: prepared Send WR to post
260 *
261 * Returns zero when the Send WR was posted successfully. Otherwise, a
262 * negative errno is returned.
263 */
256int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) 264int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
257{ 265{
258 struct ib_send_wr *bad_wr, *n_wr; 266 struct ib_send_wr *bad_wr;
259 int wr_count;
260 int i;
261 int ret; 267 int ret;
262 268
263 wr_count = 1; 269 might_sleep();
264 for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
265 wr_count++;
266 270
267 /* If the SQ is full, wait until an SQ entry is available */ 271 /* If the SQ is full, wait until an SQ entry is available */
268 while (1) { 272 while (1) {
269 if ((atomic_sub_return(wr_count, &rdma->sc_sq_avail) < 0)) { 273 if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
270 atomic_inc(&rdma_stat_sq_starve); 274 atomic_inc(&rdma_stat_sq_starve);
271 trace_svcrdma_sq_full(rdma); 275 trace_svcrdma_sq_full(rdma);
272 atomic_add(wr_count, &rdma->sc_sq_avail); 276 atomic_inc(&rdma->sc_sq_avail);
273 wait_event(rdma->sc_send_wait, 277 wait_event(rdma->sc_send_wait,
274 atomic_read(&rdma->sc_sq_avail) > wr_count); 278 atomic_read(&rdma->sc_sq_avail) > 1);
275 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) 279 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
276 return -ENOTCONN; 280 return -ENOTCONN;
277 trace_svcrdma_sq_retry(rdma); 281 trace_svcrdma_sq_retry(rdma);
278 continue; 282 continue;
279 } 283 }
280 /* Take a transport ref for each WR posted */
281 for (i = 0; i < wr_count; i++)
282 svc_xprt_get(&rdma->sc_xprt);
283 284
284 /* Bump used SQ WR count and post */ 285 svc_xprt_get(&rdma->sc_xprt);
285 ret = ib_post_send(rdma->sc_qp, wr, &bad_wr); 286 ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
286 trace_svcrdma_post_send(wr, ret); 287 trace_svcrdma_post_send(wr, ret);
287 if (ret) { 288 if (ret) {
288 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 289 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
289 for (i = 0; i < wr_count; i++) 290 svc_xprt_put(&rdma->sc_xprt);
290 svc_xprt_put(&rdma->sc_xprt);
291 wake_up(&rdma->sc_send_wait); 291 wake_up(&rdma->sc_send_wait);
292 } 292 }
293 break; 293 break;