author     Tom Tucker <tom@opengridcomputing.com>    2008-08-11 15:10:19 -0400
committer  Tom Tucker <tom@opengridcomputing.com>    2008-10-06 15:45:56 -0400
commit     5b180a9a64ca2217a658bd515ef910eafefc5e5a (patch)
tree       61fe08b958b3707be0a516677f72216c06217edd /net
parent     a5abf4e81545d9c7280c49cae853cc45fd769ddf (diff)
svcrdma: Add support to svc_rdma_send to handle chained WR
Work requests (WRs) can be submitted as linked lists of WRs. Update the
svc_rdma_send routine to handle WR chains. This will be used to submit
a WR that uses an FRMR chained to a second WR that invalidates the FRMR.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
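For context, a minimal caller-side sketch (not part of this patch) of the FRMR pattern the commit message describes: a fast-register WR linked via ->next to a local-invalidate WR, submitted with a single svc_rdma_send() call. The xprt and frmr_rkey values are assumed to be in scope, the wr.fast_reg setup is elided, and field usage follows the 2008-era ib_verbs API.

        /* Hypothetical sketch: build a two-WR chain and post it in one call. */
        struct ib_send_wr fastreg_wr, inv_wr;
        int ret;

        memset(&fastreg_wr, 0, sizeof(fastreg_wr));
        fastreg_wr.opcode = IB_WR_FAST_REG_MR;      /* register the FRMR */
        fastreg_wr.send_flags = IB_SEND_SIGNALED;
        /* ... wr.fast_reg page list / rkey setup elided ... */
        fastreg_wr.next = &inv_wr;                  /* chain the second WR */

        memset(&inv_wr, 0, sizeof(inv_wr));
        inv_wr.opcode = IB_WR_LOCAL_INV;            /* invalidate the FRMR */
        inv_wr.send_flags = IB_SEND_SIGNALED;
        inv_wr.ex.invalidate_rkey = frmr_rkey;      /* assumed FRMR rkey */

        /* svc_rdma_send() now reserves SQ slots for both WRs before posting */
        ret = svc_rdma_send(xprt, &fastreg_wr);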
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 29 +++++++++++++++++++++--------
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index d9183cbcd967..f22f58767661 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1235,17 +1235,23 @@ int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
 
 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 {
-        struct ib_send_wr *bad_wr;
+        struct ib_send_wr *bad_wr, *n_wr;
+        int wr_count;
+        int i;
         int ret;
 
         if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                 return -ENOTCONN;
 
         BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
+        wr_count = 1;
+        for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
+                wr_count++;
+
         /* If the SQ is full, wait until an SQ entry is available */
         while (1) {
                 spin_lock_bh(&xprt->sc_lock);
-                if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
+                if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
                         spin_unlock_bh(&xprt->sc_lock);
                         atomic_inc(&rdma_stat_sq_starve);
 
@@ -1260,19 +1266,26 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
                                 return 0;
                         continue;
                 }
-                /* Bumped used SQ WR count and post */
-                svc_xprt_get(&xprt->sc_xprt);
+                /* Take a transport ref for each WR posted */
+                for (i = 0; i < wr_count; i++)
+                        svc_xprt_get(&xprt->sc_xprt);
+
+                /* Bump used SQ WR count and post */
+                atomic_add(wr_count, &xprt->sc_sq_count);
                 ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
-                if (!ret)
-                        atomic_inc(&xprt->sc_sq_count);
-                else {
-                        svc_xprt_put(&xprt->sc_xprt);
+                if (ret) {
+                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+                        atomic_sub(wr_count, &xprt->sc_sq_count);
+                        for (i = 0; i < wr_count; i++)
+                                svc_xprt_put(&xprt->sc_xprt);
                         dprintk("svcrdma: failed to post SQ WR rc=%d, "
                                 "sc_sq_count=%d, sc_sq_depth=%d\n",
                                 ret, atomic_read(&xprt->sc_sq_count),
                                 xprt->sc_sq_depth);
                 }
                 spin_unlock_bh(&xprt->sc_lock);
+                if (ret)
+                        wake_up(&xprt->sc_send_wait);
                 break;
         }
         return ret;
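The net effect of the change: SQ slots for the whole WR chain are reserved under sc_lock before ib_post_send(), and one transport reference is taken per WR so that each send completion can release its own reference. If the post fails, the transport is flagged XPT_CLOSE, the slot reservation and references are rolled back, and sc_send_wait is woken so threads blocked waiting for SQ space observe the close instead of sleeping indefinitely.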