summary | refs | log | tree | commit | diff | stats
path: root/net/sunrpc
diff options
context:
space:
mode:
author    Chuck Lever <chuck.lever@oracle.com>  2018-05-04 15:34:48 -0400
committer Anna Schumaker <Anna.Schumaker@Netapp.com>  2018-05-07 09:20:03 -0400
commit    914fcad9873cbd46e3a4c3c31551b98b15a49079 (patch)
tree      d3a0779b4a7b0482728801b0ebec2c8ee110df22 /net/sunrpc
parent    107c4beb9bedd07d6e22f7010333dba3dc988292 (diff)
xprtrdma: Fix max_send_wr computation
For FRWR, the computation of max_send_wr is split between frwr_op_open and rpcrdma_ep_create, which makes it difficult to tell that the max_send_wr result is currently incorrect if frwr_op_open has to reduce the credit limit to accommodate a small max_qp_wr. This is a problem now that extra WRs are needed for backchannel operations and a drain CQE. So, refactor the computation so that it is all done in ->ro_open, and fix the FRWR version of this computation so that it accommodates HCAs with small max_qp_wr correctly. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c   22
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c  30
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c     24
3 files changed, 52 insertions, 24 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 5cc68a824f45..592d1e8e27c3 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -159,10 +159,32 @@ out_release:
159 fmr_op_release_mr(mr); 159 fmr_op_release_mr(mr);
160} 160}
161 161
162/* On success, sets:
163 * ep->rep_attr.cap.max_send_wr
164 * ep->rep_attr.cap.max_recv_wr
165 * cdata->max_requests
166 * ia->ri_max_segs
167 */
162static int 168static int
163fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 169fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
164 struct rpcrdma_create_data_internal *cdata) 170 struct rpcrdma_create_data_internal *cdata)
165{ 171{
172 int max_qp_wr;
173
174 max_qp_wr = ia->ri_device->attrs.max_qp_wr;
175 max_qp_wr -= RPCRDMA_BACKWARD_WRS;
176 max_qp_wr -= 1;
177 if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
178 return -ENOMEM;
179 if (cdata->max_requests > max_qp_wr)
180 cdata->max_requests = max_qp_wr;
181 ep->rep_attr.cap.max_send_wr = cdata->max_requests;
182 ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
183 ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
184 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
185 ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
186 ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
187
166 ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS / 188 ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
167 RPCRDMA_MAX_FMR_SGES); 189 RPCRDMA_MAX_FMR_SGES);
168 return 0; 190 return 0;
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index c5743a0960be..0f2e108d387e 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -205,12 +205,22 @@ out_release:
205 frwr_op_release_mr(mr); 205 frwr_op_release_mr(mr);
206} 206}
207 207
208/* On success, sets:
209 * ep->rep_attr.cap.max_send_wr
210 * ep->rep_attr.cap.max_recv_wr
211 * cdata->max_requests
212 * ia->ri_max_segs
213 *
214 * And these FRWR-related fields:
215 * ia->ri_max_frwr_depth
216 * ia->ri_mrtype
217 */
208static int 218static int
209frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 219frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
210 struct rpcrdma_create_data_internal *cdata) 220 struct rpcrdma_create_data_internal *cdata)
211{ 221{
212 struct ib_device_attr *attrs = &ia->ri_device->attrs; 222 struct ib_device_attr *attrs = &ia->ri_device->attrs;
213 int depth, delta; 223 int max_qp_wr, depth, delta;
214 224
215 ia->ri_mrtype = IB_MR_TYPE_MEM_REG; 225 ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
216 if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG) 226 if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
@@ -244,14 +254,26 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
244 } while (delta > 0); 254 } while (delta > 0);
245 } 255 }
246 256
247 ep->rep_attr.cap.max_send_wr *= depth; 257 max_qp_wr = ia->ri_device->attrs.max_qp_wr;
248 if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) { 258 max_qp_wr -= RPCRDMA_BACKWARD_WRS;
249 cdata->max_requests = attrs->max_qp_wr / depth; 259 max_qp_wr -= 1;
260 if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
261 return -ENOMEM;
262 if (cdata->max_requests > max_qp_wr)
263 cdata->max_requests = max_qp_wr;
264 ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
265 if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
266 cdata->max_requests = max_qp_wr / depth;
250 if (!cdata->max_requests) 267 if (!cdata->max_requests)
251 return -EINVAL; 268 return -EINVAL;
252 ep->rep_attr.cap.max_send_wr = cdata->max_requests * 269 ep->rep_attr.cap.max_send_wr = cdata->max_requests *
253 depth; 270 depth;
254 } 271 }
272 ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
273 ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
274 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
275 ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
276 ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
255 277
256 ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS / 278 ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
257 ia->ri_max_frwr_depth); 279 ia->ri_max_frwr_depth);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 07529ef8e33e..62baddefced3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -501,8 +501,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
501 struct rpcrdma_create_data_internal *cdata) 501 struct rpcrdma_create_data_internal *cdata)
502{ 502{
503 struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; 503 struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
504 unsigned int max_qp_wr, max_sge;
505 struct ib_cq *sendcq, *recvcq; 504 struct ib_cq *sendcq, *recvcq;
505 unsigned int max_sge;
506 int rc; 506 int rc;
507 507
508 max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge, 508 max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
@@ -513,29 +513,13 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
513 } 513 }
514 ia->ri_max_send_sges = max_sge; 514 ia->ri_max_send_sges = max_sge;
515 515
516 if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { 516 rc = ia->ri_ops->ro_open(ia, ep, cdata);
517 dprintk("RPC: %s: insufficient wqe's available\n", 517 if (rc)
518 __func__); 518 return rc;
519 return -ENOMEM;
520 }
521 max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;
522
523 /* check provider's send/recv wr limits */
524 if (cdata->max_requests > max_qp_wr)
525 cdata->max_requests = max_qp_wr;
526 519
527 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; 520 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
528 ep->rep_attr.qp_context = ep; 521 ep->rep_attr.qp_context = ep;
529 ep->rep_attr.srq = NULL; 522 ep->rep_attr.srq = NULL;
530 ep->rep_attr.cap.max_send_wr = cdata->max_requests;
531 ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
532 ep->rep_attr.cap.max_send_wr += 1; /* drain cqe */
533 rc = ia->ri_ops->ro_open(ia, ep, cdata);
534 if (rc)
535 return rc;
536 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
537 ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
538 ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */
539 ep->rep_attr.cap.max_send_sge = max_sge; 523 ep->rep_attr.cap.max_send_sge = max_sge;
540 ep->rep_attr.cap.max_recv_sge = 1; 524 ep->rep_attr.cap.max_recv_sge = 1;
541 ep->rep_attr.cap.max_inline_data = 0; 525 ep->rep_attr.cap.max_inline_data = 0;