aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEddie Wai <eddie.wai@broadcom.com>2017-06-29 15:28:13 -0400
committerDoug Ledford <dledford@redhat.com>2017-07-20 11:20:50 -0400
commita25d112fe9c8e8817cde1df17a82aee472c55993 (patch)
tree644616f02fe039323fa385d82835b213bb4e42a4
parent58d4a671d0eac45db1c7f27c8684c277249ac127 (diff)
RDMA/bnxt_re: Fixed the max_rd_atomic support for initiator and destination QP
There are a couple of bugs in the support of max_rd_atomic and max_dest_rd_atomic. In modify_qp, if the requested max_rd_atomic — which is the ORRQ size — is greater than what the chip can support, then we have to cap the request to the chip max, as we can't have the HW overflow the ORRQ. Capping the max_rd_atomic internally is okay to do, as the remaining read/atomic WRs will still be sitting in the SQ. However, for max_dest_rd_atomic, the driver has to error out, as this dictates the IRRQ size and we can't control what the remote side sends.

Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 4e3e5b91d855..4d3cdca03c02 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -172,7 +172,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_mr = dev_attr->max_mr;
 	ib_attr->max_pd = dev_attr->max_pd;
 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
+	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
 	ib_attr->atomic_cap = IB_ATOMIC_HCA;
 	ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
 
@@ -1512,13 +1512,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
-		qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+		/* Cap the max_rd_atomic to device max */
+		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+						   dev_attr->max_qp_rd_atom);
 	}
 	if (qp_attr_mask & IB_QP_SQ_PSN) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
 	}
 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (qp_attr->max_dest_rd_atomic >
+		    dev_attr->max_qp_init_rd_atom) {
+			dev_err(rdev_to_dev(rdev),
+				"max_dest_rd_atomic requested%d is > dev_max%d",
+				qp_attr->max_dest_rd_atomic,
+				dev_attr->max_qp_init_rd_atom);
+			return -EINVAL;
+		}
+
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;