aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorParav Pandit <pandit.parav@gmail.com>2016-09-28 16:26:44 -0400
committerDoug Ledford <dledford@redhat.com>2016-10-06 13:50:04 -0400
commitb6bbee0d2438a2c9c7525f5bd7047a8b2ce4f38f (patch)
tree9d6915ef13eb5ed496e944e8da51498be7281dfc
parentd9703650f4aba7555fde92636d8d9a689029e8f8 (diff)
IB/rxe: Properly honor max IRD value for rd/atomic.
This patch honors the max incoming read request count instead of the outgoing read request count: (a) during modify qp, by allocating response queue metadata, and (b) during incoming read request processing. Signed-off-by: Parav Pandit <pandit.parav@gmail.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c2
3 files changed, 15 insertions, 13 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4a5484ef604f..73849a5a91b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -198,7 +198,7 @@ void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
198static inline void rxe_advance_resp_resource(struct rxe_qp *qp) 198static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
199{ 199{
200 qp->resp.res_head++; 200 qp->resp.res_head++;
201 if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic)) 201 if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
202 qp->resp.res_head = 0; 202 qp->resp.res_head = 0;
203} 203}
204 204
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22ba24f2a2c1..62c37a563df9 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -146,7 +146,7 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
146 if (qp->resp.resources) { 146 if (qp->resp.resources) {
147 int i; 147 int i;
148 148
149 for (i = 0; i < qp->attr.max_rd_atomic; i++) { 149 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
150 struct resp_res *res = &qp->resp.resources[i]; 150 struct resp_res *res = &qp->resp.resources[i];
151 151
152 free_rd_atomic_resource(qp, res); 152 free_rd_atomic_resource(qp, res);
@@ -174,7 +174,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
174 struct resp_res *res; 174 struct resp_res *res;
175 175
176 if (qp->resp.resources) { 176 if (qp->resp.resources) {
177 for (i = 0; i < qp->attr.max_rd_atomic; i++) { 177 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
178 res = &qp->resp.resources[i]; 178 res = &qp->resp.resources[i];
179 free_rd_atomic_resource(qp, res); 179 free_rd_atomic_resource(qp, res);
180 } 180 }
@@ -596,14 +596,21 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
596 if (mask & IB_QP_MAX_QP_RD_ATOMIC) { 596 if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
597 int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); 597 int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
598 598
599 qp->attr.max_rd_atomic = max_rd_atomic;
600 atomic_set(&qp->req.rd_atomic, max_rd_atomic);
601 }
602
603 if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
604 int max_dest_rd_atomic =
605 __roundup_pow_of_two(attr->max_dest_rd_atomic);
606
607 qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
608
599 free_rd_atomic_resources(qp); 609 free_rd_atomic_resources(qp);
600 610
601 err = alloc_rd_atomic_resources(qp, max_rd_atomic); 611 err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
602 if (err) 612 if (err)
603 return err; 613 return err;
604
605 qp->attr.max_rd_atomic = max_rd_atomic;
606 atomic_set(&qp->req.rd_atomic, max_rd_atomic);
607 } 614 }
608 615
609 if (mask & IB_QP_CUR_STATE) 616 if (mask & IB_QP_CUR_STATE)
@@ -701,11 +708,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
701 pr_debug("set req psn = 0x%x\n", qp->req.psn); 708 pr_debug("set req psn = 0x%x\n", qp->req.psn);
702 } 709 }
703 710
704 if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
705 qp->attr.max_dest_rd_atomic =
706 __roundup_pow_of_two(attr->max_dest_rd_atomic);
707 }
708
709 if (mask & IB_QP_PATH_MIG_STATE) 711 if (mask & IB_QP_PATH_MIG_STATE)
710 qp->attr.path_mig_state = attr->path_mig_state; 712 qp->attr.path_mig_state = attr->path_mig_state;
711 713
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3e0f0f2baace..d544b5e84797 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -383,7 +383,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
383 * too many read/atomic ops, we just 383 * too many read/atomic ops, we just
384 * recycle the responder resource queue 384 * recycle the responder resource queue
385 */ 385 */
386 if (likely(qp->attr.max_rd_atomic > 0)) 386 if (likely(qp->attr.max_dest_rd_atomic > 0))
387 return RESPST_CHK_LENGTH; 387 return RESPST_CHK_LENGTH;
388 else 388 else
389 return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ; 389 return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;