author		Bharat Potnuri <bharat@chelsio.com>	2018-04-27 07:11:16 -0400
committer	Doug Ledford <dledford@redhat.com>	2018-04-27 14:38:44 -0400
commit		2df19e19ae90d94fd8724083f161f368a2797537
tree		b2f6622d07fb37ddb5501cddd377aeacdab72c6f
parent		54e7e48b13c85d9a730b989fe7dc5250199a4f81
iw_cxgb4: Atomically flush per QP HW CQEs
When a CQ is shared by multiple QPs, c4iw_flush_hw_cq() needs to acquire the corresponding QP's lock before moving that QP's CQEs into its SW queue and accessing the SQ contents to complete a WR. Ignore CQEs if the corresponding QP is already flushed.

Cc: stable@vger.kernel.org
Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c	11
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h	2
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	4
3 files changed, 13 insertions, 4 deletions
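The locking rule described in the commit message can be illustrated outside the driver. The following is a minimal userspace sketch using pthread mutexes; it is not iw_cxgb4 code, and struct qp, struct cqe, flush_shared_cq() and move_to_sw_queue() are hypothetical names chosen for illustration. The caller is assumed to already hold the lock of the QP that initiated the flush, mirroring how __flush_qp() calls c4iw_flush_hw_cq() under qhp->lock in the qp.c hunk below.

/*
 * Userspace model of the rule enforced by this patch: while walking a CQ
 * shared by several QPs, take each peer QP's lock before touching its
 * CQEs, skip QPs that are already flushed, and never re-take the lock of
 * the QP whose flush started the walk (the caller already holds it).
 * All names here are illustrative, not taken from the driver.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct qp {
	pthread_mutex_t lock;
	bool flushed;
	int id;
};

struct cqe {
	struct qp *owner;	/* QP this completion belongs to */
	int wrid;
};

/* Stand-in for moving a CQE into the owning QP's software queue. */
static void move_to_sw_queue(struct cqe *cqe)
{
	printf("QP %d: flushing CQE for WR %d\n", cqe->owner->id, cqe->wrid);
}

/*
 * Walk the CQ. flush_qp is the QP whose flush triggered the walk; its
 * lock is already held by the caller, as in __flush_qp().
 */
static void flush_shared_cq(struct cqe *cq, int ncqe, struct qp *flush_qp)
{
	for (int i = 0; i < ncqe; i++) {
		struct qp *qp = cq[i].owner;

		if (!qp)
			continue;

		if (qp != flush_qp) {
			pthread_mutex_lock(&qp->lock);
			if (qp->flushed) {	/* another flush got here first */
				pthread_mutex_unlock(&qp->lock);
				continue;
			}
		}

		move_to_sw_queue(&cq[i]);

		if (qp != flush_qp)
			pthread_mutex_unlock(&qp->lock);
	}
}

int main(void)
{
	struct qp a = { .lock = PTHREAD_MUTEX_INITIALIZER, .flushed = false, .id = 1 };
	struct qp b = { .lock = PTHREAD_MUTEX_INITIALIZER, .flushed = true,  .id = 2 };
	struct cqe cq[] = { { &a, 10 }, { &b, 11 }, { &a, 12 } };

	pthread_mutex_lock(&a.lock);	/* caller holds the flushing QP's lock */
	flush_shared_cq(cq, 3, &a);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

As in the patch, only peer QPs are locked per CQE and already-flushed QPs are skipped, so the initiating QP's lock is never taken twice and CQEs are not completed against a flushed queue.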
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6f2b26126c64..2be2e1ac1b5f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
 	struct c4iw_qp *qhp;
@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		if (qhp == NULL)
 			goto next_cqe;
 
+		if (flush_qhp != qhp) {
+			spin_lock(&qhp->lock);
+
+			if (qhp->wq.flushed == 1)
+				goto next_cqe;
+		}
+
 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
 			goto next_cqe;
 
@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
 		t4_hwcq_consume(&chp->cq);
 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+		if (qhp && flush_qhp != qhp)
+			spin_unlock(&qhp->lock);
 	}
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a60def23e9ef..831027717121 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -1053,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index de77b6027d69..ae167b686608 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	qhp->wq.flushed = 1;
 	t4_set_wq_in_error(&qhp->wq);
 
-	c4iw_flush_hw_cq(rchp);
+	c4iw_flush_hw_cq(rchp, qhp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 
 	if (schp != rchp)
-		c4iw_flush_hw_cq(schp);
+		c4iw_flush_hw_cq(schp, qhp);
 	sq_flushed = c4iw_flush_sq(qhp);
 
 	spin_unlock(&qhp->lock);