aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2017-11-27 16:16:32 -0500
committerJason Gunthorpe <jgg@mellanox.com>2017-12-11 17:33:51 -0500
commitc058ecf6e455fac7346d46197a02398ead90851f (patch)
treee73b6bbb3ac8926c1ad93fb8f2c32525906a7778
parent335ebf6fa35ca1c59b73f76fad19b249d3550e86 (diff)
iw_cxgb4: only insert drain cqes if wq is flushed
Only insert our special drain CQEs to support ib_drain_sq/rq() after the wq is flushed. Otherwise, existing but not yet polled CQEs can be returned out of order to the user application. This can happen when the QP has exited RTS but the QP has not yet been flushed, which can happen during a normal close (vs abortive close). In addition, never count the drain CQEs when determining how many CQEs need to be synthesized during the flush operation. This latter issue should never happen if the QP is properly flushed before inserting the drain CQE, but I wanted to avoid corrupting the CQ state. So we handle it and log a warning once. Fixes: 4fe7c2962e11 ("iw_cxgb4: refactor sq/rq drain logic") Signed-off-by: Steve Wise <swise@opengridcomputing.com> Cc: stable@vger.kernel.org Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c14
2 files changed, 17 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95cd2c5..b7bfc536e00f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
395 395
396static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) 396static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
397{ 397{
398 if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
399 WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
400 return 0;
401 }
402
398 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) 403 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
399 return 0; 404 return 0;
400 405
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 355e288ec969..38bddd02a943 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
868 868
869 qhp = to_c4iw_qp(ibqp); 869 qhp = to_c4iw_qp(ibqp);
870 spin_lock_irqsave(&qhp->lock, flag); 870 spin_lock_irqsave(&qhp->lock, flag);
871 if (t4_wq_in_error(&qhp->wq)) { 871
872 /*
873 * If the qp has been flushed, then just insert a special
874 * drain cqe.
875 */
876 if (qhp->wq.flushed) {
872 spin_unlock_irqrestore(&qhp->lock, flag); 877 spin_unlock_irqrestore(&qhp->lock, flag);
873 complete_sq_drain_wr(qhp, wr); 878 complete_sq_drain_wr(qhp, wr);
874 return err; 879 return err;
@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1011 1016
1012 qhp = to_c4iw_qp(ibqp); 1017 qhp = to_c4iw_qp(ibqp);
1013 spin_lock_irqsave(&qhp->lock, flag); 1018 spin_lock_irqsave(&qhp->lock, flag);
1014 if (t4_wq_in_error(&qhp->wq)) { 1019
1020 /*
1021 * If the qp has been flushed, then just insert a special
1022 * drain cqe.
1023 */
1024 if (qhp->wq.flushed) {
1015 spin_unlock_irqrestore(&qhp->lock, flag); 1025 spin_unlock_irqrestore(&qhp->lock, flag);
1016 complete_rq_drain_wr(qhp, wr); 1026 complete_rq_drain_wr(qhp, wr);
1017 return err; 1027 return err;