author    | Steve Wise <swise@opengridcomputing.com>  | 2017-11-30 12:41:56 -0500
committer | Jason Gunthorpe <jgg@mellanox.com>        | 2017-12-07 16:09:59 -0500
commit    | 335ebf6fa35ca1c59b73f76fad19b249d3550e86
tree      | 1f25f1ebd3cc579081cc66861f19b0ffee31d3ee
parent    | d0e312fe3d34c1bc014a7f8ec6540d05e8077483
iw_cxgb4: only clear the ARMED bit if a notification is needed
In __flush_qp(), the CQ ARMED bit was being cleared regardless of
whether any notification was actually needed. This left the iser
termination logic stuck in ib_drain_sq(): the CQ was no longer marked
ARMED, so the drain CQE notification was never triggered (the ordering
hazard is sketched below).
The bug was exposed by the merge of commit cbb40fadd31c ("iw_cxgb4:
only call the cq comp_handler when the cq is armed").
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
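To make the ordering hazard concrete, here is a minimal user-space
sketch, not the driver code: fake_cq, test_and_clear_armed() and the
flush flags below are illustrative stand-ins for the cxgb4 CQ state,
t4_clear_cq_armed() and the flush results. Because && evaluates left
to right, putting the test-and-clear call first consumes the ARMED bit
even when nothing was flushed.

#include <stdbool.h>
#include <stdio.h>

struct fake_cq {
        bool armed;     /* stands in for the CQ ARMED bit */
};

/* Analogue of t4_clear_cq_armed(): clears the bit, returns old value. */
static bool test_and_clear_armed(struct fake_cq *cq)
{
        bool was_armed = cq->armed;

        cq->armed = false;
        return was_armed;
}

int main(void)
{
        struct fake_cq cq = { .armed = true };
        bool rq_flushed = false, sq_flushed = false;    /* nothing to notify */

        /* Buggy order: the side effect fires although no flush happened. */
        if (test_and_clear_armed(&cq) && (rq_flushed || sq_flushed))
                puts("notify");         /* not reached */
        printf("buggy order: armed=%d (bit consumed for nothing)\n", cq.armed);

        cq.armed = true;                /* reset for the fixed variant */

        /* Fixed order: && short-circuits, so the ARMED bit is untouched. */
        if ((rq_flushed || sq_flushed) && test_and_clear_armed(&cq))
                puts("notify");
        printf("fixed order: armed=%d (still armed for a later drain)\n", cq.armed);
        return 0;
}

Running this prints armed=0 for the buggy order and armed=1 for the
fixed one; the latter is the state a subsequent drain depends on.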
drivers/infiniband/hw/cxgb4/qp.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe433136..355e288ec969 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1285,21 +1285,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	spin_unlock_irqrestore(&rchp->lock, flag);
 
 	if (schp == rchp) {
-		if (t4_clear_cq_armed(&rchp->cq) &&
-		    (rq_flushed || sq_flushed)) {
+		if ((rq_flushed || sq_flushed) &&
+		    t4_clear_cq_armed(&rchp->cq)) {
 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
 						   rchp->ibcq.cq_context);
 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 		}
 	} else {
-		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+		if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
 						   rchp->ibcq.cq_context);
 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 		}
-		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+		if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
 			spin_lock_irqsave(&schp->comp_handler_lock, flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
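The reordered tests rely on C's short-circuit evaluation of &&:
t4_clear_cq_armed(), which both tests and clears the ARMED bit, now
runs only when rq_flushed or sq_flushed is set, so a CQ with nothing to
notify stays armed and a later ib_drain_sq() still receives its drain
completion notification.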