aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2017-11-09 10:14:43 -0500
committerDoug Ledford <dledford@redhat.com>2017-11-13 16:59:22 -0500
commitcbb40fadd31c6bbc59104e58ac95c6ef492d038b (patch)
treeeb5c56c4496ea3b53f2a306c97412ccf263a00a8
parent1c8f1da5d851b92aeb81dbbb9ebd516f6e2588f5 (diff)
iw_cxgb4: only call the cq comp_handler when the cq is armed
The ULP's completion handler should only be called if the CQ is armed for notification. Signed-off-by: Steve Wise <swise@opengridcomputing.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
2 files changed, 17 insertions, 11 deletions
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index b8c7cc938bce..a252d5c40ae3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-	spin_lock_irqsave(&chp->comp_handler_lock, flag);
-	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&chp->cq)) {
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
+		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	}
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 1374b41201a9..fefc5fed1778 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -813,10 +813,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 	t4_swcq_produce(cq);
 	spin_unlock_irqrestore(&schp->lock, flag);
 
-	spin_lock_irqsave(&schp->comp_handler_lock, flag);
-	(*schp->ibcq.comp_handler)(&schp->ibcq,
-				   schp->ibcq.cq_context);
-	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&schp->cq)) {
+		spin_lock_irqsave(&schp->comp_handler_lock, flag);
+		(*schp->ibcq.comp_handler)(&schp->ibcq,
+					   schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	}
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -842,10 +844,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 	t4_swcq_produce(cq);
 	spin_unlock_irqrestore(&rchp->lock, flag);
 
-	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
-				   rchp->ibcq.cq_context);
-	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&rchp->cq)) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+					   rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	}
 }
 
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,