author	Steve Wise <swise@opengridcomputing.com>	2014-07-31 15:35:43 -0400
committer	Roland Dreier <roland@purestorage.com>	2014-08-01 17:54:37 -0400
commit	678ea9b5baab6800692b249bdba77c3c07261d61 (patch)
tree	a08991d5dcca6940c7b18cc4a600277fc4e60a13
parent	64aa90f26c06e1cb2aacfb98a7d0eccfbd6c1a91 (diff)
RDMA/cxgb4: Only call CQ completion handler if it is armed
The function __flush_qp() always calls the ULP's CQ completion handler
functions, even if the CQ was not armed. This can crash the system if the
function pointer is NULL. The iSER ULP behaves this way: it registers no
completion handler and never arms the CQ for notification. So now we track
whether the CQ is armed at flush time and only call the completion handlers
if their CQs were armed.

Also, if the RCQ and SCQ are the same CQ, the completion handler was getting
called twice. It should only be called once, after all SQ and RQ WRs are
flushed from the QP. So rearrange the logic to fix this.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
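As a rough illustration of the arm/flush handshake this patch introduces,
here is a minimal userspace model — hypothetical code, not part of the
driver, with all names (model_cq, model_arm_cq, model_flush) invented for
the example. C11 atomics stand in for the kernel's set_bit() and
test_and_clear_bit() on cq->flags:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_cq {
	atomic_bool armed;          /* stands in for CQ_ARMED in cq->flags */
	void (*comp_handler)(void); /* may be NULL, as with iSER */
};

static void model_arm_cq(struct model_cq *cq)
{
	/* models set_bit(CQ_ARMED, &cq->flags) in t4_arm_cq() */
	atomic_store(&cq->armed, true);
}

static bool model_clear_cq_armed(struct model_cq *cq)
{
	/* models test_and_clear_bit(): returns the previous value */
	return atomic_exchange(&cq->armed, false);
}

static void model_flush(struct model_cq *cq, bool flushed)
{
	/* The handler runs only if the CQ was armed AND work was
	 * actually flushed, so a ULP that never arms the CQ can
	 * safely leave comp_handler NULL. */
	if (model_clear_cq_armed(cq) && flushed)
		cq->comp_handler();
}

static void handler(void)
{
	puts("completion handler invoked");
}

int main(void)
{
	struct model_cq cq = { .comp_handler = handler };

	model_flush(&cq, true); /* never armed: handler is skipped */
	model_arm_cq(&cq);
	model_flush(&cq, true); /* armed + flushed: fires exactly once */
	model_flush(&cq, true); /* bit already cleared: skipped again */
	return 0;
}

The test-and-clear also means each arm yields at most one notification,
which is what lets the flush path below fold the shared-CQ case into a
single handler call.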
-rw-r--r--	drivers/infiniband/hw/cxgb4/ev.c	1
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	37
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4.h	11
3 files changed, 37 insertions, 12 deletions
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index d61d0a18f784..a98426fed9ee 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -182,6 +182,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 
 	chp = get_chp(dev, qid);
 	if (chp) {
+		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 086f62f5dc9e..60cfc11a66e4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1066,7 +1066,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 			 struct c4iw_cq *schp)
 {
 	int count;
-	int flushed;
+	int rq_flushed, sq_flushed;
 	unsigned long flag;
 
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
@@ -1084,27 +1084,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 
 	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
-	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, flag);
-	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
-	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
 	spin_lock(&qhp->lock);
 	if (schp != rchp)
 		c4iw_flush_hw_cq(schp);
-	flushed = c4iw_flush_sq(qhp);
+	sq_flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
-	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, flag);
-		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+
+	if (schp == rchp) {
+		if (t4_clear_cq_armed(&rchp->cq) &&
+		    (rq_flushed || sq_flushed)) {
+			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+						   rchp->ibcq.cq_context);
+			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		}
+	} else {
+		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+						   rchp->ibcq.cq_context);
+			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		}
+		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+			spin_lock_irqsave(&schp->comp_handler_lock, flag);
+			(*schp->ibcq.comp_handler)(&schp->ibcq,
+						   schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+		}
 	}
 }
 
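To make the consolidated notification logic above concrete, here is a
self-contained userspace sketch — all names (cq, clear_armed, flush_tail)
hypothetical, with an atomic_bool standing in for the CQ_ARMED bit — of why
the shared-CQ branch fires the handler at most once:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cq {
	atomic_bool armed;
};

static bool clear_armed(struct cq *cq)
{
	/* models t4_clear_cq_armed() */
	return atomic_exchange(&cq->armed, false);
}

static void notify(const char *which)
{
	printf("notify %s CQ\n", which);
}

/* Mirrors the tail of __flush_qp() after this patch. */
static void flush_tail(struct cq *rchp, struct cq *schp,
		       bool rq_flushed, bool sq_flushed)
{
	if (schp == rchp) {
		/* Shared CQ: one test-and-clear, at most one call. */
		if (clear_armed(rchp) && (rq_flushed || sq_flushed))
			notify("shared");
	} else {
		if (clear_armed(rchp) && rq_flushed)
			notify("recv");
		if (clear_armed(schp) && sq_flushed)
			notify("send");
	}
}

int main(void)
{
	struct cq shared = { true };

	/* SQ and RQ both flushed into one armed CQ: prints once,
	 * where the pre-patch logic would have notified twice. */
	flush_tail(&shared, &shared, true, true);
	return 0;
}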
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 68b0a6bf4eb0..d8d7fa3e446d 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -531,6 +531,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
 	return !wq->rq.queue[wq->rq.size].status.db_off;
 }
 
+enum t4_cq_flags {
+	CQ_ARMED	= 1,
+};
+
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
@@ -551,12 +555,19 @@ struct t4_cq {
 	u16 cidx_inc;
 	u8 gen;
 	u8 error;
+	unsigned long flags;
 };
 
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+	return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
 
+	set_bit(CQ_ARMED, &cq->flags);
 	while (cq->cidx_inc > CIDXINC_MASK) {
 		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
 			INGRESSQID(cq->cqid);