author     Kumar Sanghvi <kumaras@chelsio.com>     2011-10-24 11:50:21 -0400
committer  Roland Dreier <roland@purestorage.com>  2011-10-31 14:34:53 -0400
commit     581bbe2cd0694a935e0c3ccd7f011e10094f1df6
tree       38e536efa0d05d76964b09836def2210a00b41b5
parent     e14d62c05c0b8eff61c6fd46b4a78fb27c8cf38b
RDMA/cxgb4: Serialize calls to CQ's comp_handler
Commit 01e7da6ba53c ("RDMA/cxgb4: Make sure flush CQ entries are collected on connection close") introduced a potential problem where a CQ's comp_handler can get called simultaneously from different places in the iw_cxgb4 driver. This does not comply with Documentation/infiniband/core_locking.txt, which states that at any given point in time only one callback per CQ should be active. This problem was reported by Parav Pandit <Parav.Pandit@Emulex.Com>.

Based on discussion between Parav Pandit and Steve Wise, this patch fixes the above problem by serializing the calls to a CQ's comp_handler using a spin_lock.

Reported-by: Parav Pandit <Parav.Pandit@Emulex.Com>
Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c        1
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c       10
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h  1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c       15
4 files changed, 23 insertions, 4 deletions
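Before the diff itself, a minimal sketch of the locking pattern the patch applies at each completion-notification site may help. The struct and field names below follow the driver, but notify_cq() and the #includes are illustrative assumptions for this sketch, not code added by the patch:

/* Sketch only: condensed from the hunks below, not the full driver code. */
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

struct c4iw_cq {
	struct ib_cq ibcq;
	spinlock_t lock;			/* protects CQ state */
	spinlock_t comp_handler_lock;		/* serializes comp_handler upcalls */
	/* ... other members omitted ... */
};

/*
 * Hypothetical wrapper showing the pattern: every path that invokes the
 * CQ's comp_handler takes comp_handler_lock first, so only one callback
 * per CQ is active at a time, as required by
 * Documentation/infiniband/core_locking.txt.
 */
static void notify_cq(struct c4iw_cq *chp)
{
	unsigned long flag;

	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}

The lock itself is initialized with spin_lock_init() in c4iw_create_cq(), as the first hunk below shows; the remaining hunks wrap each existing comp_handler invocation in the same lock/unlock pair.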
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 901c5fbf71a4..f35a935267e7 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	chp->cq.size--;				/* status page */
 	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
+	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c13041a0aeba..397cb36cf103 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 {
 	struct ib_event event;
 	struct c4iw_qp_attributes attrs;
+	unsigned long flag;
 
 	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
 	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
+	spin_lock_irqsave(&chp->comp_handler_lock, flag);
 	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -183,11 +186,14 @@ out:
 int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 {
 	struct c4iw_cq *chp;
+	unsigned long flag;
 
 	chp = get_chp(dev, qid);
-	if (chp)
+	if (chp) {
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-	else
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	} else
 		PDBG("%s unknown cqid 0x%x\n", __func__, qid);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4f045375c8e2..02f015fc3ed2 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -309,6 +309,7 @@ struct c4iw_cq {
 	struct c4iw_dev *rhp;
 	struct t4_cq cq;
 	spinlock_t lock;
+	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
 };
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 892fa7c6d310..62c7262a9eb3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -941,8 +941,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
@@ -952,13 +955,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&schp->comp_handler_lock, flag);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	}
 }
 
 static void flush_qp(struct c4iw_qp *qhp)
 {
 	struct c4iw_cq *rchp, *schp;
+	unsigned long flag;
 
 	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
 	schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -966,11 +973,15 @@ static void flush_qp(struct c4iw_qp *qhp)
 	if (qhp->ibqp.uobject) {
 		t4_set_wq_in_error(&qhp->wq);
 		t4_set_cq_in_error(&rchp->cq);
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 		if (schp != rchp) {
 			t4_set_cq_in_error(&schp->cq);
+			spin_lock_irqsave(&schp->comp_handler_lock, flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 					schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
 		}
 		return;
 	}