aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/cxgb4/qp.c
diff options
context:
space:
mode:
authorKumar Sanghvi <kumaras@chelsio.com>2011-10-24 11:50:21 -0400
committerRoland Dreier <roland@purestorage.com>2011-10-31 14:34:53 -0400
commit581bbe2cd0694a935e0c3ccd7f011e10094f1df6 (patch)
tree38e536efa0d05d76964b09836def2210a00b41b5 /drivers/infiniband/hw/cxgb4/qp.c
parente14d62c05c0b8eff61c6fd46b4a78fb27c8cf38b (diff)
RDMA/cxgb4: Serialize calls to CQ's comp_handler
Commit 01e7da6ba53c ("RDMA/cxgb4: Make sure flush CQ entries are collected on connection close") introduced a potential problem where a CQ's comp_handler can get called simultaneously from different places in the iw_cxgb4 driver. This does not comply with Documentation/infiniband/core_locking.txt, which states that at any given point of time, only one callback per CQ should be active. This problem was reported by Parav Pandit <Parav.Pandit@Emulex.Com>. Based on discussion between Parav Pandit and Steve Wise, this patch fixes the above problem by serializing the calls to a CQ's comp_handler using a spin_lock. Reported-by: Parav Pandit <Parav.Pandit@Emulex.Com> Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com> Acked-by: Steve Wise <swise@opengridcomputing.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c15
1 file changed, 13 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 892fa7c6d310..62c7262a9eb3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -941,8 +941,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
941 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); 941 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
942 spin_unlock(&qhp->lock); 942 spin_unlock(&qhp->lock);
943 spin_unlock_irqrestore(&rchp->lock, flag); 943 spin_unlock_irqrestore(&rchp->lock, flag);
944 if (flushed) 944 if (flushed) {
945 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
945 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); 946 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
947 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
948 }
946 949
947 /* locking hierarchy: cq lock first, then qp lock. */ 950 /* locking hierarchy: cq lock first, then qp lock. */
948 spin_lock_irqsave(&schp->lock, flag); 951 spin_lock_irqsave(&schp->lock, flag);
@@ -952,13 +955,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
952 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); 955 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
953 spin_unlock(&qhp->lock); 956 spin_unlock(&qhp->lock);
954 spin_unlock_irqrestore(&schp->lock, flag); 957 spin_unlock_irqrestore(&schp->lock, flag);
955 if (flushed) 958 if (flushed) {
959 spin_lock_irqsave(&schp->comp_handler_lock, flag);
956 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); 960 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
961 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
962 }
957} 963}
958 964
959static void flush_qp(struct c4iw_qp *qhp) 965static void flush_qp(struct c4iw_qp *qhp)
960{ 966{
961 struct c4iw_cq *rchp, *schp; 967 struct c4iw_cq *rchp, *schp;
968 unsigned long flag;
962 969
963 rchp = get_chp(qhp->rhp, qhp->attr.rcq); 970 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
964 schp = get_chp(qhp->rhp, qhp->attr.scq); 971 schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -966,11 +973,15 @@ static void flush_qp(struct c4iw_qp *qhp)
966 if (qhp->ibqp.uobject) { 973 if (qhp->ibqp.uobject) {
967 t4_set_wq_in_error(&qhp->wq); 974 t4_set_wq_in_error(&qhp->wq);
968 t4_set_cq_in_error(&rchp->cq); 975 t4_set_cq_in_error(&rchp->cq);
976 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
969 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); 977 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
978 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
970 if (schp != rchp) { 979 if (schp != rchp) {
971 t4_set_cq_in_error(&schp->cq); 980 t4_set_cq_in_error(&schp->cq);
981 spin_lock_irqsave(&schp->comp_handler_lock, flag);
972 (*schp->ibcq.comp_handler)(&schp->ibcq, 982 (*schp->ibcq.comp_handler)(&schp->ibcq,
973 schp->ibcq.cq_context); 983 schp->ibcq.cq_context);
984 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
974 } 985 }
975 return; 986 return;
976 } 987 }