author	Krishna Kumar <krkumar2@in.ibm.com>	2006-11-24 05:33:48 -0500
committer	Roland Dreier <rolandd@cisco.com>	2006-11-29 18:33:08 -0500
commit	c9edea298e52faeb0d4ae875cb712a5d69ba1966 (patch)
tree	9f89af31110b7ad8acce86fabb10c98005dedb75 /drivers/infiniband
parent	7013696a5f5ccd0d847d5e8b841d0b0b312277c8 (diff)
RDMA/amso1100: Prevent deadlock in destroy QP
It is possible to swap the CQs used for send_cq and recv_cq when creating two different QPs. If these two QPs are then destroyed at the same time, an AB-BA deadlock can occur because the CQ locks are taken out of order. Fix this by always taking the CQ locks in a fixed order.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
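To make the deadlock concrete, here is a minimal standalone sketch of the same address-ordering idea, using pthread mutexes in place of the driver's spinlocks. Every name in it (lock_pair, destroy_qp, cq_a, cq_b) is illustrative and not part of the amso1100 driver; it only demonstrates the pattern the patch introduces.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cq_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cq_b = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of c2_lock_cqs(): always take the lower-addressed lock first,
 * so every thread agrees on one global acquisition order.  (The kernel
 * patch compares the CQ pointers the same way.) */
static void lock_pair(pthread_mutex_t *x, pthread_mutex_t *y)
{
	if (x == y) {
		pthread_mutex_lock(x);
	} else if (x < y) {
		pthread_mutex_lock(x);
		pthread_mutex_lock(y);
	} else {
		pthread_mutex_lock(y);
		pthread_mutex_lock(x);
	}
}

static void unlock_pair(pthread_mutex_t *x, pthread_mutex_t *y)
{
	pthread_mutex_unlock(x);
	if (x != y)
		pthread_mutex_unlock(y);
}

/* Each thread sees the pair in the opposite order, mimicking two QPs
 * whose send/recv CQs are swapped.  If lock_pair() naively locked
 * send_cq then recv_cq, this loop would eventually deadlock AB-BA. */
static void *destroy_qp(void *arg)
{
	int swapped = *(int *)arg;
	pthread_mutex_t *send_cq = swapped ? &cq_b : &cq_a;
	pthread_mutex_t *recv_cq = swapped ? &cq_a : &cq_b;

	for (int i = 0; i < 1000000; i++) {
		lock_pair(send_cq, recv_cq);
		/* critical section: QP teardown would happen here */
		unlock_pair(send_cq, recv_cq);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int first = 0, second = 1;

	pthread_create(&t1, NULL, destroy_qp, &first);
	pthread_create(&t2, NULL, destroy_qp, &second);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("both QPs destroyed, no deadlock\n");
	return 0;
}

Build with "cc -pthread"; without the address ordering in lock_pair() the two threads would sooner or later each hold one lock while waiting for the other.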
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_qp.c | 36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 5bcf697aa335..179d005ed4a5 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -564,6 +564,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
 	return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	struct c2_cq *send_cq;
@@ -576,15 +602,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
-
+	c2_lock_cqs(send_cq, recv_cq);
 	c2_free_qpn(c2dev, qp->qpn);
-
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	c2_unlock_cqs(send_cq, recv_cq);
 
 	/*
 	 * Destory qp in the rnic...
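A note on two details of the new helpers. spin_lock_nested() with SINGLE_DEPTH_NESTING is a lockdep annotation: both CQ locks belong to the same lock class, so without it the lock validator would flag the second acquisition as a possible recursive deadlock; the annotation records that taking one extra lock of the same class is intentional and, given the fixed address order, safe. Similarly, only the outer lock uses the _irq variants: the first spin_lock_irq() already disables interrupts, so the inner lock can be taken and released with the plain spin_lock_nested()/spin_unlock() forms.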