author		Roland Dreier <rolandd@cisco.com>	2009-09-05 23:24:49 -0400
committer	Roland Dreier <rolandd@cisco.com>	2009-09-05 23:24:49 -0400
commit		338a8fad27908f64a0d249cc9f5c7d4ddb7e5684
tree		5033fb5dbf31c967af592a41da04065ed2493ddd /drivers/infiniband
parent		ff149b2a168296c74763cb4a6e7054bdb0a426a1
IB/mlx4: Annotate CQ locking
mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() are helper functions that
lock/unlock both CQs attached to a QP in the proper order to avoid
AB-BA deadlocks. Annotate this so sparse can understand what's going
on (and warn us if we misuse these functions).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
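
The pattern being annotated is the classic one: impose a global order on the two locks (here by CQ number) so that any two threads taking the same pair always nest them the same way, which rules out AB-BA deadlock. Below is a minimal standalone sketch of that idea and of the sparse context annotations, not code from the patch: it uses pthread mutexes and a hypothetical demo_cq struct in place of mlx4_ib_cq and the kernel spinlocks, and stubs out the __acquires/__releases/__acquire/__release macros (which the kernel gets from <linux/compiler.h>) so it compiles with an ordinary compiler.

/*
 * Illustrative sketch only: ordered acquisition of two locks keyed by
 * a numeric id, mirroring mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs().
 */
#include <pthread.h>
#include <stdio.h>

/* Stubs standing in for the kernel's sparse annotations. */
#define __acquires(x)          /* sparse: function exits with x held */
#define __releases(x)          /* sparse: function enters with x held */
#define __acquire(x) (void)0   /* sparse: balance the context count */
#define __release(x) (void)0

struct demo_cq {               /* hypothetical stand-in for mlx4_ib_cq */
        int cqn;               /* ordering key, like mcq.cqn */
        pthread_mutex_t lock;
};

/* Always take the lower-numbered lock first, so two threads locking
 * the same pair in opposite roles cannot deadlock. */
static void demo_lock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                pthread_mutex_lock(&send_cq->lock);
                __acquire(&recv_cq->lock);  /* one real lock, two annotations */
        } else if (send_cq->cqn < recv_cq->cqn) {
                pthread_mutex_lock(&send_cq->lock);
                pthread_mutex_lock(&recv_cq->lock);
        } else {
                pthread_mutex_lock(&recv_cq->lock);
                pthread_mutex_lock(&send_cq->lock);
        }
}

static void demo_unlock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                pthread_mutex_unlock(&send_cq->lock);
        } else {
                /* Unlock order does not matter for deadlock avoidance. */
                pthread_mutex_unlock(&recv_cq->lock);
                pthread_mutex_unlock(&send_cq->lock);
        }
}

int main(void)
{
        struct demo_cq a = { .cqn = 1, .lock = PTHREAD_MUTEX_INITIALIZER };
        struct demo_cq b = { .cqn = 2, .lock = PTHREAD_MUTEX_INITIALIZER };

        /* Both calls acquire a.lock before b.lock, whichever is "send". */
        demo_lock_cqs(&a, &b);
        demo_unlock_cqs(&a, &b);
        demo_lock_cqs(&b, &a);
        demo_unlock_cqs(&b, &a);
        printf("locked and unlocked in both role orders without deadlock\n");
        return 0;
}

Note the subtle point the patch handles in the send_cq == recv_cq case: only one lock exists, but the function is annotated as acquiring two, so a bare __acquire()/__release() is needed to keep sparse's context counter balanced.
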
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c4a02648c8af..219b10397b4d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
 }
 
 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
 		spin_lock_irq(&send_cq->lock);
-	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+		__acquire(&recv_cq->lock);
+	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
 		spin_lock_irq(&send_cq->lock);
 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
 	} else {
@@ -628,10 +630,12 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
 }
 
 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+	__releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
+		__release(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
 		spin_unlock(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
 	} else {