diff options
author | Eli Cohen <eli@dev.mellanox.co.il> | 2014-12-02 05:26:18 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-12-08 20:45:56 -0500 |
commit | 6a4f139aae77d601bd146a6b3c4e12e7e4e0226f (patch) | |
tree | d7816ee1255a0d3dc2261983d81cd038627f3d8d /drivers/infiniband/hw/mlx5/qp.c | |
parent | 28c167fa8f8ea1850e6053bffb8ee30c1ac4411a (diff) |
mlx5: Fix sparse warnings
1. Add required __acquire/__release statements to balance spinlock usage.
2. Change the index parameter of begin_wqe() to unsigned so it matches the
type of the argument supplied by the caller.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r-- | drivers/infiniband/hw/mlx5/qp.c | 16 |
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0e2ef9fe0e29..1cae1c7132b4 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1011,9 +1011,14 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv | |||
1011 | } | 1011 | } |
1012 | } else { | 1012 | } else { |
1013 | spin_lock_irq(&send_cq->lock); | 1013 | spin_lock_irq(&send_cq->lock); |
1014 | __acquire(&recv_cq->lock); | ||
1014 | } | 1015 | } |
1015 | } else if (recv_cq) { | 1016 | } else if (recv_cq) { |
1016 | spin_lock_irq(&recv_cq->lock); | 1017 | spin_lock_irq(&recv_cq->lock); |
1018 | __acquire(&send_cq->lock); | ||
1019 | } else { | ||
1020 | __acquire(&send_cq->lock); | ||
1021 | __acquire(&recv_cq->lock); | ||
1017 | } | 1022 | } |
1018 | } | 1023 | } |
1019 | 1024 | ||
@@ -1033,10 +1038,15 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re | |||
1033 | spin_unlock_irq(&recv_cq->lock); | 1038 | spin_unlock_irq(&recv_cq->lock); |
1034 | } | 1039 | } |
1035 | } else { | 1040 | } else { |
1041 | __release(&recv_cq->lock); | ||
1036 | spin_unlock_irq(&send_cq->lock); | 1042 | spin_unlock_irq(&send_cq->lock); |
1037 | } | 1043 | } |
1038 | } else if (recv_cq) { | 1044 | } else if (recv_cq) { |
1045 | __release(&send_cq->lock); | ||
1039 | spin_unlock_irq(&recv_cq->lock); | 1046 | spin_unlock_irq(&recv_cq->lock); |
1047 | } else { | ||
1048 | __release(&recv_cq->lock); | ||
1049 | __release(&send_cq->lock); | ||
1040 | } | 1050 | } |
1041 | } | 1051 | } |
1042 | 1052 | ||
@@ -2411,7 +2421,7 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) | |||
2411 | 2421 | ||
2412 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | 2422 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, |
2413 | struct mlx5_wqe_ctrl_seg **ctrl, | 2423 | struct mlx5_wqe_ctrl_seg **ctrl, |
2414 | struct ib_send_wr *wr, int *idx, | 2424 | struct ib_send_wr *wr, unsigned *idx, |
2415 | int *size, int nreq) | 2425 | int *size, int nreq) |
2416 | { | 2426 | { |
2417 | int err = 0; | 2427 | int err = 0; |
@@ -2737,6 +2747,8 @@ out: | |||
2737 | 2747 | ||
2738 | if (bf->need_lock) | 2748 | if (bf->need_lock) |
2739 | spin_lock(&bf->lock); | 2749 | spin_lock(&bf->lock); |
2750 | else | ||
2751 | __acquire(&bf->lock); | ||
2740 | 2752 | ||
2741 | /* TBD enable WC */ | 2753 | /* TBD enable WC */ |
2742 | if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { | 2754 | if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { |
@@ -2753,6 +2765,8 @@ out: | |||
2753 | bf->offset ^= bf->buf_size; | 2765 | bf->offset ^= bf->buf_size; |
2754 | if (bf->need_lock) | 2766 | if (bf->need_lock) |
2755 | spin_unlock(&bf->lock); | 2767 | spin_unlock(&bf->lock); |
2768 | else | ||
2769 | __release(&bf->lock); | ||
2756 | } | 2770 | } |
2757 | 2771 | ||
2758 | spin_unlock_irqrestore(&qp->sq.lock, flags); | 2772 | spin_unlock_irqrestore(&qp->sq.lock, flags); |