diff options
author | Yishai Hadas <yishaih@mellanox.com> | 2016-06-22 10:27:29 -0400 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2016-06-23 10:07:03 -0400 |
commit | a6100603a4a87fc436199362bdb81cb849faaf6e (patch) | |
tree | b1b993ebf09f7647d1c17ec97c89fb4dd5ede3ff | |
parent | f2940e2c76bb554a7fbdd28ca5b90904117a9e96 (diff) |
IB/mlx4: Fix error flow when sending mads under SRIOV
Fix mad send error flow to prevent double freeing address handles,
and leaking tx_ring entries when SRIOV is active.
If ib_mad_post_send fails, the address handle pointer in the tx_ring entry
must be set to NULL (or there will be a double-free) and tx_tail must be
incremented (or there will be a leak of tx_ring entries).
The tx_ring is handled the same way in the send-completion handler.
Fixes: 37bfc7c1e83f ("IB/mlx4: SR-IOV multiplex and demultiplex MADs")
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Reviewed-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- | drivers/infiniband/hw/mlx4/mad.c | 24 |
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index d68f506c1922..9c2e53d28f98 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
527 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); | 527 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); |
528 | spin_unlock(&tun_qp->tx_lock); | 528 | spin_unlock(&tun_qp->tx_lock); |
529 | if (ret) | 529 | if (ret) |
530 | goto out; | 530 | goto end; |
531 | 531 | ||
532 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); | 532 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); |
533 | if (tun_qp->tx_ring[tun_tx_ix].ah) | 533 | if (tun_qp->tx_ring[tun_tx_ix].ah) |
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
596 | wr.wr.send_flags = IB_SEND_SIGNALED; | 596 | wr.wr.send_flags = IB_SEND_SIGNALED; |
597 | 597 | ||
598 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); | 598 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); |
599 | out: | 599 | if (!ret) |
600 | if (ret) | 600 | return 0; |
601 | ib_destroy_ah(ah); | 601 | out: |
602 | spin_lock(&tun_qp->tx_lock); | ||
603 | tun_qp->tx_ix_tail++; | ||
604 | spin_unlock(&tun_qp->tx_lock); | ||
605 | tun_qp->tx_ring[tun_tx_ix].ah = NULL; | ||
606 | end: | ||
607 | ib_destroy_ah(ah); | ||
602 | return ret; | 608 | return ret; |
603 | } | 609 | } |
604 | 610 | ||
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1326 | 1332 | ||
1327 | 1333 | ||
1328 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); | 1334 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); |
1335 | if (!ret) | ||
1336 | return 0; | ||
1337 | |||
1338 | spin_lock(&sqp->tx_lock); | ||
1339 | sqp->tx_ix_tail++; | ||
1340 | spin_unlock(&sqp->tx_lock); | ||
1341 | sqp->tx_ring[wire_tx_ix].ah = NULL; | ||
1329 | out: | 1342 | out: |
1330 | if (ret) | 1343 | ib_destroy_ah(ah); |
1331 | ib_destroy_ah(ah); | ||
1332 | return ret; | 1344 | return ret; |
1333 | } | 1345 | } |
1334 | 1346 | ||