| author | Steve Wise <swise@opengridcomputing.com> | 2016-10-18 17:04:39 -0400 |
|---|---|---|
| committer | Doug Ledford <dledford@redhat.com> | 2016-11-16 20:10:36 -0500 |
| commit | 4ff522ea47944ffd3d4d27023ace8bc6a722c834 | |
| tree | 8f3a55cb119b4a0fc58485eb0368375a38aad56e /drivers/infiniband/hw | |
| parent | 6fa1f2f0aa6191193704b9ff10e5a2cafe540738 | |
iw_cxgb4: set *bad_wr for post_send/post_recv errors
There are a few cases in c4iw_post_send() and c4iw_post_receive()
where *bad_wr is not set when an error is returned. This can
cause a crash if the application tries to use bad_wr.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
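
For context, the verbs contract is that ib_post_send()/ib_post_recv() point *bad_wr at the first work request that could not be posted whenever they return an error, so the caller knows where in the chain to clean up. The sketch below is a hypothetical upper-layer caller, not part of this patch; post_and_report() and its error handling are illustrative only, showing why leaving *bad_wr unset in the early-return paths changed below can crash an application.

```c
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical ULP caller (not from this patch): relies on the provider
 * setting *bad_wr to the first unposted WR whenever the post fails.
 */
static void post_and_report(struct ib_qp *qp, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	ret = ib_post_send(qp, wr, &bad_wr);
	if (ret) {
		/*
		 * Before this fix, an early -EINVAL/-ENOMEM from iw_cxgb4
		 * left bad_wr uninitialized, so dereferencing it here
		 * could touch garbage and crash.
		 */
		pr_err("post_send failed %d at wr_id %llu\n",
		       ret, bad_wr->wr_id);
	}
}
```

The fix below simply assigns *bad_wr = wr before each early return, satisfying that contract.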
Diffstat (limited to 'drivers/infiniband/hw')
| -rw-r--r-- | drivers/infiniband/hw/cxgb4/qp.c | 4 |
1 file changed, 4 insertions, 0 deletions
```diff
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f57deba6717c..5790e1dbd618 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -797,11 +797,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -EINVAL;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
 	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -ENOMEM;
 	}
 	while (wr) {
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -EINVAL;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
 	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -ENOMEM;
 	}
 	while (wr) {
```
