author     Roland Dreier <rolandd@cisco.com>   2005-09-18 17:00:17 -0400
committer  Roland Dreier <rolandd@cisco.com>   2005-09-19 01:02:38 -0400
commit     3853194c2e174cee4da093c67bd54cbf9a38559a (patch)
tree       3939280d0bf49b4b24068215f30aefdc38aaf0bc
parent     c915033fc62d7186d243d89f88782d6be33fd8f6 (diff)
[PATCH] IB/mthca: Fix posting work requests to shared receive queues
The error handling paths in mthca_tavor_post_srq_recv() and
mthca_arbel_post_srq_recv() are quite bogus, the result of a
screwed-up merge. Fix them so they work as intended.
Pointed out by Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
 drivers/infiniband/hw/mthca/mthca_srq.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index fe06cc0df936..45dedd2787a4 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -409,7 +409,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe = get_wqe(srq, ind);
@@ -427,7 +427,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			err = -EINVAL;
 			*bad_wr = wr;
 			srq->last = prev_wqe;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
@@ -456,8 +456,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		srq->first_free = next_ind;
 	}
 
-	return nreq;
-
 	if (likely(nreq)) {
 		__be32 doorbell[2];
 
@@ -501,7 +499,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe = get_wqe(srq, ind);
@@ -517,7 +515,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (unlikely(wr->num_sge > srq->max_gs)) {
 			err = -EINVAL;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
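
For context, below is a minimal, self-contained sketch of the posting-loop shape this patch restores. The identifiers here (srq_ctx, recv_wr, ring_doorbell, post_srq_recv_sketch) are illustrative stand-ins, not the real mthca code; the point is only that a per-work-request error breaks out of the loop instead of returning, so control still reaches the doorbell write for the requests that were already queued.

#include <errno.h>

struct recv_wr {
	struct recv_wr *next;
	int num_sge;
};

struct srq_ctx {
	int free_slots;
	int max_gs;
};

static void ring_doorbell(struct srq_ctx *srq, int nreq)
{
	/* In the real driver this writes the SRQ doorbell register. */
	(void) srq;
	(void) nreq;
}

int post_srq_recv_sketch(struct srq_ctx *srq, struct recv_wr *wr,
			 struct recv_wr **bad_wr)
{
	int err = 0;
	int nreq;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (srq->free_slots == 0) {
			err = -ENOMEM;		/* queue full */
			*bad_wr = wr;
			break;			/* not "return nreq;" */
		}
		if (wr->num_sge > srq->max_gs) {
			err = -EINVAL;		/* scatter list too long */
			*bad_wr = wr;
			break;			/* still fall through below */
		}
		/* ... build and link the WQE for this work request ... */
		--srq->free_slots;
	}

	if (nreq)
		ring_doorbell(srq, nreq);	/* ring for what was posted */

	return err;
}

Returning early, as the pre-fix code did, skips the doorbell for work requests that were already queued; and in mthca_tavor_post_srq_recv() the stray "return nreq;" removed above sat right before the doorbell block, so that block was never reached at all.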