about summary refs log tree commit diff stats
path: root/drivers/infiniband
diff options
context:
space:
mode:
authorEli Cohen <eli@mellanox.co.il>2008-02-04 23:20:44 -0500
committerRoland Dreier <rolandd@cisco.com>2008-02-04 23:20:44 -0500
commit1203c42e7be1aa0be641b701f42b6d38c2d94b39 (patch)
tree3f84fb72b8fb61d460f3207d21ddf06b4a78700f /drivers/infiniband
parent1d96354e617990799b1cb5d7ff8f7c467b8767c8 (diff)
IB/mthca: Remove checks for srq->first_free < 0
The SRQ receive posting functions make sure that srq->first_free never becomes negative, so we can remove tests of whether it is negative.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c26
1 file changed, 3 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 553d681f6813..ec63adc1099c 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -475,11 +475,7 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
475 475
476 spin_lock(&srq->lock); 476 spin_lock(&srq->lock);
477 477
478 if (likely(srq->first_free >= 0)) 478 *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
479 *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
480 else
481 srq->first_free = ind;
482
483 *wqe_to_link(get_wqe(srq, ind)) = -1; 479 *wqe_to_link(get_wqe(srq, ind)) = -1;
484 srq->last_free = ind; 480 srq->last_free = ind;
485 481
@@ -506,15 +502,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
506 first_ind = srq->first_free; 502 first_ind = srq->first_free;
507 503
508 for (nreq = 0; wr; wr = wr->next) { 504 for (nreq = 0; wr; wr = wr->next) {
509 ind = srq->first_free; 505 ind = srq->first_free;
510
511 if (unlikely(ind < 0)) {
512 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
513 err = -ENOMEM;
514 *bad_wr = wr;
515 break;
516 }
517
518 wqe = get_wqe(srq, ind); 506 wqe = get_wqe(srq, ind);
519 next_ind = *wqe_to_link(wqe); 507 next_ind = *wqe_to_link(wqe);
520 508
@@ -614,15 +602,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
614 spin_lock_irqsave(&srq->lock, flags); 602 spin_lock_irqsave(&srq->lock, flags);
615 603
616 for (nreq = 0; wr; ++nreq, wr = wr->next) { 604 for (nreq = 0; wr; ++nreq, wr = wr->next) {
617 ind = srq->first_free; 605 ind = srq->first_free;
618
619 if (unlikely(ind < 0)) {
620 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
621 err = -ENOMEM;
622 *bad_wr = wr;
623 break;
624 }
625
626 wqe = get_wqe(srq, ind); 606 wqe = get_wqe(srq, ind);
627 next_ind = *wqe_to_link(wqe); 607 next_ind = *wqe_to_link(wqe);
628 608