about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mthca/mthca_srq.c
diff options
context:
space:
mode:
authorRoland Dreier <rolandd@cisco.com>2005-10-06 16:25:16 -0400
committerRoland Dreier <rolandd@cisco.com>2005-10-17 18:20:28 -0400
commite23d6d2b090658007732770720a44375cba23200 (patch)
tree277afdc20477a3ab8ca9a1ff26c478e047a3a581 /drivers/infiniband/hw/mthca/mthca_srq.c
parent90f104da22bbf2e2b8a2136c12fb4e013fccf504 (diff)
[IB] mthca: detect SRQ overflow
The hardware relies on us keeping one extra work request that never gets used in SRQs. Add checks to the SRQ work request posting functions so that they fail when someone is about to use up that extra work request, rather than when someone uses the very last work request. Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_srq.c')
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_srq.c 15
1 files changed, 15 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 13d2290261d9..e464321a7aa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -438,6 +438,14 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
438 438
439 wqe = get_wqe(srq, ind); 439 wqe = get_wqe(srq, ind);
440 next_ind = *wqe_to_link(wqe); 440 next_ind = *wqe_to_link(wqe);
441
442 if (next_ind < 0) {
443 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
444 err = -ENOMEM;
445 *bad_wr = wr;
446 break;
447 }
448
441 prev_wqe = srq->last; 449 prev_wqe = srq->last;
442 srq->last = wqe; 450 srq->last = wqe;
443 451
@@ -529,6 +537,13 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
529 wqe = get_wqe(srq, ind); 537 wqe = get_wqe(srq, ind);
530 next_ind = *wqe_to_link(wqe); 538 next_ind = *wqe_to_link(wqe);
531 539
540 if (next_ind < 0) {
541 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
542 err = -ENOMEM;
543 *bad_wr = wr;
544 break;
545 }
546
532 ((struct mthca_next_seg *) wqe)->nda_op = 547 ((struct mthca_next_seg *) wqe)->nda_op =
533 cpu_to_be32((next_ind << srq->wqe_shift) | 1); 548 cpu_to_be32((next_ind << srq->wqe_shift) | 1);
534 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 549 ((struct mthca_next_seg *) wqe)->ee_nds = 0;