path: root/drivers/infiniband/hw/mthca/mthca_srq.c
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_srq.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 47
 1 file changed, 16 insertions(+), 31 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 553d681f6813..a5ffff6e1026 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 	 * scatter list L_Keys to the sentry value of 0x100.
 	 */
 	for (i = 0; i < srq->max; ++i) {
-		wqe = get_wqe(srq, i);
+		struct mthca_next_seg *next;

-		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+		next = wqe = get_wqe(srq, i);
+
+		if (i < srq->max - 1) {
+			*wqe_to_link(wqe) = i + 1;
+			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
+		} else {
+			*wqe_to_link(wqe) = -1;
+			next->nda_op = 0;
+		}

 		for (scatter = wqe + sizeof (struct mthca_next_seg);
 		     (void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ out:
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
 {
 	int ind;
+	struct mthca_next_seg *last_free;

 	ind = wqe_addr >> srq->wqe_shift;

 	spin_lock(&srq->lock);

-	if (likely(srq->first_free >= 0))
-		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
-	else
-		srq->first_free = ind;
-
+	last_free = get_wqe(srq, srq->last_free);
+	*wqe_to_link(last_free) = ind;
+	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
 	*wqe_to_link(get_wqe(srq, ind)) = -1;
 	srq->last_free = ind;

@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	first_ind = srq->first_free;

 	for (nreq = 0; wr; wr = wr->next) {
 		ind = srq->first_free;
-
-		if (unlikely(ind < 0)) {
-			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
-			err = -ENOMEM;
-			*bad_wr = wr;
-			break;
-		}
-
 		wqe = get_wqe(srq, ind);
 		next_ind = *wqe_to_link(wqe);

@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		prev_wqe = srq->last;
 		srq->last = wqe;

-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */

@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (i < srq->max_gs)
 			mthca_set_data_seg_inval(wqe);

-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << srq->wqe_shift) | 1);
-		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);

@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&srq->lock, flags);

 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		ind = srq->first_free;
-
-		if (unlikely(ind < 0)) {
-			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
-			err = -ENOMEM;
-			*bad_wr = wr;
-			break;
-		}
-
 		wqe = get_wqe(srq, ind);
 		next_ind = *wqe_to_link(wqe);

@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			break;
 		}

-		((struct mthca_next_seg *) wqe)->nda_op =
-			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */

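
For readers following the nda_op changes above: after this patch the free-WQE chain keeps both the software link written through *wqe_to_link() and the hardware next-descriptor word nda_op valid at all times. They are set for every WQE in mthca_alloc_srq_buf() and patched up when a WQE is returned in mthca_free_srq_wqe(), which is why the Tavor and Arbel receive-posting paths can drop their per-WR nda_op writes and the first_free < 0 checks. The user-space sketch below models that chaining; it uses simplified stand-in types (fake_srq, fake_next_seg) and htonl() in place of the kernel's cpu_to_be32(), so it illustrates the scheme rather than reproducing the driver's code.

/*
 * Minimal sketch of the free-WQE chaining used above.  The types and
 * helpers here are hypothetical stand-ins, not the mthca definitions.
 */
#include <arpa/inet.h>	/* htonl(), standing in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_next_seg {
	uint32_t nda_op;	/* big-endian (index << wqe_shift) | 1, or 0 at the tail */
	uint32_t ee_nds;	/* unused in this sketch */
};

struct fake_srq {
	int	 max;		/* number of WQEs */
	int	 wqe_shift;	/* log2 of the WQE stride in bytes */
	int	 first_free;
	int	 last_free;
	uint8_t	*buf;
};

static void *get_wqe(struct fake_srq *srq, int n)
{
	return srq->buf + (n << srq->wqe_shift);
}

/* Software free-list link stored just past the next segment. */
static int *wqe_to_link(void *wqe)
{
	return (int *) ((uint8_t *) wqe + sizeof(struct fake_next_seg));
}

/* Mirrors the mthca_alloc_srq_buf() loop after the patch. */
static void init_free_list(struct fake_srq *srq)
{
	for (int i = 0; i < srq->max; ++i) {
		struct fake_next_seg *next = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(next) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(next) = -1;
			next->nda_op = 0;
		}
	}
	srq->first_free = 0;
	srq->last_free  = srq->max - 1;
}

/* Mirrors mthca_free_srq_wqe(): re-link a completed WQE at the tail. */
static void free_wqe(struct fake_srq *srq, uint32_t wqe_addr)
{
	int ind = wqe_addr >> srq->wqe_shift;
	struct fake_next_seg *last_free = get_wqe(srq, srq->last_free);

	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;
}

int main(void)
{
	struct fake_srq srq = { .max = 4, .wqe_shift = 6 };

	srq.buf = calloc(srq.max, 1 << srq.wqe_shift);
	if (!srq.buf)
		return 1;
	init_free_list(&srq);

	/* "Consume" WQE 0 from the head, then return it to the tail. */
	srq.first_free = *wqe_to_link(get_wqe(&srq, 0));
	free_wqe(&srq, 0);	/* wqe_addr of WQE 0 is 0 << wqe_shift */

	for (int i = srq.first_free; i != -1; i = *wqe_to_link(get_wqe(&srq, i)))
		printf("free WQE %d\n", i);

	free(srq.buf);
	return 0;
}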