author		Eli Cohen <eli@mellanox.co.il>	2008-01-24 09:38:06 -0500
committer	Roland Dreier <rolandd@cisco.com>	2008-02-04 23:20:44 -0500
commit		1d368c546566e249da8181e933c53788093965cf (patch)
tree		08ea2e0280f125f2981ab7ada0d335b5928cc670 /drivers/infiniband/hw/mthca
parent		1203c42e7be1aa0be641b701f42b6d38c2d94b39 (diff)
IB/mthca: Pre-link receive WQEs in Tavor mode
We have recently discovered that Tavor mode requires each WQE in a posted list of receive WQEs to have a valid NDA field at all times.  This requirement holds for regular QPs as well as for SRQs.  This patch pre-links the receive queue in a regular QP and keeps the free list in an SRQ always properly linked.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Reviewed-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
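For readers unfamiliar with the Tavor descriptor format, the program below is a minimal, standalone sketch (not the driver code) of the pre-linking idea the patch applies in mthca_alloc_qp_common(): every receive WQE's NDA field is pointed at the next WQE in the ring, with the last entry wrapping back to the first and bit 0 carrying the DBD indication.  The structure layout, names and constants here are simplified assumptions for illustration; the driver also stores the value big-endian via htonl(), which is skipped here.

/* Standalone sketch of pre-linking a receive ring; names and constants
 * are illustrative assumptions, not the mthca driver's definitions. */
#include <stdint.h>
#include <stdio.h>

#define RQ_MAX		8	/* number of receive WQEs (assumed) */
#define RQ_WQE_SHIFT	6	/* log2 of the WQE stride in bytes (assumed) */

struct fake_next_seg {
	uint32_t nda_op;	/* offset of the next WQE | DBD bit */
};

int main(void)
{
	struct fake_next_seg rq[RQ_MAX];
	int i;

	/* Point every WQE at its successor; the last one wraps to WQE 0. */
	for (i = 0; i < RQ_MAX; ++i)
		rq[i].nda_op = (uint32_t)((((i + 1) % RQ_MAX) << RQ_WQE_SHIFT) | 1);

	for (i = 0; i < RQ_MAX; ++i)
		printf("wqe %d -> next at offset 0x%02x, DBD=%u\n",
		       i, rq[i].nda_op & ~1u, rq[i].nda_op & 1u);

	return 0;
}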
Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	13
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_srq.c	23
2 files changed, 22 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c65731..db5595bbf7f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 {
 	int ret;
 	int i;
+	struct mthca_next_seg *next;
 
 	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	}
 
 	if (mthca_is_memfree(dev)) {
-		struct mthca_next_seg *next;
 		struct mthca_data_seg *scatter;
 		int size = (sizeof (struct mthca_next_seg) +
 			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 						    qp->sq.wqe_shift) +
 					   qp->send_wqe_offset);
 		}
+	} else {
+		for (i = 0; i < qp->rq.max; ++i) {
+			next = get_recv_wqe(qp, i);
+			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
+					      qp->rq.wqe_shift) | 1);
+		}
+
 	}
 
 	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		prev_wqe = qp->rq.last;
 		qp->rq.last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
 		((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index ec63adc1099c..a5ffff6e1026 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 	 * scatter list L_Keys to the sentry value of 0x100.
 	 */
 	for (i = 0; i < srq->max; ++i) {
-		wqe = get_wqe(srq, i);
+		struct mthca_next_seg *next;
 
-		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+		next = wqe = get_wqe(srq, i);
+
+		if (i < srq->max - 1) {
+			*wqe_to_link(wqe) = i + 1;
+			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
+		} else {
+			*wqe_to_link(wqe) = -1;
+			next->nda_op = 0;
+		}
 
 		for (scatter = wqe + sizeof (struct mthca_next_seg);
 		     (void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,12 +478,15 @@ out:
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
 {
 	int ind;
+	struct mthca_next_seg *last_free;
 
 	ind = wqe_addr >> srq->wqe_shift;
 
 	spin_lock(&srq->lock);
 
-	*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+	last_free = get_wqe(srq, srq->last_free);
+	*wqe_to_link(last_free) = ind;
+	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
 	*wqe_to_link(get_wqe(srq, ind)) = -1;
 	srq->last_free = ind;
 
@@ -516,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		prev_wqe  = srq->last;
 		srq->last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */
 
@@ -537,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (i < srq->max_gs)
 			mthca_set_data_seg_inval(wqe);
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << srq->wqe_shift) | 1);
-		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
 
@@ -613,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			break;
 		}
 
-		((struct mthca_next_seg *) wqe)->nda_op =
-			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */
 