author	Eli Cohen <eli@mellanox.co.il>	2008-01-24 09:38:06 -0500
committer	Roland Dreier <rolandd@cisco.com>	2008-02-04 23:20:44 -0500
commit	1d368c546566e249da8181e933c53788093965cf (patch)
tree	08ea2e0280f125f2981ab7ada0d335b5928cc670 /drivers/infiniband/hw/mthca/mthca_qp.c
parent	1203c42e7be1aa0be641b701f42b6d38c2d94b39 (diff)
IB/mthca: Pre-link receive WQEs in Tavor mode
We have recently discovered that Tavor mode requires each WQE in a
posted list of receive WQEs to have a valid NDA field at all times.
This requirement holds true for regular QPs as well as for SRQs. This
patch pre-links the receive queue in a regular QP and keeps the SRQ
free list properly linked at all times.
Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Reviewed-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
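
To make the pre-linking scheme concrete, here is a minimal standalone
sketch of the idea, assuming a hypothetical ring of RQ_SIZE receive WQEs
with a stride of 1 << WQE_SHIFT bytes (stand-ins for qp->rq.max and
qp->rq.wqe_shift); it illustrates the technique and is not the driver
code itself:

/*
 * Illustration only -- not mthca driver code.  RQ_SIZE and WQE_SHIFT
 * are hypothetical stand-ins for qp->rq.max and qp->rq.wqe_shift.
 */
#include <stdint.h>
#include <arpa/inet.h>		/* htonl() */

#define RQ_SIZE		64	/* receive WQEs in the ring (assumed) */
#define WQE_SHIFT	6	/* log2 of per-WQE stride in bytes (assumed) */

struct next_seg {
	uint32_t nda_op;	/* big-endian: next-WQE offset | NDA-valid bit */
};

/*
 * Link every WQE to its successor once, at queue creation time, so the
 * hardware always sees a valid NDA no matter which entries are posted.
 */
static void prelink_rq(struct next_seg *wqe[RQ_SIZE])
{
	int i;

	for (i = 0; i < RQ_SIZE; ++i)
		/*
		 * Entry i points at entry (i + 1) % RQ_SIZE, so the last
		 * WQE wraps around to the first; OR-ing in 1 marks the
		 * link as valid, mirroring the "| 1" in the patch below.
		 */
		wqe[i]->nda_op = htonl(((uint32_t)((i + 1) % RQ_SIZE)
					<< WQE_SHIFT) | 1);
}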
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c65731..db5595bbf7f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 {
 	int ret;
 	int i;
+	struct mthca_next_seg *next;
 
 	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	}
 
 	if (mthca_is_memfree(dev)) {
-		struct mthca_next_seg *next;
 		struct mthca_data_seg *scatter;
 		int size = (sizeof (struct mthca_next_seg) +
 			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 					    qp->sq.wqe_shift) +
 					   qp->send_wqe_offset);
 		}
+	} else {
+		for (i = 0; i < qp->rq.max; ++i) {
+			next = get_recv_wqe(qp, i);
+			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
+					     qp->rq.wqe_shift) | 1);
+		}
+
 	}
 
 	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		prev_wqe = qp->rq.last;
 		qp->rq.last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
 		((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
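
Both hunks in mthca_tavor_post_receive() are pure deletions: with the
receive ring pre-linked at QP creation and never unlinked, the post path
no longer needs to clear nda_op on the newly posted WQE or rewrite the
previous WQE's nda_op, and the wmb() that ordered that rewrite before
the ee_nds update can go as well, trimming work from the receive posting
fast path.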