author	Roland Dreier <roland@topspin.com>	2005-04-16 18:26:33 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:26:33 -0400
commit	ddf841f052fa218547c86169d1074968eca8c503 (patch)
tree	a7e6e193f0a9fce9652b1c8b33913b199b3a4c49
parent	d10ddbf6d7f6699c386d1f41bf542189de32b6be (diff)
[PATCH] IB/mthca: update receive queue initialization for new HCAs
Update initialization of the receive queue to match the new documentation. This change is required to support the new MT25204 HCA.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
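As a rough illustration of what the new initialization does, here is a minimal sketch in plain C. All names in it (init_recv_queue, next_seg, data_seg, INVAL_LKEY, rq_buf) are illustrative stand-ins rather than the driver's own mthca_* types, and the cpu_to_be32 byte-order conversion the driver performs is omitted. The idea is that each receive WQE's ee_nds field now carries the actual WQE size (header plus max_gs scatter entries, in 16-byte units) instead of being derived from the WQE stride, and every scatter entry is pre-stamped with the reserved invalid lkey that marks an unused entry (the same reserved lkey the posting path below uses to terminate a short scatter list).

#include <stdint.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins for the driver's WQE segments;
 * only the fields touched by the patch are shown. */
struct next_seg {
	uint32_t nda_op;	/* next WQE offset and opcode */
	uint32_t ee_nds;	/* DBD flag and WQE size in 16-byte units */
	uint32_t flags;
	uint32_t imm;
};

struct data_seg {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

#define INVAL_LKEY 0x100	/* reserved lkey marking an unused scatter entry */

/* Initialize a receive queue of rq_max WQEs, each 1 << wqe_shift bytes,
 * with room for max_gs scatter entries per WQE. */
static void init_recv_queue(void *rq_buf, int rq_max, int wqe_shift, int max_gs)
{
	/* WQE size in 16-byte chunks: header plus max_gs scatter entries */
	int size = (sizeof(struct next_seg) +
		    max_gs * sizeof(struct data_seg)) / 16;

	for (int i = 0; i < rq_max; ++i) {
		struct next_seg *next = (struct next_seg *)
			((char *) rq_buf + ((size_t) i << wqe_shift));
		struct data_seg *scatter;

		/* link each WQE to the next one, wrapping around the ring */
		next->nda_op = (uint32_t) (((i + 1) & (rq_max - 1)) << wqe_shift);
		next->ee_nds = (uint32_t) size;

		/* stamp every scatter entry with the invalid lkey */
		for (scatter = (struct data_seg *) (next + 1);
		     (char *) scatter < (char *) next + (1 << wqe_shift);
		     ++scatter)
			scatter->lkey = INVAL_LKEY;
	}
}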
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	33
1 file changed, 23 insertions, 10 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0db4c9761611..53af7aab1991 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -181,6 +181,10 @@ enum {
 	MTHCA_MLX_SLR    = 1 << 16
 };
 
+enum {
+	MTHCA_INVAL_LKEY = 0x100
+};
+
 struct mthca_next_seg {
 	u32 nda_op;	/* [31:6] next WQE [4:0] next opcode */
 	u32 ee_nds;	/* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
@@ -1082,7 +1086,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 				 enum ib_sig_type send_policy,
 				 struct mthca_qp *qp)
 {
-	struct mthca_next_seg *wqe;
 	int ret;
 	int i;
 
@@ -1105,18 +1108,28 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	}
 
 	if (mthca_is_memfree(dev)) {
+		struct mthca_next_seg *next;
+		struct mthca_data_seg *scatter;
+		int size = (sizeof (struct mthca_next_seg) +
+			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
+
 		for (i = 0; i < qp->rq.max; ++i) {
-			wqe = get_recv_wqe(qp, i);
-			wqe->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
-						  qp->rq.wqe_shift);
-			wqe->ee_nds = cpu_to_be32(1 << (qp->rq.wqe_shift - 4));
+			next = get_recv_wqe(qp, i);
+			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
+			next->ee_nds = cpu_to_be32(size);
+
+			for (scatter = (void *) (next + 1);
+			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
+			     ++scatter)
+				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
 		}
 
 		for (i = 0; i < qp->sq.max; ++i) {
-			wqe = get_send_wqe(qp, i);
-			wqe->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
-						   qp->sq.wqe_shift) +
-						  qp->send_wqe_offset);
+			next = get_send_wqe(qp, i);
+			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
+						    qp->sq.wqe_shift) +
+						   qp->send_wqe_offset);
 		}
 	}
 
@@ -1975,7 +1988,7 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	if (i < qp->rq.max_gs) {
 		((struct mthca_data_seg *) wqe)->byte_count = 0;
-		((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(0x100);
+		((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
 		((struct mthca_data_seg *) wqe)->addr = 0;
 	}
 
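The final hunk applies the same convention on the posting path: in mthca_arbel_post_receive(), when a work request supplies fewer scatter entries than rq.max_gs, the first unused entry is cleared and given MTHCA_INVAL_LKEY instead of the bare 0x100 constant. A sketch of that termination step, reusing the illustrative data_seg and INVAL_LKEY stand-ins from the example above (the real code writes big-endian values into struct mthca_data_seg):

/* Sketch: terminate a partially filled scatter list (num_sge < max_gs)
 * by stamping the first unused entry with the invalid lkey. */
static void terminate_scatter_list(struct data_seg *sg, int num_sge, int max_gs)
{
	if (num_sge < max_gs) {
		sg[num_sge].byte_count = 0;
		sg[num_sge].lkey = INVAL_LKEY;
		sg[num_sge].addr = 0;
	}
}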