author     Ralph Campbell <ralph.campbell@qlogic.com>	2007-06-18 17:24:42 -0400
committer  Roland Dreier <rolandd@cisco.com>	2007-07-09 23:12:26 -0400
commit     06ee109002672ac875558ec699b53cf08a865bd3
tree       6d76bae2f40731b27d6b11f71c0cd9ff200e45af /drivers/infiniband/hw
parent     9380068fc2f230e7840ff87d3f1e6030ae2ee5e8
IB/ipath: Fix RDMA read retry code
An RDMA read response or atomic response can ACK earlier sends and RDMA
writes.  In that case, the wrong work request pointer was being used to
store the first RDMA read response or the atomic result.  Also, if an
RDMA read request was retried, the code that computes which request to
resend was incorrect.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
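
The hunks below lean on 24-bit packet sequence number (PSN) arithmetic:
ipath_cmp24() compares two PSNs modulo 2^24 and returns a negative, zero,
or positive result.  A minimal sketch of that comparison idea, assuming
the usual sign-extension trick (illustrative only, not a copy of the
driver's own helper):

	/* u32 as in <linux/types.h>; assumes two's-complement int. */
	static inline int psn_cmp24(u32 a, u32 b)
	{
		/*
		 * Sign-extend the 24-bit difference: negative if a
		 * precedes b, zero if equal, positive if a follows b,
		 * wrapping at 2^24.
		 */
		return (int) ((a - b) << 8) >> 8;
	}

The first hunks cache this comparison in a local diff variable so the
RDMA_READ_RESPONSE_LAST and ATOMIC_ACKNOWLEDGE checks test the ACK PSN
against the request's last PSN, where the old code compared wqe->psn
against the response PSN in the atomic case.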
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c	57
1 file changed, 38 insertions(+), 19 deletions(-)
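
Two of the hunks below re-fetch the send WQE with get_swqe_ptr(qp,
qp->s_last) after do_rc_ack() returns.  The reason is in the commit
message: the response being processed can implicitly ACK earlier sends
and RDMA writes, and completing those requests advances qp->s_last, so a
WQE pointer read before the call may name an already-retired request.  A
sketch of the hazard, with hypothetical names (fetch_wqe() and
process_ack() stand in for the driver's get_swqe_ptr() and do_rc_ack()):

	struct swqe *wqe = fetch_wqe(qp, qp->s_last);	/* cached early */

	if (!process_ack(qp, aeth, psn, opcode, val))
		return;		/* may retire earlier WQEs and    */
				/* advance qp->s_last             */

	/*
	 * Stale: wqe may point at a completed request.  Re-read it
	 * before touching the RDMA read payload:
	 */
	wqe = fetch_wqe(qp, qp->s_last);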
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 9ba80d107dcd..014d811d222d 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -806,13 +806,15 @@ static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
  * Called at interrupt level with the QP s_lock held and interrupts disabled.
  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
  */
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
+static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
+		     u64 val)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
 	struct ipath_swqe *wqe;
 	int ret = 0;
 	u32 ack_psn;
+	int diff;
 
 	/*
 	 * Remove the QP from the timeout queue (or RNR timeout queue).
@@ -840,7 +842,19 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 	 * The MSN might be for a later WQE than the PSN indicates so
 	 * only complete WQEs that the PSN finishes.
 	 */
-	while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
+	while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+		/*
+		 * RDMA_READ_RESPONSE_ONLY is a special case since
+		 * we want to generate completion events for everything
+		 * before the RDMA read, copy the data, then generate
+		 * the completion for the read.
+		 */
+		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+		    diff == 0) {
+			ret = 1;
+			goto bail;
+		}
 		/*
 		 * If this request is a RDMA read or atomic, and the ACK is
 		 * for a later operation, this ACK NAKs the RDMA read or
@@ -851,12 +865,10 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 		 * is sent but before the response is received.
 		 */
 		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-		     (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
-		      ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
+		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
 		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
-		     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
-		      ipath_cmp24(wqe->psn, psn) != 0))) {
+		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
 			/*
 			 * The last valid PSN seen is the previous
 			 * request's.
@@ -870,6 +882,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 			 */
 			goto bail;
 		}
+		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+			*(u64 *) wqe->sg_list[0].vaddr = val;
 		if (qp->s_num_rd_atomic &&
 		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
 		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
@@ -1079,6 +1094,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 	int diff;
 	u32 pad;
 	u32 aeth;
+	u64 val;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
@@ -1118,8 +1134,6 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			data += sizeof(__be32);
 		}
 		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
-			u64 val;
-
 			if (!header_in_data) {
 				__be32 *p = ohdr->u.at.atomic_ack_eth;
 
@@ -1127,12 +1141,13 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 					be32_to_cpu(p[1]);
 			} else
 				val = be64_to_cpu(((__be64 *) data)[0]);
-			*(u64 *) wqe->sg_list[0].vaddr = val;
-		}
-		if (!do_rc_ack(qp, aeth, psn, opcode) ||
+		} else
+			val = 0;
+		if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
 		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
 			goto ack_done;
 		hdrsize += 4;
+		wqe = get_swqe_ptr(qp, qp->s_last);
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
 			goto ack_op_err;
 		/*
@@ -1176,13 +1191,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		goto bail;
 
 	case OP(RDMA_READ_RESPONSE_ONLY):
-		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+		if (!header_in_data)
+			aeth = be32_to_cpu(ohdr->u.aeth);
+		else
+			aeth = be32_to_cpu(((__be32 *) data)[0]);
+		if (!do_rc_ack(qp, aeth, psn, opcode, 0))
 			goto ack_done;
-		}
-		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_op_err;
 		/* Get the number of bytes the message was padded by. */
 		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
 		/*
@@ -1197,6 +1211,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		 * have to be careful to copy the data to the right
 		 * location.
 		 */
+		wqe = get_swqe_ptr(qp, qp->s_last);
 		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
 						  wqe, psn, pmtu);
 		goto read_last;
@@ -1230,7 +1245,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			data += sizeof(__be32);
 		}
 		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
-		(void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST));
+		(void) do_rc_ack(qp, aeth, psn,
+				 OP(RDMA_READ_RESPONSE_LAST), 0);
 		goto ack_done;
 	}
 
@@ -1344,8 +1360,11 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 			e = NULL;
 			break;
 		}
-		if (ipath_cmp24(psn, e->psn) >= 0)
+		if (ipath_cmp24(psn, e->psn) >= 0) {
+			if (prev == qp->s_tail_ack_queue)
+				old_req = 0;
 			break;
+		}
 	}
 	switch (opcode) {
 	case OP(RDMA_READ_REQUEST): {
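
The last hunk is the retry half of the fix: while scanning the ACK queue
for a duplicate request, reaching qp->s_tail_ack_queue means the request
is not older than the response currently being generated, so old_req is
cleared and the request is reprocessed instead of being ignored.  For a
retried RDMA read that was partially completed, the resend position is
derived from PSN distance; roughly (a sketch with a hypothetical helper
name, where pmtu is the path MTU in bytes and only full packets count as
delivered):

	/* Bytes of the read already delivered before resp_psn. */
	static u32 read_restart_offset(u32 first_psn, u32 resp_psn, u32 pmtu)
	{
		return ((resp_psn - first_psn) & 0xFFFFFF) * pmtu;
	}

This is the arithmetic that restart_sge() appears to apply to
qp->s_rdma_read_sge in the RDMA_READ_RESPONSE_ONLY hunk above.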