Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c  63
1 file changed, 44 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index d6aa14afa26..b4b88d0b53f 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1136,7 +1136,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			goto ack_done;
 		hdrsize += 4;
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_done;
+			goto ack_op_err;
 		/*
 		 * If this is a response to a resent RDMA read, we
 		 * have to be careful to copy the data to the right
@@ -1154,12 +1154,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			goto ack_done;
 		}
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_done;
+			goto ack_op_err;
 	read_middle:
 		if (unlikely(tlen != (hdrsize + pmtu + 4)))
-			goto ack_done;
+			goto ack_len_err;
 		if (unlikely(pmtu >= qp->s_rdma_read_len))
-			goto ack_done;
+			goto ack_len_err;
 
 		/* We got a response so update the timeout. */
 		spin_lock(&dev->pending_lock);
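
Note: the read_middle check above enforces that a middle RDMA read response carries exactly one PMTU of payload; tlen counts the transport header bytes plus the payload plus the 4-byte ICRC. A minimal standalone sketch of that invariant (middle_len_ok and the sizes are illustrative, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* A middle response must carry exactly one PMTU of payload:
 * tlen = header bytes + payload + 4-byte ICRC. */
static bool middle_len_ok(unsigned tlen, unsigned hdrsize, unsigned pmtu)
{
	return tlen == hdrsize + pmtu + 4;
}

int main(void)
{
	unsigned hdrsize = 28, pmtu = 2048;	/* illustrative sizes */
	printf("%d\n", middle_len_ok(hdrsize + pmtu + 4, hdrsize, pmtu));	/* 1 */
	printf("%d\n", middle_len_ok(hdrsize + 1024 + 4, hdrsize, pmtu));	/* 0 */
	return 0;
}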
@@ -1184,12 +1184,20 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			goto ack_done;
 		}
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_done;
+			goto ack_op_err;
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/*
+		 * Check that the data size is >= 0 && <= pmtu.
+		 * Remember to account for the AETH header (4) and
+		 * ICRC (4).
+		 */
+		if (unlikely(tlen < (hdrsize + pad + 8)))
+			goto ack_len_err;
 		/*
 		 * If this is a response to a resent RDMA read, we
 		 * have to be careful to copy the data to the right
 		 * location.
-		 * XXX should check PSN and wqe opcode first.
 		 */
 		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
 						  wqe, psn, pmtu);
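
Note: the added bounds check follows the same arithmetic used for the last/only response packets below: the pad count occupies bits 21:20 of the first BTH dword, and the packet must still leave room for the 4-byte AETH and 4-byte ICRC beyond headers and padding. A standalone sketch with a hypothetical host-order bth0 value (the driver converts from wire order with be32_to_cpu first):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bth0 = 0x00300000u;		/* pad bits 21:20 set to 3 */
	unsigned pad = (bth0 >> 20) & 3;	/* 0..3 pad bytes */
	/* Anything shorter than headers + pad + AETH (4) + ICRC (4)
	 * cannot be a valid response packet: */
	unsigned hdrsize = 28;			/* illustrative */
	unsigned min_tlen = hdrsize + pad + 8;
	printf("pad=%u min_tlen=%u\n", pad, min_tlen);
	return 0;
}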
@@ -1203,26 +1211,20 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			goto ack_done;
 		}
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_done;
-	read_last:
-		/*
-		 * Get the number of bytes the message was padded by.
-		 */
+			goto ack_op_err;
+		/* Get the number of bytes the message was padded by. */
 		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
 		/*
 		 * Check that the data size is >= 1 && <= pmtu.
 		 * Remember to account for the AETH header (4) and
 		 * ICRC (4).
 		 */
-		if (unlikely(tlen <= (hdrsize + pad + 8))) {
-			/* XXX Need to generate an error CQ entry. */
-			goto ack_done;
-		}
+		if (unlikely(tlen <= (hdrsize + pad + 8)))
+			goto ack_len_err;
+	read_last:
 		tlen -= hdrsize + pad + 8;
-		if (unlikely(tlen != qp->s_rdma_read_len)) {
-			/* XXX Need to generate an error CQ entry. */
-			goto ack_done;
-		}
+		if (unlikely(tlen != qp->s_rdma_read_len))
+			goto ack_len_err;
 		if (!header_in_data)
 			aeth = be32_to_cpu(ohdr->u.aeth);
 		else {
@@ -1236,6 +1238,29 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 
 ack_done:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
+	goto bail;
+
+ack_op_err:
+	wc.status = IB_WC_LOC_QP_OP_ERR;
+	goto ack_err;
+
+ack_len_err:
+	wc.status = IB_WC_LOC_LEN_ERR;
+ack_err:
+	wc.wr_id = wqe->wr.wr_id;
+	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+	wc.vendor_err = 0;
+	wc.byte_len = 0;
+	wc.imm_data = 0;
+	wc.qp = &qp->ibqp;
+	wc.src_qp = qp->remote_qpn;
+	wc.wc_flags = 0;
+	wc.pkey_index = 0;
+	wc.slid = qp->remote_ah_attr.dlid;
+	wc.sl = qp->remote_ah_attr.sl;
+	wc.dlid_path_bits = 0;
+	wc.port_num = 0;
+	ipath_sqerror_qp(qp, &wc);
 bail:
 	return;
 }
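
Note: the point of the last hunk is that malformed responses are no longer silently dropped (the old "XXX Need to generate an error CQ entry" comments); each failure site now picks a completion status and falls into one shared tail that fills the work completion once and reports it via ipath_sqerror_qp(). The control-flow pattern can be sketched standalone as below; the types and report_error() are hypothetical stand-ins for the driver's struct ib_wc and ipath_sqerror_qp(), not its actual API:

#include <stdio.h>

enum wc_status { WC_OK, WC_LOC_QP_OP_ERR, WC_LOC_LEN_ERR };

struct wc { enum wc_status status; unsigned long wr_id; };

static void report_error(const struct wc *wc)
{
	printf("error completion: status=%d wr_id=%lu\n",
	       wc->status, wc->wr_id);
}

static void rcv_resp(int opcode_ok, int len_ok, unsigned long wr_id)
{
	struct wc wc;

	if (!opcode_ok)
		goto op_err;	/* wrong opcode for this response */
	if (!len_ok)
		goto len_err;	/* packet length check failed */
	return;			/* normal completion path */

op_err:
	wc.status = WC_LOC_QP_OP_ERR;
	goto err;
len_err:
	wc.status = WC_LOC_LEN_ERR;
err:
	wc.wr_id = wr_id;	/* the shared tail fills the fields once */
	report_error(&wc);
}

int main(void)
{
	rcv_resp(0, 1, 1);	/* opcode error */
	rcv_resp(1, 0, 2);	/* length error */
	return 0;
}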