author	Bryan O'Sullivan <bos@pathscale.com>	2006-09-28 12:00:14 -0400
committer	Roland Dreier <rolandd@cisco.com>	2006-09-28 14:16:55 -0400
commit	8d0208cb59a43bf867e16b977c34c4d6cd618f59 (patch)
tree	db5d04d6d0f823118f063d061c9188b57304d19f /drivers/infiniband
parent	1fd3b40fde3bfacdf742cadfe99cfd47ffd05219 (diff)
IB/ipath: Flush RWQEs if access error or invalid error seen
If the receiver goes into the error state, we need to flush the posted
receive WQEs.

Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
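The user-visible effect is that moving a QP to IB_QPS_ERR now retires any
posted receive WQEs through the CQ instead of leaving them stranded. As a
rough, hypothetical sketch (consumer-side code, not part of this patch), a
kernel verbs client draining its CQ after the transition would see the
in-progress RWQE complete with the signalled error status and the remaining
ones complete as flushed:

	/* Hypothetical consumer-side drain loop; recv_cq and the QP
	 * setup are assumed for illustration, not taken from this
	 * patch. */
	struct ib_wc wc;

	while (ib_poll_cq(recv_cq, 1, &wc) > 0) {
		if (wc.status == IB_WC_WR_FLUSH_ERR)
			pr_debug("rwqe %llu flushed\n",
				 (unsigned long long)wc.wr_id);
		else if (wc.status != IB_WC_SUCCESS)
			pr_err("rwqe %llu completed in error: %d\n",
			       (unsigned long long)wc.wr_id, wc.status);
	}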
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_qp.c	13
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c	18
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_ruc.c	1
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_verbs.h	3
4 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index ecfaca7a571b..46c1c89bf6ae 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -335,6 +335,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
+	qp->r_wrid_valid = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -353,12 +354,13 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 /**
  * ipath_error_qp - put a QP into an error state
  * @qp: the QP to put into an error state
+ * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
  * QP s_lock should be held and interrupts disabled.
  */
 
-void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
@@ -374,7 +376,6 @@ void ipath_error_qp(struct ipath_qp *qp)
 	list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
-	wc.status = IB_WC_WR_FLUSH_ERR;
 	wc.vendor_err = 0;
 	wc.byte_len = 0;
 	wc.imm_data = 0;
@@ -386,6 +387,12 @@ void ipath_error_qp(struct ipath_qp *qp)
 	wc.sl = 0;
 	wc.dlid_path_bits = 0;
 	wc.port_num = 0;
+	if (qp->r_wrid_valid) {
+		qp->r_wrid_valid = 0;
+		wc.status = err;
+		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+	}
+	wc.status = IB_WC_WR_FLUSH_ERR;
 
 	while (qp->s_last != qp->s_head) {
 		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
@@ -502,7 +509,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		break;
 
 	case IB_QPS_ERR:
-		ipath_error_qp(qp);
+		ipath_error_qp(qp, IB_WC_GENERAL_ERR);
 		break;
 
 	default:
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 595941b2b1bd..a504cf67f272 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1293,6 +1293,14 @@ done:
 	return 1;
 }
 
+static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
+{
+	spin_lock_irq(&qp->s_lock);
+	qp->state = IB_QPS_ERR;
+	ipath_error_qp(qp, err);
+	spin_unlock_irq(&qp->s_lock);
+}
+
 /**
  * ipath_rc_rcv - process an incoming RC packet
  * @dev: the device this packet came in on
@@ -1385,8 +1393,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		 */
 		if (qp->r_ack_state >= OP(COMPARE_SWAP))
 			goto send_ack;
-		/* XXX Flush WQEs */
-		qp->state = IB_QPS_ERR;
+		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
 		qp->r_ack_state = OP(SEND_ONLY);
 		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
 		qp->r_ack_psn = qp->r_psn;
@@ -1492,9 +1499,9 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			goto nack_inv;
 		ipath_copy_sge(&qp->r_sge, data, tlen);
 		qp->r_msn++;
-		if (opcode == OP(RDMA_WRITE_LAST) ||
-		    opcode == OP(RDMA_WRITE_ONLY))
+		if (!qp->r_wrid_valid)
 			break;
+		qp->r_wrid_valid = 0;
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
 		wc.opcode = IB_WC_RECV;
@@ -1685,8 +1692,7 @@ nack_acc:
 	 * is pending though.
 	 */
 	if (qp->r_ack_state < OP(COMPARE_SWAP)) {
-		/* XXX Flush WQEs */
-		qp->state = IB_QPS_ERR;
+		ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
 		qp->r_ack_state = OP(RDMA_WRITE_ONLY);
 		qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
 		qp->r_ack_psn = qp->r_psn;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 17ae23fb1e40..f7530512045d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -229,6 +229,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		}
 	}
 	spin_unlock_irqrestore(&rq->lock, flags);
+	qp->r_wrid_valid = 1;
 
 bail:
 	return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 3597d362e5dd..8039f6e5f0c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -365,6 +365,7 @@ struct ipath_qp {
 	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
 	u8 r_reuse_sge;		/* for UC receive errors */
 	u8 r_sge_inx;		/* current index into sg_list */
+	u8 r_wrid_valid;	/* r_wrid set but CQ entry not yet made */
 	u8 qp_access_flags;
 	u8 s_max_sge;		/* size of s_wq->sg_list */
 	u8 s_retry_cnt;		/* number of times to retry */
@@ -639,6 +640,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 
 int ipath_destroy_qp(struct ib_qp *ibqp);
 
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
+
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		    int attr_mask, struct ib_udata *udata);
 