author     Bryan O'Sullivan <bos@pathscale.com>        2006-09-28 12:00:14 -0400
committer  Roland Dreier <rolandd@cisco.com>           2006-09-28 14:16:55 -0400
commit     8d0208cb59a43bf867e16b977c34c4d6cd618f59 (patch)
tree       db5d04d6d0f823118f063d061c9188b57304d19f /drivers/infiniband/hw/ipath/ipath_qp.c
parent     1fd3b40fde3bfacdf742cadfe99cfd47ffd05219 (diff)
IB/ipath: Flush RWQEs if access error or invalid error seen
If the receiver goes into the error state, we need to flush the
posted receive WQEs.
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
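
The pattern this patch implements is a common completion-queue idiom: if a receive WQE is actively being processed when the QP enters the error state, that WQE is completed with the real error status, and only the remaining queued work is completed with the generic IB_WC_WR_FLUSH_ERR. A minimal userspace sketch of that idiom follows; it uses simplified stand-in types and names (toy_qp, cq_enter, and so on are illustrative, not the driver's ipath structures):

#include <stdio.h>

/* Simplified stand-ins for enum ib_wc_status values. */
enum wc_status { WC_GENERAL_ERR, WC_WR_FLUSH_ERR };

struct toy_qp {
	int r_wrid_valid;   /* a receive WQE is currently being processed */
	int s_last, s_head; /* work queue indices */
};

/* Stand-in for ipath_cq_enter(): post one completion. */
static void cq_enter(enum wc_status status)
{
	printf("completion posted, status=%d\n", status);
}

/* Models the error path added by the patch: the active RWQE gets the
 * caller-supplied error status; everything still queued is flushed. */
static void toy_error_qp(struct toy_qp *qp, enum wc_status err)
{
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		cq_enter(err);             /* real error for the active RWQE */
	}
	while (qp->s_last != qp->s_head) {
		qp->s_last++;
		cq_enter(WC_WR_FLUSH_ERR); /* generic flush for queued work */
	}
}

int main(void)
{
	struct toy_qp qp = { .r_wrid_valid = 1, .s_last = 0, .s_head = 2 };
	toy_error_qp(&qp, WC_GENERAL_ERR);
	return 0;
}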
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
 drivers/infiniband/hw/ipath/ipath_qp.c | 13 +++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index ecfaca7a571b..46c1c89bf6ae 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -335,6 +335,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
+	qp->r_wrid_valid = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -353,12 +354,13 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 /**
  * ipath_error_qp - put a QP into an error state
  * @qp: the QP to put into an error state
+ * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
  * QP s_lock should be held and interrupts disabled.
  */
 
-void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
@@ -374,7 +376,6 @@ void ipath_error_qp(struct ipath_qp *qp)
 	list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
-	wc.status = IB_WC_WR_FLUSH_ERR;
 	wc.vendor_err = 0;
 	wc.byte_len = 0;
 	wc.imm_data = 0;
@@ -386,6 +387,12 @@ void ipath_error_qp(struct ipath_qp *qp)
 	wc.sl = 0;
 	wc.dlid_path_bits = 0;
 	wc.port_num = 0;
+	if (qp->r_wrid_valid) {
+		qp->r_wrid_valid = 0;
+		wc.status = err;
+		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+	}
+	wc.status = IB_WC_WR_FLUSH_ERR;
 
 	while (qp->s_last != qp->s_head) {
 		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
@@ -502,7 +509,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		break;
 
 	case IB_QPS_ERR:
-		ipath_error_qp(qp);
+		ipath_error_qp(qp, IB_WC_GENERAL_ERR);
 		break;
 
 	default:
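
With the new parameter, the one caller in this file (the user-initiated IB_QPS_ERR transition in ipath_modify_qp) signals a generic error, while, per the commit title, receive-path callers elsewhere in the driver can report the specific access or invalid-request error that triggered the transition. A hypothetical call site, not part of this diff, might look like:

/* Hypothetical caller sketch (not in this diff): a receive handler that
 * detects a remote access error moves the QP to the error state so the
 * active RWQE is completed with the specific status. */
ipath_error_qp(qp, IB_WC_REM_ACCESS_ERR);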