about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/ipath/ipath_qp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c  20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index a8c4a6b03d7a..6a41fdbc8e57 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -377,13 +377,15 @@ static void ipath_reset_qp(struct ipath_qp *qp)
377 * @err: the receive completion error to signal if a RWQE is active 377 * @err: the receive completion error to signal if a RWQE is active
378 * 378 *
379 * Flushes both send and receive work queues. 379 * Flushes both send and receive work queues.
380 * Returns true if last WQE event should be generated.
380 * The QP s_lock should be held and interrupts disabled. 381 * The QP s_lock should be held and interrupts disabled.
381 */ 382 */
382 383
383void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) 384int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
384{ 385{
385 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 386 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
386 struct ib_wc wc; 387 struct ib_wc wc;
388 int ret = 0;
387 389
388 ipath_dbg("QP%d/%d in error state\n", 390 ipath_dbg("QP%d/%d in error state\n",
389 qp->ibqp.qp_num, qp->remote_qpn); 391 qp->ibqp.qp_num, qp->remote_qpn);
@@ -454,7 +456,10 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
454 wq->tail = tail; 456 wq->tail = tail;
455 457
456 spin_unlock(&qp->r_rq.lock); 458 spin_unlock(&qp->r_rq.lock);
457 } 459 } else if (qp->ibqp.event_handler)
460 ret = 1;
461
462 return ret;
458} 463}
459 464
460/** 465/**
@@ -473,6 +478,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
473 struct ipath_qp *qp = to_iqp(ibqp); 478 struct ipath_qp *qp = to_iqp(ibqp);
474 enum ib_qp_state cur_state, new_state; 479 enum ib_qp_state cur_state, new_state;
475 unsigned long flags; 480 unsigned long flags;
481 int lastwqe = 0;
476 int ret; 482 int ret;
477 483
478 spin_lock_irqsave(&qp->s_lock, flags); 484 spin_lock_irqsave(&qp->s_lock, flags);
@@ -532,7 +538,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
532 break; 538 break;
533 539
534 case IB_QPS_ERR: 540 case IB_QPS_ERR:
535 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); 541 lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
536 break; 542 break;
537 543
538 default: 544 default:
@@ -591,6 +597,14 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
591 qp->state = new_state; 597 qp->state = new_state;
592 spin_unlock_irqrestore(&qp->s_lock, flags); 598 spin_unlock_irqrestore(&qp->s_lock, flags);
593 599
600 if (lastwqe) {
601 struct ib_event ev;
602
603 ev.device = qp->ibqp.device;
604 ev.element.qp = &qp->ibqp;
605 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
606 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
607 }
594 ret = 0; 608 ret = 0;
595 goto bail; 609 goto bail;
596 610