diff options
author | Steve Wise <swise@opengridcomputing.com> | 2008-01-21 15:42:09 -0500 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-01-25 17:17:45 -0500 |
commit | 856b5925047d73a85557203d124d62c5eea1fbd3 (patch) | |
tree | a2cd5eaff8a758e41d4870c7f62c2c7cf8aca861 /drivers/infiniband/hw/cxgb3 | |
parent | 4e1e93a4189a98cfb0e24865f7f44470ae5f805e (diff) |
RDMA/cxgb3: Flush the receive queue when closing
- for kernel mode cqs, call event notification handler when flushing.
- flush QP when moving from RTS -> CLOSING.
- fix logic to identify a kernel mode qp.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb3')
-rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_qp.c | 7 |
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 9bb811256b28..7681fdc0e21d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -642,6 +642,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
642 | cxio_flush_rq(&qhp->wq, &rchp->cq, count); | 642 | cxio_flush_rq(&qhp->wq, &rchp->cq, count); |
643 | spin_unlock(&qhp->lock); | 643 | spin_unlock(&qhp->lock); |
644 | spin_unlock_irqrestore(&rchp->lock, *flag); | 644 | spin_unlock_irqrestore(&rchp->lock, *flag); |
645 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | ||
645 | 646 | ||
646 | /* locking heirarchy: cq lock first, then qp lock. */ | 647 | /* locking heirarchy: cq lock first, then qp lock. */ |
647 | spin_lock_irqsave(&schp->lock, *flag); | 648 | spin_lock_irqsave(&schp->lock, *flag); |
@@ -651,6 +652,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
651 | cxio_flush_sq(&qhp->wq, &schp->cq, count); | 652 | cxio_flush_sq(&qhp->wq, &schp->cq, count); |
652 | spin_unlock(&qhp->lock); | 653 | spin_unlock(&qhp->lock); |
653 | spin_unlock_irqrestore(&schp->lock, *flag); | 654 | spin_unlock_irqrestore(&schp->lock, *flag); |
655 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | ||
654 | 656 | ||
655 | /* deref */ | 657 | /* deref */ |
656 | if (atomic_dec_and_test(&qhp->refcnt)) | 658 | if (atomic_dec_and_test(&qhp->refcnt)) |
@@ -661,7 +663,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
661 | 663 | ||
662 | static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) | 664 | static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) |
663 | { | 665 | { |
664 | if (t3b_device(qhp->rhp)) | 666 | if (qhp->ibqp.uobject) |
665 | cxio_set_wq_in_error(&qhp->wq); | 667 | cxio_set_wq_in_error(&qhp->wq); |
666 | else | 668 | else |
667 | __flush_qp(qhp, flag); | 669 | __flush_qp(qhp, flag); |
@@ -830,10 +832,11 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, | |||
830 | disconnect = 1; | 832 | disconnect = 1; |
831 | ep = qhp->ep; | 833 | ep = qhp->ep; |
832 | } | 834 | } |
835 | flush_qp(qhp, &flag); | ||
833 | break; | 836 | break; |
834 | case IWCH_QP_STATE_TERMINATE: | 837 | case IWCH_QP_STATE_TERMINATE: |
835 | qhp->attr.state = IWCH_QP_STATE_TERMINATE; | 838 | qhp->attr.state = IWCH_QP_STATE_TERMINATE; |
836 | if (t3b_device(qhp->rhp)) | 839 | if (qhp->ibqp.uobject) |
837 | cxio_set_wq_in_error(&qhp->wq); | 840 | cxio_set_wq_in_error(&qhp->wq); |
838 | if (!internal) | 841 | if (!internal) |
839 | terminate = 1; | 842 | terminate = 1; |