diff options
| author | Steve Wise <swise@opengridcomputing.com> | 2012-03-07 17:48:46 -0500 |
|---|---|---|
| committer | Roland Dreier <roland@purestorage.com> | 2012-03-07 18:12:45 -0500 |
| commit | db4106ce635830201fad1bfca731a635beab6a72 (patch) | |
| tree | 344aead8f592bdab67bc395213d0ddda0fe88bc5 /drivers/infiniband | |
| parent | 6b21d18ed50c7d145220b0724ea7f2613abf0f95 (diff) | |
RDMA/cxgb3: Don't pass irq flags to flush_qp()
Since flush_qp() is always called with irqs disabled, all the locking
inside flush_qp() and __flush_qp() doesn't need irq save/restore.
Further, passing the flag variable from iwch_modify_qp() is just wrong
and causes a WARN_ON() in local_bh_enable().
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband')
| -rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_qp.c | 40 |
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index bea5839d89ee..6de8463f453b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
| @@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) | |||
| 803 | * Assumes qhp lock is held. | 803 | * Assumes qhp lock is held. |
| 804 | */ | 804 | */ |
| 805 | static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, | 805 | static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, |
| 806 | struct iwch_cq *schp, unsigned long *flag) | 806 | struct iwch_cq *schp) |
| 807 | { | 807 | { |
| 808 | int count; | 808 | int count; |
| 809 | int flushed; | 809 | int flushed; |
| @@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, | |||
| 812 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); | 812 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); |
| 813 | /* take a ref on the qhp since we must release the lock */ | 813 | /* take a ref on the qhp since we must release the lock */ |
| 814 | atomic_inc(&qhp->refcnt); | 814 | atomic_inc(&qhp->refcnt); |
| 815 | spin_unlock_irqrestore(&qhp->lock, *flag); | 815 | spin_unlock(&qhp->lock); |
| 816 | 816 | ||
| 817 | /* locking hierarchy: cq lock first, then qp lock. */ | 817 | /* locking hierarchy: cq lock first, then qp lock. */ |
| 818 | spin_lock_irqsave(&rchp->lock, *flag); | 818 | spin_lock(&rchp->lock); |
| 819 | spin_lock(&qhp->lock); | 819 | spin_lock(&qhp->lock); |
| 820 | cxio_flush_hw_cq(&rchp->cq); | 820 | cxio_flush_hw_cq(&rchp->cq); |
| 821 | cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); | 821 | cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); |
| 822 | flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); | 822 | flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); |
| 823 | spin_unlock(&qhp->lock); | 823 | spin_unlock(&qhp->lock); |
| 824 | spin_unlock_irqrestore(&rchp->lock, *flag); | 824 | spin_unlock(&rchp->lock); |
| 825 | if (flushed) { | 825 | if (flushed) { |
| 826 | spin_lock_irqsave(&rchp->comp_handler_lock, *flag); | 826 | spin_lock(&rchp->comp_handler_lock); |
| 827 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 827 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
| 828 | spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); | 828 | spin_unlock(&rchp->comp_handler_lock); |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | /* locking hierarchy: cq lock first, then qp lock. */ | 831 | /* locking hierarchy: cq lock first, then qp lock. */ |
| 832 | spin_lock_irqsave(&schp->lock, *flag); | 832 | spin_lock(&schp->lock); |
| 833 | spin_lock(&qhp->lock); | 833 | spin_lock(&qhp->lock); |
| 834 | cxio_flush_hw_cq(&schp->cq); | 834 | cxio_flush_hw_cq(&schp->cq); |
| 835 | cxio_count_scqes(&schp->cq, &qhp->wq, &count); | 835 | cxio_count_scqes(&schp->cq, &qhp->wq, &count); |
| 836 | flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); | 836 | flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); |
| 837 | spin_unlock(&qhp->lock); | 837 | spin_unlock(&qhp->lock); |
| 838 | spin_unlock_irqrestore(&schp->lock, *flag); | 838 | spin_unlock(&schp->lock); |
| 839 | if (flushed) { | 839 | if (flushed) { |
| 840 | spin_lock_irqsave(&schp->comp_handler_lock, *flag); | 840 | spin_lock(&schp->comp_handler_lock); |
| 841 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 841 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); |
| 842 | spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); | 842 | spin_unlock(&schp->comp_handler_lock); |
| 843 | } | 843 | } |
| 844 | 844 | ||
| 845 | /* deref */ | 845 | /* deref */ |
| 846 | if (atomic_dec_and_test(&qhp->refcnt)) | 846 | if (atomic_dec_and_test(&qhp->refcnt)) |
| 847 | wake_up(&qhp->wait); | 847 | wake_up(&qhp->wait); |
| 848 | 848 | ||
| 849 | spin_lock_irqsave(&qhp->lock, *flag); | 849 | spin_lock(&qhp->lock); |
| 850 | } | 850 | } |
| 851 | 851 | ||
| 852 | static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) | 852 | static void flush_qp(struct iwch_qp *qhp) |
| 853 | { | 853 | { |
| 854 | struct iwch_cq *rchp, *schp; | 854 | struct iwch_cq *rchp, *schp; |
| 855 | 855 | ||
| @@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
| 859 | if (qhp->ibqp.uobject) { | 859 | if (qhp->ibqp.uobject) { |
| 860 | cxio_set_wq_in_error(&qhp->wq); | 860 | cxio_set_wq_in_error(&qhp->wq); |
| 861 | cxio_set_cq_in_error(&rchp->cq); | 861 | cxio_set_cq_in_error(&rchp->cq); |
| 862 | spin_lock_irqsave(&rchp->comp_handler_lock, *flag); | 862 | spin_lock(&rchp->comp_handler_lock); |
| 863 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 863 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
| 864 | spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); | 864 | spin_unlock(&rchp->comp_handler_lock); |
| 865 | if (schp != rchp) { | 865 | if (schp != rchp) { |
| 866 | cxio_set_cq_in_error(&schp->cq); | 866 | cxio_set_cq_in_error(&schp->cq); |
| 867 | spin_lock_irqsave(&schp->comp_handler_lock, *flag); | 867 | spin_lock(&schp->comp_handler_lock); |
| 868 | (*schp->ibcq.comp_handler)(&schp->ibcq, | 868 | (*schp->ibcq.comp_handler)(&schp->ibcq, |
| 869 | schp->ibcq.cq_context); | 869 | schp->ibcq.cq_context); |
| 870 | spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); | 870 | spin_unlock(&schp->comp_handler_lock); |
| 871 | } | 871 | } |
| 872 | return; | 872 | return; |
| 873 | } | 873 | } |
| 874 | __flush_qp(qhp, rchp, schp, flag); | 874 | __flush_qp(qhp, rchp, schp); |
| 875 | } | 875 | } |
| 876 | 876 | ||
| 877 | 877 | ||
| @@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, | |||
| 1030 | break; | 1030 | break; |
| 1031 | case IWCH_QP_STATE_ERROR: | 1031 | case IWCH_QP_STATE_ERROR: |
| 1032 | qhp->attr.state = IWCH_QP_STATE_ERROR; | 1032 | qhp->attr.state = IWCH_QP_STATE_ERROR; |
| 1033 | flush_qp(qhp, &flag); | 1033 | flush_qp(qhp); |
| 1034 | break; | 1034 | break; |
| 1035 | default: | 1035 | default: |
| 1036 | ret = -EINVAL; | 1036 | ret = -EINVAL; |
| @@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, | |||
| 1078 | } | 1078 | } |
| 1079 | switch (attrs->next_state) { | 1079 | switch (attrs->next_state) { |
| 1080 | case IWCH_QP_STATE_IDLE: | 1080 | case IWCH_QP_STATE_IDLE: |
| 1081 | flush_qp(qhp, &flag); | 1081 | flush_qp(qhp); |
| 1082 | qhp->attr.state = IWCH_QP_STATE_IDLE; | 1082 | qhp->attr.state = IWCH_QP_STATE_IDLE; |
| 1083 | qhp->attr.llp_stream_handle = NULL; | 1083 | qhp->attr.llp_stream_handle = NULL; |
| 1084 | put_ep(&qhp->ep->com); | 1084 | put_ep(&qhp->ep->com); |
| @@ -1132,7 +1132,7 @@ err: | |||
| 1132 | free=1; | 1132 | free=1; |
| 1133 | wake_up(&qhp->wait); | 1133 | wake_up(&qhp->wait); |
| 1134 | BUG_ON(!ep); | 1134 | BUG_ON(!ep); |
| 1135 | flush_qp(qhp, &flag); | 1135 | flush_qp(qhp); |
| 1136 | out: | 1136 | out: |
| 1137 | spin_unlock_irqrestore(&qhp->lock, flag); | 1137 | spin_unlock_irqrestore(&qhp->lock, flag); |
| 1138 | 1138 | ||
