author     Mitesh Ahuja <mitesh.ahuja@emulex.com>    2014-12-18 03:43:01 -0500
committer  Roland Dreier <roland@purestorage.com>    2015-02-18 11:31:03 -0500
commit     4b8180aa5d13f87a42459a74518b7fb084312fe6 (patch)
tree       98e96c07e34187d0fea09ceb62c7d95528563bcc /drivers
parent     43c706b10a1054c0a73b2dc10374a946c8a3a17f (diff)
RDMA/ocrdma: Host crash on destroying device resources
1. Fix the cleanup sequence in ocrdma_remove(): the device must be
   unregistered from the IB stack before any device-specific cleanup is done.
2. Always return success in the resource destroy path. If a destroy command
   returns an error, the IB stack will trigger cleanup again while closing the
   uverbs device, hitting a BUG_ON() and panicking the kernel. The destroy
   paths therefore log the failure but still free host resources and return 0,
   as sketched below.
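For reference, a minimal, self-contained user-space sketch of the pattern behind point 2 (the fake_* names are invented for illustration and are not ocrdma symbols): the destroy routine logs a failed hardware command but still frees the host-side resources and reports success, so the caller never retries the teardown.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a verbs object with host-side memory attached. */
struct fake_cq {
        int id;
        void *ring;
};

/* Stand-in for a mailbox command that can fail when firmware is unresponsive. */
static int fake_mbx_destroy_cq(struct fake_cq *cq)
{
        (void)cq;
        return -1;              /* pretend the command failed */
}

/* Mirrors the fixed pattern: log the failure, keep freeing, return 0. */
static int fake_destroy_cq(struct fake_cq *cq)
{
        if (fake_mbx_destroy_cq(cq))
                fprintf(stderr, "destroy_cq(%d): fw not responding, continuing cleanup\n",
                        cq->id);

        free(cq->ring);         /* host resources are released regardless */
        free(cq);
        return 0;               /* never report failure from the destroy path */
}

int main(void)
{
        struct fake_cq *cq = calloc(1, sizeof(*cq));

        cq->id = 7;
        cq->ring = malloc(4096);
        return fake_destroy_cq(cq);
}

Reporting success unconditionally follows the usual verbs convention that once the stack has committed to destroying an object, the destroy callback should not propagate failures back to it.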
Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
Signed-off-by: Mitesh Ahuja <mitesh.ahuja@emulex.com>
Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/infiniband/hw/ocrdma/ocrdma_main.c   |  4
-rw-r--r--   drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  | 21
2 files changed, 10 insertions, 15 deletions
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index edd81da55100..0083360d918e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -530,11 +530,11 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
 	/* first unregister with stack to stop all the active traffic
 	 * of the registered clients.
 	 */
-	ocrdma_rem_port_stats(dev);
 	ocrdma_remove_sysfiles(dev);
-
 	ib_unregister_device(&dev->ibdev);
 
+	ocrdma_rem_port_stats(dev);
+
 	spin_lock(&ocrdma_devlist_lock);
 	list_del_rcu(&dev->entry);
 	spin_unlock(&ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index cda7b9569486..589986ab13a8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -435,7 +435,6 @@ err:
 
 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
-	int status = 0;
 	struct ocrdma_pd *pd = uctx->cntxt_pd;
 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
@@ -444,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 		       __func__, dev->id, pd->id);
 	}
 	uctx->cntxt_pd = NULL;
-	status = _ocrdma_dealloc_pd(dev, pd);
-	return status;
+	(void)_ocrdma_dealloc_pd(dev, pd);
+	return 0;
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -947,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 {
 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
-	int status;
 
-	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
 
 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
@@ -960,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 
 	/* Don't stop cleanup, in case FW is unresponsive */
 	if (dev->mqe_ctx.fw_error_state) {
-		status = 0;
 		pr_err("%s(%d) fw not responding.\n",
 		       __func__, dev->id);
 	}
-	return status;
+	return 0;
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -1096,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 
 int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
-	int status;
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_eq *eq = NULL;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1113,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
 	synchronize_irq(irq);
 	ocrdma_flush_cq(cq);
 
-	status = ocrdma_mbx_destroy_cq(dev, cq);
+	(void)ocrdma_mbx_destroy_cq(dev, cq);
 	if (cq->ucontext) {
 		pdid = cq->ucontext->cntxt_pd->id;
 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1124,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
 	}
 
 	kfree(cq);
-	return status;
+	return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1725,7 +1721,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp)
 {
-	int status;
 	struct ocrdma_pd *pd;
 	struct ocrdma_qp *qp;
 	struct ocrdma_dev *dev;
@@ -1747,7 +1742,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
 	 * discarded until the old CQEs are discarded.
 	 */
 	mutex_lock(&dev->dev_lock);
-	status = ocrdma_mbx_destroy_qp(dev, qp);
+	(void) ocrdma_mbx_destroy_qp(dev, qp);
 
 	/*
 	 * acquire CQ lock while destroy is in progress, in order to
@@ -1782,7 +1777,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
 	kfree(qp->wqe_wr_id_tbl);
 	kfree(qp->rqe_wr_id_tbl);
 	kfree(qp);
-	return status;
+	return 0;
 }
 
 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,