author	Devesh Sharma <devesh.sharma@broadcom.com>	2017-06-29 15:28:08 -0400
committer	Doug Ledford <dledford@redhat.com>	2017-07-20 11:20:49 -0400
commit	b3b2c7c5506bd8d15214136f8b11a2e6c9728033 (patch)
tree	7217e72d2ffd31c887ff1395f7f97db3d99e20d2
parent	396551eb00e46aa8f843c448bced0c76971ec58c (diff)
RDMA/bnxt_re: Free doorbell page index (DPI) during dealloc ucontext
The driver must free the DPI during dealloc_ucontext instead of during dealloc_pd. The DPI allocation scheme itself remains unchanged.

Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	58
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.h	3
2 files changed, 30 insertions, 31 deletions
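As context for the diff below, here is a minimal, self-contained C sketch of the lifecycle the patch moves to: the DPI is embedded in the per-process ucontext, allocated lazily when the first PD is created, survives dealloc_pd, and is released only when the ucontext itself is torn down. All names here (dpi_model, ucontext_model, pd_model, alloc_pd, dealloc_pd, dealloc_ucontext) are hypothetical stand-ins for illustration only, not the bnxt_re driver's real structures or entry points.

    /* Hypothetical, standalone model of the DPI lifecycle after this patch.
     * None of these types or functions belong to the bnxt_re driver; they
     * only illustrate the ownership change: the DPI lives in the ucontext,
     * not in each PD.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dpi_model {
            bool dbr;               /* stands in for a mapped doorbell page */
    };

    struct ucontext_model {
            struct dpi_model dpi;   /* embedded, as in the new bnxt_re_ucontext */
    };

    struct pd_model {
            struct ucontext_model *uctx;  /* a PD only references the context */
    };

    /* alloc_pd: allocate the DPI lazily, on the first PD of this context. */
    static struct pd_model *alloc_pd(struct ucontext_model *uctx)
    {
            struct pd_model *pd = calloc(1, sizeof(*pd));

            if (!pd)
                    return NULL;
            if (!uctx->dpi.dbr)
                    uctx->dpi.dbr = true;   /* first PD: "map" the doorbell page */
            pd->uctx = uctx;
            return pd;
    }

    /* dealloc_pd: no DPI teardown here any more; the context still owns it. */
    static void dealloc_pd(struct pd_model *pd)
    {
            free(pd);
    }

    /* dealloc_ucontext: the DPI is released exactly once, with the context. */
    static void dealloc_ucontext(struct ucontext_model *uctx)
    {
            if (uctx->dpi.dbr)
                    uctx->dpi.dbr = false;  /* "unmap" the doorbell page */
    }

    int main(void)
    {
            struct ucontext_model uctx = { 0 };
            struct pd_model *pd1 = alloc_pd(&uctx);
            struct pd_model *pd2 = alloc_pd(&uctx);

            dealloc_pd(pd1);                /* DPI survives individual PDs */
            printf("dpi mapped after dealloc_pd: %d\n", uctx.dpi.dbr);
            dealloc_pd(pd2);
            dealloc_ucontext(&uctx);        /* DPI goes away with the context */
            printf("dpi mapped after dealloc_ucontext: %d\n", uctx.dpi.dbr);
            return 0;
    }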
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e794b0fa4ae6..e743ffd392c6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -612,30 +612,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	int rc;
 
 	bnxt_re_destroy_fence_mr(pd);
-	if (ib_pd->uobject && pd->dpi.dbr) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *ucntx;
 
-		/* Free DPI only if this is the first PD allocated by the
-		 * application and mark the context dpi as NULL
-		 */
-		ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &pd->dpi);
+	if (pd->qplib_pd.id) {
+		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+					   &rdev->qplib_res.pd_tbl,
+					   &pd->qplib_pd);
 		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
-			/* Don't fail, continue*/
-		ucntx->dpi = NULL;
-	}
-
-	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-				   &rdev->qplib_res.pd_tbl,
-				   &pd->qplib_pd);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-		return rc;
-	}
+			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
 	}
 
 	kfree(pd);
@@ -667,23 +650,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 	if (udata) {
 		struct bnxt_re_pd_resp resp;
 
-		if (!ucntx->dpi) {
+		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
 			 * ibv_devinfo and family of application when DPIs
 			 * are depleted.
 			 */
 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &pd->dpi, ucntx)) {
+						 &ucntx->dpi, ucntx)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
-			ucntx->dpi = &pd->dpi;
 		}
 
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
-		resp.dpi = ucntx->dpi->dpi;
-		resp.dbr = (u64)ucntx->dpi->umdbr;
+		resp.dpi = ucntx->dpi.dpi;
+		resp.dbr = (u64)ucntx->dpi.umdbr;
 
 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
@@ -960,7 +942,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		qplib_qp->rq.nmap = umem->nmap;
 	}
 
-	qplib_qp->dpi = cntx->dpi;
+	qplib_qp->dpi = &cntx->dpi;
 	return 0;
 rqfail:
 	ib_umem_release(qp->sumem);
@@ -2403,7 +2385,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
 		cq->qplib_cq.nmap = cq->umem->nmap;
-		cq->qplib_cq.dpi = uctx->dpi;
+		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -3388,8 +3370,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
+
+	struct bnxt_re_dev *rdev = uctx->rdev;
+	int rc = 0;
+
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
+
+	if (uctx->dpi.dbr) {
+		/* Free DPI only if this is the first PD allocated by the
+		 * application and mark the context dpi as NULL
+		 */
+		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+					    &rdev->qplib_res.dpi_tbl,
+					    &uctx->dpi);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+			/* Don't fail, continue*/
+		uctx->dpi.dbr = NULL;
+	}
+
 	kfree(uctx);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev *rdev;
 	struct ib_pd ib_pd;
 	struct bnxt_qplib_pd qplib_pd;
-	struct bnxt_qplib_dpi dpi;
 	struct bnxt_re_fence_data fence;
 };
 
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
 struct bnxt_re_ucontext {
 	struct bnxt_re_dev *rdev;
 	struct ib_ucontext ib_uctx;
-	struct bnxt_qplib_dpi *dpi;
+	struct bnxt_qplib_dpi dpi;
 	void *shpg;
 	spinlock_t sh_lock; /* protect shpg */
 };