author     Linus Torvalds <torvalds@linux-foundation.org>   2017-07-21 17:22:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-07-21 17:22:05 -0400
commit     bb236dbeea8181c6733e6d34a53bfef9c8ef4e95
tree       2d71f98dcc5676beb4e6029462d60be0ff6ebbc1  /drivers/infiniband/hw/bnxt_re/ib_verbs.c
parent     24a1635a41bccb5cc426eaef8b88c7e0961ef6bb
parent     a62ab66b13a0f9bcb17b7b761f6670941ed5cd62
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull more rdma fixes from Doug Ledford:
"As per my previous pull request, there were two drivers that each had
a rather large number of legitimate fixes still to be sent.
As it turned out, I had also missed a reasonably large set of important
fixes from one person that span the whole stack. All in all, the
bnxt_re, i40iw, and Dan Carpenter fixes make up roughly two-thirds to
three-quarters of this pull request.
There are also some other random fixes that I didn't send in the last
pull request and have added to this one. This catches the rdma stack
up on fixes through about the beginning of this week; any later fixes
I'll batch up later in the -rc cycle. This gives us a good base for
starting a for-next branch on -rc2.
Summary:
- i40iw fixes
- bnxt_re fixes
- Dan Carpenter bugfixes across stack
- ten more random fixes, no more than two from any one person"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
RDMA/core: Initialize port_num in qp_attr
RDMA/uverbs: Fix the check for port number
IB/cma: Fix reference count leak when no ipv4 addresses are set
RDMA/iser: don't send an rkey if all data is written as immediate data
rxe: fix broken receive queue draining
RDMA/qedr: Prevent memory overrun in verbs' user responses
iw_cxgb4: don't use WR keys/addrs for 0 byte reads
IB/mlx4: Fix CM REQ retries in paravirt mode
IB/rdmavt: Setting of QP timeout can overflow jiffies computation
IB/core: Fix sparse warnings
RDMA/bnxt_re: Fix the value reported for local ack delay
RDMA/bnxt_re: Report MISSED_EVENTS in req_notify_cq
RDMA/bnxt_re: Fix return value of poll routine
RDMA/bnxt_re: Enable atomics only if host BIOS supports them
RDMA/bnxt_re: Specify RDMA component when allocating stats context
RDMA/bnxt_re: Fixed the max_rd_atomic support for initiator and destination QP
RDMA/bnxt_re: Report supported value to IB stack in query_device
RDMA/bnxt_re: Do not free the ctx_tbl entry if delete GID fails
RDMA/bnxt_re: Fix WQE Size posted to HW to prevent it from throwing error
RDMA/bnxt_re: Free doorbell page index (DPI) during dealloc ucontext
...
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 119
1 file changed, 72 insertions, 47 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..f0e01b3ac711 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
 			    (u8 *)&ib_attr->sys_image_guid);
-	ib_attr->max_mr_size = ~0ull;
-	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
-				 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
-				 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_mr = dev_attr->max_mr;
 	ib_attr->max_pd = dev_attr->max_pd;
 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->atomic_cap = IB_ATOMIC_HCA;
-	ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+	if (dev_attr->is_atomic) {
+		ib_attr->atomic_cap = IB_ATOMIC_HCA;
+		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	}
 
 	ib_attr->max_ee_rd_atom = 0;
 	ib_attr->max_res_rd_atom = 0;
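Note on the is_atomic hunk: the driver now advertises IB_ATOMIC_HCA only when the platform actually supports atomics (see "Enable atomics only if host BIOS supports them" in the list above). A userspace consumer can check the advertised capability before posting atomic work requests; a minimal libibverbs sketch (the helper name is ours, not part of the driver):

#include <infiniband/verbs.h>

/* Sketch: gate IBV_WR_ATOMIC_* usage on the advertised capability.
 * After this fix, bnxt_re reports IBV_ATOMIC_NONE when the host
 * BIOS/root complex lacks PCIe atomics support. */
static int atomics_usable(struct ibv_context *ctx)
{
        struct ibv_device_attr attr;

        if (ibv_query_device(ctx, &attr))
                return 0;
        return attr.atomic_cap != IBV_ATOMIC_NONE;
}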
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
 
 	ib_attr->max_pkeys = 1;
-	ib_attr->local_ca_ack_delay = 0;
+	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
 	return 0;
 }
 
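Note: local_ca_ack_delay is a logarithmic encoding defined by the IB spec, where the actual delay is 4.096 us * 2^value, so reporting 0 claimed an unrealistically small 4.096 us ACK delay. A decoding sketch (the value 16 below is purely illustrative, not the driver's BNXT_RE_DEFAULT_ACK_DELAY):

#include <stdio.h>

int main(void)
{
        unsigned int local_ca_ack_delay = 16;   /* illustrative encoding */
        /* IB spec: delay = 4.096 us * 2^local_ca_ack_delay */
        double delay_us = 4.096 * (double)(1ULL << local_ca_ack_delay);

        printf("local ACK delay: %.1f us\n", delay_us);
        return 0;
}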
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 			return -EINVAL;
 		ctx->refcnt--;
 		if (!ctx->refcnt) {
-			rc = bnxt_qplib_del_sgid
-					(sgid_tbl,
-					 &sgid_tbl->tbl[ctx->idx], true);
-			if (rc)
+			rc = bnxt_qplib_del_sgid(sgid_tbl,
+						 &sgid_tbl->tbl[ctx->idx],
+						 true);
+			if (rc) {
 				dev_err(rdev_to_dev(rdev),
 					"Failed to remove GID: %#x", rc);
-			ctx_tbl = sgid_tbl->ctx;
-			ctx_tbl[ctx->idx] = NULL;
-			kfree(ctx);
+			} else {
+				ctx_tbl = sgid_tbl->ctx;
+				ctx_tbl[ctx->idx] = NULL;
+				kfree(ctx);
+			}
 		}
 	} else {
 		return -EINVAL;
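Note: the reordering above enforces a common error-path rule: tear down driver bookkeeping only after the hardware delete succeeds, so a failed delete leaves the table entry intact for a later retry instead of leaking a live GID. A self-contained sketch of the pattern (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

static void *ctx_tbl[16];                       /* hypothetical context table */
static int hw_del_entry(int idx) { (void)idx; return 0; }   /* stand-in */

static int del_entry(int idx)
{
        int rc = hw_del_entry(idx);

        if (rc) {
                fprintf(stderr, "delete failed: %#x\n", rc);
        } else {
                free(ctx_tbl[idx]);             /* forget it only on success */
                ctx_tbl[idx] = NULL;
        }
        return rc;
}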
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 
 	/* Create a fence MW only for kernel consumers */
 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
-	if (!mw) {
+	if (IS_ERR(mw)) {
 		dev_err(rdev_to_dev(rdev),
 			"Failed to create fence-MW for PD: %p\n", pd);
-		rc = -EINVAL;
+		rc = PTR_ERR(mw);
 		goto fail;
 	}
 	fence->mw = mw;
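Note: bnxt_re_alloc_mw() follows the kernel's pointer-or-error convention and returns ERR_PTR(-errno) on failure, never NULL, so the old !mw test could let an error pointer escape. A generic sketch of the convention (foo_alloc() is hypothetical):

#include <linux/err.h>

struct foo;
struct foo *foo_alloc(void);    /* hypothetical: returns ERR_PTR() on failure */

int foo_init(struct foo **out)
{
        struct foo *f = foo_alloc();

        if (IS_ERR(f))
                return PTR_ERR(f);      /* decode the errno from the pointer */
        *out = f;
        return 0;
}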
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	int rc;
 
 	bnxt_re_destroy_fence_mr(pd);
-	if (ib_pd->uobject && pd->dpi.dbr) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *ucntx;
 
-		/* Free DPI only if this is the first PD allocated by the
-		 * application and mark the context dpi as NULL
-		 */
-		ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &pd->dpi);
-		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
-			/* Don't fail, continue*/
-		ucntx->dpi = NULL;
-	}
-
-	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-				   &rdev->qplib_res.pd_tbl,
-				   &pd->qplib_pd);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-		return rc;
-	}
+	if (pd->qplib_pd.id) {
+		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+					   &rdev->qplib_res.pd_tbl,
+					   &pd->qplib_pd);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
+	}
 
 	kfree(pd);
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 	if (udata) {
 		struct bnxt_re_pd_resp resp;
 
-		if (!ucntx->dpi) {
+		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
 			 * ibv_devinfo and family of application when DPIs
 			 * are depleted.
 			 */
 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &pd->dpi, ucntx)) {
+						 &ucntx->dpi, ucntx)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
-			ucntx->dpi = &pd->dpi;
 		}
 
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
-		resp.dpi = ucntx->dpi->dpi;
-		resp.dbr = (u64)ucntx->dpi->umdbr;
+		resp.dpi = ucntx->dpi.dpi;
+		resp.dbr = (u64)ucntx->dpi.umdbr;
 
 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		qplib_qp->rq.nmap = umem->nmap;
 	}
 
-	qplib_qp->dpi = cntx->dpi;
+	qplib_qp->dpi = &cntx->dpi;
 	return 0;
 rqfail:
 	ib_umem_release(qp->sumem);
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
-		qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+		/* Cap the max_rd_atomic to device max */
+		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+						   dev_attr->max_qp_rd_atom);
 	}
 	if (qp_attr_mask & IB_QP_SQ_PSN) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
 	}
 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (qp_attr->max_dest_rd_atomic >
+		    dev_attr->max_qp_init_rd_atom) {
+			dev_err(rdev_to_dev(rdev),
+				"max_dest_rd_atomic requested%d is > dev_max%d",
+				qp_attr->max_dest_rd_atomic,
+				dev_attr->max_qp_init_rd_atom);
+			return -EINVAL;
+		}
+
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
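Note: the two checks treat the limits from query_device differently: the initiator depth (max_rd_atomic) is silently clamped with min_t(), while an out-of-range responder depth (max_dest_rd_atomic) is rejected with -EINVAL. From userspace the safe pattern is to derive both from the queried caps; a hedged sketch (in practice this is done during the RTR/RTS transitions with additional mask bits set):

#include <infiniband/verbs.h>

static int set_rd_atomic(struct ibv_context *ctx, struct ibv_qp *qp)
{
        struct ibv_device_attr dev_attr;
        struct ibv_qp_attr attr = {0};

        if (ibv_query_device(ctx, &dev_attr))
                return -1;
        attr.max_rd_atomic = dev_attr.max_qp_rd_atom;           /* initiator depth */
        attr.max_dest_rd_atomic = dev_attr.max_qp_init_rd_atom; /* responder depth */
        return ibv_modify_qp(qp, &attr,
                             IBV_QP_MAX_QP_RD_ATOMIC |
                             IBV_QP_MAX_DEST_RD_ATOMIC);
}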
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
 		cq->qplib_cq.nmap = cq->umem->nmap;
-		cq->qplib_cq.dpi = uctx->dpi;
+		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
+	num_entries = budget;
 	if (!cq->cql) {
 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
 		goto exit;
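Note: the added num_entries = budget line makes the function honor the poll contract: never report, or write into wc[], more completions than the caller asked for, even when budget was clamped to the CQL scratch size. Callers size their arrays on exactly that guarantee; a sketch:

#include <infiniband/verbs.h>

#define BATCH 16

static int drain_cq(struct ibv_cq *cq)
{
        struct ibv_wc wc[BATCH];
        int n, total = 0;

        while ((n = ibv_poll_cq(cq, BATCH, wc)) > 0)
                total += n;     /* the fix guarantees n <= BATCH */
        return n < 0 ? n : total;
}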
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
 		type = DBR_DBR_TYPE_CQ_ARMSE;
 
+	/* Poll to see if there are missed events */
+	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+		return 1;
+
 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
 
 	return 0;
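Note: this implements the documented IB_CQ_REPORT_MISSED_EVENTS semantics: a positive return tells the caller completions may already be pending, so it must poll again instead of sleeping, which closes the classic arm/poll race. The canonical kernel consumer loop this return value supports (handle_wc() is a hypothetical completion handler):

#include <rdma/ib_verbs.h>

static void handle_wc(struct ib_wc *wc);        /* hypothetical handler */

static void cq_work(struct ib_cq *cq)
{
        struct ib_wc wc;

again:
        while (ib_poll_cq(cq, 1, &wc) > 0)
                handle_wc(&wc);

        /* >0 here means events may have been missed: poll once more */
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                 IB_CQ_REPORT_MISSED_EVENTS) > 0)
                goto again;
}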
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct scatterlist *sg;
 	int entry;
 
+	if (length > BNXT_RE_MAX_MR_SIZE) {
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+			length, BNXT_RE_MAX_MR_SIZE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
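Note: oversized registrations now fail fast with a clear message instead of misbehaving later in page-list setup. From userspace this surfaces as an ibv_reg_mr() failure with errno set; a sketch:

#include <stdio.h>
#include <infiniband/verbs.h>

static struct ibv_mr *reg_or_warn(struct ibv_pd *pd, void *buf, size_t len)
{
        struct ibv_mr *mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);

        if (!mr)
                perror("ibv_reg_mr");   /* e.g. len above the device MR limit */
        return mr;
}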
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
+
+	struct bnxt_re_dev *rdev = uctx->rdev;
+	int rc = 0;
+
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
+
+	if (uctx->dpi.dbr) {
+		/* Free DPI only if this is the first PD allocated by the
+		 * application and mark the context dpi as NULL
+		 */
+		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+					    &rdev->qplib_res.dpi_tbl,
+					    &uctx->dpi);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+			/* Don't fail, continue*/
+		uctx->dpi.dbr = NULL;
+	}
+
 	kfree(uctx);
 	return 0;
 }
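Note: taken together, the DPI hunks in alloc_pd, dealloc_pd, bnxt_re_init_user_qp, bnxt_re_create_cq, and this one move the doorbell page index from the first PD into the per-process ucontext, so freeing that first PD no longer yanks the doorbell out from under sibling PDs, QPs, and CQs. The resulting lifetime, sketched with names from the diff:

        /* DPI lifetime after this series (sketch):
         *   alloc_ucontext()        uctx->dpi.dbr == NULL
         *   first alloc_pd(udata)   bnxt_qplib_alloc_dpi(..., &uctx->dpi, ...)
         *   dealloc_pd()            DPI untouched; other users may still map it
         *   dealloc_ucontext()      bnxt_qplib_dealloc_dpi(..., &uctx->dpi)
         */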