-rw-r--r--  drivers/infiniband/core/cma.c                |   2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c         |  13
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h      |   9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c     | 119
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h     |   3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c         |   1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c     |  29
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h     |   1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c     |  16
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h     |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c  |   9
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c             |   1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c             |   2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h          |   1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c       |   5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c     |   2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c     |  60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c     |   5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c    |  60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c    |  19
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h    |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c              |   4
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c              |   2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  |   4
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c           |  16
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c            |   4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c         |   3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c        |   3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c    |   1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c |   6
-rw-r--r--  include/rdma/ib_addr.h                       |   6
-rw-r--r--  include/rdma/rdmavt_qp.h                     |  14
32 files changed, 306 insertions(+), 119 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 11aff923b633..0eb393237ba2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1033,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
+		qp_attr->port_num = id_priv->id.port_num;
+		*qp_attr_mask |= IB_QP_PORT;
 	} else
 		ret = -ENOSYS;
 
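Note (annotation, not part of the patch): iWARP consumers pass the attributes filled in here straight to ib_modify_qp(), so qp_attr->port_num must be populated and IB_QP_PORT set for the value to be trusted; this pairs with the mask-gated port check added to uverbs_cmd.c below. A minimal sketch of that call chain, assuming cm_id and qp already exist:

	struct ib_qp_attr qp_attr;
	int qp_attr_mask;

	rdma_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	ib_modify_qp(qp, &qp_attr, qp_attr_mask);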
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3f55d18a3791..2c98533a0203 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	struct ib_uobject		*uobj;
 	struct ib_cq			*cq;
 	struct ib_ucq_object		*obj;
-	struct ib_uverbs_event_queue	*ev_queue;
 	int				ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	 */
 	uverbs_uobject_get(uobj);
 	cq = uobj->object;
-	ev_queue = cq->cq_context;
 	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	memset(&resp, 0, sizeof(resp));
@@ -1935,7 +1933,8 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto out;
 	}
 
-	if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+	if ((cmd->base.attr_mask & IB_QP_PORT) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
 		ret = -EINVAL;
 		goto release_qp;
 	}
@@ -2088,7 +2087,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_qp	cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_uobject		*uobj;
-	struct ib_qp			*qp;
 	struct ib_uqp_object		*obj;
 	int				ret = -EINVAL;
 
@@ -2102,7 +2100,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	qp = uobj->object;
 	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
@@ -3004,7 +3001,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_ex_destroy_wq	cmd = {};
 	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
-	struct ib_wq			*wq;
 	struct ib_uobject		*uobj;
 	struct ib_uwq_object		*obj;
 	size_t required_cmd_sz;
@@ -3038,7 +3034,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	wq = uobj->object;
 	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
@@ -3728,10 +3723,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_srq	cmd;
 	struct ib_uverbs_destroy_srq_resp resp;
 	struct ib_uobject		*uobj;
-	struct ib_srq			*srq;
 	struct ib_uevent_object		*obj;
 	int				ret = -EINVAL;
-	enum ib_srq_type		srq_type;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -3741,9 +3734,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	srq = uobj->object;
 	obj = container_of(uobj, struct ib_uevent_object, uobject);
-	srq_type = srq->srq_type;
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
 	 * needs the uobject memory to create the response.
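Note (annotation, not part of the patch): the modify_qp() hunk above matters because many legal transitions carry no port number at all, and validating cmd->base.port_num unconditionally rejected them. A hedged userspace sketch of such a transition (libibverbs, mask abbreviated for illustration):

	struct ibv_qp_attr attr = {
		.qp_state = IBV_QPS_RTS,
		.sq_psn = 0,
	};

	/* no IBV_QP_PORT in the mask, so port_num may hold stale data */
	ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_SQ_PSN);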
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 08772836fded..85527532c49d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -51,6 +51,8 @@
 #define BNXT_RE_PAGE_SIZE_8M		BIT(23)
 #define BNXT_RE_PAGE_SIZE_1G		BIT(30)
 
+#define BNXT_RE_MAX_MR_SIZE		BIT(30)
+
 #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_MRW_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
@@ -60,6 +62,13 @@
 
 #define BNXT_RE_RQ_WQE_THRESHOLD	32
 
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260ms(4 usec * 2 ^(timeout))
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY	16
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
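Note (annotation, not part of the patch): working out the comment's arithmetic, the IBTA local ack timeout is 4.096 usec * 2^timeout; with BNXT_RE_DEFAULT_ACK_DELAY = 16 that is 4.096 usec * 65536 = 268,435 usec, roughly 268 ms (the comment's "4 usec" approximation gives 262,144 usec, hence "approx. 260ms").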
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..f0e01b3ac711 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
 			    (u8 *)&ib_attr->sys_image_guid);
-	ib_attr->max_mr_size = ~0ull;
-	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
-				 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
-				 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_mr = dev_attr->max_mr;
 	ib_attr->max_pd = dev_attr->max_pd;
 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->atomic_cap = IB_ATOMIC_HCA;
-	ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+	if (dev_attr->is_atomic) {
+		ib_attr->atomic_cap = IB_ATOMIC_HCA;
+		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	}
 
 	ib_attr->max_ee_rd_atom = 0;
 	ib_attr->max_res_rd_atom = 0;
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
 
 	ib_attr->max_pkeys = 1;
-	ib_attr->local_ca_ack_delay = 0;
+	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
 	return 0;
 }
 
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 			return -EINVAL;
 		ctx->refcnt--;
 		if (!ctx->refcnt) {
-			rc = bnxt_qplib_del_sgid
-				(sgid_tbl,
-				 &sgid_tbl->tbl[ctx->idx], true);
-			if (rc)
+			rc = bnxt_qplib_del_sgid(sgid_tbl,
+						 &sgid_tbl->tbl[ctx->idx],
+						 true);
+			if (rc) {
 				dev_err(rdev_to_dev(rdev),
 					"Failed to remove GID: %#x", rc);
-			ctx_tbl = sgid_tbl->ctx;
-			ctx_tbl[ctx->idx] = NULL;
-			kfree(ctx);
+			} else {
+				ctx_tbl = sgid_tbl->ctx;
+				ctx_tbl[ctx->idx] = NULL;
+				kfree(ctx);
+			}
 		}
 	} else {
 		return -EINVAL;
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 
 	/* Create a fence MW only for kernel consumers */
 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
-	if (!mw) {
+	if (IS_ERR(mw)) {
 		dev_err(rdev_to_dev(rdev),
 			"Failed to create fence-MW for PD: %p\n", pd);
-		rc = -EINVAL;
+		rc = PTR_ERR(mw);
 		goto fail;
 	}
 	fence->mw = mw;
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	int rc;
 
 	bnxt_re_destroy_fence_mr(pd);
-	if (ib_pd->uobject && pd->dpi.dbr) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *ucntx;
 
-		/* Free DPI only if this is the first PD allocated by the
-		 * application and mark the context dpi as NULL
-		 */
-		ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &pd->dpi);
+	if (pd->qplib_pd.id) {
+		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+					   &rdev->qplib_res.pd_tbl,
+					   &pd->qplib_pd);
 		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
-		/* Don't fail, continue*/
-		ucntx->dpi = NULL;
-	}
-
-	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-				   &rdev->qplib_res.pd_tbl,
-				   &pd->qplib_pd);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-		return rc;
-	}
+			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
+	}
 
 	kfree(pd);
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 	if (udata) {
 		struct bnxt_re_pd_resp resp;
 
-		if (!ucntx->dpi) {
+		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
 			 * ibv_devinfo and family of application when DPIs
 			 * are depleted.
 			 */
 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &pd->dpi, ucntx)) {
+						 &ucntx->dpi, ucntx)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
-			ucntx->dpi = &pd->dpi;
 		}
 
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
-		resp.dpi = ucntx->dpi->dpi;
-		resp.dbr = (u64)ucntx->dpi->umdbr;
+		resp.dpi = ucntx->dpi.dpi;
+		resp.dbr = (u64)ucntx->dpi.umdbr;
 
 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		qplib_qp->rq.nmap = umem->nmap;
 	}
 
-	qplib_qp->dpi = cntx->dpi;
+	qplib_qp->dpi = &cntx->dpi;
 	return 0;
 rqfail:
 	ib_umem_release(qp->sumem);
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
-		qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+		/* Cap the max_rd_atomic to device max */
+		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+						   dev_attr->max_qp_rd_atom);
 	}
 	if (qp_attr_mask & IB_QP_SQ_PSN) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
 	}
 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (qp_attr->max_dest_rd_atomic >
+		    dev_attr->max_qp_init_rd_atom) {
+			dev_err(rdev_to_dev(rdev),
+				"max_dest_rd_atomic requested%d is > dev_max%d",
+				qp_attr->max_dest_rd_atomic,
+				dev_attr->max_qp_init_rd_atom);
+			return -EINVAL;
+		}
+
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
 		cq->qplib_cq.nmap = cq->umem->nmap;
-		cq->qplib_cq.dpi = uctx->dpi;
+		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
+	num_entries = budget;
 	if (!cq->cql) {
 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
 		goto exit;
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
 		type = DBR_DBR_TYPE_CQ_ARMSE;
 
+	/* Poll to see if there are missed events */
+	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+		return 1;
+
 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
 
 	return 0;
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct scatterlist *sg;
 	int entry;
 
+	if (length > BNXT_RE_MAX_MR_SIZE) {
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+			length, BNXT_RE_MAX_MR_SIZE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
+
+	struct bnxt_re_dev *rdev = uctx->rdev;
+	int rc = 0;
+
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
+
+	if (uctx->dpi.dbr) {
+		/* Free DPI only if this is the first PD allocated by the
+		 * application and mark the context dpi as NULL
+		 */
+		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+					    &rdev->qplib_res.dpi_tbl,
+					    &uctx->dpi);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+		/* Don't fail, continue*/
+		uctx->dpi.dbr = NULL;
+	}
+
 	kfree(uctx);
 	return 0;
 }
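Note (annotation, not part of the patch): returning 1 when the CQ is non-empty implements the IB_CQ_REPORT_MISSED_EVENTS contract, which tells the consumer to poll again instead of sleeping. A minimal consumer loop, with process_wc() as a hypothetical stand-in:

	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			process_wc(&wc);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);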
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct ib_pd		ib_pd;
 	struct bnxt_qplib_pd	qplib_pd;
-	struct bnxt_qplib_dpi	dpi;
 	struct bnxt_re_fence_data fence;
 };
 
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
 struct bnxt_re_ucontext {
 	struct bnxt_re_dev	*rdev;
 	struct ib_ucontext	ib_uctx;
-	struct bnxt_qplib_dpi	*dpi;
+	struct bnxt_qplib_dpi	dpi;
 	void			*shpg;
 	spinlock_t		sh_lock;	/* protect shpg */
 };
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 1fce5e73216b..ceae2d92fb08 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
+	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f05500bcdcf1..9af1514e5944 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		}
 		/* Each SGE entry = 1 WQE size16 */
 		wqe_size16 = wqe->num_sge;
+		/* HW requires wqe size has room for atleast one SGE even if
+		 * none was supplied by ULP
+		 */
+		if (!wqe->num_sge)
+			wqe_size16++;
 	}
 
 	/* Specifics */
@@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 	rqe->flags = wqe->flags;
 	rqe->wqe_size = wqe->num_sge +
 			((offsetof(typeof(*rqe), data) + 15) >> 4);
+	/* HW requires wqe size has room for atleast one SGE even if none
+	 * was supplied by ULP
+	 */
+	if (!wqe->num_sge)
+		rqe->wqe_size++;
 
 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
@@ -1885,6 +1895,25 @@ flush_rq:
 	return rc;
 }
 
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+	struct cq_base *hw_cqe, **hw_cqe_ptr;
+	unsigned long flags;
+	u32 sw_cons, raw_cons;
+	bool rc = true;
+
+	spin_lock_irqsave(&cq->hwq.lock, flags);
+	raw_cons = cq->hwq.cons;
+	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+	/* Check for Valid bit. If the CQE is valid, return false */
+	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+	spin_unlock_irqrestore(&cq->hwq.lock, flags);
+	return rc;
+}
+
 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
 						struct cq_res_raweth_qp1 *hwcqe,
 						struct bnxt_qplib_cqe **pcqe,
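Note (annotation, not part of the patch): bnxt_qplib_is_cq_empty() relies on the usual toggle-bit ring convention. A generic sketch of the idea, with hypothetical names rather than the driver's CQE_CMP_VALID() macro:

	/* The producer flips the toggle bit each time it wraps the ring,
	 * so an entry belongs to the consumer only while its bit matches
	 * the phase implied by the raw consumer index.
	 */
	static bool entry_valid(u32 flags, u32 raw_cons, u32 ring_size)
	{
		bool phase = !(raw_cons & ring_size); /* flips on each wrap */

		return !!(flags & ENTRY_TOGGLE_BIT) == phase;
	}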
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 36b7b7db0e3f..19176e06c98a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index fde18cf0e406..ef91ab786dd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 				     0, 0, 0, 0, 0, 0, 0, 0 } };
 
 /* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+	int rc;
+	u16 pcie_ctl2;
+
+	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+				       &pcie_ctl2);
+	if (rc)
+		return false;
+	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr)
 {
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 
 	/* Extract the context from the side buffer */
 	attr->max_qp = le32_to_cpu(sb->max_qp);
+	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
+	attr->max_qp += 1;
 	attr->max_qp_rd_atom =
 		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
 	}
 
+	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
 bail:
 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 	return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a543f959098b..2ce7e2a32cf0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,6 +42,8 @@
 
 #define BNXT_QPLIB_RESERVED_QP_WRS	128
 
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040
+
 struct bnxt_qplib_dev_attr {
 	char				fw_ver[32];
 	u16				max_sgid;
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
 	u32				max_inline_data;
 	u32				l2_db_size;
 	u8				tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+	bool				is_atomic;
 };
 
 struct bnxt_qplib_pd {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 29d30744d6c9..0cd0c1fa27d4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	struct iwch_mr *mhp;
 	u32 mmid;
 	u32 stag = 0;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
 	    max_num_sg > T3_MAX_FASTREG_DEPTH)
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 		goto err;
 
 	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
-	if (!mhp->pages) {
-		ret = -ENOMEM;
+	if (!mhp->pages)
 		goto pl_err;
-	}
 
 	mhp->rhp = rhp;
 	ret = iwch_alloc_pbl(mhp, max_num_sg);
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	mhp->attr.state = 1;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (ret)
 		goto err3;
 
 	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e16fcaf6b5a3..be07da1997e6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		goto err3;
 
 	if (ucontext) {
+		ret = -ENOMEM;
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
 			goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bfc77596acbe..cb7fc0d35d1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 {
 	if (wr->num_sge > 1)
 		return -EINVAL;
-	if (wr->num_sge) {
+	if (wr->num_sge && wr->sg_list[0].length) {
 		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
 		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
 							>> 32));
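Note (annotation, not part of the patch): some ULPs post a zero-byte RDMA READ (e.g. to fence or drain earlier work), and the old test programmed a real source STAG for such a transfer of nothing. A sketch of the WR shape that now takes the zero-length path:

	struct ib_sge sge = { .addr = 0, .length = 0, .lkey = 0 };
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode = IB_WR_RDMA_READ,
			.num_sge = 1,
			.sg_list = &sge,
		},
		/* remote_addr/rkey left zero for the degenerate read */
	};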
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index da2eb5a281fa..9b1566468744 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
 int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
 
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
 void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
 void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
 void i40iw_rem_devusecount(struct i40iw_device *iwdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 6ae98aa7f74e..5a2fa743676c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
 	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
 	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
 	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
-	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+	      iwdev->reset)) {
 		issue_close = 1;
 		iwqp->cm_id = NULL;
 		if (!iwqp->flush_issued) {
@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
 		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
 		attr.qp_state = IB_QPS_ERR;
 		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+		if (iwdev->reset)
+			i40iw_cm_disconn(cm_node->iwqp);
 		i40iw_rem_ref_cm_node(cm_node);
 	}
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index a027e2072477..9ec1ae9a82c9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
 		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
 	}
 
+	cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
 	return ret_code;
 }
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e0f47cc2effc..ae8463ff59a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
 	if (free_hwcqp)
 		dev->cqp_ops->cqp_destroy(dev->cqp);
 
+	i40iw_cleanup_pending_cqp_op(iwdev);
+
 	i40iw_free_dma_mem(dev->hw, &cqp->sq);
 	kfree(cqp->scratch_array);
 	iwdev->cqp.scratch_array = NULL;
@@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
274/** 276/**
275 * i40iw_destroy_aeq - destroy aeq 277 * i40iw_destroy_aeq - destroy aeq
276 * @iwdev: iwarp device 278 * @iwdev: iwarp device
277 * @reset: true if called before reset
278 * 279 *
279 * Issue a destroy aeq request and 280 * Issue a destroy aeq request and
280 * free the resources associated with the aeq 281 * free the resources associated with the aeq
281 * The function is called during driver unload 282 * The function is called during driver unload
282 */ 283 */
283static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset) 284static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
284{ 285{
285 enum i40iw_status_code status = I40IW_ERR_NOT_READY; 286 enum i40iw_status_code status = I40IW_ERR_NOT_READY;
286 struct i40iw_sc_dev *dev = &iwdev->sc_dev; 287 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
 
 	if (!iwdev->msix_shared)
 		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
@@ -304,19 +305,17 @@ exit:
304 * i40iw_destroy_ceq - destroy ceq 305 * i40iw_destroy_ceq - destroy ceq
305 * @iwdev: iwarp device 306 * @iwdev: iwarp device
306 * @iwceq: ceq to be destroyed 307 * @iwceq: ceq to be destroyed
307 * @reset: true if called before reset
308 * 308 *
309 * Issue a destroy ceq request and 309 * Issue a destroy ceq request and
310 * free the resources associated with the ceq 310 * free the resources associated with the ceq
311 */ 311 */
312static void i40iw_destroy_ceq(struct i40iw_device *iwdev, 312static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
313 struct i40iw_ceq *iwceq, 313 struct i40iw_ceq *iwceq)
314 bool reset)
315{ 314{
316 enum i40iw_status_code status; 315 enum i40iw_status_code status;
317 struct i40iw_sc_dev *dev = &iwdev->sc_dev; 316 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
318 317
319 if (reset) 318 if (iwdev->reset)
320 goto exit; 319 goto exit;
321 320
322 status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1); 321 status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
@@ -335,12 +334,11 @@ exit:
335/** 334/**
336 * i40iw_dele_ceqs - destroy all ceq's 335 * i40iw_dele_ceqs - destroy all ceq's
337 * @iwdev: iwarp device 336 * @iwdev: iwarp device
338 * @reset: true if called before reset
339 * 337 *
340 * Go through all of the device ceq's and for each ceq 338 * Go through all of the device ceq's and for each ceq
341 * disable the ceq interrupt and destroy the ceq 339 * disable the ceq interrupt and destroy the ceq
342 */ 340 */
343static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset) 341static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
344{ 342{
345 u32 i = 0; 343 u32 i = 0;
346 struct i40iw_sc_dev *dev = &iwdev->sc_dev; 344 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
349 347
350 if (iwdev->msix_shared) { 348 if (iwdev->msix_shared) {
351 i40iw_disable_irq(dev, msix_vec, (void *)iwdev); 349 i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
352 i40iw_destroy_ceq(iwdev, iwceq, reset); 350 i40iw_destroy_ceq(iwdev, iwceq);
353 iwceq++; 351 iwceq++;
354 i++; 352 i++;
355 } 353 }
356 354
357 for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) { 355 for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
358 i40iw_disable_irq(dev, msix_vec, (void *)iwceq); 356 i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
359 i40iw_destroy_ceq(iwdev, iwceq, reset); 357 i40iw_destroy_ceq(iwdev, iwceq);
360 } 358 }
361} 359}
362 360
363/** 361/**
364 * i40iw_destroy_ccq - destroy control cq 362 * i40iw_destroy_ccq - destroy control cq
365 * @iwdev: iwarp device 363 * @iwdev: iwarp device
366 * @reset: true if called before reset
367 * 364 *
368 * Issue destroy ccq request and 365 * Issue destroy ccq request and
369 * free the resources associated with the ccq 366 * free the resources associated with the ccq
370 */ 367 */
371static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset) 368static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
372{ 369{
373 struct i40iw_sc_dev *dev = &iwdev->sc_dev; 370 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
374 struct i40iw_ccq *ccq = &iwdev->ccq; 371 struct i40iw_ccq *ccq = &iwdev->ccq;
375 enum i40iw_status_code status = 0; 372 enum i40iw_status_code status = 0;
376 373
377 if (!reset) 374 if (!iwdev->reset)
378 status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true); 375 status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
379 if (status) 376 if (status)
380 i40iw_pr_err("ccq destroy failed %d\n", status); 377 i40iw_pr_err("ccq destroy failed %d\n", status);
@@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
 		iwceq->msix_idx = msix_vec->idx;
 		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
 		if (status) {
-			i40iw_destroy_ceq(iwdev, iwceq, false);
+			i40iw_destroy_ceq(iwdev, iwceq);
 			break;
 		}
 		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
@@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
 
 	status = i40iw_configure_aeq_vector(iwdev);
 	if (status) {
-		i40iw_destroy_aeq(iwdev, false);
+		i40iw_destroy_aeq(iwdev);
 		return status;
 	}
 
@@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
1442/** 1439/**
1443 * i40iw_deinit_device - clean up the device resources 1440 * i40iw_deinit_device - clean up the device resources
1444 * @iwdev: iwarp device 1441 * @iwdev: iwarp device
1445 * @reset: true if called before reset
1446 * 1442 *
1447 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses, 1443 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
1448 * destroy the device queues and free the pble and the hmc objects 1444 * destroy the device queues and free the pble and the hmc objects
1449 */ 1445 */
1450static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) 1446static void i40iw_deinit_device(struct i40iw_device *iwdev)
1451{ 1447{
1452 struct i40e_info *ldev = iwdev->ldev; 1448 struct i40e_info *ldev = iwdev->ldev;
1453 1449
@@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
1464 i40iw_destroy_rdma_device(iwdev->iwibdev); 1460 i40iw_destroy_rdma_device(iwdev->iwibdev);
1465 /* fallthrough */ 1461 /* fallthrough */
1466 case IP_ADDR_REGISTERED: 1462 case IP_ADDR_REGISTERED:
1467 if (!reset) 1463 if (!iwdev->reset)
1468 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); 1464 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1469 /* fallthrough */ 1465 /* fallthrough */
1470 case INET_NOTIFIER: 1466 case INET_NOTIFIER:
@@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
1474 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1470 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1475 } 1471 }
1476 /* fallthrough */ 1472 /* fallthrough */
1473 case PBLE_CHUNK_MEM:
1474 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
1475 /* fallthrough */
1477 case CEQ_CREATED: 1476 case CEQ_CREATED:
1478 i40iw_dele_ceqs(iwdev, reset); 1477 i40iw_dele_ceqs(iwdev);
1479 /* fallthrough */ 1478 /* fallthrough */
1480 case AEQ_CREATED: 1479 case AEQ_CREATED:
1481 i40iw_destroy_aeq(iwdev, reset); 1480 i40iw_destroy_aeq(iwdev);
1482 /* fallthrough */ 1481 /* fallthrough */
1483 case IEQ_CREATED: 1482 case IEQ_CREATED:
1484 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset); 1483 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
1485 /* fallthrough */ 1484 /* fallthrough */
1486 case ILQ_CREATED: 1485 case ILQ_CREATED:
1487 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset); 1486 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
1488 /* fallthrough */ 1487 /* fallthrough */
1489 case CCQ_CREATED: 1488 case CCQ_CREATED:
1490 i40iw_destroy_ccq(iwdev, reset); 1489 i40iw_destroy_ccq(iwdev);
1491 /* fallthrough */
1492 case PBLE_CHUNK_MEM:
1493 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
1494 /* fallthrough */ 1490 /* fallthrough */
1495 case HMC_OBJS_CREATED: 1491 case HMC_OBJS_CREATED:
1496 i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset); 1492 i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
1497 /* fallthrough */ 1493 /* fallthrough */
1498 case CQP_CREATED: 1494 case CQP_CREATED:
1499 i40iw_destroy_cqp(iwdev, true); 1495 i40iw_destroy_cqp(iwdev, true);
@@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
 		if (status)
 			break;
+		iwdev->init_state = PBLE_CHUNK_MEM;
 		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
 		i40iw_register_notifiers();
 		iwdev->init_state = INET_NOTIFIER;
@@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 	} while (0);
 
 	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
-	i40iw_deinit_device(iwdev, false);
+	i40iw_deinit_device(iwdev);
 	return -ERESTART;
 }
 
@@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
 	iwdev = &hdl->device;
 	iwdev->closing = true;
 
+	if (reset)
+		iwdev->reset = true;
+
 	i40iw_cm_disconnect_all(iwdev);
 	destroy_workqueue(iwdev->virtchnl_wq);
-	i40iw_deinit_device(iwdev, reset);
+	i40iw_deinit_device(iwdev);
 }
 
 /**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index db41ab40da9c..71050c5d29a0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 	set_64bit_val(wqe, 0, info->paddr);
 	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 	set_64bit_val(wqe, 16, header[0]);
+
+	/* Ensure all data is written before writing valid bit */
+	wmb();
 	set_64bit_val(wqe, 24, header[1]);
 
 	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
@@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 
 	if (!list_empty(rxlist)) {
 		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
-		plist = &tmpbuf->list;
 		while ((struct list_head *)tmpbuf != rxlist) {
 			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
 				break;
+			plist = &tmpbuf->list;
 			tmpbuf = (struct i40iw_puda_buf *)plist->next;
 		}
 		/* Insert buf before tmpbuf */
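Note (annotation, not part of the patch): the wmb() above orders the CPU's stores against the device's DMA reads, not against other CPUs. Generic shape of the pattern, with hypothetical descriptor fields:

	desc->addr = cpu_to_le64(dma_addr);
	desc->len = cpu_to_le32(len);
	wmb(); /* all descriptor words visible before the valid bit */
	desc->ctrl = cpu_to_le32(ctrl | DESC_VALID);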
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 56d986924a4c..e311ec559f4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
  */
 void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
 {
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
 	unsigned long flags;
 
 	if (cqp_request->dynamic) {
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
 		spin_unlock_irqrestore(&cqp->req_lock, flags);
 	}
+	wake_up(&iwdev->close_wq);
 }
 
 /**
@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
 }
 
 /**
+ * i40iw_free_pending_cqp_request -free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+					   struct i40iw_cqp_request *cqp_request)
+{
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+	if (cqp_request->waiting) {
+		cqp_request->compl_info.error = true;
+		cqp_request->request_done = true;
+		wake_up(&cqp_request->waitq);
+	}
+	i40iw_put_cqp_request(cqp, cqp_request);
+	wait_event_timeout(iwdev->close_wq,
+			   !atomic_read(&cqp_request->refcount),
+			   1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+	struct i40iw_cqp *cqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request = NULL;
+	struct cqp_commands_info *pcmdinfo = NULL;
+	u32 i, pending_work, wqe_idx;
+
+	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+	for (i = 0; i < pending_work; i++) {
+		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+	}
+
+	while (!list_empty(&dev->cqp_cmd_head)) {
+		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+	}
+}
+
+/**
  * i40iw_free_qp - callback after destroy cqp completes
  * @cqp_request: cqp request for destroy qp
  * @num: not used
@@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
-	if (status)
-		i40iw_pr_err("CQP-OP Destroy QP fail");
+	if (!status)
+		return;
+
+	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_rem_devusecount(iwdev);
 }
 
 /**
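Note (annotation, not part of the patch): forcing request_done and the error flag wakes threads blocked on the request, and the wake_up(&iwdev->close_wq) added to i40iw_free_cqp_request() lets i40iw_free_pending_cqp_request() observe the refcount reaching zero. The waiter it unblocks looks roughly like this sketch (timeout constant assumed):

	wait_event_timeout(cqp_request->waitq,
			   cqp_request->request_done,
			   I40IW_CQP_TIMEOUT); /* hypothetical constant */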
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4dbe61ec7a77..02d871db7ca5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
 			       struct i40iw_qp *iwqp,
 			       u32 qp_num)
 {
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
 	if (qp_num)
 		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+	if (iwpbl->pbl_allocated)
+		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
 	kfree(iwqp->kqp.wrid_mem);
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
 				  struct i40iw_qp *iwqp,
 				  struct i40iw_qp_init_info *init_info)
 {
-	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
 	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
 
 	iwqp->page = qpmr->sq_page;
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 			ucontext = to_ucontext(ibpd->uobject->context);
 
 			if (req.user_wqe_buffers) {
+				struct i40iw_pbl *iwpbl;
+
 				spin_lock_irqsave(
 					&ucontext->qp_reg_mem_list_lock, flags);
-				iwqp->iwpbl = i40iw_get_pbl(
+				iwpbl = i40iw_get_pbl(
 					(unsigned long)req.user_wqe_buffers,
 					&ucontext->qp_reg_mem_list);
 				spin_unlock_irqrestore(
 					&ucontext->qp_reg_mem_list_lock, flags);
 
-				if (!iwqp->iwpbl) {
+				if (!iwpbl) {
 					err_code = -ENODATA;
 					i40iw_pr_err("no pbl info\n");
 					goto error;
 				}
+				memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
 			}
 		}
 		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		memset(&req, 0, sizeof(req));
 		iwcq->user_mode = true;
 		ucontext = to_ucontext(context);
-		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+			err_code = -EFAULT;
 			goto cq_free_resources;
+		}
 
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
 		ucontext = to_ucontext(ibpd->uobject->context);
 		i40iw_del_memlist(iwmr, ucontext);
 	}
-	if (iwpbl->pbl_allocated)
+	if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
 	kfree(iwmr);
 	return 0;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 07c3fec77de6..9067443cd311 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -170,7 +170,7 @@ struct i40iw_qp {
 	struct i40iw_qp_kmode kqp;
 	struct i40iw_dma_mem host_ctx;
 	struct timer_list terminate_timer;
-	struct i40iw_pbl *iwpbl;
+	struct i40iw_pbl iwpbl;
 	struct i40iw_dma_mem q2_ctx_mem;
 	struct i40iw_dma_mem ietf_mem;
 	struct completion sq_drained;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 1e6c526450d9..fedaf8260105 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
 	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
 		sl_cm_id = get_local_comm_id(mad);
+		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+		if (id)
+			goto cont;
 		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
 		if (IS_ERR(id)) {
 			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
         return -EINVAL;
     }
 
+cont:
     set_local_comm_id(mad, id->pv_cm_id);
 
     if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
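
The mlx4 change makes the CM multiplexer idempotent for retransmitted REQ/REP/SIDR_REQ MADs: it first looks up an existing slave/sl_cm_id mapping and only allocates a new pv_cm_id when none exists, jumping past the allocation via the new cont: label. A self-contained sketch of the lookup-before-alloc idiom, with a hypothetical fixed-size table standing in for the driver's id map:

#include <stddef.h>

/* Hypothetical mapping table: a fixed-size array standing in for the
 * driver's idr-backed id map. */
struct id_entry { int slave; unsigned sl_cm_id; int used; };
static struct id_entry table[64];

static struct id_entry *map_get(int slave, unsigned sl_cm_id)
{
    for (size_t i = 0; i < 64; i++)
        if (table[i].used && table[i].slave == slave &&
            table[i].sl_cm_id == sl_cm_id)
            return &table[i];
    return NULL;
}

static struct id_entry *map_alloc(int slave, unsigned sl_cm_id)
{
    for (size_t i = 0; i < 64; i++)
        if (!table[i].used) {
            table[i] = (struct id_entry){ slave, sl_cm_id, 1 };
            return &table[i];
        }
    return NULL;
}

/* Lookup-before-alloc: a retransmitted request with the same key reuses
 * its existing mapping instead of leaking a duplicate entry. */
struct id_entry *get_or_alloc(int slave, unsigned sl_cm_id)
{
    struct id_entry *id = map_get(slave, sl_cm_id);

    return id ? id : map_alloc(slave, sl_cm_id);
}
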
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8ab2f1360a45..2c40a2e989d2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -835,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                        access_flags, 0);
     err = PTR_ERR_OR_ZERO(*umem);
     if (err < 0) {
-        mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+        mlx5_ib_err(dev, "umem get failed (%d)\n", err);
         return err;
     }
 
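
The mlx5 fix corrects a log statement that applied PTR_ERR() to the double pointer umem rather than *umem (and printed it with %ld); since err already holds PTR_ERR_OR_ZERO(*umem), logging err is both correct and cheaper. For reference, a userspace model of the ERR_PTR convention involved (errno_ptr()/ptr_err_or_zero() are stand-ins for the kernel's ERR_PTR()/PTR_ERR_OR_ZERO()):

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's ERR_PTR convention: addresses in the
 * last page of the address space encode negative errno values. */
#define MAX_ERRNO 4095

static inline void *errno_ptr(long err)           /* cf. ERR_PTR() */
{
    return (void *)err;
}

static inline int ptr_err_or_zero(const void *p)  /* cf. PTR_ERR_OR_ZERO() */
{
    if ((uintptr_t)p >= (uintptr_t)-MAX_ERRNO)
        return (int)(intptr_t)p;
    return 0;
}

int main(void)
{
    void *umem = errno_ptr(-12);       /* allocator failed: -ENOMEM */
    int err = ptr_err_or_zero(umem);

    if (err < 0)
        printf("umem get failed (%d)\n", err); /* log err, not the pointer */
    return 0;
}
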
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2f30bda8457a..27d5e8d9f08d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -744,7 +744,8 @@ err:
     if (is_uctx_pd) {
         ocrdma_release_ucontext_pd(uctx);
     } else {
-        status = _ocrdma_dealloc_pd(dev, pd);
+        if (_ocrdma_dealloc_pd(dev, pd))
+            pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
     }
 exit:
     return ERR_PTR(status);
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
         goto err;
 
     if (udata == NULL) {
+        status = -ENOMEM;
         srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
                                      GFP_KERNEL);
         if (srq->rqe_wr_id_tbl == NULL)
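
Both ocrdma hunks tighten error-path bookkeeping: the PD unwind no longer assigns the result of _ocrdma_dealloc_pd() to status (a successful dealloc would have reset status to 0, so the function returned ERR_PTR(0), i.e. NULL, which callers do not treat as an error), and the SRQ path now sets status = -ENOMEM before an allocation that can fail. A short sketch of the rule, keep the primary error and only log cleanup failures (setup()/cleanup() are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical stand-ins for the failing operation and its unwind. */
static int setup(void)   { return -1; }
static int cleanup(void) { return 0;  }

/* Keep the primary error: log a cleanup failure, never return it. */
int do_op(void)
{
    int status = setup();

    if (status) {
        if (cleanup())
            fprintf(stderr, "%s: cleanup failed\n", __func__);
        return status;   /* the original error survives the unwind */
    }
    return 0;
}
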
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 548e4d1e998f..2ae71b8f1ba8 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
 
 #define DB_ADDR_SHIFT(addr)    ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+                                        size_t len)
+{
+    size_t min_len = min_t(size_t, len, udata->outlen);
+
+    return ib_copy_to_udata(udata, src, min_len);
+}
+
 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
     if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
     uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
     uresp.max_cqes = QEDR_MAX_CQES;
 
-    rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+    rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
     if (rc)
         goto err;
 
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 
     uresp.pd_id = pd_id;
 
-    rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+    rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
     if (rc) {
         DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
         dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
     uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
     uresp.icid = cq->icid;
 
-    rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+    rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
     if (rc)
         DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
 
@@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
     uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
     uresp.qp_id = qp->qp_id;
 
-    rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+    rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
     if (rc)
         DP_ERR(dev,
                "create qp: failed a copy to user space with qp icid=0x%x.\n",
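
All five qedr hunks funnel response copies through the new qedr_ib_copy_to_udata(), which clamps the copy length to udata->outlen, the size the user-space consumer advertised, so the kernel never writes past a response buffer that is shorter than its own uresp struct (for instance one from an older user-space provider, which then simply receives the leading fields). The clamp idiom in a self-contained form (struct out_buf and copy_out() model the udata plumbing and are not the qedr ABI):

#include <string.h>

/* Illustrative model of a user response buffer: 'outlen' is the size
 * the consumer advertised, as udata->outlen does in-kernel. */
struct out_buf {
    void   *addr;
    size_t  outlen;
};

static size_t min_size(size_t a, size_t b)
{
    return a < b ? a : b;
}

/* Copy at most what the consumer can hold: an older consumer with a
 * shorter response struct just receives the prefix it asked for. */
int copy_out(struct out_buf *u, const void *src, size_t len)
{
    memcpy(u->addr, src, min_size(len, u->outlen));
    return 0;
}
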
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 459865439a0b..8876ee7bc326 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1258,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
     if (attr_mask & IB_QP_TIMEOUT) {
         qp->timeout = attr->timeout;
-        qp->timeout_jiffies =
-            usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-                1000UL);
+        qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
     }
 
     if (attr_mask & IB_QP_QKEY)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index be944d5aa9af..a958ee918a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
         kfree_skb(skb);
     }
 
+    if (notify)
+        return;
+
     while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
         advance_consumer(qp->rq.queue);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 07511718d98d..af90a7d42b96 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
     spin_unlock_irqrestore(&rq->producer_lock, flags);
 
+    if (qp->resp.state == QP_STATE_ERROR)
+        rxe_run_task(&qp->resp.task, 1);
+
 err1:
     return err;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 70dacaf9044e..4ce315c92b48 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2239,6 +2239,7 @@ static struct net_device *ipoib_add_port(const char *format,
         goto register_failed;
     }
 
+    result = -ENOMEM;
     if (ipoib_cm_add_mode_attr(priv->dev))
         goto sysfs_failed;
     if (ipoib_add_pkey_attr(priv->dev))
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 
     if (unsol_sz < edtl) {
         hdr->flags     |= ISER_WSV;
-        hdr->write_stag = cpu_to_be32(mem_reg->rkey);
-        hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+        if (buf_out->data_len > imm_sz) {
+            hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+            hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+        }
 
         iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
                  "VA:%#llX + unsol:%d\n",
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 4b34c51f859e..b73a14edc85e 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -205,11 +205,13 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
     dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
     if (dev) {
         ip4 = in_dev_get(dev);
-        if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
+        if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
             ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
                                    (struct in6_addr *)gid);
+
+        if (ip4)
             in_dev_put(ip4);
-        }
+
         dev_put(dev);
     }
 }
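
The ib_addr.h fix closes a reference leak: in_dev_get() takes a reference whenever it returns non-NULL, but the old code only called in_dev_put() inside the ifa_list check, so an interface with no IPv4 address configured left the in_device refcount elevated. The corrected shape, acquire once and release on every path, in a standalone sketch (obj_get()/obj_put() are hypothetical stand-ins for in_dev_get()/in_dev_put()):

#include <stddef.h>

struct obj { int refcount; int has_addr; };

/* Hypothetical refcounted getters standing in for in_dev_get()/in_dev_put(). */
static struct obj *obj_get(struct obj *o) { if (o) o->refcount++; return o; }
static void obj_put(struct obj *o)        { o->refcount--; }

void use_addr(struct obj *dev)
{
    struct obj *o = obj_get(dev);

    if (o && o->has_addr)
        /* ... consume the address ... */;

    if (o)             /* release on every path, not only the */
        obj_put(o);    /* path that actually used the address */
}
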
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index be6472e5b06b..d664d2e76280 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -647,6 +647,20 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
     return len >> qp->log_pmtu;
 }
 
+/**
+ * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
+ * @timeout - timeout input(0 - 31).
+ *
+ * Return a timeout value in jiffies.
+ */
+static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
+{
+    if (timeout > 31)
+        timeout = 31;
+
+    return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
+}
+
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
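
The new rvt_timeout_to_jiffies() helper, used by rvt_modify_qp() above, encodes the InfiniBand local ACK timeout formula 4.096 us * 2^timeout with the exponent clamped to the field's five-bit range. A standalone check of the arithmetic; usecs_per_jiffy_1000hz() below models usecs_to_jiffies() at HZ=1000, so real kernel values vary with HZ:

#include <stdio.h>

/* Model of usecs_to_jiffies() at HZ=1000: one jiffy per millisecond,
 * rounded up. */
static unsigned long usecs_per_jiffy_1000hz(unsigned long us)
{
    return (us + 999) / 1000;
}

static unsigned long timeout_to_jiffies(unsigned char timeout)
{
    if (timeout > 31)        /* the IB Local ACK Timeout field is 5 bits */
        timeout = 31;

    /* same expression as the kernel helper: 4.096 us * 2^timeout */
    return usecs_per_jiffy_1000hz(1UL << timeout) * 4096UL / 1000UL;
}

int main(void)
{
    /* timeout=14: 4.096 us * 16384 ~= 67 ms; the helper's rounding
     * yields 69 jiffies at this model's HZ=1000 */
    printf("timeout=14 -> %lu jiffies\n", timeout_to_jiffies(14));
    return 0;
}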