author		Shamir Rabinovitch <shamir.rabinovitch@oracle.com>	2019-03-31 12:10:07 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-04-01 14:00:47 -0400
commit		ff23dfa134576e071ace69e91761d229a0f73139 (patch)
tree		5c1b572efdb90153268b69dd7695437f15dd762d
parent		bdeacabd1a5fb4c0274b949d7220501c3401a3b4 (diff)
IB: Pass only ib_udata in function prototypes
Now that ib_udata is passed to all of the drivers' object create/destroy APIs, the ib_udata carries the ib_ucontext for every user command. There is no need to also pass the ib_ucontext via the function prototypes. Make ib_udata the only argument passed.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
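The driver-side shape this conversion produces is visible in every hunk below: the explicit ib_ucontext parameter disappears, a driver that still needs its ucontext recovers it from the udata with rdma_udata_to_drv_context(), and the old `if (context)` tests become `if (udata)` since a NULL udata now identifies kernel-side callers. A minimal sketch of the converted pattern, using a hypothetical foo driver (the foo_* names are illustrative only, not part of this patch):

	/* Sketch of the post-conversion pattern; foo_* names are hypothetical. */
	static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
	{
		/* NULL when udata is NULL (kernel caller), otherwise the
		 * driver ucontext wrapped around the caller's ib_ucontext.
		 */
		struct foo_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct foo_ucontext, ibucontext);

		if (udata) {
			/* user path: copy a response through udata, use uctx */
		}
		return 0;
	}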
-rw-r--r--	drivers/infiniband/core/cq.c	2
-rw-r--r--	drivers/infiniband/core/uverbs_cmd.c	8
-rw-r--r--	drivers/infiniband/core/uverbs_std_types_cq.c	3
-rw-r--r--	drivers/infiniband/core/verbs.c	6
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	21
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.h	4
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_provider.c	16
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c	9
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h	1
-rw-r--r--	drivers/infiniband/hw/cxgb4/provider.c	5
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_cq.c	23
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_device.h	4
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hw_v1.c	4
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_pd.c	5
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_verbs.c	20
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c	28
-rw-r--r--	drivers/infiniband/hw/mlx4/doorbell.c	6
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	6
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	4
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	8
-rw-r--r--	drivers/infiniband/hw/mlx4/srq.c	3
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c	23
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	17
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	4
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	3
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_provider.c	45
-rw-r--r--	drivers/infiniband/hw/nes/nes_verbs.c	35
-rw-r--r--	drivers/infiniband/hw/ocrdma/ocrdma_verbs.c	50
-rw-r--r--	drivers/infiniband/hw/ocrdma/ocrdma_verbs.h	4
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.c	19
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.h	4
-rw-r--r--	drivers/infiniband/hw/usnic/usnic_ib_verbs.c	4
-rw-r--r--	drivers/infiniband/hw/usnic/usnic_ib_verbs.h	4
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c	12
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c	13
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h	4
-rw-r--r--	drivers/infiniband/sw/rdmavt/cq.c	4
-rw-r--r--	drivers/infiniband/sw/rdmavt/cq.h	1
-rw-r--r--	drivers/infiniband/sw/rdmavt/mmap.c	16
-rw-r--r--	drivers/infiniband/sw/rdmavt/mmap.h	6
-rw-r--r--	drivers/infiniband/sw/rdmavt/pd.c	4
-rw-r--r--	drivers/infiniband/sw/rdmavt/pd.h	3
-rw-r--r--	drivers/infiniband/sw/rdmavt/qp.c	5
-rw-r--r--	drivers/infiniband/sw/rdmavt/srq.c	6
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_cq.c	10
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_loc.h	16
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_mmap.c	14
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_qp.c	15
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_queue.c	22
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_queue.h	15
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_srq.c	14
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_verbs.c	16
-rw-r--r--	include/rdma/ib_verbs.h	5
53 files changed, 271 insertions, 328 deletions
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 4797eef549c3..a4c81992267c 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -147,7 +147,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 	struct ib_cq *cq;
 	int ret = -ENOMEM;
 
-	cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL);
+	cq = dev->ops.create_cq(dev, &cq_attr, NULL);
 	if (IS_ERR(cq))
 		return cq;
 
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index da31dba33fc5..89b0f5420dfe 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -423,7 +423,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 	atomic_set(&pd->usecnt, 0);
 	pd->res.type = RDMA_RESTRACK_PD;
 
-	ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
+	ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
 	if (ret)
 		goto err_alloc;
 
@@ -594,8 +594,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
 	}
 
 	if (!xrcd) {
-		xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context,
-					      &attrs->driver_udata);
+		xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
 		if (IS_ERR(xrcd)) {
 			ret = PTR_ERR(xrcd);
 			goto err;
@@ -1009,8 +1008,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 	attr.comp_vector = cmd->comp_vector;
 	attr.flags = cmd->flags;
 
-	cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
-				   &attrs->driver_udata);
+	cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
 		goto err_file;
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index cde608c268ff..977e386009fc 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -111,8 +111,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	INIT_LIST_HEAD(&obj->comp_list);
 	INIT_LIST_HEAD(&obj->async_list);
 
-	cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
-				   &attrs->driver_udata);
+	cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
 		goto err_event_file;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ba9a89df815d..a479f4c12541 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -269,7 +269,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 	pd->res.type = RDMA_RESTRACK_PD;
 	rdma_restrack_set_task(&pd->res, caller);
 
-	ret = device->ops.alloc_pd(pd, NULL, NULL);
+	ret = device->ops.alloc_pd(pd, NULL);
 	if (ret) {
 		kfree(pd);
 		return ERR_PTR(ret);
@@ -1911,7 +1911,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
 {
 	struct ib_cq *cq;
 
-	cq = device->ops.create_cq(device, cq_attr, NULL, NULL);
+	cq = device->ops.create_cq(device, cq_attr, NULL);
 
 	if (!IS_ERR(cq)) {
 		cq->device = device;
@@ -2142,7 +2142,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
 	if (!device->ops.alloc_xrcd)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
+	xrcd = device->ops.alloc_xrcd(device, NULL);
 	if (!IS_ERR(xrcd)) {
 		xrcd->device = device;
 		xrcd->inode = NULL;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a586ac28630b..04e3529ffe06 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -576,14 +576,12 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 			       &pd->qplib_pd);
 }
 
-int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
-		     struct ib_udata *udata)
+int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ibpd->device;
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
-						      struct bnxt_re_ucontext,
-						      ib_uctx);
+	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
+		udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
 	int rc;
 
@@ -2589,7 +2587,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -2616,12 +2613,10 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 	if (entries > dev_attr->max_cq_wqes + 1)
 		entries = dev_attr->max_cq_wqes + 1;
 
-	if (context) {
+	if (udata) {
 		struct bnxt_re_cq_req req;
-		struct bnxt_re_ucontext *uctx = container_of
-						(context,
-						 struct bnxt_re_ucontext,
-						 ib_uctx);
+		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
+			udata, struct bnxt_re_ucontext, ib_uctx);
 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
 			rc = -EFAULT;
 			goto fail;
@@ -2672,7 +2667,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 	atomic_inc(&rdev->cq_count);
 	spin_lock_init(&cq->cq_lock);
 
-	if (context) {
+	if (udata) {
 		struct bnxt_re_cq_resp resp;
 
 		resp.cqid = cq->qplib_cq.id;
@@ -2690,7 +2685,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 	return &cq->ib_cq;
 
 c2fail:
-	if (context)
+	if (udata)
 		ib_umem_release(cq->umem);
 fail:
 	kfree(cq->cql);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 44e49988600e..488dc735a260 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -163,8 +163,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
 		      int index, union ib_gid *gid);
 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
 					    u8 port_num);
-int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
-		     struct ib_udata *udata);
+int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
 				struct rdma_ah_attr *ah_attr,
@@ -197,7 +196,6 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
 		      const struct ib_recv_wr **bad_recv_wr);
 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
 				struct ib_udata *udata);
 int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index bf07e93aeb94..62b99d26f0d3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -106,7 +106,6 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
 				    const struct ib_cq_init_attr *attr,
-				    struct ib_ucontext *ib_context,
 				    struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -114,7 +113,6 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
 	struct iwch_cq *chp;
 	struct iwch_create_cq_resp uresp;
 	struct iwch_create_cq_req ureq;
-	struct iwch_ucontext *ucontext = NULL;
 	static int warned;
 	size_t resplen;
 
@@ -127,8 +125,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
 
-	if (ib_context) {
-		ucontext = to_iwch_ucontext(ib_context);
+	if (udata) {
 		if (!t3a_device(rhp)) {
 			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
 				kfree(chp);
@@ -154,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
 	entries = roundup_pow_of_two(entries);
 	chp->cq.size_log2 = ilog2(entries);
 
-	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
+	if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata)) {
 		kfree(chp);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -170,8 +167,10 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	if (ucontext) {
+	if (udata) {
 		struct iwch_mm_entry *mm;
+		struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
+			udata, struct iwch_ucontext, ibucontext);
 
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm) {
@@ -378,8 +377,7 @@ static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
 }
 
-static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
-			    struct ib_udata *udata)
+static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	struct iwch_pd *php = to_iwch_pd(pd);
 	struct ib_device *ibdev = pd->device;
@@ -394,7 +392,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
 
 	php->pdid = pdid;
 	php->rhp = rhp;
-	if (context) {
+	if (udata) {
 		struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
 
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 571281888de0..52ce586621c6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -994,7 +994,6 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *ib_context,
 			     struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -1003,10 +1002,11 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	struct c4iw_cq *chp;
 	struct c4iw_create_cq ucmd;
 	struct c4iw_create_cq_resp uresp;
-	struct c4iw_ucontext *ucontext = NULL;
 	int ret, wr_len;
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
+	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+		udata, struct c4iw_ucontext, ibucontext);
 
 	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
 	if (attr->flags)
@@ -1017,8 +1017,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	if (vector >= rhp->rdev.lldi.nciq)
 		return ERR_PTR(-EINVAL);
 
-	if (ib_context) {
-		ucontext = to_c4iw_ucontext(ib_context);
+	if (udata) {
 		if (udata->inlen < sizeof(ucmd))
 			ucontext->is_32b_cqe = 1;
 	}
@@ -1070,7 +1069,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	/*
 	 * memsize must be a multiple of the page size if its a user cq.
 	 */
-	if (ucontext)
+	if (udata)
 		memsize = roundup(memsize, PAGE_SIZE);
 
 	chp->cq.size = hwentries;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 586fd1a00d33..4b721a261053 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -995,7 +995,6 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *ib_context,
 			     struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 12f7d3ae6a53..0fbad47661cc 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -204,8 +204,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 	mutex_unlock(&rhp->rdev.stats.lock);
 }
 
-static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
-			    struct ib_udata *udata)
+static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	struct c4iw_pd *php = to_c4iw_pd(pd);
 	struct ib_device *ibdev = pd->device;
@@ -220,7 +219,7 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
 
 	php->pdid = pdid;
 	php->rhp = rhp;
-	if (context) {
+	if (udata) {
 		struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};
 
 		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 305c362ef5c6..9caf35061721 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -302,7 +302,6 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
 
 struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    const struct ib_cq_init_attr *attr,
-				    struct ib_ucontext *context,
 				    struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
@@ -314,6 +313,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	int vector = attr->comp_vector;
 	int cq_entries = attr->cqe;
 	int ret;
+	struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct hns_roce_ucontext, ibucontext);
 
 	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
 		dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -332,7 +333,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	hr_cq->ib_cq.cqe = cq_entries - 1;
 	spin_lock_init(&hr_cq->lock);
 
-	if (context) {
+	if (udata) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
 			dev_err(dev, "Failed to copy_from_udata.\n");
 			ret = -EFAULT;
@@ -350,8 +351,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 
 		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
 		    (udata->outlen >= sizeof(resp))) {
-			ret = hns_roce_db_map_user(to_hr_ucontext(context),
-						   udata, ucmd.db_addr,
-						   &hr_cq->db);
+			ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
+						   &hr_cq->db);
 			if (ret) {
 				dev_err(dev, "cq record doorbell map failed!\n");
@@ -362,7 +362,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		}
 
 		/* Get user space parameters */
-		uar = &to_hr_ucontext(context)->uar;
+		uar = &context->uar;
 	} else {
 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
 			ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
@@ -401,7 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	 * problems if tptr is set to zero here, so we initialze it in user
 	 * space.
 	 */
-	if (!context && hr_cq->tptr_addr)
+	if (!udata && hr_cq->tptr_addr)
 		*hr_cq->tptr_addr = 0;
 
 	/* Get created cq handler and carry out event */
@@ -409,7 +409,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	hr_cq->event = hns_roce_ib_cq_event;
 	hr_cq->cq_depth = cq_entries;
 
-	if (context) {
+	if (udata) {
 		resp.cqn = hr_cq->cqn;
 		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (ret)
@@ -422,21 +422,20 @@ err_cqc:
 	hns_roce_free_cq(hr_dev, hr_cq);
 
 err_dbmap:
-	if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+	if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
 	    (udata->outlen >= sizeof(resp)))
-		hns_roce_db_unmap_user(to_hr_ucontext(context),
-				       &hr_cq->db);
+		hns_roce_db_unmap_user(context, &hr_cq->db);
 
 err_mtt:
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-	if (context)
+	if (udata)
 		ib_umem_release(hr_cq->umem);
 	else
 		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
 					hr_cq->ib_cq.cqe);
 
 err_db:
-	if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
+	if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
 		hns_roce_free_db(hr_dev, &hr_cq->db);
 
 err_cq:
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 780a7ba204db..b23b13f06d58 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1112,8 +1112,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
 int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 
-int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
-		      struct ib_udata *udata);
+int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
@@ -1177,7 +1176,6 @@ int to_hr_qp_type(int qp_type);
 
 struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    const struct ib_cq_init_attr *attr,
-				    struct ib_ucontext *context,
 				    struct ib_udata *udata);
 
 int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 1863516f6be9..98c6a41edefd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -730,7 +730,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	/* Reserved cq for loop qp */
 	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
 	cq_init_attr.comp_vector	= 0;
-	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
+	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL);
 	if (IS_ERR(cq)) {
 		dev_err(dev, "Create cq for reserved loop qp failed!");
 		return -ENOMEM;
@@ -749,7 +749,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		goto alloc_mem_failed;
 
 	pd->device  = ibdev;
-	ret = hns_roce_alloc_pd(pd, NULL, NULL);
+	ret = hns_roce_alloc_pd(pd, NULL);
 	if (ret)
 		goto alloc_pd_failed;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 504e6e466d72..813401384d78 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -57,8 +57,7 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
 	hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
 }
 
-int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		      struct ib_udata *udata)
+int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ib_dev = ibpd->device;
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
@@ -72,7 +71,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 		return ret;
 	}
 
-	if (context) {
+	if (udata) {
 		struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};
 
 		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index fcb9e2448a49..7bf7fe854464 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -291,18 +291,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
 /**
  * i40iw_alloc_pd - allocate protection domain
  * @pd: PD pointer
- * @context: user context created during alloc
  * @udata: user data
  */
-static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
-			  struct ib_udata *udata)
+static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	struct i40iw_pd *iwpd = to_iwpd(pd);
 	struct i40iw_device *iwdev = to_iwdev(pd->device);
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_alloc_pd_resp uresp;
 	struct i40iw_sc_pd *sc_pd;
-	struct i40iw_ucontext *ucontext;
 	u32 pd_id = 0;
 	int err;
 
@@ -318,8 +315,9 @@ static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
 
 	sc_pd = &iwpd->sc_pd;
 
-	if (context) {
-		ucontext = to_ucontext(context);
+	if (udata) {
+		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
+			udata, struct i40iw_ucontext, ibucontext);
 		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
 		memset(&uresp, 0, sizeof(uresp));
 		uresp.pd_id = pd_id;
@@ -1091,12 +1089,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
  * i40iw_create_cq - create cq
  * @ibdev: device pointer from stack
  * @attr: attributes for cq
- * @context: user context created during alloc
  * @udata: user data
  */
 static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 				     const struct ib_cq_init_attr *attr,
-				     struct ib_ucontext *context,
 				     struct ib_udata *udata)
 {
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
@@ -1146,14 +1142,14 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 	info.ceq_id_valid = true;
 	info.ceqe_mask = 1;
 	info.type = I40IW_CQ_TYPE_IWARP;
-	if (context) {
-		struct i40iw_ucontext *ucontext;
+	if (udata) {
+		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
+			udata, struct i40iw_ucontext, ibucontext);
 		struct i40iw_create_cq_req req;
 		struct i40iw_cq_mr *cqmr;
 
 		memset(&req, 0, sizeof(req));
 		iwcq->user_mode = true;
-		ucontext = to_ucontext(context);
 		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
 			err_code = -EFAULT;
 			goto cq_free_resources;
@@ -1223,7 +1219,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		goto cq_free_resources;
 	}
 
-	if (context) {
+	if (udata) {
 		struct i40iw_create_cq_resp resp;
 
 		memset(&resp, 0, sizeof(resp));
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5403a1ff7cc2..022a0b4ea452 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -174,7 +174,6 @@ err_buf:
 #define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -184,6 +183,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	struct mlx4_uar *uar;
 	void *buf_addr;
 	int err;
+	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx4_ib_ucontext, ibucontext);
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);
@@ -205,7 +206,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&cq->send_qp_list);
 	INIT_LIST_HEAD(&cq->recv_qp_list);
 
-	if (context) {
+	if (udata) {
 		struct mlx4_ib_create_cq ucmd;
 
 		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -219,12 +220,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 		if (err)
 			goto err_cq;
 
-		err = mlx4_ib_db_map_user(to_mucontext(context), udata,
-					  ucmd.db_addr, &cq->db);
+		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
 		if (err)
 			goto err_mtt;
 
-		uar = &to_mucontext(context)->uar;
+		uar = &context->uar;
 		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
 	} else {
 		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
@@ -249,21 +249,21 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	if (dev->eq_table)
 		vector = dev->eq_table[vector % ibdev->num_comp_vectors];
 
-	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-			    cq->db.dma, &cq->mcq, vector, 0,
+	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
+			    &cq->mcq, vector, 0,
 			    !!(cq->create_flags &
 			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
-			    buf_addr, !!context);
+			    buf_addr, !!udata);
 	if (err)
 		goto err_dbmap;
 
-	if (context)
+	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
 	else
 		cq->mcq.comp = mlx4_ib_cq_comp;
 	cq->mcq.event = mlx4_ib_cq_event;
 
-	if (context)
+	if (udata)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
 			err = -EFAULT;
 			goto err_cq_free;
@@ -275,19 +275,19 @@ err_cq_free:
 	mlx4_cq_free(dev->dev, &cq->mcq);
 
 err_dbmap:
-	if (context)
-		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+	if (udata)
+		mlx4_ib_db_unmap_user(context, &cq->db);
 
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
 
-	if (context)
+	if (udata)
 		ib_umem_release(cq->umem);
 	else
 		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
 
 err_db:
-	if (!context)
+	if (!udata)
 		mlx4_db_free(dev->dev, &cq->db);
 
 err_cq:
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 3aab71b29ce8..0f390351cef0 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/slab.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "mlx4_ib.h"
 
@@ -41,12 +42,13 @@ struct mlx4_ib_user_db_page {
 	int			refcnt;
 };
 
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
-			struct ib_udata *udata, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
 			struct mlx4_db *db)
 {
 	struct mlx4_ib_user_db_page *page;
 	int err = 0;
+	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx4_ib_ucontext, ibucontext);
 
 	mutex_lock(&context->db_page_mutex);
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e50f9de71119..952b1bac46db 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1177,8 +1177,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	}
 }
 
-static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-			    struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct mlx4_ib_pd *pd = to_mpd(ibpd);
 	struct ib_device *ibdev = ibpd->device;
@@ -1188,7 +1187,7 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	if (err)
 		return err;
 
-	if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
 		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
 		return -EFAULT;
 	}
@@ -1201,7 +1200,6 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 }
 
 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
-					  struct ib_ucontext *context,
 					  struct ib_udata *udata)
 {
 	struct mlx4_ib_xrcd *xrcd;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 58112b59cc7c..79143848b560 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -723,8 +723,7 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
-			struct ib_udata *udata, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
 			struct mlx4_db *db);
 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
 
@@ -746,7 +745,6 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
 				struct ib_udata *udata);
 int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 99ceffe5cfec..364e16b5f8e1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1041,11 +1041,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err_mtt;
 
 		if (qp_has_rq(init_attr)) {
-			err = mlx4_ib_db_map_user(
-				context, udata,
-				(src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
+			err = mlx4_ib_db_map_user(udata,
+						  (src == MLX4_IB_QP_SRC) ?
+						  ucmd.qp.db_addr :
 				ucmd.wq.db_addr,
 				&qp->db);
 			if (err)
 				goto err_mtt;
 		}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 2a20205d1662..94c3c334a672 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -131,8 +131,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		err = mlx4_ib_db_map_user(ucontext, udata, ucmd.db_addr,
-					  &srq->db);
+		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
 		if (err)
 			goto err_mtt;
 	} else {
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5bed098ccdef..2e2e65f00257 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -679,8 +679,7 @@ static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
 }
 
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
-			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
-			  int entries, u32 **cqb,
+			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
 	struct mlx5_ib_create_cq ucmd = {};
@@ -691,6 +690,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	int ncont;
 	void *cqc;
 	int err;
+	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx5_ib_ucontext, ibucontext);
 
 	ucmdlen = udata->inlen < sizeof(ucmd) ?
 		  (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
@@ -715,8 +716,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		return err;
 	}
 
-	err = mlx5_ib_db_map_user(to_mucontext(context), udata, ucmd.db_addr,
-				  &cq->db);
+	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
 	if (err)
 		goto err_umem;
 
@@ -740,7 +740,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	MLX5_SET(cqc, cqc, log_page_size,
 		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-	*index = to_mucontext(context)->bfregi.sys_pages[0];
+	*index = context->bfregi.sys_pages[0];
 
 	if (ucmd.cqe_comp_en == 1) {
 		int mini_cqe_format;
@@ -782,14 +782,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
 	}
 
-	MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid);
+	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
 	return 0;
 
 err_cqb:
 	kvfree(*cqb);
 
 err_db:
-	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+	mlx5_ib_db_unmap_user(context, &cq->db);
 
 err_umem:
 	ib_umem_release(cq->buf.umem);
@@ -886,7 +886,6 @@ static void notify_soft_wc_handler(struct work_struct *work)
 
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -927,8 +926,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&cq->list_recv_qp);
 
 	if (udata) {
-		err = create_cq_user(dev, udata, context, cq, entries,
-				     &cqb, &cqe_size, &index, &inlen);
+		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
+				     &index, &inlen);
 		if (err)
 			goto err_create;
 	} else {
@@ -965,7 +964,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 
 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
 	cq->mcq.irqn = irqn;
-	if (context)
+	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else
 		cq->mcq.comp  = mlx5_ib_cq_comp;
@@ -973,7 +972,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 
 	INIT_LIST_HEAD(&cq->wc_list);
 
-	if (context)
+	if (udata)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
 			err = -EFAULT;
 			goto err_cmd;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 769a5952a0f6..f706e1bd40ad 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2341,8 +2341,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 	return 0;
 }
 
-static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-			    struct ib_udata *udata)
+static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
 	struct ib_device *ibdev = ibpd->device;
@@ -2351,8 +2350,10 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
 	u16 uid = 0;
+	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx5_ib_ucontext, ibucontext);
 
-	uid = context ? to_mucontext(context)->devx_uid : 0;
+	uid = context ? context->devx_uid : 0;
 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
 	MLX5_SET(alloc_pd_in, in, uid, uid);
 	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
@@ -2362,7 +2363,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 
 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
 	pd->uid = uid;
-	if (context) {
+	if (udata) {
 		resp.pdn = pd->pdn;
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
@@ -4749,11 +4750,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	devr->p0->uobject = NULL;
 	atomic_set(&devr->p0->usecnt, 0);
 
-	ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
+	ret = mlx5_ib_alloc_pd(devr->p0, NULL);
 	if (ret)
 		goto error0;
 
-	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
+	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
 	if (IS_ERR(devr->c0)) {
 		ret = PTR_ERR(devr->c0);
 		goto error1;
@@ -4765,7 +4766,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	devr->c0->cq_context    = NULL;
 	atomic_set(&devr->c0->usecnt, 0);
 
-	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
 	if (IS_ERR(devr->x0)) {
 		ret = PTR_ERR(devr->x0);
 		goto error2;
@@ -4776,7 +4777,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	mutex_init(&devr->x0->tgt_qp_mutex);
 	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
 
-	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
 	if (IS_ERR(devr->x1)) {
 		ret = PTR_ERR(devr->x1);
 		goto error3;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e45f59b0cc52..f7314d78aafd 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1083,7 +1083,6 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
 			      void *buffer, int buflen, size_t *bc);
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
 				struct ib_udata *udata);
 int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
@@ -1123,8 +1122,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			struct ib_mad_hdr *out, size_t *out_mad_size,
 			u16 *out_mad_pkey_index);
 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
-				   struct ib_ucontext *context,
 				   struct ib_udata *udata);
 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 940ac1caa590..3470a9c496b1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -5632,8 +5632,7 @@ out:
 }
 
 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
-				   struct ib_ucontext *context,
 				   struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_xrcd *xrcd;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9e4efd58c119..9a77374a327b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -363,18 +363,17 @@ static int mthca_mmap_uar(struct ib_ucontext *context,
 	return 0;
 }
 
-static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-			  struct ib_udata *udata)
+static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ibpd->device;
 	struct mthca_pd *pd = to_mpd(ibpd);
 	int err;
 
-	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
+	err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
 	if (err)
 		return err;
 
-	if (context) {
+	if (udata) {
 		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
 			mthca_pd_free(to_mdev(ibdev), pd);
 			return -EFAULT;
@@ -634,7 +633,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 
 static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
 				     const struct ib_cq_init_attr *attr,
-				     struct ib_ucontext *context,
 				     struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -642,6 +640,8 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
 	struct mthca_cq *cq;
 	int nent;
 	int err;
+	struct mthca_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mthca_ucontext, ibucontext);
 
 	if (attr->flags)
 		return ERR_PTR(-EINVAL);
@@ -649,19 +649,19 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
 	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
 		return ERR_PTR(-EINVAL);
 
-	if (context) {
+	if (udata) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
 			return ERR_PTR(-EFAULT);
 
-		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-					to_mucontext(context)->db_tab,
-					ucmd.set_db_index, ucmd.set_db_page);
+		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
+					context->db_tab, ucmd.set_db_index,
+					ucmd.set_db_page);
 		if (err)
 			return ERR_PTR(err);
 
-		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-					to_mucontext(context)->db_tab,
-					ucmd.arm_db_index, ucmd.arm_db_page);
+		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
+					context->db_tab, ucmd.arm_db_index,
+					ucmd.arm_db_page);
 		if (err)
 			goto err_unmap_set;
 	}
@@ -672,7 +672,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
 		goto err_unmap_arm;
 	}
 
-	if (context) {
+	if (udata) {
 		cq->buf.mr.ibmr.lkey = ucmd.lkey;
 		cq->set_ci_db_index  = ucmd.set_db_index;
 		cq->arm_db_index     = ucmd.arm_db_index;
@@ -681,14 +681,13 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
 	for (nent = 1; nent <= entries; nent <<= 1)
 		; /* nothing */
 
-	err = mthca_init_cq(to_mdev(ibdev), nent,
-			    context ? to_mucontext(context) : NULL,
-			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
+	err = mthca_init_cq(to_mdev(ibdev), nent, context,
+			    udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
 			    cq);
 	if (err)
 		goto err_free;
 
-	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+	if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
 		mthca_free_cq(to_mdev(ibdev), cq);
 		err = -EFAULT;
 		goto err_free;
@@ -702,14 +701,14 @@ err_free:
 	kfree(cq);
 
 err_unmap_arm:
-	if (context)
-		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-				    to_mucontext(context)->db_tab, ucmd.arm_db_index);
+	if (udata)
+		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
+				    context->db_tab, ucmd.arm_db_index);
 
 err_unmap_set:
-	if (context)
-		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-				    to_mucontext(context)->db_tab, ucmd.set_db_index);
+	if (udata)
+		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
+				    context->db_tab, ucmd.set_db_index);
 
 	return ERR_PTR(err);
 }
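
The mthca conversion above is the template every driver below repeats:
the driver-private ucontext is derived from the ib_udata itself, and
"udata != NULL" replaces "context != NULL" as the user-vs-kernel test.
A minimal sketch of that pattern, using hypothetical mydrv_* names;
only rdma_udata_to_drv_context() and the embedded ibucontext member
come from the hunks themselves:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct mydrv_ucontext {
	struct ib_ucontext ibucontext;	/* embedded base, as in mthca */
	int db_index;			/* hypothetical per-context state */
};

static int mydrv_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	/* The helper yields NULL for a NULL udata, which is what lets
	 * the hunks above compute this unconditionally and then only
	 * dereference it under "if (udata)".
	 */
	struct mydrv_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct mydrv_ucontext, ibucontext);

	if (udata)	/* user verb: uctx is the caller's context */
		return uctx->db_index >= 0 ? 0 : -EINVAL;

	return 0;	/* kernel verb: no ucontext exists */
}
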
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 244255b1e940..a3b5e8eecb98 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -640,22 +640,24 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 /**
  * nes_alloc_pd
  */
-static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
-			struct ib_udata *udata)
+static int nes_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = pd->device;
 	struct nes_pd *nespd = to_nespd(pd);
 	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
-	struct nes_ucontext *nesucontext;
 	struct nes_alloc_pd_resp uresp;
 	u32 pd_num = 0;
 	int err;
+	struct nes_ucontext *nesucontext = rdma_udata_to_drv_context(
+		udata, struct nes_ucontext, ibucontext);
 
-	nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
-			nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
-			netdev_refcnt_read(nesvnic->netdev));
+	nes_debug(
+		NES_DBG_PD,
+		"nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
+		nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev,
+		&nesucontext->ibucontext, netdev_refcnt_read(nesvnic->netdev));
 
 	err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
 			nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);
@@ -667,8 +669,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
 
 	nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
 
-	if (context) {
-		nesucontext = to_nesucontext(context);
+	if (udata) {
 		nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
 				NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
 		nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",
@@ -1375,7 +1376,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
  */
 static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 				   const struct ib_cq_init_attr *attr,
-				   struct ib_ucontext *context,
 				   struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -1420,9 +1420,10 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 	nescq->hw_cq.cq_number = cq_num;
 	nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
 
+	if (udata) {
+		struct nes_ucontext *nes_ucontext = rdma_udata_to_drv_context(
+			udata, struct nes_ucontext, ibucontext);
 
-	if (context) {
-		nes_ucontext = to_nesucontext(context);
 		if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
 			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
 			kfree(nescq);
@@ -1489,7 +1490,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 	cqp_request = nes_get_cqp_request(nesdev);
 	if (cqp_request == NULL) {
 		nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
-		if (!context)
+		if (!udata)
 			pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
 					nescq->hw_cq.cq_pbase);
 		else {
@@ -1518,7 +1519,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 			if (nesadapter->free_4kpbl == 0) {
 				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 				nes_free_cqp_request(nesdev, cqp_request);
-				if (!context)
+				if (!udata)
 					pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
 							nescq->hw_cq.cq_pbase);
 				else {
@@ -1540,7 +1541,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 			if (nesadapter->free_256pbl == 0) {
 				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 				nes_free_cqp_request(nesdev, cqp_request);
-				if (!context)
+				if (!udata)
 					pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
 							nescq->hw_cq.cq_pbase);
 				else {
@@ -1566,7 +1567,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
 			(nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
 
-	if (context) {
+	if (udata) {
 		if (pbl_entries != 1)
 			u64temp = (u64)nespbl->pbl_pbase;
 		else
@@ -1597,7 +1598,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 			nescq->hw_cq.cq_number, ret);
 	if ((!ret) || (cqp_request->major_code)) {
 		nes_put_cqp_request(nesdev, cqp_request);
-		if (!context)
+		if (!udata)
 			pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
 					nescq->hw_cq.cq_pbase);
 		else {
@@ -1611,7 +1612,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 	}
 	nes_put_cqp_request(nesdev, cqp_request);
 
-	if (context) {
+	if (udata) {
 		/* free the nespbl */
 		pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
 				nespbl->pbl_pbase);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b8f891660516..cf7aeb963dce 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -47,6 +47,7 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "ocrdma.h"
 #include "ocrdma_hw.h"
@@ -367,6 +368,16 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
 	return status;
 }
 
+/*
+ * NOTE:
+ *
+ * ocrdma_ucontext must be used here because this function is also
+ * called from ocrdma_alloc_ucontext, where ib_udata does not carry a
+ * valid ib_ucontext pointer. ib_uverbs_get_context does not call the
+ * uobj_{alloc|get_xxx} helpers that store the ib_ucontext in the
+ * uverbs_attr_bundle wrapping the ib_udata, so ib_udata does NOT
+ * imply a valid ib_ucontext here!
+ */
 static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
 			    struct ocrdma_ucontext *uctx,
 			    struct ib_udata *udata)
@@ -593,7 +604,6 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 }
 
 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
-				struct ib_ucontext *ib_ctx,
 				struct ib_udata *udata)
 {
 	int status;
@@ -601,7 +611,8 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
 	u64 dpp_page_addr = 0;
 	u32 db_page_size;
 	struct ocrdma_alloc_pd_uresp rsp;
-	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 
 	memset(&rsp, 0, sizeof(rsp));
 	rsp.id = pd->id;
@@ -639,18 +650,17 @@ dpp_map_err:
 	return status;
 }
 
-int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		    struct ib_udata *udata)
+int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ibpd->device;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
 	struct ocrdma_pd *pd;
-	struct ocrdma_ucontext *uctx = NULL;
 	int status;
 	u8 is_uctx_pd = false;
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 
-	if (udata && context) {
-		uctx = get_ocrdma_ucontext(context);
+	if (udata) {
 		pd = ocrdma_get_ucontext_pd(uctx);
 		if (pd) {
 			is_uctx_pd = true;
@@ -664,8 +674,8 @@ int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	goto exit;
 
 pd_mapping:
-	if (udata && context) {
-		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
+	if (udata) {
+		status = ocrdma_copy_pd_uresp(dev, pd, udata);
 		if (status)
 			goto err;
 	}
@@ -946,13 +956,17 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
-				struct ib_udata *udata,
-				struct ib_ucontext *ib_ctx)
+				struct ib_udata *udata)
 {
 	int status;
-	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 	struct ocrdma_create_cq_uresp uresp;
 
+	/* this must be user flow! */
+	if (!udata)
+		return -EINVAL;
+
 	memset(&uresp, 0, sizeof(uresp));
 	uresp.cq_id = cq->id;
 	uresp.page_size = PAGE_ALIGN(cq->len);
@@ -983,13 +997,13 @@ err:
 
 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
-			       struct ib_ucontext *ib_ctx,
 			       struct ib_udata *udata)
 {
 	int entries = attr->cqe;
 	struct ocrdma_cq *cq;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
-	struct ocrdma_ucontext *uctx = NULL;
+	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+		udata, struct ocrdma_ucontext, ibucontext);
 	u16 pd_id = 0;
 	int status;
 	struct ocrdma_create_cq_ureq ureq;
@@ -1011,18 +1025,16 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&cq->sq_head);
 	INIT_LIST_HEAD(&cq->rq_head);
 
-	if (ib_ctx) {
-		uctx = get_ocrdma_ucontext(ib_ctx);
+	if (udata)
 		pd_id = uctx->cntxt_pd->id;
-	}
 
 	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
 	if (status) {
 		kfree(cq);
 		return ERR_PTR(status);
 	}
-	if (ib_ctx) {
-		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
+	if (udata) {
+		status = ocrdma_copy_cq_uresp(dev, cq, udata);
 		if (status)
 			goto ctx_err;
 	}
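
The NOTE added to _ocrdma_alloc_pd above deserves emphasis:
rdma_udata_to_drv_context() is only trustworthy after the core has
stored the ib_ucontext in the uverbs_attr_bundle wrapping the udata,
which has not yet happened while the ucontext itself is being created.
A sketch of the two call paths under that assumption; every mydrv_*
name is illustrative, not part of the patch:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct mydrv_ucontext {
	struct ib_ucontext ibucontext;
};

static int _mydrv_alloc_pd(struct mydrv_ucontext *uctx,
			   struct ib_udata *udata)
{
	return uctx ? 0 : -EINVAL;	/* placeholder body */
}

/* Path 1: ucontext creation. No valid ib_ucontext sits behind udata
 * yet, so the half-built uctx is passed down explicitly.
 */
static int mydrv_alloc_ucontext(struct ib_ucontext *ibctx,
				struct ib_udata *udata)
{
	struct mydrv_ucontext *uctx =
		container_of(ibctx, struct mydrv_ucontext, ibucontext);

	return _mydrv_alloc_pd(uctx, udata);
}

/* Path 2: ordinary PD allocation. The bundle is populated, so uctx
 * may safely be derived from udata before delegating.
 */
static int mydrv_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mydrv_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct mydrv_ucontext, ibucontext);

	return _mydrv_alloc_pd(uctx, udata);
}
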
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 3636cbcbcaa4..dfdebe4e48e6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -69,13 +69,11 @@ void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
 
 int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
 
-int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
-		    struct ib_udata *udata);
+int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 
 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
-			       struct ib_ucontext *ib_ctx,
 			       struct ib_udata *udata);
 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4cd16ad16430..44ab86718c2f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -42,6 +42,7 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include <linux/qed/common_hsi.h>
 #include "qedr_hsi_rdma.h"
@@ -436,8 +437,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			  vma->vm_page_prot);
 }
 
-int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		  struct ib_udata *udata)
+int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ibpd->device;
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -446,7 +446,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	int rc;
 
 	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
-		 (udata && context) ? "User Lib" : "Kernel");
+		 udata ? "User Lib" : "Kernel");
 
 	if (!dev->rdma_ctx) {
 		DP_ERR(dev, "invalid RDMA context\n");
@@ -459,10 +459,12 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 
 	pd->pd_id = pd_id;
 
-	if (udata && context) {
+	if (udata) {
 		struct qedr_alloc_pd_uresp uresp = {
 			.pd_id = pd_id,
 		};
+		struct qedr_ucontext *context = rdma_udata_to_drv_context(
+			udata, struct qedr_ucontext, ibucontext);
 
 		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 		if (rc) {
@@ -471,7 +473,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 			return rc;
 		}
 
-		pd->uctx = get_qedr_ucontext(context);
+		pd->uctx = context;
 		pd->uctx->pd = pd;
 	}
 
@@ -816,9 +818,10 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 
 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+			     struct ib_udata *udata)
 {
-	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
+		udata, struct qedr_ucontext, ibucontext);
 	struct qed_rdma_destroy_cq_out_params destroy_oparams;
 	struct qed_rdma_destroy_cq_in_params destroy_iparams;
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -906,7 +909,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 	cq->sig = QEDR_CQ_MAGIC_NUMBER;
 	spin_lock_init(&cq->cq_lock);
 
-	if (ib_ctx) {
+	if (udata) {
 		rc = qedr_copy_cq_uresp(dev, cq, udata);
 		if (rc)
 			goto err3;
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index cd9659ac2aad..46a9828b9777 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -47,13 +47,11 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
 void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
 
 int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
-int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
-		  struct ib_udata *udata);
+int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 
 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *ib_ctx,
 			     struct ib_udata *udata);
 int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index e282eea8ecce..e9352750e029 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -447,8 +447,7 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 	return 0;
 }
 
-int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		      struct ib_udata *udata)
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct usnic_ib_pd *pd = to_upd(ibpd);
 	void *umem_pd;
@@ -590,7 +589,6 @@ out_unlock:
 
 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
 				 const struct ib_cq_init_attr *attr,
-				 struct ib_ucontext *context,
 				 struct ib_udata *udata)
 {
 	struct ib_cq *cq;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 349c8dc13a12..028f322f8e9b 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -50,8 +50,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 		       union ib_gid *gid);
 int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			u16 *pkey);
-int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		      struct ib_udata *udata);
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
 				 struct ib_qp_init_attr *init_attr,
@@ -61,7 +60,6 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
 				 const struct ib_cq_init_attr *attr,
-				 struct ib_ucontext *context,
 				 struct ib_udata *udata);
 int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 5ba278324134..d7deb19a2800 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -49,6 +49,7 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "pvrdma.h"
 
@@ -93,7 +94,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
  * pvrdma_create_cq - create completion queue
  * @ibdev: the device
  * @attr: completion queue attributes
- * @context: user context
  * @udata: user data
  *
  * @return: ib_cq completion queue pointer on success,
@@ -101,7 +101,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
  */
 struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
-			       struct ib_ucontext *context,
 			       struct ib_udata *udata)
 {
 	int entries = attr->cqe;
@@ -116,6 +115,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
 	struct pvrdma_create_cq_resp cq_resp = {0};
 	struct pvrdma_create_cq ucmd;
+	struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct pvrdma_ucontext, ibucontext);
 
 	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
 
@@ -133,7 +134,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	}
 
 	cq->ibcq.cqe = entries;
-	cq->is_kernel = !context;
+	cq->is_kernel = !udata;
 
 	if (!cq->is_kernel) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -185,8 +186,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
 	cmd->nchunks = npages;
-	cmd->ctx_handle = (context) ?
-			(u64)to_vucontext(context)->ctx_handle : 0;
+	cmd->ctx_handle = context ? context->ctx_handle : 0;
 	cmd->cqe = entries;
 	cmd->pdir_dma = cq->pdir.dir_dma;
 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
@@ -204,7 +204,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 	if (!cq->is_kernel) {
-		cq->uar = &(to_vucontext(context)->uar);
+		cq->uar = &context->uar;
 
 		/* Copy udata back. */
 		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 19ff6004b477..0302fa3b6c85 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -50,6 +50,7 @@
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/vmw_pvrdma-abi.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "pvrdma.h"
 
@@ -419,13 +420,11 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 /**
  * pvrdma_alloc_pd - allocate protection domain
  * @ibpd: PD pointer
- * @context: user context
  * @udata: user data
  *
  * @return: the ib_pd protection domain pointer on success, otherwise errno.
  */
-int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		    struct ib_udata *udata)
+int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ibpd->device;
 	struct pvrdma_pd *pd = to_vpd(ibpd);
@@ -436,13 +435,15 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
 	struct pvrdma_alloc_pd_resp pd_resp = {0};
 	int ret;
+	struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct pvrdma_ucontext, ibucontext);
 
 	/* Check allowed max pds */
 	if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
 		return -ENOMEM;
 
 	cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
-	cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
+	cmd->ctx_handle = context ? context->ctx_handle : 0;
 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
 	if (ret < 0) {
 		dev_warn(&dev->pdev->dev,
@@ -451,12 +452,12 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 		goto err;
 	}
 
-	pd->privileged = !context;
+	pd->privileged = !udata;
 	pd->pd_handle = resp->pd_handle;
 	pd->pdn = resp->pd_handle;
 	pd_resp.pdn = resp->pd_handle;
 
-	if (context) {
+	if (udata) {
 		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
 			dev_warn(&dev->pdev->dev,
 				 "failed to copy back protection domain\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 2c8ba5bf8d0f..562b70e70e79 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -398,8 +398,7 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
 int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
 void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
-int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
-		    struct ib_udata *udata);
+int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -412,7 +411,6 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		     int sg_nents, unsigned int *sg_offset);
 struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
-			       struct ib_ucontext *context,
 			       struct ib_udata *udata);
 int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6f7ff2384506..a06e6da7a026 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -168,7 +168,6 @@ static void send_complete(struct work_struct *work)
  * rvt_create_cq - create a completion queue
  * @ibdev: the device this completion queue is attached to
  * @attr: creation attributes
- * @context: unused by the QLogic_IB driver
  * @udata: user data for libibverbs.so
  *
  * Called by ib_create_cq() in the generic verbs code.
@@ -178,7 +177,6 @@ static void send_complete(struct work_struct *work)
178 */ 177 */
179struct ib_cq *rvt_create_cq(struct ib_device *ibdev, 178struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
180 const struct ib_cq_init_attr *attr, 179 const struct ib_cq_init_attr *attr,
181 struct ib_ucontext *context,
182 struct ib_udata *udata) 180 struct ib_udata *udata)
183{ 181{
184 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 182 struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
@@ -232,7 +230,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 	if (udata && udata->outlen >= sizeof(__u64)) {
 		int err;
 
-		cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
+		cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc);
 		if (!cq->ip) {
 			ret = ERR_PTR(-ENOMEM);
 			goto bail_wc;
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index e42661ecdef8..3ad6faf18ecb 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -53,7 +53,6 @@
 
 struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 			    const struct ib_cq_init_attr *attr,
-			    struct ib_ucontext *context,
 			    struct ib_udata *udata);
 int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 6b712eecbd37..652f4a7efc1b 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -49,6 +49,7 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <asm/pgtable.h>
+#include <rdma/uverbs_ioctl.h>
 #include "mmap.h"
 
 /**
@@ -150,18 +151,19 @@ done:
  * rvt_create_mmap_info - allocate information for hfi1_mmap
  * @rdi: rvt dev struct
  * @size: size in bytes to map
- * @context: user context
+ * @udata: user data (must be valid!)
  * @obj: opaque pointer to a cq, wq etc
  *
  * Return: rvt_mmap struct on success
  */
-struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
-					   u32 size,
-					   struct ib_ucontext *context,
-					   void *obj)
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
+					   struct ib_udata *udata, void *obj)
 {
 	struct rvt_mmap_info *ip;
 
+	if (!udata)
+		return ERR_PTR(-EINVAL);
+
 	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
 	if (!ip)
 		return ip;
@@ -177,7 +179,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
 
 	INIT_LIST_HEAD(&ip->pending_mmaps);
 	ip->size = size;
-	ip->context = context;
+	ip->context =
+		container_of(udata, struct uverbs_attr_bundle, driver_udata)
+			->context;
 	ip->obj = obj;
 	kref_init(&ip->ref);
 
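
Where a driver needs the ib_ucontext itself rather than its private
wrapper, as rdmavt does here (and rxe does further down), the patch
walks back from the udata to the enclosing uverbs_attr_bundle. A
sketch of just that step, assuming, as the hunk above does, that the
udata handed in is the bundle's driver_udata member:

#include <rdma/uverbs_ioctl.h>

static struct ib_ucontext *udata_to_ucontext(struct ib_udata *udata)
{
	/* Valid only for udata handed in by uverbs; the !udata guard
	 * added above rejects kernel callers before reaching this.
	 */
	return container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
}
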
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
index fab0e7b1daf9..02466c40bc1e 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.h
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -53,10 +53,8 @@
 void rvt_mmap_init(struct rvt_dev_info *rdi);
 void rvt_release_mmap_info(struct kref *ref);
 int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
-					   u32 size,
-					   struct ib_ucontext *context,
-					   void *obj);
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
+					   struct ib_udata *udata, void *obj);
 void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
 			  u32 size, void *obj);
 
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
index e84341282374..a403718f0b5e 100644
--- a/drivers/infiniband/sw/rdmavt/pd.c
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -51,15 +51,13 @@
 /**
  * rvt_alloc_pd - allocate a protection domain
  * @ibpd: PD
- * @context: optional user context
  * @udata: optional user data
  *
  * Allocate and keep track of a PD.
  *
  * Return: 0 on success
  */
-int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-		 struct ib_udata *udata)
+int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ibpd->device;
 	struct rvt_dev_info *dev = ib_to_rvt(ibdev);
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
index d0368a625e03..71ba76d72b1d 100644
--- a/drivers/infiniband/sw/rdmavt/pd.h
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -50,8 +50,7 @@
 
 #include <rdma/rdma_vt.h>
 
-int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
-		 struct ib_udata *udata);
+int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 
 #endif          /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e8bba7e56c29..90ed99f4b026 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -957,8 +957,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	size_t sg_list_sz;
 	struct ib_qp *ret = ERR_PTR(-ENOMEM);
 	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
-	struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
-		udata, struct rvt_ucontext, ibucontext);
 	void *priv = NULL;
 	size_t sqsize;
 
@@ -1131,8 +1129,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	} else {
 		u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
 
-		qp->ip = rvt_create_mmap_info(rdi, s,
-					      &ucontext->ibucontext,
-					      qp->r_rq.wq);
+		qp->ip = rvt_create_mmap_info(rdi, s, udata,
+					      qp->r_rq.wq);
 		if (!qp->ip) {
 			ret = ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 3090b0935714..21d276eaf15a 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -78,8 +78,6 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 			      struct ib_udata *udata)
 {
 	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
-	struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
-		udata, struct rvt_ucontext, ibucontext);
 	struct rvt_srq *srq;
 	u32 sz;
 	struct ib_srq *ret;
@@ -121,9 +119,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 		int err;
 		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
 
-		srq->ip =
-			rvt_create_mmap_info(dev, s, &ucontext->ibucontext,
-					     srq->rq.wq);
+		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
 		if (!srq->ip) {
 			ret = ERR_PTR(-ENOMEM);
 			goto bail_wq;
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index a57276f2cb84..ad3090131126 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -82,7 +82,7 @@ static void rxe_send_complete(unsigned long data)
 }
 
 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
-		     int comp_vector, struct ib_ucontext *context,
+		     int comp_vector, struct ib_udata *udata,
 		     struct rxe_create_cq_resp __user *uresp)
 {
 	int err;
@@ -94,7 +94,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 		return -ENOMEM;
 	}
 
-	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
+	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
 			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
 	if (err) {
 		vfree(cq->queue->buf);
@@ -115,13 +115,13 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 }
 
 int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
-			struct rxe_resize_cq_resp __user *uresp)
+			struct rxe_resize_cq_resp __user *uresp,
+			struct ib_udata *udata)
 {
 	int err;
 
 	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
-			       sizeof(struct rxe_cqe),
-			       cq->queue->ip ? cq->queue->ip->context : NULL,
+			       sizeof(struct rxe_cqe), udata,
 			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
 	if (!err)
 		cq->ibcq.cqe = cqe;
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 3d8cef836f0d..775c23becaec 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -53,11 +53,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
 		    int cqe, int comp_vector);
 
 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
-		     int comp_vector, struct ib_ucontext *context,
+		     int comp_vector, struct ib_udata *udata,
 		     struct rxe_create_cq_resp __user *uresp);
 
 int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
-			struct rxe_resize_cq_resp __user *uresp);
+			struct rxe_resize_cq_resp __user *uresp,
+			struct ib_udata *udata);
 
 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
 
@@ -91,10 +92,8 @@ struct rxe_mmap_info {
 
 void rxe_mmap_release(struct kref *ref);
 
-struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
-					   u32 size,
-					   struct ib_ucontext *context,
-					   void *obj);
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
+					   struct ib_udata *udata, void *obj);
 
 int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
@@ -224,13 +223,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
 
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
-		      struct ib_srq_init_attr *init,
-		      struct ib_ucontext *context,
+		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp);
 
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
-		      struct rxe_modify_srq_cmd *ucmd);
+		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
 
 void rxe_dealloc(struct ib_device *ib_dev);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index d22431e3a908..48f48122ddcb 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <asm/pgtable.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "rxe.h"
 #include "rxe_loc.h"
@@ -140,13 +141,14 @@ done:
 /*
  * Allocate information for rxe_mmap
  */
-struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
-					   u32 size,
-					   struct ib_ucontext *context,
-					   void *obj)
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
+					   struct ib_udata *udata, void *obj)
 {
 	struct rxe_mmap_info *ip;
 
+	if (!udata)
+		return ERR_PTR(-EINVAL);
+
 	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
 	if (!ip)
 		return NULL;
@@ -165,7 +167,9 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
 
 	INIT_LIST_HEAD(&ip->pending_mmaps);
 	ip->info.size = size;
-	ip->context = context;
+	ip->context =
+		container_of(udata, struct uverbs_attr_bundle, driver_udata)
+			->context;
 	ip->obj = obj;
 	kref_init(&ip->ref);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 09ede70dc1e8..e2c6d1cedf41 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -217,8 +217,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
 }
 
 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
-			   struct ib_qp_init_attr *init,
-			   struct ib_ucontext *context,
+			   struct ib_qp_init_attr *init, struct ib_udata *udata,
 			   struct rxe_create_qp_resp __user *uresp)
 {
 	int err;
@@ -254,7 +253,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	if (!qp->sq.queue)
 		return -ENOMEM;
 
-	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
+	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
 			   qp->sq.queue->buf, qp->sq.queue->buf_size,
 			   &qp->sq.queue->ip);
 
@@ -287,7 +286,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 
 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 			    struct ib_qp_init_attr *init,
-			    struct ib_ucontext *context,
+			    struct ib_udata *udata,
 			    struct rxe_create_qp_resp __user *uresp)
 {
 	int err;
@@ -308,7 +307,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 	if (!qp->rq.queue)
 		return -ENOMEM;
 
-	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
+	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
 			   qp->rq.queue->buf, qp->rq.queue->buf_size,
 			   &qp->rq.queue->ip);
 	if (err) {
@@ -344,8 +343,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	struct rxe_cq *rcq = to_rcq(init->recv_cq);
 	struct rxe_cq *scq = to_rcq(init->send_cq);
 	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
-	struct rxe_ucontext *ucontext =
-		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
 
 	rxe_add_ref(pd);
 	rxe_add_ref(rcq);
@@ -360,11 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 
 	rxe_qp_init_misc(rxe, qp, init);
 
-	err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp);
+	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
 	if (err)
 		goto err1;
 
-	err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp);
+	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
 	if (err)
 		goto err2;
 
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index f84ab4469261..ff92704de32f 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -36,18 +36,15 @@
36#include "rxe_loc.h" 36#include "rxe_loc.h"
37#include "rxe_queue.h" 37#include "rxe_queue.h"
38 38
39int do_mmap_info(struct rxe_dev *rxe, 39int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
40 struct mminfo __user *outbuf, 40 struct ib_udata *udata, struct rxe_queue_buf *buf,
41 struct ib_ucontext *context, 41 size_t buf_size, struct rxe_mmap_info **ip_p)
42 struct rxe_queue_buf *buf,
43 size_t buf_size,
44 struct rxe_mmap_info **ip_p)
45{ 42{
46 int err; 43 int err;
47 struct rxe_mmap_info *ip = NULL; 44 struct rxe_mmap_info *ip = NULL;
48 45
49 if (outbuf) { 46 if (outbuf) {
50 ip = rxe_create_mmap_info(rxe, buf_size, context, buf); 47 ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
51 if (!ip) 48 if (!ip)
52 goto err1; 49 goto err1;
53 50
@@ -153,12 +150,9 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
 	return 0;
 }
 
-int rxe_queue_resize(struct rxe_queue *q,
-		     unsigned int *num_elem_p,
-		     unsigned int elem_size,
-		     struct ib_ucontext *context,
-		     struct mminfo __user *outbuf,
-		     spinlock_t *producer_lock,
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+		     unsigned int elem_size, struct ib_udata *udata,
+		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
 		     spinlock_t *consumer_lock)
 {
 	struct rxe_queue *new_q;
@@ -170,7 +164,7 @@ int rxe_queue_resize(struct rxe_queue *q,
 	if (!new_q)
 		return -ENOMEM;
 
-	err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf,
+	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
 			   new_q->buf_size, &new_q->ip);
 	if (err) {
 		vfree(new_q->buf);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 79ba4b320054..acd0a925481c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -76,12 +76,9 @@ struct rxe_queue {
 	unsigned int index_mask;
 };
 
-int do_mmap_info(struct rxe_dev *rxe,
-		 struct mminfo __user *outbuf,
-		 struct ib_ucontext *context,
-		 struct rxe_queue_buf *buf,
-		 size_t buf_size,
-		 struct rxe_mmap_info **ip_p);
+int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+		 struct ib_udata *udata, struct rxe_queue_buf *buf,
+		 size_t buf_size, struct rxe_mmap_info **ip_p);
 
 void rxe_queue_reset(struct rxe_queue *q);
 
@@ -89,10 +86,8 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size);
 
-int rxe_queue_resize(struct rxe_queue *q,
-		     unsigned int *num_elem_p,
-		     unsigned int elem_size,
-		     struct ib_ucontext *context,
+int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
+		     unsigned int elem_size, struct ib_udata *udata,
 		     struct mminfo __user *outbuf,
 		     /* Protect producers while resizing queue */
 		     spinlock_t *producer_lock,
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index c41a5fee81f7..d8459431534e 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -99,8 +99,7 @@ err1:
 }
 
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
-		      struct ib_srq_init_attr *init,
-		      struct ib_ucontext *context,
+		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp)
 {
 	int err;
@@ -128,7 +127,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 
 	srq->rq.queue = q;
 
-	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
+	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
 			   q->buf_size, &q->ip);
 	if (err) {
 		vfree(q->buf);
@@ -149,7 +148,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
-		      struct rxe_modify_srq_cmd *ucmd)
+		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
 {
 	int err;
 	struct rxe_queue *q = srq->rq.queue;
@@ -163,11 +162,8 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 	mi = u64_to_user_ptr(ucmd->mmap_info_addr);
 
 	err = rxe_queue_resize(q, &attr->max_wr,
-			       rcv_wqe_size(srq->rq.max_sge),
-			       srq->rq.queue->ip ?
-			       srq->rq.queue->ip->context :
-			       NULL,
-			       mi, &srq->rq.producer_lock,
+			       rcv_wqe_size(srq->rq.max_sge), udata, mi,
+			       &srq->rq.producer_lock,
 			       &srq->rq.consumer_lock);
 	if (err)
 		goto err2;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index e625731ae42d..4f581af2ad54 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -176,8 +176,7 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
 	return 0;
 }
 
-static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-			struct ib_udata *udata)
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
@@ -305,8 +304,6 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
 	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_ucontext *ucontext =
-		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
 	struct rxe_srq *srq;
 	struct rxe_create_srq_resp __user *uresp = NULL;
 
@@ -330,7 +327,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
 	rxe_add_ref(pd);
 	srq->pd = pd;
 
-	err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
+	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
 	if (err)
 		goto err2;
 
@@ -366,7 +363,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	if (err)
 		goto err1;
 
-	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
+	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
 	if (err)
 		goto err1;
 
@@ -799,7 +796,6 @@ err1:
 
 static struct ib_cq *rxe_create_cq(struct ib_device *dev,
 				   const struct ib_cq_init_attr *attr,
-				   struct ib_ucontext *context,
 				   struct ib_udata *udata)
 {
 	int err;
@@ -826,8 +822,8 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev,
 		goto err1;
 	}
 
-	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
-			       context, uresp);
+	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
+			       uresp);
 	if (err)
 		goto err2;
 
@@ -866,7 +862,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	if (err)
 		goto err1;
 
-	err = rxe_cq_resize_queue(cq, cqe, uresp);
+	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
 	if (err)
 		goto err1;
 
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 54e48dd36644..0e24f6b6c61d 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2394,8 +2394,7 @@ struct ib_device_ops {
 	void (*dealloc_ucontext)(struct ib_ucontext *context);
 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
-	int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
-			struct ib_udata *udata);
+	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
 	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
 	struct ib_ah *(*create_ah)(struct ib_pd *pd,
 				   struct rdma_ah_attr *ah_attr, u32 flags,
@@ -2421,7 +2420,6 @@ struct ib_device_ops {
 	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
 	struct ib_cq *(*create_cq)(struct ib_device *device,
 				   const struct ib_cq_init_attr *attr,
-				   struct ib_ucontext *context,
 				   struct ib_udata *udata);
 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
@@ -2456,7 +2454,6 @@ struct ib_device_ops {
 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
-				      struct ib_ucontext *ucontext,
 				      struct ib_udata *udata);
 	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
 	struct ib_flow *(*create_flow)(struct ib_qp *qp,