diff options
Diffstat (limited to 'drivers')
72 files changed, 319 insertions, 309 deletions
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index d61e5e1427c2..4797eef549c3 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c | |||
@@ -128,15 +128,17 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) | |||
128 | * @comp_vector: HCA completion vectors for this CQ | 128 | * @comp_vector: HCA completion vectors for this CQ |
129 | * @poll_ctx: context to poll the CQ from. | 129 | * @poll_ctx: context to poll the CQ from. |
130 | * @caller: module owner name. | 130 | * @caller: module owner name. |
131 | * @udata: Valid user data or NULL for kernel object | ||
131 | * | 132 | * |
132 | * This is the proper interface to allocate a CQ for in-kernel users. A | 133 | * This is the proper interface to allocate a CQ for in-kernel users. A |
133 | * CQ allocated with this interface will automatically be polled from the | 134 | * CQ allocated with this interface will automatically be polled from the |
134 | * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id | 135 | * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id |
135 | * to use this CQ abstraction. | 136 | * to use this CQ abstraction. |
136 | */ | 137 | */ |
137 | struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, | 138 | struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, |
138 | int nr_cqe, int comp_vector, | 139 | int nr_cqe, int comp_vector, |
139 | enum ib_poll_context poll_ctx, const char *caller) | 140 | enum ib_poll_context poll_ctx, |
141 | const char *caller, struct ib_udata *udata) | ||
140 | { | 142 | { |
141 | struct ib_cq_init_attr cq_attr = { | 143 | struct ib_cq_init_attr cq_attr = { |
142 | .cqe = nr_cqe, | 144 | .cqe = nr_cqe, |
@@ -193,16 +195,17 @@ out_free_wc: | |||
193 | kfree(cq->wc); | 195 | kfree(cq->wc); |
194 | rdma_restrack_del(&cq->res); | 196 | rdma_restrack_del(&cq->res); |
195 | out_destroy_cq: | 197 | out_destroy_cq: |
196 | cq->device->ops.destroy_cq(cq); | 198 | cq->device->ops.destroy_cq(cq, udata); |
197 | return ERR_PTR(ret); | 199 | return ERR_PTR(ret); |
198 | } | 200 | } |
199 | EXPORT_SYMBOL(__ib_alloc_cq); | 201 | EXPORT_SYMBOL(__ib_alloc_cq_user); |
200 | 202 | ||
201 | /** | 203 | /** |
202 | * ib_free_cq - free a completion queue | 204 | * ib_free_cq - free a completion queue |
203 | * @cq: completion queue to free. | 205 | * @cq: completion queue to free. |
206 | * @udata: User data or NULL for kernel object | ||
204 | */ | 207 | */ |
205 | void ib_free_cq(struct ib_cq *cq) | 208 | void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata) |
206 | { | 209 | { |
207 | int ret; | 210 | int ret; |
208 | 211 | ||
@@ -225,7 +228,7 @@ void ib_free_cq(struct ib_cq *cq) | |||
225 | 228 | ||
226 | kfree(cq->wc); | 229 | kfree(cq->wc); |
227 | rdma_restrack_del(&cq->res); | 230 | rdma_restrack_del(&cq->res); |
228 | ret = cq->device->ops.destroy_cq(cq); | 231 | ret = cq->device->ops.destroy_cq(cq, udata); |
229 | WARN_ON_ONCE(ret); | 232 | WARN_ON_ONCE(ret); |
230 | } | 233 | } |
231 | EXPORT_SYMBOL(ib_free_cq); | 234 | EXPORT_SYMBOL(ib_free_cq_user); |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index ea0bc6885517..fa5ea6529333 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
@@ -240,7 +240,7 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); | |||
240 | void ib_uverbs_event_handler(struct ib_event_handler *handler, | 240 | void ib_uverbs_event_handler(struct ib_event_handler *handler, |
241 | struct ib_event *event); | 241 | struct ib_event *event); |
242 | int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, | 242 | int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, |
243 | enum rdma_remove_reason why); | 243 | enum rdma_remove_reason why, struct ib_udata *udata); |
244 | 244 | ||
245 | int uverbs_dealloc_mw(struct ib_mw *mw); | 245 | int uverbs_dealloc_mw(struct ib_mw *mw); |
246 | void ib_uverbs_detach_umcast(struct ib_qp *qp, | 246 | void ib_uverbs_detach_umcast(struct ib_qp *qp, |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 726275288887..fe63dfd5f1b6 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -439,7 +439,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs) | |||
439 | return uobj_alloc_commit(uobj, attrs); | 439 | return uobj_alloc_commit(uobj, attrs); |
440 | 440 | ||
441 | err_copy: | 441 | err_copy: |
442 | ib_dealloc_pd(pd); | 442 | ib_dealloc_pd_user(pd, &attrs->driver_udata); |
443 | pd = NULL; | 443 | pd = NULL; |
444 | err_alloc: | 444 | err_alloc: |
445 | kfree(pd); | 445 | kfree(pd); |
@@ -643,7 +643,7 @@ err_copy: | |||
643 | } | 643 | } |
644 | 644 | ||
645 | err_dealloc_xrcd: | 645 | err_dealloc_xrcd: |
646 | ib_dealloc_xrcd(xrcd); | 646 | ib_dealloc_xrcd(xrcd, &attrs->driver_udata); |
647 | 647 | ||
648 | err: | 648 | err: |
649 | uobj_alloc_abort(&obj->uobject, attrs); | 649 | uobj_alloc_abort(&obj->uobject, attrs); |
@@ -669,9 +669,8 @@ static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs) | |||
669 | return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs); | 669 | return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs); |
670 | } | 670 | } |
671 | 671 | ||
672 | int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, | 672 | int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, |
673 | struct ib_xrcd *xrcd, | 673 | enum rdma_remove_reason why, struct ib_udata *udata) |
674 | enum rdma_remove_reason why) | ||
675 | { | 674 | { |
676 | struct inode *inode; | 675 | struct inode *inode; |
677 | int ret; | 676 | int ret; |
@@ -681,7 +680,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, | |||
681 | if (inode && !atomic_dec_and_test(&xrcd->usecnt)) | 680 | if (inode && !atomic_dec_and_test(&xrcd->usecnt)) |
682 | return 0; | 681 | return 0; |
683 | 682 | ||
684 | ret = ib_dealloc_xrcd(xrcd); | 683 | ret = ib_dealloc_xrcd(xrcd, udata); |
685 | 684 | ||
686 | if (ib_is_destroy_retryable(ret, why, uobject)) { | 685 | if (ib_is_destroy_retryable(ret, why, uobject)) { |
687 | atomic_inc(&xrcd->usecnt); | 686 | atomic_inc(&xrcd->usecnt); |
@@ -766,7 +765,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) | |||
766 | return uobj_alloc_commit(uobj, attrs); | 765 | return uobj_alloc_commit(uobj, attrs); |
767 | 766 | ||
768 | err_copy: | 767 | err_copy: |
769 | ib_dereg_mr(mr); | 768 | ib_dereg_mr_user(mr, &attrs->driver_udata); |
770 | 769 | ||
771 | err_put: | 770 | err_put: |
772 | uobj_put_obj_read(pd); | 771 | uobj_put_obj_read(pd); |
@@ -2965,7 +2964,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs) | |||
2965 | return uobj_alloc_commit(&obj->uevent.uobject, attrs); | 2964 | return uobj_alloc_commit(&obj->uevent.uobject, attrs); |
2966 | 2965 | ||
2967 | err_copy: | 2966 | err_copy: |
2968 | ib_destroy_wq(wq); | 2967 | ib_destroy_wq(wq, &attrs->driver_udata); |
2969 | err_put_cq: | 2968 | err_put_cq: |
2970 | uobj_put_obj_read(cq); | 2969 | uobj_put_obj_read(cq); |
2971 | err_put_pd: | 2970 | err_put_pd: |
@@ -3461,7 +3460,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, | |||
3461 | return uobj_alloc_commit(&obj->uevent.uobject, attrs); | 3460 | return uobj_alloc_commit(&obj->uevent.uobject, attrs); |
3462 | 3461 | ||
3463 | err_copy: | 3462 | err_copy: |
3464 | ib_destroy_srq(srq); | 3463 | ib_destroy_srq_user(srq, &attrs->driver_udata); |
3465 | 3464 | ||
3466 | err_put: | 3465 | err_put: |
3467 | uobj_put_obj_read(pd); | 3466 | uobj_put_obj_read(pd); |
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index a1b22fca057e..c625f590a8f0 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c | |||
@@ -43,8 +43,9 @@ static int uverbs_free_ah(struct ib_uobject *uobject, | |||
43 | enum rdma_remove_reason why, | 43 | enum rdma_remove_reason why, |
44 | struct uverbs_attr_bundle *attrs) | 44 | struct uverbs_attr_bundle *attrs) |
45 | { | 45 | { |
46 | return rdma_destroy_ah((struct ib_ah *)uobject->object, | 46 | return rdma_destroy_ah_user((struct ib_ah *)uobject->object, |
47 | RDMA_DESTROY_AH_SLEEPABLE); | 47 | RDMA_DESTROY_AH_SLEEPABLE, |
48 | &attrs->driver_udata); | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static int uverbs_free_flow(struct ib_uobject *uobject, | 51 | static int uverbs_free_flow(struct ib_uobject *uobject, |
@@ -97,7 +98,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject, | |||
97 | ib_uverbs_detach_umcast(qp, uqp); | 98 | ib_uverbs_detach_umcast(qp, uqp); |
98 | } | 99 | } |
99 | 100 | ||
100 | ret = ib_destroy_qp(qp); | 101 | ret = ib_destroy_qp_user(qp, &attrs->driver_udata); |
101 | if (ib_is_destroy_retryable(ret, why, uobject)) | 102 | if (ib_is_destroy_retryable(ret, why, uobject)) |
102 | return ret; | 103 | return ret; |
103 | 104 | ||
@@ -133,7 +134,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject, | |||
133 | container_of(uobject, struct ib_uwq_object, uevent.uobject); | 134 | container_of(uobject, struct ib_uwq_object, uevent.uobject); |
134 | int ret; | 135 | int ret; |
135 | 136 | ||
136 | ret = ib_destroy_wq(wq); | 137 | ret = ib_destroy_wq(wq, &attrs->driver_udata); |
137 | if (ib_is_destroy_retryable(ret, why, uobject)) | 138 | if (ib_is_destroy_retryable(ret, why, uobject)) |
138 | return ret; | 139 | return ret; |
139 | 140 | ||
@@ -151,7 +152,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject, | |||
151 | enum ib_srq_type srq_type = srq->srq_type; | 152 | enum ib_srq_type srq_type = srq->srq_type; |
152 | int ret; | 153 | int ret; |
153 | 154 | ||
154 | ret = ib_destroy_srq(srq); | 155 | ret = ib_destroy_srq_user(srq, &attrs->driver_udata); |
155 | if (ib_is_destroy_retryable(ret, why, uobject)) | 156 | if (ib_is_destroy_retryable(ret, why, uobject)) |
156 | return ret; | 157 | return ret; |
157 | 158 | ||
@@ -180,7 +181,7 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject, | |||
180 | return ret; | 181 | return ret; |
181 | 182 | ||
182 | mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex); | 183 | mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex); |
183 | ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why); | 184 | ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, &attrs->driver_udata); |
184 | mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex); | 185 | mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex); |
185 | 186 | ||
186 | return ret; | 187 | return ret; |
@@ -197,7 +198,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject, | |||
197 | if (ret) | 198 | if (ret) |
198 | return ret; | 199 | return ret; |
199 | 200 | ||
200 | ib_dealloc_pd(pd); | 201 | ib_dealloc_pd_user(pd, &attrs->driver_udata); |
201 | return 0; | 202 | return 0; |
202 | } | 203 | } |
203 | 204 | ||
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c index 5664a8f48527..f03506ece016 100644 --- a/drivers/infiniband/core/uverbs_std_types_cq.c +++ b/drivers/infiniband/core/uverbs_std_types_cq.c | |||
@@ -44,7 +44,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject, | |||
44 | container_of(uobject, struct ib_ucq_object, uobject); | 44 | container_of(uobject, struct ib_ucq_object, uobject); |
45 | int ret; | 45 | int ret; |
46 | 46 | ||
47 | ret = ib_destroy_cq(cq); | 47 | ret = ib_destroy_cq_user(cq, &attrs->driver_udata); |
48 | if (ib_is_destroy_retryable(ret, why, uobject)) | 48 | if (ib_is_destroy_retryable(ret, why, uobject)) |
49 | return ret; | 49 | return ret; |
50 | 50 | ||
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c index 50d71522e1cd..c9b68dcf8f5c 100644 --- a/drivers/infiniband/core/uverbs_std_types_dm.c +++ b/drivers/infiniband/core/uverbs_std_types_dm.c | |||
@@ -45,7 +45,7 @@ static int uverbs_free_dm(struct ib_uobject *uobject, | |||
45 | if (ret) | 45 | if (ret) |
46 | return ret; | 46 | return ret; |
47 | 47 | ||
48 | return dm->device->ops.dealloc_dm(dm); | 48 | return dm->device->ops.dealloc_dm(dm, attrs); |
49 | } | 49 | } |
50 | 50 | ||
51 | static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)( | 51 | static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)( |
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c index a74b73f684d4..610d3b9f7654 100644 --- a/drivers/infiniband/core/uverbs_std_types_mr.c +++ b/drivers/infiniband/core/uverbs_std_types_mr.c | |||
@@ -38,7 +38,8 @@ static int uverbs_free_mr(struct ib_uobject *uobject, | |||
38 | enum rdma_remove_reason why, | 38 | enum rdma_remove_reason why, |
39 | struct uverbs_attr_bundle *attrs) | 39 | struct uverbs_attr_bundle *attrs) |
40 | { | 40 | { |
41 | return ib_dereg_mr((struct ib_mr *)uobject->object); | 41 | return ib_dereg_mr_user((struct ib_mr *)uobject->object, |
42 | &attrs->driver_udata); | ||
42 | } | 43 | } |
43 | 44 | ||
44 | static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)( | 45 | static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)( |
@@ -147,7 +148,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( | |||
147 | return 0; | 148 | return 0; |
148 | 149 | ||
149 | err_dereg: | 150 | err_dereg: |
150 | ib_dereg_mr(mr); | 151 | ib_dereg_mr_user(mr, &attrs->driver_udata); |
151 | 152 | ||
152 | return ret; | 153 | return ret; |
153 | } | 154 | } |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 5a5e83f5f0fc..ba9a89df815d 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -316,17 +316,18 @@ EXPORT_SYMBOL(__ib_alloc_pd); | |||
316 | /** | 316 | /** |
317 | * ib_dealloc_pd - Deallocates a protection domain. | 317 | * ib_dealloc_pd - Deallocates a protection domain. |
318 | * @pd: The protection domain to deallocate. | 318 | * @pd: The protection domain to deallocate. |
319 | * @udata: Valid user data or NULL for kernel object | ||
319 | * | 320 | * |
320 | * It is an error to call this function while any resources in the pd still | 321 | * It is an error to call this function while any resources in the pd still |
321 | * exist. The caller is responsible to synchronously destroy them and | 322 | * exist. The caller is responsible to synchronously destroy them and |
322 | * guarantee no new allocations will happen. | 323 | * guarantee no new allocations will happen. |
323 | */ | 324 | */ |
324 | void ib_dealloc_pd(struct ib_pd *pd) | 325 | void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) |
325 | { | 326 | { |
326 | int ret; | 327 | int ret; |
327 | 328 | ||
328 | if (pd->__internal_mr) { | 329 | if (pd->__internal_mr) { |
329 | ret = pd->device->ops.dereg_mr(pd->__internal_mr); | 330 | ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); |
330 | WARN_ON(ret); | 331 | WARN_ON(ret); |
331 | pd->__internal_mr = NULL; | 332 | pd->__internal_mr = NULL; |
332 | } | 333 | } |
@@ -336,10 +337,10 @@ void ib_dealloc_pd(struct ib_pd *pd) | |||
336 | WARN_ON(atomic_read(&pd->usecnt)); | 337 | WARN_ON(atomic_read(&pd->usecnt)); |
337 | 338 | ||
338 | rdma_restrack_del(&pd->res); | 339 | rdma_restrack_del(&pd->res); |
339 | pd->device->ops.dealloc_pd(pd); | 340 | pd->device->ops.dealloc_pd(pd, udata); |
340 | kfree(pd); | 341 | kfree(pd); |
341 | } | 342 | } |
342 | EXPORT_SYMBOL(ib_dealloc_pd); | 343 | EXPORT_SYMBOL(ib_dealloc_pd_user); |
343 | 344 | ||
344 | /* Address handles */ | 345 | /* Address handles */ |
345 | 346 | ||
@@ -930,7 +931,7 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) | |||
930 | } | 931 | } |
931 | EXPORT_SYMBOL(rdma_query_ah); | 932 | EXPORT_SYMBOL(rdma_query_ah); |
932 | 933 | ||
933 | int rdma_destroy_ah(struct ib_ah *ah, u32 flags) | 934 | int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
934 | { | 935 | { |
935 | const struct ib_gid_attr *sgid_attr = ah->sgid_attr; | 936 | const struct ib_gid_attr *sgid_attr = ah->sgid_attr; |
936 | struct ib_pd *pd; | 937 | struct ib_pd *pd; |
@@ -939,7 +940,7 @@ int rdma_destroy_ah(struct ib_ah *ah, u32 flags) | |||
939 | might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE); | 940 | might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE); |
940 | 941 | ||
941 | pd = ah->pd; | 942 | pd = ah->pd; |
942 | ret = ah->device->ops.destroy_ah(ah, flags); | 943 | ret = ah->device->ops.destroy_ah(ah, flags, udata); |
943 | if (!ret) { | 944 | if (!ret) { |
944 | atomic_dec(&pd->usecnt); | 945 | atomic_dec(&pd->usecnt); |
945 | if (sgid_attr) | 946 | if (sgid_attr) |
@@ -948,7 +949,7 @@ int rdma_destroy_ah(struct ib_ah *ah, u32 flags) | |||
948 | 949 | ||
949 | return ret; | 950 | return ret; |
950 | } | 951 | } |
951 | EXPORT_SYMBOL(rdma_destroy_ah); | 952 | EXPORT_SYMBOL(rdma_destroy_ah_user); |
952 | 953 | ||
953 | /* Shared receive queues */ | 954 | /* Shared receive queues */ |
954 | 955 | ||
@@ -1003,7 +1004,7 @@ int ib_query_srq(struct ib_srq *srq, | |||
1003 | } | 1004 | } |
1004 | EXPORT_SYMBOL(ib_query_srq); | 1005 | EXPORT_SYMBOL(ib_query_srq); |
1005 | 1006 | ||
1006 | int ib_destroy_srq(struct ib_srq *srq) | 1007 | int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) |
1007 | { | 1008 | { |
1008 | struct ib_pd *pd; | 1009 | struct ib_pd *pd; |
1009 | enum ib_srq_type srq_type; | 1010 | enum ib_srq_type srq_type; |
@@ -1021,7 +1022,7 @@ int ib_destroy_srq(struct ib_srq *srq) | |||
1021 | if (srq_type == IB_SRQT_XRC) | 1022 | if (srq_type == IB_SRQT_XRC) |
1022 | xrcd = srq->ext.xrc.xrcd; | 1023 | xrcd = srq->ext.xrc.xrcd; |
1023 | 1024 | ||
1024 | ret = srq->device->ops.destroy_srq(srq); | 1025 | ret = srq->device->ops.destroy_srq(srq, udata); |
1025 | if (!ret) { | 1026 | if (!ret) { |
1026 | atomic_dec(&pd->usecnt); | 1027 | atomic_dec(&pd->usecnt); |
1027 | if (srq_type == IB_SRQT_XRC) | 1028 | if (srq_type == IB_SRQT_XRC) |
@@ -1032,7 +1033,7 @@ int ib_destroy_srq(struct ib_srq *srq) | |||
1032 | 1033 | ||
1033 | return ret; | 1034 | return ret; |
1034 | } | 1035 | } |
1035 | EXPORT_SYMBOL(ib_destroy_srq); | 1036 | EXPORT_SYMBOL(ib_destroy_srq_user); |
1036 | 1037 | ||
1037 | /* Queue pairs */ | 1038 | /* Queue pairs */ |
1038 | 1039 | ||
@@ -1111,8 +1112,9 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, | |||
1111 | } | 1112 | } |
1112 | EXPORT_SYMBOL(ib_open_qp); | 1113 | EXPORT_SYMBOL(ib_open_qp); |
1113 | 1114 | ||
1114 | static struct ib_qp *create_xrc_qp(struct ib_qp *qp, | 1115 | static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, |
1115 | struct ib_qp_init_attr *qp_init_attr) | 1116 | struct ib_qp_init_attr *qp_init_attr, |
1117 | struct ib_udata *udata) | ||
1116 | { | 1118 | { |
1117 | struct ib_qp *real_qp = qp; | 1119 | struct ib_qp *real_qp = qp; |
1118 | 1120 | ||
@@ -1134,8 +1136,9 @@ static struct ib_qp *create_xrc_qp(struct ib_qp *qp, | |||
1134 | return qp; | 1136 | return qp; |
1135 | } | 1137 | } |
1136 | 1138 | ||
1137 | struct ib_qp *ib_create_qp(struct ib_pd *pd, | 1139 | struct ib_qp *ib_create_qp_user(struct ib_pd *pd, |
1138 | struct ib_qp_init_attr *qp_init_attr) | 1140 | struct ib_qp_init_attr *qp_init_attr, |
1141 | struct ib_udata *udata) | ||
1139 | { | 1142 | { |
1140 | struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; | 1143 | struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; |
1141 | struct ib_qp *qp; | 1144 | struct ib_qp *qp; |
@@ -1176,7 +1179,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
1176 | qp->port = 0; | 1179 | qp->port = 0; |
1177 | 1180 | ||
1178 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { | 1181 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { |
1179 | struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr); | 1182 | struct ib_qp *xrc_qp = |
1183 | create_xrc_qp_user(qp, qp_init_attr, udata); | ||
1180 | 1184 | ||
1181 | if (IS_ERR(xrc_qp)) { | 1185 | if (IS_ERR(xrc_qp)) { |
1182 | ret = PTR_ERR(xrc_qp); | 1186 | ret = PTR_ERR(xrc_qp); |
@@ -1230,7 +1234,7 @@ err: | |||
1230 | return ERR_PTR(ret); | 1234 | return ERR_PTR(ret); |
1231 | 1235 | ||
1232 | } | 1236 | } |
1233 | EXPORT_SYMBOL(ib_create_qp); | 1237 | EXPORT_SYMBOL(ib_create_qp_user); |
1234 | 1238 | ||
1235 | static const struct { | 1239 | static const struct { |
1236 | int valid; | 1240 | int valid; |
@@ -1837,7 +1841,7 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp) | |||
1837 | return 0; | 1841 | return 0; |
1838 | } | 1842 | } |
1839 | 1843 | ||
1840 | int ib_destroy_qp(struct ib_qp *qp) | 1844 | int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata) |
1841 | { | 1845 | { |
1842 | const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; | 1846 | const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; |
1843 | const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; | 1847 | const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; |
@@ -1869,7 +1873,7 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
1869 | rdma_rw_cleanup_mrs(qp); | 1873 | rdma_rw_cleanup_mrs(qp); |
1870 | 1874 | ||
1871 | rdma_restrack_del(&qp->res); | 1875 | rdma_restrack_del(&qp->res); |
1872 | ret = qp->device->ops.destroy_qp(qp); | 1876 | ret = qp->device->ops.destroy_qp(qp, udata); |
1873 | if (!ret) { | 1877 | if (!ret) { |
1874 | if (alt_path_sgid_attr) | 1878 | if (alt_path_sgid_attr) |
1875 | rdma_put_gid_attr(alt_path_sgid_attr); | 1879 | rdma_put_gid_attr(alt_path_sgid_attr); |
@@ -1894,7 +1898,7 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
1894 | 1898 | ||
1895 | return ret; | 1899 | return ret; |
1896 | } | 1900 | } |
1897 | EXPORT_SYMBOL(ib_destroy_qp); | 1901 | EXPORT_SYMBOL(ib_destroy_qp_user); |
1898 | 1902 | ||
1899 | /* Completion queues */ | 1903 | /* Completion queues */ |
1900 | 1904 | ||
@@ -1933,15 +1937,15 @@ int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) | |||
1933 | } | 1937 | } |
1934 | EXPORT_SYMBOL(rdma_set_cq_moderation); | 1938 | EXPORT_SYMBOL(rdma_set_cq_moderation); |
1935 | 1939 | ||
1936 | int ib_destroy_cq(struct ib_cq *cq) | 1940 | int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) |
1937 | { | 1941 | { |
1938 | if (atomic_read(&cq->usecnt)) | 1942 | if (atomic_read(&cq->usecnt)) |
1939 | return -EBUSY; | 1943 | return -EBUSY; |
1940 | 1944 | ||
1941 | rdma_restrack_del(&cq->res); | 1945 | rdma_restrack_del(&cq->res); |
1942 | return cq->device->ops.destroy_cq(cq); | 1946 | return cq->device->ops.destroy_cq(cq, udata); |
1943 | } | 1947 | } |
1944 | EXPORT_SYMBOL(ib_destroy_cq); | 1948 | EXPORT_SYMBOL(ib_destroy_cq_user); |
1945 | 1949 | ||
1946 | int ib_resize_cq(struct ib_cq *cq, int cqe) | 1950 | int ib_resize_cq(struct ib_cq *cq, int cqe) |
1947 | { | 1951 | { |
@@ -1952,14 +1956,14 @@ EXPORT_SYMBOL(ib_resize_cq); | |||
1952 | 1956 | ||
1953 | /* Memory regions */ | 1957 | /* Memory regions */ |
1954 | 1958 | ||
1955 | int ib_dereg_mr(struct ib_mr *mr) | 1959 | int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata) |
1956 | { | 1960 | { |
1957 | struct ib_pd *pd = mr->pd; | 1961 | struct ib_pd *pd = mr->pd; |
1958 | struct ib_dm *dm = mr->dm; | 1962 | struct ib_dm *dm = mr->dm; |
1959 | int ret; | 1963 | int ret; |
1960 | 1964 | ||
1961 | rdma_restrack_del(&mr->res); | 1965 | rdma_restrack_del(&mr->res); |
1962 | ret = mr->device->ops.dereg_mr(mr); | 1966 | ret = mr->device->ops.dereg_mr(mr, udata); |
1963 | if (!ret) { | 1967 | if (!ret) { |
1964 | atomic_dec(&pd->usecnt); | 1968 | atomic_dec(&pd->usecnt); |
1965 | if (dm) | 1969 | if (dm) |
@@ -1968,13 +1972,14 @@ int ib_dereg_mr(struct ib_mr *mr) | |||
1968 | 1972 | ||
1969 | return ret; | 1973 | return ret; |
1970 | } | 1974 | } |
1971 | EXPORT_SYMBOL(ib_dereg_mr); | 1975 | EXPORT_SYMBOL(ib_dereg_mr_user); |
1972 | 1976 | ||
1973 | /** | 1977 | /** |
1974 | * ib_alloc_mr() - Allocates a memory region | 1978 | * ib_alloc_mr() - Allocates a memory region |
1975 | * @pd: protection domain associated with the region | 1979 | * @pd: protection domain associated with the region |
1976 | * @mr_type: memory region type | 1980 | * @mr_type: memory region type |
1977 | * @max_num_sg: maximum sg entries available for registration. | 1981 | * @max_num_sg: maximum sg entries available for registration. |
1982 | * @udata: user data or null for kernel objects | ||
1978 | * | 1983 | * |
1979 | * Notes: | 1984 | * Notes: |
1980 | * Memory registeration page/sg lists must not exceed max_num_sg. | 1985 | * Memory registeration page/sg lists must not exceed max_num_sg. |
@@ -1982,16 +1987,15 @@ EXPORT_SYMBOL(ib_dereg_mr); | |||
1982 | * max_num_sg * used_page_size. | 1987 | * max_num_sg * used_page_size. |
1983 | * | 1988 | * |
1984 | */ | 1989 | */ |
1985 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, | 1990 | struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, |
1986 | enum ib_mr_type mr_type, | 1991 | u32 max_num_sg, struct ib_udata *udata) |
1987 | u32 max_num_sg) | ||
1988 | { | 1992 | { |
1989 | struct ib_mr *mr; | 1993 | struct ib_mr *mr; |
1990 | 1994 | ||
1991 | if (!pd->device->ops.alloc_mr) | 1995 | if (!pd->device->ops.alloc_mr) |
1992 | return ERR_PTR(-EOPNOTSUPP); | 1996 | return ERR_PTR(-EOPNOTSUPP); |
1993 | 1997 | ||
1994 | mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); | 1998 | mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata); |
1995 | if (!IS_ERR(mr)) { | 1999 | if (!IS_ERR(mr)) { |
1996 | mr->device = pd->device; | 2000 | mr->device = pd->device; |
1997 | mr->pd = pd; | 2001 | mr->pd = pd; |
@@ -2005,7 +2009,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, | |||
2005 | 2009 | ||
2006 | return mr; | 2010 | return mr; |
2007 | } | 2011 | } |
2008 | EXPORT_SYMBOL(ib_alloc_mr); | 2012 | EXPORT_SYMBOL(ib_alloc_mr_user); |
2009 | 2013 | ||
2010 | /* "Fast" memory regions */ | 2014 | /* "Fast" memory regions */ |
2011 | 2015 | ||
@@ -2151,7 +2155,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) | |||
2151 | } | 2155 | } |
2152 | EXPORT_SYMBOL(__ib_alloc_xrcd); | 2156 | EXPORT_SYMBOL(__ib_alloc_xrcd); |
2153 | 2157 | ||
2154 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | 2158 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) |
2155 | { | 2159 | { |
2156 | struct ib_qp *qp; | 2160 | struct ib_qp *qp; |
2157 | int ret; | 2161 | int ret; |
@@ -2166,7 +2170,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | |||
2166 | return ret; | 2170 | return ret; |
2167 | } | 2171 | } |
2168 | 2172 | ||
2169 | return xrcd->device->ops.dealloc_xrcd(xrcd); | 2173 | return xrcd->device->ops.dealloc_xrcd(xrcd, udata); |
2170 | } | 2174 | } |
2171 | EXPORT_SYMBOL(ib_dealloc_xrcd); | 2175 | EXPORT_SYMBOL(ib_dealloc_xrcd); |
2172 | 2176 | ||
@@ -2210,10 +2214,11 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd, | |||
2210 | EXPORT_SYMBOL(ib_create_wq); | 2214 | EXPORT_SYMBOL(ib_create_wq); |
2211 | 2215 | ||
2212 | /** | 2216 | /** |
2213 | * ib_destroy_wq - Destroys the specified WQ. | 2217 | * ib_destroy_wq - Destroys the specified user WQ. |
2214 | * @wq: The WQ to destroy. | 2218 | * @wq: The WQ to destroy. |
2219 | * @udata: Valid user data | ||
2215 | */ | 2220 | */ |
2216 | int ib_destroy_wq(struct ib_wq *wq) | 2221 | int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata) |
2217 | { | 2222 | { |
2218 | int err; | 2223 | int err; |
2219 | struct ib_cq *cq = wq->cq; | 2224 | struct ib_cq *cq = wq->cq; |
@@ -2222,7 +2227,7 @@ int ib_destroy_wq(struct ib_wq *wq) | |||
2222 | if (atomic_read(&wq->usecnt)) | 2227 | if (atomic_read(&wq->usecnt)) |
2223 | return -EBUSY; | 2228 | return -EBUSY; |
2224 | 2229 | ||
2225 | err = wq->device->ops.destroy_wq(wq); | 2230 | err = wq->device->ops.destroy_wq(wq, udata); |
2226 | if (!err) { | 2231 | if (!err) { |
2227 | atomic_dec(&pd->usecnt); | 2232 | atomic_dec(&pd->usecnt); |
2228 | atomic_dec(&cq->usecnt); | 2233 | atomic_dec(&cq->usecnt); |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 33b2a06c6dde..a586ac28630b 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -564,7 +564,7 @@ fail: | |||
564 | } | 564 | } |
565 | 565 | ||
566 | /* Protection Domains */ | 566 | /* Protection Domains */ |
567 | void bnxt_re_dealloc_pd(struct ib_pd *ib_pd) | 567 | void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) |
568 | { | 568 | { |
569 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); | 569 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
570 | struct bnxt_re_dev *rdev = pd->rdev; | 570 | struct bnxt_re_dev *rdev = pd->rdev; |
@@ -635,7 +635,7 @@ fail: | |||
635 | } | 635 | } |
636 | 636 | ||
637 | /* Address Handles */ | 637 | /* Address Handles */ |
638 | int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) | 638 | int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags, struct ib_udata *udata) |
639 | { | 639 | { |
640 | struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); | 640 | struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); |
641 | struct bnxt_re_dev *rdev = ah->rdev; | 641 | struct bnxt_re_dev *rdev = ah->rdev; |
@@ -789,7 +789,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, | |||
789 | } | 789 | } |
790 | 790 | ||
791 | /* Queue Pairs */ | 791 | /* Queue Pairs */ |
792 | int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | 792 | int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) |
793 | { | 793 | { |
794 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); | 794 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
795 | struct bnxt_re_dev *rdev = qp->rdev; | 795 | struct bnxt_re_dev *rdev = qp->rdev; |
@@ -1327,7 +1327,7 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) | |||
1327 | } | 1327 | } |
1328 | 1328 | ||
1329 | /* Shared Receive Queues */ | 1329 | /* Shared Receive Queues */ |
1330 | int bnxt_re_destroy_srq(struct ib_srq *ib_srq) | 1330 | int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) |
1331 | { | 1331 | { |
1332 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, | 1332 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1333 | ib_srq); | 1333 | ib_srq); |
@@ -2560,7 +2560,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, | |||
2560 | } | 2560 | } |
2561 | 2561 | ||
2562 | /* Completion Queues */ | 2562 | /* Completion Queues */ |
2563 | int bnxt_re_destroy_cq(struct ib_cq *ib_cq) | 2563 | int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
2564 | { | 2564 | { |
2565 | int rc; | 2565 | int rc; |
2566 | struct bnxt_re_cq *cq; | 2566 | struct bnxt_re_cq *cq; |
@@ -3382,7 +3382,7 @@ fail: | |||
3382 | return ERR_PTR(rc); | 3382 | return ERR_PTR(rc); |
3383 | } | 3383 | } |
3384 | 3384 | ||
3385 | int bnxt_re_dereg_mr(struct ib_mr *ib_mr) | 3385 | int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
3386 | { | 3386 | { |
3387 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); | 3387 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
3388 | struct bnxt_re_dev *rdev = mr->rdev; | 3388 | struct bnxt_re_dev *rdev = mr->rdev; |
@@ -3428,7 +3428,7 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, | |||
3428 | } | 3428 | } |
3429 | 3429 | ||
3430 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, | 3430 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, |
3431 | u32 max_num_sg) | 3431 | u32 max_num_sg, struct ib_udata *udata) |
3432 | { | 3432 | { |
3433 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); | 3433 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
3434 | struct bnxt_re_dev *rdev = pd->rdev; | 3434 | struct bnxt_re_dev *rdev = pd->rdev; |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index e45465ed4eee..44e49988600e 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h | |||
@@ -165,14 +165,14 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, | |||
165 | u8 port_num); | 165 | u8 port_num); |
166 | int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, | 166 | int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, |
167 | struct ib_udata *udata); | 167 | struct ib_udata *udata); |
168 | void bnxt_re_dealloc_pd(struct ib_pd *pd); | 168 | void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); |
169 | struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, | 169 | struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, |
170 | struct rdma_ah_attr *ah_attr, | 170 | struct rdma_ah_attr *ah_attr, |
171 | u32 flags, | 171 | u32 flags, |
172 | struct ib_udata *udata); | 172 | struct ib_udata *udata); |
173 | int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); | 173 | int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
174 | int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); | 174 | int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
175 | int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags); | 175 | int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
176 | struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd, | 176 | struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd, |
177 | struct ib_srq_init_attr *srq_init_attr, | 177 | struct ib_srq_init_attr *srq_init_attr, |
178 | struct ib_udata *udata); | 178 | struct ib_udata *udata); |
@@ -180,7 +180,7 @@ int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, | |||
180 | enum ib_srq_attr_mask srq_attr_mask, | 180 | enum ib_srq_attr_mask srq_attr_mask, |
181 | struct ib_udata *udata); | 181 | struct ib_udata *udata); |
182 | int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); | 182 | int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); |
183 | int bnxt_re_destroy_srq(struct ib_srq *srq); | 183 | int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); |
184 | int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr, | 184 | int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr, |
185 | const struct ib_recv_wr **bad_recv_wr); | 185 | const struct ib_recv_wr **bad_recv_wr); |
186 | struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd, | 186 | struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd, |
@@ -190,7 +190,7 @@ int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, | |||
190 | int qp_attr_mask, struct ib_udata *udata); | 190 | int qp_attr_mask, struct ib_udata *udata); |
191 | int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, | 191 | int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, |
192 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); | 192 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); |
193 | int bnxt_re_destroy_qp(struct ib_qp *qp); | 193 | int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); |
194 | int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr, | 194 | int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr, |
195 | const struct ib_send_wr **bad_send_wr); | 195 | const struct ib_send_wr **bad_send_wr); |
196 | int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, | 196 | int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, |
@@ -199,7 +199,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, | |||
199 | const struct ib_cq_init_attr *attr, | 199 | const struct ib_cq_init_attr *attr, |
200 | struct ib_ucontext *context, | 200 | struct ib_ucontext *context, |
201 | struct ib_udata *udata); | 201 | struct ib_udata *udata); |
202 | int bnxt_re_destroy_cq(struct ib_cq *cq); | 202 | int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); |
203 | int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); | 203 | int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); |
204 | int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); | 204 | int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); |
205 | struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); | 205 | struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); |
@@ -207,8 +207,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); | |||
207 | int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, | 207 | int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, |
208 | unsigned int *sg_offset); | 208 | unsigned int *sg_offset); |
209 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, | 209 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, |
210 | u32 max_num_sg); | 210 | u32 max_num_sg, struct ib_udata *udata); |
211 | int bnxt_re_dereg_mr(struct ib_mr *mr); | 211 | int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata); |
212 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, | 212 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
213 | struct ib_udata *udata); | 213 | struct ib_udata *udata); |
214 | int bnxt_re_dealloc_mw(struct ib_mw *mw); | 214 | int bnxt_re_dealloc_mw(struct ib_mw *mw); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 21aac6bca06f..e10a56242998 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -88,7 +88,7 @@ static int iwch_alloc_ucontext(struct ib_ucontext *ucontext, | |||
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
90 | 90 | ||
91 | static int iwch_destroy_cq(struct ib_cq *ib_cq) | 91 | static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
92 | { | 92 | { |
93 | struct iwch_cq *chp; | 93 | struct iwch_cq *chp; |
94 | 94 | ||
@@ -175,7 +175,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, | |||
175 | 175 | ||
176 | mm = kmalloc(sizeof *mm, GFP_KERNEL); | 176 | mm = kmalloc(sizeof *mm, GFP_KERNEL); |
177 | if (!mm) { | 177 | if (!mm) { |
178 | iwch_destroy_cq(&chp->ibcq); | 178 | iwch_destroy_cq(&chp->ibcq, udata); |
179 | return ERR_PTR(-ENOMEM); | 179 | return ERR_PTR(-ENOMEM); |
180 | } | 180 | } |
181 | uresp.cqid = chp->cq.cqid; | 181 | uresp.cqid = chp->cq.cqid; |
@@ -201,7 +201,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, | |||
201 | } | 201 | } |
202 | if (ib_copy_to_udata(udata, &uresp, resplen)) { | 202 | if (ib_copy_to_udata(udata, &uresp, resplen)) { |
203 | kfree(mm); | 203 | kfree(mm); |
204 | iwch_destroy_cq(&chp->ibcq); | 204 | iwch_destroy_cq(&chp->ibcq, udata); |
205 | return ERR_PTR(-EFAULT); | 205 | return ERR_PTR(-EFAULT); |
206 | } | 206 | } |
207 | insert_mmap(ucontext, mm); | 207 | insert_mmap(ucontext, mm); |
@@ -367,7 +367,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | |||
367 | return ret; | 367 | return ret; |
368 | } | 368 | } |
369 | 369 | ||
370 | static void iwch_deallocate_pd(struct ib_pd *pd) | 370 | static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) |
371 | { | 371 | { |
372 | struct iwch_dev *rhp; | 372 | struct iwch_dev *rhp; |
373 | struct iwch_pd *php; | 373 | struct iwch_pd *php; |
@@ -398,7 +398,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, | |||
398 | struct iwch_alloc_pd_resp resp = {.pdid = php->pdid}; | 398 | struct iwch_alloc_pd_resp resp = {.pdid = php->pdid}; |
399 | 399 | ||
400 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { | 400 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { |
401 | iwch_deallocate_pd(&php->ibpd); | 401 | iwch_deallocate_pd(&php->ibpd, udata); |
402 | return -EFAULT; | 402 | return -EFAULT; |
403 | } | 403 | } |
404 | } | 404 | } |
@@ -406,7 +406,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, | |||
406 | return 0; | 406 | return 0; |
407 | } | 407 | } |
408 | 408 | ||
409 | static int iwch_dereg_mr(struct ib_mr *ib_mr) | 409 | static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
410 | { | 410 | { |
411 | struct iwch_dev *rhp; | 411 | struct iwch_dev *rhp; |
412 | struct iwch_mr *mhp; | 412 | struct iwch_mr *mhp; |
@@ -590,7 +590,7 @@ pbl_done: | |||
590 | uresp.pbl_addr); | 590 | uresp.pbl_addr); |
591 | 591 | ||
592 | if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { | 592 | if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { |
593 | iwch_dereg_mr(&mhp->ibmr); | 593 | iwch_dereg_mr(&mhp->ibmr, udata); |
594 | err = -EFAULT; | 594 | err = -EFAULT; |
595 | goto err; | 595 | goto err; |
596 | } | 596 | } |
@@ -661,9 +661,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw) | |||
661 | return 0; | 661 | return 0; |
662 | } | 662 | } |
663 | 663 | ||
664 | static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, | 664 | static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
665 | enum ib_mr_type mr_type, | 665 | u32 max_num_sg, struct ib_udata *udata) |
666 | u32 max_num_sg) | ||
667 | { | 666 | { |
668 | struct iwch_dev *rhp; | 667 | struct iwch_dev *rhp; |
669 | struct iwch_pd *php; | 668 | struct iwch_pd *php; |
@@ -742,7 +741,7 @@ static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, | |||
742 | return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page); | 741 | return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page); |
743 | } | 742 | } |
744 | 743 | ||
745 | static int iwch_destroy_qp(struct ib_qp *ib_qp) | 744 | static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) |
746 | { | 745 | { |
747 | struct iwch_dev *rhp; | 746 | struct iwch_dev *rhp; |
748 | struct iwch_qp *qhp; | 747 | struct iwch_qp *qhp; |
@@ -885,14 +884,14 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, | |||
885 | 884 | ||
886 | mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); | 885 | mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); |
887 | if (!mm1) { | 886 | if (!mm1) { |
888 | iwch_destroy_qp(&qhp->ibqp); | 887 | iwch_destroy_qp(&qhp->ibqp, udata); |
889 | return ERR_PTR(-ENOMEM); | 888 | return ERR_PTR(-ENOMEM); |
890 | } | 889 | } |
891 | 890 | ||
892 | mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); | 891 | mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); |
893 | if (!mm2) { | 892 | if (!mm2) { |
894 | kfree(mm1); | 893 | kfree(mm1); |
895 | iwch_destroy_qp(&qhp->ibqp); | 894 | iwch_destroy_qp(&qhp->ibqp, udata); |
896 | return ERR_PTR(-ENOMEM); | 895 | return ERR_PTR(-ENOMEM); |
897 | } | 896 | } |
898 | 897 | ||
@@ -909,7 +908,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, | |||
909 | if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { | 908 | if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { |
910 | kfree(mm1); | 909 | kfree(mm1); |
911 | kfree(mm2); | 910 | kfree(mm2); |
912 | iwch_destroy_qp(&qhp->ibqp); | 911 | iwch_destroy_qp(&qhp->ibqp, udata); |
913 | return ERR_PTR(-EFAULT); | 912 | return ERR_PTR(-EFAULT); |
914 | } | 913 | } |
915 | mm1->key = uresp.key; | 914 | mm1->key = uresp.key; |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 1fa5f6445be3..562187f0c5af 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -968,7 +968,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
968 | return !err || err == -ENODATA ? npolled : err; | 968 | return !err || err == -ENODATA ? npolled : err; |
969 | } | 969 | } |
970 | 970 | ||
971 | int c4iw_destroy_cq(struct ib_cq *ib_cq) | 971 | int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
972 | { | 972 | { |
973 | struct c4iw_cq *chp; | 973 | struct c4iw_cq *chp; |
974 | struct c4iw_ucontext *ucontext; | 974 | struct c4iw_ucontext *ucontext; |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 4c918fe2430e..586fd1a00d33 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -979,9 +979,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param); | |||
979 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len); | 979 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len); |
980 | void c4iw_qp_add_ref(struct ib_qp *qp); | 980 | void c4iw_qp_add_ref(struct ib_qp *qp); |
981 | void c4iw_qp_rem_ref(struct ib_qp *qp); | 981 | void c4iw_qp_rem_ref(struct ib_qp *qp); |
982 | struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, | 982 | struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
983 | enum ib_mr_type mr_type, | 983 | u32 max_num_sg, struct ib_udata *udata); |
984 | u32 max_num_sg); | ||
985 | int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | 984 | int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
986 | unsigned int *sg_offset); | 985 | unsigned int *sg_offset); |
987 | int c4iw_dealloc_mw(struct ib_mw *mw); | 986 | int c4iw_dealloc_mw(struct ib_mw *mw); |
@@ -992,8 +991,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, | |||
992 | u64 length, u64 virt, int acc, | 991 | u64 length, u64 virt, int acc, |
993 | struct ib_udata *udata); | 992 | struct ib_udata *udata); |
994 | struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc); | 993 | struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc); |
995 | int c4iw_dereg_mr(struct ib_mr *ib_mr); | 994 | int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); |
996 | int c4iw_destroy_cq(struct ib_cq *ib_cq); | 995 | int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); |
997 | struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, | 996 | struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, |
998 | const struct ib_cq_init_attr *attr, | 997 | const struct ib_cq_init_attr *attr, |
999 | struct ib_ucontext *ib_context, | 998 | struct ib_ucontext *ib_context, |
@@ -1002,11 +1001,11 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); | |||
1002 | int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, | 1001 | int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, |
1003 | enum ib_srq_attr_mask srq_attr_mask, | 1002 | enum ib_srq_attr_mask srq_attr_mask, |
1004 | struct ib_udata *udata); | 1003 | struct ib_udata *udata); |
1005 | int c4iw_destroy_srq(struct ib_srq *ib_srq); | 1004 | int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata); |
1006 | struct ib_srq *c4iw_create_srq(struct ib_pd *pd, | 1005 | struct ib_srq *c4iw_create_srq(struct ib_pd *pd, |
1007 | struct ib_srq_init_attr *attrs, | 1006 | struct ib_srq_init_attr *attrs, |
1008 | struct ib_udata *udata); | 1007 | struct ib_udata *udata); |
1009 | int c4iw_destroy_qp(struct ib_qp *ib_qp); | 1008 | int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata); |
1010 | struct ib_qp *c4iw_create_qp(struct ib_pd *pd, | 1009 | struct ib_qp *c4iw_create_qp(struct ib_pd *pd, |
1011 | struct ib_qp_init_attr *attrs, | 1010 | struct ib_qp_init_attr *attrs, |
1012 | struct ib_udata *udata); | 1011 | struct ib_udata *udata); |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 81f5b5b026b1..811c0c8c5b16 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -683,9 +683,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw) | |||
683 | return 0; | 683 | return 0; |
684 | } | 684 | } |
685 | 685 | ||
686 | struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, | 686 | struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
687 | enum ib_mr_type mr_type, | 687 | u32 max_num_sg, struct ib_udata *udata) |
688 | u32 max_num_sg) | ||
689 | { | 688 | { |
690 | struct c4iw_dev *rhp; | 689 | struct c4iw_dev *rhp; |
691 | struct c4iw_pd *php; | 690 | struct c4iw_pd *php; |
@@ -786,7 +785,7 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | |||
786 | return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page); | 785 | return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page); |
787 | } | 786 | } |
788 | 787 | ||
789 | int c4iw_dereg_mr(struct ib_mr *ib_mr) | 788 | int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
790 | { | 789 | { |
791 | struct c4iw_dev *rhp; | 790 | struct c4iw_dev *rhp; |
792 | struct c4iw_mr *mhp; | 791 | struct c4iw_mr *mhp; |
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 507c54572cc9..12f7d3ae6a53 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -190,7 +190,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | |||
190 | return ret; | 190 | return ret; |
191 | } | 191 | } |
192 | 192 | ||
193 | static void c4iw_deallocate_pd(struct ib_pd *pd) | 193 | static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) |
194 | { | 194 | { |
195 | struct c4iw_dev *rhp; | 195 | struct c4iw_dev *rhp; |
196 | struct c4iw_pd *php; | 196 | struct c4iw_pd *php; |
@@ -224,7 +224,7 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, | |||
224 | struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid}; | 224 | struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid}; |
225 | 225 | ||
226 | if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { | 226 | if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { |
227 | c4iw_deallocate_pd(&php->ibpd); | 227 | c4iw_deallocate_pd(&php->ibpd, udata); |
228 | return -EFAULT; | 228 | return -EFAULT; |
229 | } | 229 | } |
230 | } | 230 | } |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index b2ae5b40cc3e..76e6544cf0b9 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -2095,7 +2095,7 @@ out: | |||
2095 | return ret; | 2095 | return ret; |
2096 | } | 2096 | } |
2097 | 2097 | ||
2098 | int c4iw_destroy_qp(struct ib_qp *ib_qp) | 2098 | int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) |
2099 | { | 2099 | { |
2100 | struct c4iw_dev *rhp; | 2100 | struct c4iw_dev *rhp; |
2101 | struct c4iw_qp *qhp; | 2101 | struct c4iw_qp *qhp; |
@@ -2826,7 +2826,7 @@ err_free_srq: | |||
2826 | return ERR_PTR(ret); | 2826 | return ERR_PTR(ret); |
2827 | } | 2827 | } |
2828 | 2828 | ||
2829 | int c4iw_destroy_srq(struct ib_srq *ibsrq) | 2829 | int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) |
2830 | { | 2830 | { |
2831 | struct c4iw_dev *rhp; | 2831 | struct c4iw_dev *rhp; |
2832 | struct c4iw_srq *srq; | 2832 | struct c4iw_srq *srq; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index b3c8c45ec1e3..42067325ae5e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c | |||
@@ -111,7 +111,7 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) | |||
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags) | 114 | int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
115 | { | 115 | { |
116 | kfree(to_hr_ah(ah)); | 116 | kfree(to_hr_ah(ah)); |
117 | 117 | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index c50f241211e9..a4e95a310c16 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c | |||
@@ -444,14 +444,14 @@ err_cq: | |||
444 | } | 444 | } |
445 | EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq); | 445 | EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq); |
446 | 446 | ||
447 | int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) | 447 | int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
448 | { | 448 | { |
449 | struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); | 449 | struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); |
450 | struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); | 450 | struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); |
451 | int ret = 0; | 451 | int ret = 0; |
452 | 452 | ||
453 | if (hr_dev->hw->destroy_cq) { | 453 | if (hr_dev->hw->destroy_cq) { |
454 | ret = hr_dev->hw->destroy_cq(ib_cq); | 454 | ret = hr_dev->hw->destroy_cq(ib_cq, udata); |
455 | } else { | 455 | } else { |
456 | hns_roce_free_cq(hr_dev, hr_cq); | 456 | hns_roce_free_cq(hr_dev, hr_cq); |
457 | hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); | 457 | hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); |
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 61411ca655f5..780a7ba204db 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h | |||
@@ -905,7 +905,7 @@ struct hns_roce_hw { | |||
905 | int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, | 905 | int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, |
906 | int attr_mask, enum ib_qp_state cur_state, | 906 | int attr_mask, enum ib_qp_state cur_state, |
907 | enum ib_qp_state new_state); | 907 | enum ib_qp_state new_state); |
908 | int (*destroy_qp)(struct ib_qp *ibqp); | 908 | int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata); |
909 | int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, | 909 | int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, |
910 | struct hns_roce_qp *hr_qp); | 910 | struct hns_roce_qp *hr_qp); |
911 | int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr, | 911 | int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
@@ -914,8 +914,9 @@ struct hns_roce_hw { | |||
914 | const struct ib_recv_wr **bad_recv_wr); | 914 | const struct ib_recv_wr **bad_recv_wr); |
915 | int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); | 915 | int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); |
916 | int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 916 | int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
917 | int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr); | 917 | int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, |
918 | int (*destroy_cq)(struct ib_cq *ibcq); | 918 | struct ib_udata *udata); |
919 | int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); | ||
919 | int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); | 920 | int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
920 | int (*init_eq)(struct hns_roce_dev *hr_dev); | 921 | int (*init_eq)(struct hns_roce_dev *hr_dev); |
921 | void (*cleanup_eq)(struct hns_roce_dev *hr_dev); | 922 | void (*cleanup_eq)(struct hns_roce_dev *hr_dev); |
@@ -1109,11 +1110,11 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, | |||
1109 | u32 flags, | 1110 | u32 flags, |
1110 | struct ib_udata *udata); | 1111 | struct ib_udata *udata); |
1111 | int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); | 1112 | int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
1112 | int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags); | 1113 | int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
1113 | 1114 | ||
1114 | int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, | 1115 | int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, |
1115 | struct ib_udata *udata); | 1116 | struct ib_udata *udata); |
1116 | void hns_roce_dealloc_pd(struct ib_pd *pd); | 1117 | void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); |
1117 | 1118 | ||
1118 | struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); | 1119 | struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); |
1119 | struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | 1120 | struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
@@ -1123,10 +1124,10 @@ int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, | |||
1123 | u64 virt_addr, int mr_access_flags, struct ib_pd *pd, | 1124 | u64 virt_addr, int mr_access_flags, struct ib_pd *pd, |
1124 | struct ib_udata *udata); | 1125 | struct ib_udata *udata); |
1125 | struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, | 1126 | struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
1126 | u32 max_num_sg); | 1127 | u32 max_num_sg, struct ib_udata *udata); |
1127 | int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | 1128 | int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
1128 | unsigned int *sg_offset); | 1129 | unsigned int *sg_offset); |
1129 | int hns_roce_dereg_mr(struct ib_mr *ibmr); | 1130 | int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); |
1130 | int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev, | 1131 | int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev, |
1131 | struct hns_roce_cmd_mailbox *mailbox, | 1132 | struct hns_roce_cmd_mailbox *mailbox, |
1132 | unsigned long mpt_index); | 1133 | unsigned long mpt_index); |
@@ -1150,7 +1151,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, | |||
1150 | int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr, | 1151 | int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr, |
1151 | enum ib_srq_attr_mask srq_attr_mask, | 1152 | enum ib_srq_attr_mask srq_attr_mask, |
1152 | struct ib_udata *udata); | 1153 | struct ib_udata *udata); |
1153 | int hns_roce_destroy_srq(struct ib_srq *ibsrq); | 1154 | int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); |
1154 | 1155 | ||
1155 | struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, | 1156 | struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, |
1156 | struct ib_qp_init_attr *init_attr, | 1157 | struct ib_qp_init_attr *init_attr, |
@@ -1179,7 +1180,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, | |||
1179 | struct ib_ucontext *context, | 1180 | struct ib_ucontext *context, |
1180 | struct ib_udata *udata); | 1181 | struct ib_udata *udata); |
1181 | 1182 | ||
1182 | int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq); | 1183 | int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); |
1183 | void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq); | 1184 | void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq); |
1184 | 1185 | ||
1185 | int hns_roce_db_map_user(struct hns_roce_ucontext *context, | 1186 | int hns_roce_db_map_user(struct hns_roce_ucontext *context, |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 97515c340134..1863516f6be9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c | |||
@@ -855,17 +855,17 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) | |||
855 | create_lp_qp_failed: | 855 | create_lp_qp_failed: |
856 | for (i -= 1; i >= 0; i--) { | 856 | for (i -= 1; i >= 0; i--) { |
857 | hr_qp = free_mr->mr_free_qp[i]; | 857 | hr_qp = free_mr->mr_free_qp[i]; |
858 | if (hns_roce_v1_destroy_qp(&hr_qp->ibqp)) | 858 | if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL)) |
859 | dev_err(dev, "Destroy qp %d for mr free failed!\n", i); | 859 | dev_err(dev, "Destroy qp %d for mr free failed!\n", i); |
860 | } | 860 | } |
861 | 861 | ||
862 | hns_roce_dealloc_pd(pd); | 862 | hns_roce_dealloc_pd(pd, NULL); |
863 | 863 | ||
864 | alloc_pd_failed: | 864 | alloc_pd_failed: |
865 | kfree(pd); | 865 | kfree(pd); |
866 | 866 | ||
867 | alloc_mem_failed: | 867 | alloc_mem_failed: |
868 | if (hns_roce_ib_destroy_cq(cq)) | 868 | if (hns_roce_ib_destroy_cq(cq, NULL)) |
869 | dev_err(dev, "Destroy cq for create_lp_qp failed!\n"); | 869 | dev_err(dev, "Destroy cq for create_lp_qp failed!\n"); |
870 | 870 | ||
871 | return ret; | 871 | return ret; |
@@ -888,17 +888,17 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) | |||
888 | if (!hr_qp) | 888 | if (!hr_qp) |
889 | continue; | 889 | continue; |
890 | 890 | ||
891 | ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp); | 891 | ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL); |
892 | if (ret) | 892 | if (ret) |
893 | dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", | 893 | dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", |
894 | i, ret); | 894 | i, ret); |
895 | } | 895 | } |
896 | 896 | ||
897 | ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq); | 897 | ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL); |
898 | if (ret) | 898 | if (ret) |
899 | dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); | 899 | dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); |
900 | 900 | ||
901 | hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd); | 901 | hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); |
902 | } | 902 | } |
903 | 903 | ||
904 | static int hns_roce_db_init(struct hns_roce_dev *hr_dev) | 904 | static int hns_roce_db_init(struct hns_roce_dev *hr_dev) |
@@ -1096,7 +1096,7 @@ free_work: | |||
1096 | } | 1096 | } |
1097 | 1097 | ||
1098 | static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, | 1098 | static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, |
1099 | struct hns_roce_mr *mr) | 1099 | struct hns_roce_mr *mr, struct ib_udata *udata) |
1100 | { | 1100 | { |
1101 | struct device *dev = &hr_dev->pdev->dev; | 1101 | struct device *dev = &hr_dev->pdev->dev; |
1102 | struct hns_roce_mr_free_work *mr_work; | 1102 | struct hns_roce_mr_free_work *mr_work; |
@@ -3921,7 +3921,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) | |||
3921 | dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); | 3921 | dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); |
3922 | } | 3922 | } |
3923 | 3923 | ||
3924 | int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) | 3924 | int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
3925 | { | 3925 | { |
3926 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | 3926 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
3927 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | 3927 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
@@ -3998,7 +3998,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) | |||
3998 | return 0; | 3998 | return 0; |
3999 | } | 3999 | } |
4000 | 4000 | ||
4001 | static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq) | 4001 | static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) |
4002 | { | 4002 | { |
4003 | struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); | 4003 | struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); |
4004 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); | 4004 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h index 66440147d9eb..1a2c38785c7f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h | |||
@@ -1106,6 +1106,6 @@ struct hns_roce_v1_priv { | |||
1106 | 1106 | ||
1107 | int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); | 1107 | int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); |
1108 | int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 1108 | int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
1109 | int hns_roce_v1_destroy_qp(struct ib_qp *ibqp); | 1109 | int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); |
1110 | 1110 | ||
1111 | #endif | 1111 | #endif |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index dafc33b02e09..30b00240b7c8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c | |||
@@ -4513,7 +4513,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, | |||
4513 | return 0; | 4513 | return 0; |
4514 | } | 4514 | } |
4515 | 4515 | ||
4516 | static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) | 4516 | static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
4517 | { | 4517 | { |
4518 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | 4518 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
4519 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | 4519 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index b09f1cde2ff5..9119d875b13d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c | |||
@@ -1282,14 +1282,14 @@ free_cmd_mbox: | |||
1282 | return ret; | 1282 | return ret; |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | int hns_roce_dereg_mr(struct ib_mr *ibmr) | 1285 | int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
1286 | { | 1286 | { |
1287 | struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); | 1287 | struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); |
1288 | struct hns_roce_mr *mr = to_hr_mr(ibmr); | 1288 | struct hns_roce_mr *mr = to_hr_mr(ibmr); |
1289 | int ret = 0; | 1289 | int ret = 0; |
1290 | 1290 | ||
1291 | if (hr_dev->hw->dereg_mr) { | 1291 | if (hr_dev->hw->dereg_mr) { |
1292 | ret = hr_dev->hw->dereg_mr(hr_dev, mr); | 1292 | ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata); |
1293 | } else { | 1293 | } else { |
1294 | hns_roce_mr_free(hr_dev, mr); | 1294 | hns_roce_mr_free(hr_dev, mr); |
1295 | 1295 | ||
@@ -1303,7 +1303,7 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr) | |||
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, | 1305 | struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
1306 | u32 max_num_sg) | 1306 | u32 max_num_sg, struct ib_udata *udata) |
1307 | { | 1307 | { |
1308 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); | 1308 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); |
1309 | struct device *dev = hr_dev->dev; | 1309 | struct device *dev = hr_dev->dev; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index b9b97c5e97e6..504e6e466d72 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c | |||
@@ -86,7 +86,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
86 | } | 86 | } |
87 | EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); | 87 | EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); |
88 | 88 | ||
89 | void hns_roce_dealloc_pd(struct ib_pd *pd) | 89 | void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
90 | { | 90 | { |
91 | hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); | 91 | hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); |
92 | } | 92 | } |
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index a8ee2f6da967..5874dbb391fd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c | |||
@@ -423,7 +423,7 @@ err_srq: | |||
423 | return ERR_PTR(ret); | 423 | return ERR_PTR(ret); |
424 | } | 424 | } |
425 | 425 | ||
426 | int hns_roce_destroy_srq(struct ib_srq *ibsrq) | 426 | int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) |
427 | { | 427 | { |
428 | struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); | 428 | struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); |
429 | struct hns_roce_srq *srq = to_hr_srq(ibsrq); | 429 | struct hns_roce_srq *srq = to_hr_srq(ibsrq); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 1c6aa0efd2b6..8233f5a4e623 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
@@ -3490,7 +3490,8 @@ static void i40iw_qp_disconnect(struct i40iw_qp *iwqp) | |||
3490 | /* Need to free the Last Streaming Mode Message */ | 3490 | /* Need to free the Last Streaming Mode Message */ |
3491 | if (iwqp->ietf_mem.va) { | 3491 | if (iwqp->ietf_mem.va) { |
3492 | if (iwqp->lsmm_mr) | 3492 | if (iwqp->lsmm_mr) |
3493 | iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr); | 3493 | iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr, |
3494 | NULL); | ||
3494 | i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem); | 3495 | i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem); |
3495 | } | 3496 | } |
3496 | } | 3497 | } |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index a8352e3ca23d..fd2d7426c832 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
@@ -342,8 +342,9 @@ error: | |||
342 | /** | 342 | /** |
343 | * i40iw_dealloc_pd - deallocate pd | 343 | * i40iw_dealloc_pd - deallocate pd |
344 | * @ibpd: ptr of pd to be deallocated | 344 | * @ibpd: ptr of pd to be deallocated |
345 | * @udata: user data or null for kernel object | ||
345 | */ | 346 | */ |
346 | static void i40iw_dealloc_pd(struct ib_pd *ibpd) | 347 | static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
347 | { | 348 | { |
348 | struct i40iw_pd *iwpd = to_iwpd(ibpd); | 349 | struct i40iw_pd *iwpd = to_iwpd(ibpd); |
349 | struct i40iw_device *iwdev = to_iwdev(ibpd->device); | 350 | struct i40iw_device *iwdev = to_iwdev(ibpd->device); |
@@ -413,7 +414,7 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq) | |||
413 | * i40iw_destroy_qp - destroy qp | 414 | * i40iw_destroy_qp - destroy qp |
414 | * @ibqp: qp's ib pointer also to get to device's qp address | 415 | * @ibqp: qp's ib pointer also to get to device's qp address |
415 | */ | 416 | */ |
416 | static int i40iw_destroy_qp(struct ib_qp *ibqp) | 417 | static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
417 | { | 418 | { |
418 | struct i40iw_qp *iwqp = to_iwqp(ibqp); | 419 | struct i40iw_qp *iwqp = to_iwqp(ibqp); |
419 | 420 | ||
@@ -744,8 +745,8 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, | |||
744 | err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | 745 | err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); |
745 | if (err_code) { | 746 | if (err_code) { |
746 | i40iw_pr_err("copy_to_udata failed\n"); | 747 | i40iw_pr_err("copy_to_udata failed\n"); |
747 | i40iw_destroy_qp(&iwqp->ibqp); | 748 | i40iw_destroy_qp(&iwqp->ibqp, udata); |
748 | /* let the completion of the qp destroy free the qp */ | 749 | /* let the completion of the qp destroy free the qp */ |
749 | return ERR_PTR(err_code); | 750 | return ERR_PTR(err_code); |
750 | } | 751 | } |
751 | } | 752 | } |
@@ -1063,8 +1064,9 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq) | |||
1063 | /** | 1064 | /** |
1064 | * i40iw_destroy_cq - destroy cq | 1065 | * i40iw_destroy_cq - destroy cq |
1065 | * @ib_cq: cq pointer | 1066 | * @ib_cq: cq pointer |
1067 | * @udata: user data or NULL for kernel object | ||
1066 | */ | 1068 | */ |
1067 | static int i40iw_destroy_cq(struct ib_cq *ib_cq) | 1069 | static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
1068 | { | 1070 | { |
1069 | struct i40iw_cq *iwcq; | 1071 | struct i40iw_cq *iwcq; |
1070 | struct i40iw_device *iwdev; | 1072 | struct i40iw_device *iwdev; |
@@ -1601,10 +1603,10 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr | |||
1601 | * @pd: ibpd pointer | 1603 | * @pd: ibpd pointer |
1602 | * @mr_type: memory for stag registrion | 1604 | * @mr_type: memory for stag registrion |
1603 | * @max_num_sg: man number of pages | 1605 | * @max_num_sg: man number of pages |
1606 | * @udata: user data or NULL for kernel objects | ||
1604 | */ | 1607 | */ |
1605 | static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, | 1608 | static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
1606 | enum ib_mr_type mr_type, | 1609 | u32 max_num_sg, struct ib_udata *udata) |
1607 | u32 max_num_sg) | ||
1608 | { | 1610 | { |
1609 | struct i40iw_pd *iwpd = to_iwpd(pd); | 1611 | struct i40iw_pd *iwpd = to_iwpd(pd); |
1610 | struct i40iw_device *iwdev = to_iwdev(pd->device); | 1612 | struct i40iw_device *iwdev = to_iwdev(pd->device); |
@@ -2038,7 +2040,7 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr, | |||
2038 | * i40iw_dereg_mr - deregister mr | 2040 | * i40iw_dereg_mr - deregister mr |
2039 | * @ib_mr: mr ptr for dereg | 2041 | * @ib_mr: mr ptr for dereg |
2040 | */ | 2042 | */ |
2041 | static int i40iw_dereg_mr(struct ib_mr *ib_mr) | 2043 | static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
2042 | { | 2044 | { |
2043 | struct ib_pd *ibpd = ib_mr->pd; | 2045 | struct ib_pd *ibpd = ib_mr->pd; |
2044 | struct i40iw_pd *iwpd = to_iwpd(ibpd); | 2046 | struct i40iw_pd *iwpd = to_iwpd(ibpd); |
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 1672808262ba..6f552b780b89 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c | |||
@@ -250,7 +250,7 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) | |||
250 | return 0; | 250 | return 0; |
251 | } | 251 | } |
252 | 252 | ||
253 | int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags) | 253 | int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
254 | { | 254 | { |
255 | kfree(to_mah(ah)); | 255 | kfree(to_mah(ah)); |
256 | return 0; | 256 | return 0; |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 03ac72339dd2..0b730737fb25 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -485,7 +485,7 @@ out: | |||
485 | return err; | 485 | return err; |
486 | } | 486 | } |
487 | 487 | ||
488 | int mlx4_ib_destroy_cq(struct ib_cq *cq) | 488 | int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) |
489 | { | 489 | { |
490 | struct mlx4_ib_dev *dev = to_mdev(cq->device); | 490 | struct mlx4_ib_dev *dev = to_mdev(cq->device); |
491 | struct mlx4_ib_cq *mcq = to_mcq(cq); | 491 | struct mlx4_ib_cq *mcq = to_mcq(cq); |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 936ee1314bcd..f090c1b40433 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1411 | 1411 | ||
1412 | sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); | 1412 | sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); |
1413 | if (sqp->tx_ring[wire_tx_ix].ah) | 1413 | if (sqp->tx_ring[wire_tx_ix].ah) |
1414 | mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); | 1414 | mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0, NULL); |
1415 | sqp->tx_ring[wire_tx_ix].ah = ah; | 1415 | sqp->tx_ring[wire_tx_ix].ah = ah; |
1416 | ib_dma_sync_single_for_cpu(&dev->ib_dev, | 1416 | ib_dma_sync_single_for_cpu(&dev->ib_dev, |
1417 | sqp->tx_ring[wire_tx_ix].buf.map, | 1417 | sqp->tx_ring[wire_tx_ix].buf.map, |
@@ -1450,7 +1450,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1450 | spin_unlock(&sqp->tx_lock); | 1450 | spin_unlock(&sqp->tx_lock); |
1451 | sqp->tx_ring[wire_tx_ix].ah = NULL; | 1451 | sqp->tx_ring[wire_tx_ix].ah = NULL; |
1452 | out: | 1452 | out: |
1453 | mlx4_ib_destroy_ah(ah, 0); | 1453 | mlx4_ib_destroy_ah(ah, 0, NULL); |
1454 | return ret; | 1454 | return ret; |
1455 | } | 1455 | } |
1456 | 1456 | ||
@@ -1903,7 +1903,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) | |||
1903 | switch (wc.opcode) { | 1903 | switch (wc.opcode) { |
1904 | case IB_WC_SEND: | 1904 | case IB_WC_SEND: |
1905 | mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & | 1905 | mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & |
1906 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); | 1906 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, |
1907 | 0, NULL); | ||
1907 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah | 1908 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah |
1908 | = NULL; | 1909 | = NULL; |
1909 | spin_lock(&sqp->tx_lock); | 1910 | spin_lock(&sqp->tx_lock); |
@@ -1932,7 +1933,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) | |||
1932 | ctx->slave, wc.status, wc.wr_id); | 1933 | ctx->slave, wc.status, wc.wr_id); |
1933 | if (!MLX4_TUN_IS_RECV(wc.wr_id)) { | 1934 | if (!MLX4_TUN_IS_RECV(wc.wr_id)) { |
1934 | mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & | 1935 | mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & |
1935 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); | 1936 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, |
1937 | 0, NULL); | ||
1936 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah | 1938 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah |
1937 | = NULL; | 1939 | = NULL; |
1938 | spin_lock(&sqp->tx_lock); | 1940 | spin_lock(&sqp->tx_lock); |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 733f7bbd5901..e50f9de71119 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -1195,7 +1195,7 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
1195 | return 0; | 1195 | return 0; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | static void mlx4_ib_dealloc_pd(struct ib_pd *pd) | 1198 | static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
1199 | { | 1199 | { |
1200 | mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); | 1200 | mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); |
1201 | } | 1201 | } |
@@ -1243,7 +1243,7 @@ err1: | |||
1243 | return ERR_PTR(err); | 1243 | return ERR_PTR(err); |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd) | 1246 | static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) |
1247 | { | 1247 | { |
1248 | ib_destroy_cq(to_mxrcd(xrcd)->cq); | 1248 | ib_destroy_cq(to_mxrcd(xrcd)->cq); |
1249 | ib_dealloc_pd(to_mxrcd(xrcd)->pd); | 1249 | ib_dealloc_pd(to_mxrcd(xrcd)->pd); |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 24633fc29a29..58112b59cc7c 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -734,13 +734,12 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, | |||
734 | struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | 734 | struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
735 | u64 virt_addr, int access_flags, | 735 | u64 virt_addr, int access_flags, |
736 | struct ib_udata *udata); | 736 | struct ib_udata *udata); |
737 | int mlx4_ib_dereg_mr(struct ib_mr *mr); | 737 | int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata); |
738 | struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, | 738 | struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, |
739 | struct ib_udata *udata); | 739 | struct ib_udata *udata); |
740 | int mlx4_ib_dealloc_mw(struct ib_mw *mw); | 740 | int mlx4_ib_dealloc_mw(struct ib_mw *mw); |
741 | struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, | 741 | struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
742 | enum ib_mr_type mr_type, | 742 | u32 max_num_sg, struct ib_udata *udata); |
743 | u32 max_num_sg); | ||
744 | int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | 743 | int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
745 | unsigned int *sg_offset); | 744 | unsigned int *sg_offset); |
746 | int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); | 745 | int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
@@ -749,7 +748,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, | |||
749 | const struct ib_cq_init_attr *attr, | 748 | const struct ib_cq_init_attr *attr, |
750 | struct ib_ucontext *context, | 749 | struct ib_ucontext *context, |
751 | struct ib_udata *udata); | 750 | struct ib_udata *udata); |
752 | int mlx4_ib_destroy_cq(struct ib_cq *cq); | 751 | int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); |
753 | int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 752 | int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
754 | int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); | 753 | int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); |
755 | void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); | 754 | void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); |
@@ -762,7 +761,7 @@ struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd, | |||
762 | int slave_sgid_index, u8 *s_mac, | 761 | int slave_sgid_index, u8 *s_mac, |
763 | u16 vlan_tag); | 762 | u16 vlan_tag); |
764 | int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); | 763 | int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
765 | int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags); | 764 | int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
766 | 765 | ||
767 | struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | 766 | struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, |
768 | struct ib_srq_init_attr *init_attr, | 767 | struct ib_srq_init_attr *init_attr, |
@@ -770,7 +769,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
770 | int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 769 | int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
771 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | 770 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
772 | int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); | 771 | int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); |
773 | int mlx4_ib_destroy_srq(struct ib_srq *srq); | 772 | int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); |
774 | void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index); | 773 | void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index); |
775 | int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, | 774 | int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, |
776 | const struct ib_recv_wr **bad_wr); | 775 | const struct ib_recv_wr **bad_wr); |
@@ -778,7 +777,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, | |||
778 | struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | 777 | struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, |
779 | struct ib_qp_init_attr *init_attr, | 778 | struct ib_qp_init_attr *init_attr, |
780 | struct ib_udata *udata); | 779 | struct ib_udata *udata); |
781 | int mlx4_ib_destroy_qp(struct ib_qp *qp); | 780 | int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); |
782 | void mlx4_ib_drain_sq(struct ib_qp *qp); | 781 | void mlx4_ib_drain_sq(struct ib_qp *qp); |
783 | void mlx4_ib_drain_rq(struct ib_qp *qp); | 782 | void mlx4_ib_drain_rq(struct ib_qp *qp); |
784 | int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 783 | int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
@@ -913,7 +912,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port); | |||
913 | struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, | 912 | struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, |
914 | struct ib_wq_init_attr *init_attr, | 913 | struct ib_wq_init_attr *init_attr, |
915 | struct ib_udata *udata); | 914 | struct ib_udata *udata); |
916 | int mlx4_ib_destroy_wq(struct ib_wq *wq); | 915 | int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); |
917 | int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, | 916 | int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, |
918 | u32 wq_attr_mask, struct ib_udata *udata); | 917 | u32 wq_attr_mask, struct ib_udata *udata); |
919 | 918 | ||
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 395379a480cb..355205a28544 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -595,7 +595,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) | |||
595 | } | 595 | } |
596 | } | 596 | } |
597 | 597 | ||
598 | int mlx4_ib_dereg_mr(struct ib_mr *ibmr) | 598 | int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
599 | { | 599 | { |
600 | struct mlx4_ib_mr *mr = to_mmr(ibmr); | 600 | struct mlx4_ib_mr *mr = to_mmr(ibmr); |
601 | int ret; | 601 | int ret; |
@@ -655,9 +655,8 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) | |||
655 | return 0; | 655 | return 0; |
656 | } | 656 | } |
657 | 657 | ||
658 | struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, | 658 | struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
659 | enum ib_mr_type mr_type, | 659 | u32 max_num_sg, struct ib_udata *udata) |
660 | u32 max_num_sg) | ||
661 | { | 660 | { |
662 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | 661 | struct mlx4_ib_dev *dev = to_mdev(pd->device); |
663 | struct mlx4_ib_mr *mr; | 662 | struct mlx4_ib_mr *mr; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 429a59c5801c..25dfdcc90a05 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1626,7 +1626,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp) | |||
1626 | return 0; | 1626 | return 0; |
1627 | } | 1627 | } |
1628 | 1628 | ||
1629 | int mlx4_ib_destroy_qp(struct ib_qp *qp) | 1629 | int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
1630 | { | 1630 | { |
1631 | struct mlx4_ib_qp *mqp = to_mqp(qp); | 1631 | struct mlx4_ib_qp *mqp = to_mqp(qp); |
1632 | 1632 | ||
@@ -4244,7 +4244,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr, | |||
4244 | return err; | 4244 | return err; |
4245 | } | 4245 | } |
4246 | 4246 | ||
4247 | int mlx4_ib_destroy_wq(struct ib_wq *ibwq) | 4247 | int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata) |
4248 | { | 4248 | { |
4249 | struct mlx4_ib_dev *dev = to_mdev(ibwq->device); | 4249 | struct mlx4_ib_dev *dev = to_mdev(ibwq->device); |
4250 | struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); | 4250 | struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 381cf899bcef..b51f632f3f7d 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -272,7 +272,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | |||
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | 274 | ||
275 | int mlx4_ib_destroy_srq(struct ib_srq *srq) | 275 | int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) |
276 | { | 276 | { |
277 | struct mlx4_ib_dev *dev = to_mdev(srq->device); | 277 | struct mlx4_ib_dev *dev = to_mdev(srq->device); |
278 | struct mlx4_ib_srq *msrq = to_msrq(srq); | 278 | struct mlx4_ib_srq *msrq = to_msrq(srq); |
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index 420ae0897333..2e377f9699f1 100644 --- a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c | |||
@@ -131,7 +131,7 @@ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags) | 134 | int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
135 | { | 135 | { |
136 | kfree(to_mah(ah)); | 136 | kfree(to_mah(ah)); |
137 | return 0; | 137 | return 0; |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 18704e503508..5d238a8ee132 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -996,8 +996,7 @@ err_create: | |||
996 | return ERR_PTR(err); | 996 | return ERR_PTR(err); |
997 | } | 997 | } |
998 | 998 | ||
999 | 999 | int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) | |
1000 | int mlx5_ib_destroy_cq(struct ib_cq *cq) | ||
1001 | { | 1000 | { |
1002 | struct mlx5_ib_dev *dev = to_mdev(cq->device); | 1001 | struct mlx5_ib_dev *dev = to_mdev(cq->device); |
1003 | struct mlx5_ib_cq *mcq = to_mcq(cq); | 1002 | struct mlx5_ib_cq *mcq = to_mcq(cq); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 531ff20b32ad..468544819c79 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -2314,7 +2314,7 @@ err_free: | |||
2314 | return ERR_PTR(err); | 2314 | return ERR_PTR(err); |
2315 | } | 2315 | } |
2316 | 2316 | ||
2317 | int mlx5_ib_dealloc_dm(struct ib_dm *ibdm) | 2317 | int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) |
2318 | { | 2318 | { |
2319 | struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic; | 2319 | struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic; |
2320 | struct mlx5_ib_dm *dm = to_mdm(ibdm); | 2320 | struct mlx5_ib_dm *dm = to_mdm(ibdm); |
@@ -2370,7 +2370,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
2370 | return 0; | 2370 | return 0; |
2371 | } | 2371 | } |
2372 | 2372 | ||
2373 | static void mlx5_ib_dealloc_pd(struct ib_pd *pd) | 2373 | static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
2374 | { | 2374 | { |
2375 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); | 2375 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); |
2376 | struct mlx5_ib_pd *mpd = to_mpd(pd); | 2376 | struct mlx5_ib_pd *mpd = to_mpd(pd); |
@@ -4590,7 +4590,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev) | |||
4590 | mlx5_ib_warn(dev, "mr cache cleanup failed\n"); | 4590 | mlx5_ib_warn(dev, "mr cache cleanup failed\n"); |
4591 | 4591 | ||
4592 | if (dev->umrc.qp) | 4592 | if (dev->umrc.qp) |
4593 | mlx5_ib_destroy_qp(dev->umrc.qp); | 4593 | mlx5_ib_destroy_qp(dev->umrc.qp, NULL); |
4594 | if (dev->umrc.cq) | 4594 | if (dev->umrc.cq) |
4595 | ib_free_cq(dev->umrc.cq); | 4595 | ib_free_cq(dev->umrc.cq); |
4596 | if (dev->umrc.pd) | 4596 | if (dev->umrc.pd) |
@@ -4695,7 +4695,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev) | |||
4695 | return 0; | 4695 | return 0; |
4696 | 4696 | ||
4697 | error_4: | 4697 | error_4: |
4698 | mlx5_ib_destroy_qp(qp); | 4698 | mlx5_ib_destroy_qp(qp, NULL); |
4699 | dev->umrc.qp = NULL; | 4699 | dev->umrc.qp = NULL; |
4700 | 4700 | ||
4701 | error_3: | 4701 | error_3: |
@@ -4837,15 +4837,15 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) | |||
4837 | return 0; | 4837 | return 0; |
4838 | 4838 | ||
4839 | error5: | 4839 | error5: |
4840 | mlx5_ib_destroy_srq(devr->s0); | 4840 | mlx5_ib_destroy_srq(devr->s0, NULL); |
4841 | error4: | 4841 | error4: |
4842 | mlx5_ib_dealloc_xrcd(devr->x1); | 4842 | mlx5_ib_dealloc_xrcd(devr->x1, NULL); |
4843 | error3: | 4843 | error3: |
4844 | mlx5_ib_dealloc_xrcd(devr->x0); | 4844 | mlx5_ib_dealloc_xrcd(devr->x0, NULL); |
4845 | error2: | 4845 | error2: |
4846 | mlx5_ib_destroy_cq(devr->c0); | 4846 | mlx5_ib_destroy_cq(devr->c0, NULL); |
4847 | error1: | 4847 | error1: |
4848 | mlx5_ib_dealloc_pd(devr->p0); | 4848 | mlx5_ib_dealloc_pd(devr->p0, NULL); |
4849 | error0: | 4849 | error0: |
4850 | kfree(devr->p0); | 4850 | kfree(devr->p0); |
4851 | return ret; | 4851 | return ret; |
@@ -4857,12 +4857,12 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr) | |||
4857 | container_of(devr, struct mlx5_ib_dev, devr); | 4857 | container_of(devr, struct mlx5_ib_dev, devr); |
4858 | int port; | 4858 | int port; |
4859 | 4859 | ||
4860 | mlx5_ib_destroy_srq(devr->s1); | 4860 | mlx5_ib_destroy_srq(devr->s1, NULL); |
4861 | mlx5_ib_destroy_srq(devr->s0); | 4861 | mlx5_ib_destroy_srq(devr->s0, NULL); |
4862 | mlx5_ib_dealloc_xrcd(devr->x0); | 4862 | mlx5_ib_dealloc_xrcd(devr->x0, NULL); |
4863 | mlx5_ib_dealloc_xrcd(devr->x1); | 4863 | mlx5_ib_dealloc_xrcd(devr->x1, NULL); |
4864 | mlx5_ib_destroy_cq(devr->c0); | 4864 | mlx5_ib_destroy_cq(devr->c0, NULL); |
4865 | mlx5_ib_dealloc_pd(devr->p0); | 4865 | mlx5_ib_dealloc_pd(devr->p0, NULL); |
4866 | kfree(devr->p0); | 4866 | kfree(devr->p0); |
4867 | 4867 | ||
4868 | /* Make sure no change P_Key work items are still executing */ | 4868 | /* Make sure no change P_Key work items are still executing */ |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 4a617d78eae1..e45f59b0cc52 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -1049,14 +1049,14 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); | |||
1049 | struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, | 1049 | struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
1050 | u32 flags, struct ib_udata *udata); | 1050 | u32 flags, struct ib_udata *udata); |
1051 | int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); | 1051 | int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
1052 | int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags); | 1052 | int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
1053 | struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | 1053 | struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, |
1054 | struct ib_srq_init_attr *init_attr, | 1054 | struct ib_srq_init_attr *init_attr, |
1055 | struct ib_udata *udata); | 1055 | struct ib_udata *udata); |
1056 | int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 1056 | int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
1057 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | 1057 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
1058 | int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); | 1058 | int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); |
1059 | int mlx5_ib_destroy_srq(struct ib_srq *srq); | 1059 | int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); |
1060 | int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, | 1060 | int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, |
1061 | const struct ib_recv_wr **bad_wr); | 1061 | const struct ib_recv_wr **bad_wr); |
1062 | int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); | 1062 | int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); |
@@ -1068,7 +1068,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1068 | int attr_mask, struct ib_udata *udata); | 1068 | int attr_mask, struct ib_udata *udata); |
1069 | int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, | 1069 | int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, |
1070 | struct ib_qp_init_attr *qp_init_attr); | 1070 | struct ib_qp_init_attr *qp_init_attr); |
1071 | int mlx5_ib_destroy_qp(struct ib_qp *qp); | 1071 | int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); |
1072 | void mlx5_ib_drain_sq(struct ib_qp *qp); | 1072 | void mlx5_ib_drain_sq(struct ib_qp *qp); |
1073 | void mlx5_ib_drain_rq(struct ib_qp *qp); | 1073 | void mlx5_ib_drain_rq(struct ib_qp *qp); |
1074 | int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, | 1074 | int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
@@ -1085,7 +1085,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
1085 | const struct ib_cq_init_attr *attr, | 1085 | const struct ib_cq_init_attr *attr, |
1086 | struct ib_ucontext *context, | 1086 | struct ib_ucontext *context, |
1087 | struct ib_udata *udata); | 1087 | struct ib_udata *udata); |
1088 | int mlx5_ib_destroy_cq(struct ib_cq *cq); | 1088 | int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); |
1089 | int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 1089 | int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
1090 | int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); | 1090 | int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); |
1091 | int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); | 1091 | int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
@@ -1112,10 +1112,9 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr); | |||
1112 | int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | 1112 | int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, |
1113 | u64 length, u64 virt_addr, int access_flags, | 1113 | u64 length, u64 virt_addr, int access_flags, |
1114 | struct ib_pd *pd, struct ib_udata *udata); | 1114 | struct ib_pd *pd, struct ib_udata *udata); |
1115 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr); | 1115 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); |
1116 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, | 1116 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
1117 | enum ib_mr_type mr_type, | 1117 | u32 max_num_sg, struct ib_udata *udata); |
1118 | u32 max_num_sg); | ||
1119 | int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | 1118 | int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
1120 | unsigned int *sg_offset); | 1119 | unsigned int *sg_offset); |
1121 | int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | 1120 | int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
@@ -1126,7 +1125,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |||
1126 | struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, | 1125 | struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, |
1127 | struct ib_ucontext *context, | 1126 | struct ib_ucontext *context, |
1128 | struct ib_udata *udata); | 1127 | struct ib_udata *udata); |
1129 | int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); | 1128 | int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); |
1130 | int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); | 1129 | int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); |
1131 | int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); | 1130 | int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); |
1132 | int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, | 1131 | int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, |
@@ -1170,7 +1169,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, | |||
1170 | struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, | 1169 | struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, |
1171 | struct ib_wq_init_attr *init_attr, | 1170 | struct ib_wq_init_attr *init_attr, |
1172 | struct ib_udata *udata); | 1171 | struct ib_udata *udata); |
1173 | int mlx5_ib_destroy_wq(struct ib_wq *wq); | 1172 | int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); |
1174 | int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, | 1173 | int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, |
1175 | u32 wq_attr_mask, struct ib_udata *udata); | 1174 | u32 wq_attr_mask, struct ib_udata *udata); |
1176 | struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, | 1175 | struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, |
@@ -1182,7 +1181,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev, | |||
1182 | struct ib_ucontext *context, | 1181 | struct ib_ucontext *context, |
1183 | struct ib_dm_alloc_attr *attr, | 1182 | struct ib_dm_alloc_attr *attr, |
1184 | struct uverbs_attr_bundle *attrs); | 1183 | struct uverbs_attr_bundle *attrs); |
1185 | int mlx5_ib_dealloc_dm(struct ib_dm *ibdm); | 1184 | int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs); |
1186 | struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, | 1185 | struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, |
1187 | struct ib_dm_mr_attr *attr, | 1186 | struct ib_dm_mr_attr *attr, |
1188 | struct uverbs_attr_bundle *attrs); | 1187 | struct uverbs_attr_bundle *attrs); |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index c85f00255884..7de3683aebbe 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -1623,15 +1623,14 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1623 | kfree(mr); | 1623 | kfree(mr); |
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | 1626 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
1627 | { | 1627 | { |
1628 | dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr)); | 1628 | dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr)); |
1629 | return 0; | 1629 | return 0; |
1630 | } | 1630 | } |
1631 | 1631 | ||
1632 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, | 1632 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
1633 | enum ib_mr_type mr_type, | 1633 | u32 max_num_sg, struct ib_udata *udata) |
1634 | u32 max_num_sg) | ||
1635 | { | 1634 | { |
1636 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 1635 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
1637 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); | 1636 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index f864e454de8f..cd62c909b7eb 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -2732,7 +2732,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) | |||
2732 | return 0; | 2732 | return 0; |
2733 | } | 2733 | } |
2734 | 2734 | ||
2735 | int mlx5_ib_destroy_qp(struct ib_qp *qp) | 2735 | int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
2736 | { | 2736 | { |
2737 | struct mlx5_ib_dev *dev = to_mdev(qp->device); | 2737 | struct mlx5_ib_dev *dev = to_mdev(qp->device); |
2738 | struct mlx5_ib_qp *mqp = to_mqp(qp); | 2738 | struct mlx5_ib_qp *mqp = to_mqp(qp); |
@@ -5647,7 +5647,7 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, | |||
5647 | return &xrcd->ibxrcd; | 5647 | return &xrcd->ibxrcd; |
5648 | } | 5648 | } |
5649 | 5649 | ||
5650 | int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) | 5650 | int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) |
5651 | { | 5651 | { |
5652 | struct mlx5_ib_dev *dev = to_mdev(xrcd->device); | 5652 | struct mlx5_ib_dev *dev = to_mdev(xrcd->device); |
5653 | u32 xrcdn = to_mxrcd(xrcd)->xrcdn; | 5653 | u32 xrcdn = to_mxrcd(xrcd)->xrcdn; |
@@ -5965,7 +5965,7 @@ err: | |||
5965 | return ERR_PTR(err); | 5965 | return ERR_PTR(err); |
5966 | } | 5966 | } |
5967 | 5967 | ||
5968 | int mlx5_ib_destroy_wq(struct ib_wq *wq) | 5968 | int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata) |
5969 | { | 5969 | { |
5970 | struct mlx5_ib_dev *dev = to_mdev(wq->device); | 5970 | struct mlx5_ib_dev *dev = to_mdev(wq->device); |
5971 | struct mlx5_ib_rwq *rwq = to_mrwq(wq); | 5971 | struct mlx5_ib_rwq *rwq = to_mrwq(wq); |
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 1ec1beb1296b..bc1ca6bcea43 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -387,7 +387,7 @@ out_box: | |||
387 | return ret; | 387 | return ret; |
388 | } | 388 | } |
389 | 389 | ||
390 | int mlx5_ib_destroy_srq(struct ib_srq *srq) | 390 | int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) |
391 | { | 391 | { |
392 | struct mlx5_ib_dev *dev = to_mdev(srq->device); | 392 | struct mlx5_ib_dev *dev = to_mdev(srq->device); |
393 | struct mlx5_ib_srq *msrq = to_msrq(srq); | 393 | struct mlx5_ib_srq *msrq = to_msrq(srq); |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 35c3119726bb..872f0ad556a7 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -384,7 +384,7 @@ static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
384 | return 0; | 384 | return 0; |
385 | } | 385 | } |
386 | 386 | ||
387 | static void mthca_dealloc_pd(struct ib_pd *pd) | 387 | static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
388 | { | 388 | { |
389 | mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); | 389 | mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); |
390 | } | 390 | } |
@@ -411,7 +411,7 @@ static struct ib_ah *mthca_ah_create(struct ib_pd *pd, | |||
411 | return &ah->ibah; | 411 | return &ah->ibah; |
412 | } | 412 | } |
413 | 413 | ||
414 | static int mthca_ah_destroy(struct ib_ah *ah, u32 flags) | 414 | static int mthca_ah_destroy(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
415 | { | 415 | { |
416 | mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); | 416 | mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); |
417 | kfree(ah); | 417 | kfree(ah); |
@@ -477,7 +477,7 @@ err_free: | |||
477 | return ERR_PTR(err); | 477 | return ERR_PTR(err); |
478 | } | 478 | } |
479 | 479 | ||
480 | static int mthca_destroy_srq(struct ib_srq *srq) | 480 | static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) |
481 | { | 481 | { |
482 | struct mthca_ucontext *context; | 482 | struct mthca_ucontext *context; |
483 | 483 | ||
@@ -607,7 +607,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
607 | return &qp->ibqp; | 607 | return &qp->ibqp; |
608 | } | 608 | } |
609 | 609 | ||
610 | static int mthca_destroy_qp(struct ib_qp *qp) | 610 | static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
611 | { | 611 | { |
612 | if (qp->uobject) { | 612 | if (qp->uobject) { |
613 | mthca_unmap_user_db(to_mdev(qp->device), | 613 | mthca_unmap_user_db(to_mdev(qp->device), |
@@ -827,7 +827,7 @@ out: | |||
827 | return ret; | 827 | return ret; |
828 | } | 828 | } |
829 | 829 | ||
830 | static int mthca_destroy_cq(struct ib_cq *cq) | 830 | static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) |
831 | { | 831 | { |
832 | if (cq->uobject) { | 832 | if (cq->uobject) { |
833 | mthca_unmap_user_db(to_mdev(cq->device), | 833 | mthca_unmap_user_db(to_mdev(cq->device), |
@@ -974,7 +974,7 @@ err: | |||
974 | return ERR_PTR(err); | 974 | return ERR_PTR(err); |
975 | } | 975 | } |
976 | 976 | ||
977 | static int mthca_dereg_mr(struct ib_mr *mr) | 977 | static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata) |
978 | { | 978 | { |
979 | struct mthca_mr *mmr = to_mmr(mr); | 979 | struct mthca_mr *mmr = to_mmr(mr); |
980 | 980 | ||
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 032883180f65..79a43531c66d 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -3033,7 +3033,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) | |||
3033 | /* Need to free the Last Streaming Mode Message */ | 3033 | /* Need to free the Last Streaming Mode Message */ |
3034 | if (nesqp->ietf_frame) { | 3034 | if (nesqp->ietf_frame) { |
3035 | if (nesqp->lsmm_mr) | 3035 | if (nesqp->lsmm_mr) |
3036 | nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr); | 3036 | nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr, |
3037 | NULL); | ||
3037 | pci_free_consistent(nesdev->pcidev, | 3038 | pci_free_consistent(nesdev->pcidev, |
3038 | nesqp->private_data_len + nesqp->ietf_frame_size, | 3039 | nesqp->private_data_len + nesqp->ietf_frame_size, |
3039 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); | 3040 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 526092d435df..4b7855c7dacf 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -52,7 +52,7 @@ atomic_t qps_created; | |||
52 | atomic_t sw_qps_destroyed; | 52 | atomic_t sw_qps_destroyed; |
53 | 53 | ||
54 | static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); | 54 | static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); |
55 | static int nes_dereg_mr(struct ib_mr *ib_mr); | 55 | static int nes_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); |
56 | 56 | ||
57 | /** | 57 | /** |
58 | * nes_alloc_mw | 58 | * nes_alloc_mw |
@@ -306,9 +306,8 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, | |||
306 | /* | 306 | /* |
307 | * nes_alloc_mr | 307 | * nes_alloc_mr |
308 | */ | 308 | */ |
309 | static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd, | 309 | static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, |
310 | enum ib_mr_type mr_type, | 310 | u32 max_num_sg, struct ib_udata *udata) |
311 | u32 max_num_sg) | ||
312 | { | 311 | { |
313 | struct nes_pd *nespd = to_nespd(ibpd); | 312 | struct nes_pd *nespd = to_nespd(ibpd); |
314 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); | 313 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); |
@@ -386,7 +385,7 @@ static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd, | |||
386 | return ibmr; | 385 | return ibmr; |
387 | 386 | ||
388 | err: | 387 | err: |
389 | nes_dereg_mr(ibmr); | 388 | nes_dereg_mr(ibmr, udata); |
390 | 389 | ||
391 | return ERR_PTR(-ENOMEM); | 390 | return ERR_PTR(-ENOMEM); |
392 | } | 391 | } |
@@ -700,7 +699,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, | |||
700 | /** | 699 | /** |
701 | * nes_dealloc_pd | 700 | * nes_dealloc_pd |
702 | */ | 701 | */ |
703 | static void nes_dealloc_pd(struct ib_pd *ibpd) | 702 | static void nes_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
704 | { | 703 | { |
705 | struct nes_ucontext *nesucontext; | 704 | struct nes_ucontext *nesucontext; |
706 | struct nes_pd *nespd = to_nespd(ibpd); | 705 | struct nes_pd *nespd = to_nespd(ibpd); |
@@ -1298,7 +1297,7 @@ static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq) | |||
1298 | /** | 1297 | /** |
1299 | * nes_destroy_qp | 1298 | * nes_destroy_qp |
1300 | */ | 1299 | */ |
1301 | static int nes_destroy_qp(struct ib_qp *ibqp) | 1300 | static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
1302 | { | 1301 | { |
1303 | struct nes_qp *nesqp = to_nesqp(ibqp); | 1302 | struct nes_qp *nesqp = to_nesqp(ibqp); |
1304 | struct nes_ucontext *nes_ucontext; | 1303 | struct nes_ucontext *nes_ucontext; |
@@ -1626,7 +1625,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, | |||
1626 | /** | 1625 | /** |
1627 | * nes_destroy_cq | 1626 | * nes_destroy_cq |
1628 | */ | 1627 | */ |
1629 | static int nes_destroy_cq(struct ib_cq *ib_cq) | 1628 | static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
1630 | { | 1629 | { |
1631 | struct nes_cq *nescq; | 1630 | struct nes_cq *nescq; |
1632 | struct nes_device *nesdev; | 1631 | struct nes_device *nesdev; |
@@ -2377,7 +2376,7 @@ reg_user_mr_err: | |||
2377 | /** | 2376 | /** |
2378 | * nes_dereg_mr | 2377 | * nes_dereg_mr |
2379 | */ | 2378 | */ |
2380 | static int nes_dereg_mr(struct ib_mr *ib_mr) | 2379 | static int nes_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
2381 | { | 2380 | { |
2382 | struct nes_mr *nesmr = to_nesmr(ib_mr); | 2381 | struct nes_mr *nesmr = to_nesmr(ib_mr); |
2383 | struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device); | 2382 | struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index a7295322efbc..c0419133edfd 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
@@ -219,7 +219,7 @@ av_err: | |||
219 | return ERR_PTR(status); | 219 | return ERR_PTR(status); |
220 | } | 220 | } |
221 | 221 | ||
222 | int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags) | 222 | int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata) |
223 | { | 223 | { |
224 | struct ocrdma_ah *ah = get_ocrdma_ah(ibah); | 224 | struct ocrdma_ah *ah = get_ocrdma_ah(ibah); |
225 | struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device); | 225 | struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index eb996e14b520..9b84034d8164 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h | |||
@@ -53,7 +53,7 @@ enum { | |||
53 | 53 | ||
54 | struct ib_ah *ocrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, | 54 | struct ib_ah *ocrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
55 | u32 flags, struct ib_udata *udata); | 55 | u32 flags, struct ib_udata *udata); |
56 | int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags); | 56 | int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
57 | int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); | 57 | int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
58 | 58 | ||
59 | int ocrdma_process_mad(struct ib_device *, | 59 | int ocrdma_process_mad(struct ib_device *, |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index b4e1777c2c97..b8f891660516 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -680,7 +680,7 @@ exit: | |||
680 | return status; | 680 | return status; |
681 | } | 681 | } |
682 | 682 | ||
683 | void ocrdma_dealloc_pd(struct ib_pd *ibpd) | 683 | void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
684 | { | 684 | { |
685 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | 685 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); |
686 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | 686 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); |
@@ -922,7 +922,7 @@ umem_err: | |||
922 | return ERR_PTR(status); | 922 | return ERR_PTR(status); |
923 | } | 923 | } |
924 | 924 | ||
925 | int ocrdma_dereg_mr(struct ib_mr *ib_mr) | 925 | int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
926 | { | 926 | { |
927 | struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); | 927 | struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); |
928 | struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); | 928 | struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); |
@@ -1076,7 +1076,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq) | |||
1076 | spin_unlock_irqrestore(&cq->cq_lock, flags); | 1076 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | int ocrdma_destroy_cq(struct ib_cq *ibcq) | 1079 | int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) |
1080 | { | 1080 | { |
1081 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | 1081 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); |
1082 | struct ocrdma_eq *eq = NULL; | 1082 | struct ocrdma_eq *eq = NULL; |
@@ -1697,7 +1697,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp) | |||
1697 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); | 1697 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); |
1698 | } | 1698 | } |
1699 | 1699 | ||
1700 | int ocrdma_destroy_qp(struct ib_qp *ibqp) | 1700 | int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
1701 | { | 1701 | { |
1702 | struct ocrdma_pd *pd; | 1702 | struct ocrdma_pd *pd; |
1703 | struct ocrdma_qp *qp; | 1703 | struct ocrdma_qp *qp; |
@@ -1885,7 +1885,7 @@ int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | |||
1885 | return status; | 1885 | return status; |
1886 | } | 1886 | } |
1887 | 1887 | ||
1888 | int ocrdma_destroy_srq(struct ib_srq *ibsrq) | 1888 | int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) |
1889 | { | 1889 | { |
1890 | int status; | 1890 | int status; |
1891 | struct ocrdma_srq *srq; | 1891 | struct ocrdma_srq *srq; |
@@ -2931,9 +2931,8 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) | |||
2931 | return 0; | 2931 | return 0; |
2932 | } | 2932 | } |
2933 | 2933 | ||
2934 | struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, | 2934 | struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, |
2935 | enum ib_mr_type mr_type, | 2935 | u32 max_num_sg, struct ib_udata *udata) |
2936 | u32 max_num_sg) | ||
2937 | { | 2936 | { |
2938 | int status; | 2937 | int status; |
2939 | struct ocrdma_mr *mr; | 2938 | struct ocrdma_mr *mr; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index 4c04ab40798e..3636cbcbcaa4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | |||
@@ -71,14 +71,14 @@ int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); | |||
71 | 71 | ||
72 | int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, | 72 | int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, |
73 | struct ib_udata *udata); | 73 | struct ib_udata *udata); |
74 | void ocrdma_dealloc_pd(struct ib_pd *pd); | 74 | void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); |
75 | 75 | ||
76 | struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, | 76 | struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, |
77 | const struct ib_cq_init_attr *attr, | 77 | const struct ib_cq_init_attr *attr, |
78 | struct ib_ucontext *ib_ctx, | 78 | struct ib_ucontext *ib_ctx, |
79 | struct ib_udata *udata); | 79 | struct ib_udata *udata); |
80 | int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); | 80 | int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); |
81 | int ocrdma_destroy_cq(struct ib_cq *); | 81 | int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); |
82 | 82 | ||
83 | struct ib_qp *ocrdma_create_qp(struct ib_pd *, | 83 | struct ib_qp *ocrdma_create_qp(struct ib_pd *, |
84 | struct ib_qp_init_attr *attrs, | 84 | struct ib_qp_init_attr *attrs, |
@@ -90,7 +90,7 @@ int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, | |||
90 | int ocrdma_query_qp(struct ib_qp *, | 90 | int ocrdma_query_qp(struct ib_qp *, |
91 | struct ib_qp_attr *qp_attr, | 91 | struct ib_qp_attr *qp_attr, |
92 | int qp_attr_mask, struct ib_qp_init_attr *); | 92 | int qp_attr_mask, struct ib_qp_init_attr *); |
93 | int ocrdma_destroy_qp(struct ib_qp *); | 93 | int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); |
94 | void ocrdma_del_flush_qp(struct ocrdma_qp *qp); | 94 | void ocrdma_del_flush_qp(struct ocrdma_qp *qp); |
95 | 95 | ||
96 | struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *, | 96 | struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *, |
@@ -98,17 +98,16 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *, | |||
98 | int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *, | 98 | int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *, |
99 | enum ib_srq_attr_mask, struct ib_udata *); | 99 | enum ib_srq_attr_mask, struct ib_udata *); |
100 | int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *); | 100 | int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *); |
101 | int ocrdma_destroy_srq(struct ib_srq *); | 101 | int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); |
102 | int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *, | 102 | int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *, |
103 | const struct ib_recv_wr **bad_recv_wr); | 103 | const struct ib_recv_wr **bad_recv_wr); |
104 | 104 | ||
105 | int ocrdma_dereg_mr(struct ib_mr *); | 105 | int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); |
106 | struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc); | 106 | struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc); |
107 | struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, | 107 | struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, |
108 | u64 virt, int acc, struct ib_udata *); | 108 | u64 virt, int acc, struct ib_udata *); |
109 | struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, | 109 | struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
110 | enum ib_mr_type mr_type, | 110 | u32 max_num_sg, struct ib_udata *udata); |
111 | u32 max_num_sg); | ||
112 | int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | 111 | int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
113 | unsigned int *sg_offset); | 112 | unsigned int *sg_offset); |
114 | 113 | ||
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index e9fc15392dda..42755e7a10a8 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -478,7 +478,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
478 | return 0; | 478 | return 0; |
479 | } | 479 | } |
480 | 480 | ||
481 | void qedr_dealloc_pd(struct ib_pd *ibpd) | 481 | void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
482 | { | 482 | { |
483 | struct qedr_dev *dev = get_qedr_dev(ibpd->device); | 483 | struct qedr_dev *dev = get_qedr_dev(ibpd->device); |
484 | struct qedr_pd *pd = get_qedr_pd(ibpd); | 484 | struct qedr_pd *pd = get_qedr_pd(ibpd); |
@@ -962,7 +962,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata) | |||
962 | #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10) | 962 | #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10) |
963 | #define QEDR_DESTROY_CQ_ITER_DURATION (10) | 963 | #define QEDR_DESTROY_CQ_ITER_DURATION (10) |
964 | 964 | ||
965 | int qedr_destroy_cq(struct ib_cq *ibcq) | 965 | int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) |
966 | { | 966 | { |
967 | struct qedr_dev *dev = get_qedr_dev(ibcq->device); | 967 | struct qedr_dev *dev = get_qedr_dev(ibcq->device); |
968 | struct qed_rdma_destroy_cq_out_params oparams; | 968 | struct qed_rdma_destroy_cq_out_params oparams; |
@@ -1485,7 +1485,7 @@ err0: | |||
1485 | return ERR_PTR(-EFAULT); | 1485 | return ERR_PTR(-EFAULT); |
1486 | } | 1486 | } |
1487 | 1487 | ||
1488 | int qedr_destroy_srq(struct ib_srq *ibsrq) | 1488 | int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) |
1489 | { | 1489 | { |
1490 | struct qed_rdma_destroy_srq_in_params in_params = {}; | 1490 | struct qed_rdma_destroy_srq_in_params in_params = {}; |
1491 | struct qedr_dev *dev = get_qedr_dev(ibsrq->device); | 1491 | struct qedr_dev *dev = get_qedr_dev(ibsrq->device); |
@@ -2488,7 +2488,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp) | |||
2488 | return 0; | 2488 | return 0; |
2489 | } | 2489 | } |
2490 | 2490 | ||
2491 | int qedr_destroy_qp(struct ib_qp *ibqp) | 2491 | int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
2492 | { | 2492 | { |
2493 | struct qedr_qp *qp = get_qedr_qp(ibqp); | 2493 | struct qedr_qp *qp = get_qedr_qp(ibqp); |
2494 | struct qedr_dev *dev = qp->dev; | 2494 | struct qedr_dev *dev = qp->dev; |
@@ -2556,7 +2556,7 @@ struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, | |||
2556 | return &ah->ibah; | 2556 | return &ah->ibah; |
2557 | } | 2557 | } |
2558 | 2558 | ||
2559 | int qedr_destroy_ah(struct ib_ah *ibah, u32 flags) | 2559 | int qedr_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata) |
2560 | { | 2560 | { |
2561 | struct qedr_ah *ah = get_qedr_ah(ibah); | 2561 | struct qedr_ah *ah = get_qedr_ah(ibah); |
2562 | 2562 | ||
@@ -2711,7 +2711,7 @@ err0: | |||
2711 | return ERR_PTR(rc); | 2711 | return ERR_PTR(rc); |
2712 | } | 2712 | } |
2713 | 2713 | ||
2714 | int qedr_dereg_mr(struct ib_mr *ib_mr) | 2714 | int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
2715 | { | 2715 | { |
2716 | struct qedr_mr *mr = get_qedr_mr(ib_mr); | 2716 | struct qedr_mr *mr = get_qedr_mr(ib_mr); |
2717 | struct qedr_dev *dev = get_qedr_dev(ib_mr->device); | 2717 | struct qedr_dev *dev = get_qedr_dev(ib_mr->device); |
@@ -2803,8 +2803,8 @@ err0: | |||
2803 | return ERR_PTR(rc); | 2803 | return ERR_PTR(rc); |
2804 | } | 2804 | } |
2805 | 2805 | ||
2806 | struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, | 2806 | struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, |
2807 | enum ib_mr_type mr_type, u32 max_num_sg) | 2807 | u32 max_num_sg, struct ib_udata *udata) |
2808 | { | 2808 | { |
2809 | struct qedr_mr *mr; | 2809 | struct qedr_mr *mr; |
2810 | 2810 | ||
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index f0c05f4771ac..cd9659ac2aad 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h | |||
@@ -49,14 +49,14 @@ void qedr_dealloc_ucontext(struct ib_ucontext *uctx); | |||
49 | int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); | 49 | int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); |
50 | int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, | 50 | int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, |
51 | struct ib_udata *udata); | 51 | struct ib_udata *udata); |
52 | void qedr_dealloc_pd(struct ib_pd *pd); | 52 | void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); |
53 | 53 | ||
54 | struct ib_cq *qedr_create_cq(struct ib_device *ibdev, | 54 | struct ib_cq *qedr_create_cq(struct ib_device *ibdev, |
55 | const struct ib_cq_init_attr *attr, | 55 | const struct ib_cq_init_attr *attr, |
56 | struct ib_ucontext *ib_ctx, | 56 | struct ib_ucontext *ib_ctx, |
57 | struct ib_udata *udata); | 57 | struct ib_udata *udata); |
58 | int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); | 58 | int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); |
59 | int qedr_destroy_cq(struct ib_cq *); | 59 | int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); |
60 | int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); | 60 | int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); |
61 | struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, | 61 | struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, |
62 | struct ib_udata *); | 62 | struct ib_udata *); |
@@ -64,7 +64,7 @@ int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, | |||
64 | int attr_mask, struct ib_udata *udata); | 64 | int attr_mask, struct ib_udata *udata); |
65 | int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, | 65 | int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, |
66 | int qp_attr_mask, struct ib_qp_init_attr *); | 66 | int qp_attr_mask, struct ib_qp_init_attr *); |
67 | int qedr_destroy_qp(struct ib_qp *ibqp); | 67 | int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); |
68 | 68 | ||
69 | struct ib_srq *qedr_create_srq(struct ib_pd *ibpd, | 69 | struct ib_srq *qedr_create_srq(struct ib_pd *ibpd, |
70 | struct ib_srq_init_attr *attr, | 70 | struct ib_srq_init_attr *attr, |
@@ -72,14 +72,14 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd, | |||
72 | int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 72 | int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
73 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | 73 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
74 | int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | 74 | int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); |
75 | int qedr_destroy_srq(struct ib_srq *ibsrq); | 75 | int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); |
76 | int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, | 76 | int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, |
77 | const struct ib_recv_wr **bad_recv_wr); | 77 | const struct ib_recv_wr **bad_recv_wr); |
78 | struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, | 78 | struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, |
79 | u32 flags, struct ib_udata *udata); | 79 | u32 flags, struct ib_udata *udata); |
80 | int qedr_destroy_ah(struct ib_ah *ibah, u32 flags); | 80 | int qedr_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata); |
81 | 81 | ||
82 | int qedr_dereg_mr(struct ib_mr *); | 82 | int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); |
83 | struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc); | 83 | struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc); |
84 | 84 | ||
85 | struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length, | 85 | struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length, |
@@ -89,7 +89,7 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, | |||
89 | int sg_nents, unsigned int *sg_offset); | 89 | int sg_nents, unsigned int *sg_offset); |
90 | 90 | ||
91 | struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, | 91 | struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
92 | u32 max_num_sg); | 92 | u32 max_num_sg, struct ib_udata *udata); |
93 | int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); | 93 | int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); |
94 | int qedr_post_send(struct ib_qp *, const struct ib_send_wr *, | 94 | int qedr_post_send(struct ib_qp *, const struct ib_send_wr *, |
95 | const struct ib_send_wr **bad_wr); | 95 | const struct ib_send_wr **bad_wr); |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index bd4521b2cc5f..cdb6357337c0 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c | |||
@@ -461,7 +461,7 @@ int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
461 | return 0; | 461 | return 0; |
462 | } | 462 | } |
463 | 463 | ||
464 | void usnic_ib_dealloc_pd(struct ib_pd *pd) | 464 | void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
465 | { | 465 | { |
466 | usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); | 466 | usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); |
467 | } | 467 | } |
@@ -539,7 +539,7 @@ out_release_mutex: | |||
539 | return ERR_PTR(err); | 539 | return ERR_PTR(err); |
540 | } | 540 | } |
541 | 541 | ||
542 | int usnic_ib_destroy_qp(struct ib_qp *qp) | 542 | int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
543 | { | 543 | { |
544 | struct usnic_ib_qp_grp *qp_grp; | 544 | struct usnic_ib_qp_grp *qp_grp; |
545 | struct usnic_ib_vf *vf; | 545 | struct usnic_ib_vf *vf; |
@@ -606,7 +606,7 @@ struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, | |||
606 | return cq; | 606 | return cq; |
607 | } | 607 | } |
608 | 608 | ||
609 | int usnic_ib_destroy_cq(struct ib_cq *cq) | 609 | int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) |
610 | { | 610 | { |
611 | usnic_dbg("\n"); | 611 | usnic_dbg("\n"); |
612 | kfree(cq); | 612 | kfree(cq); |
@@ -642,7 +642,7 @@ err_free: | |||
642 | return ERR_PTR(err); | 642 | return ERR_PTR(err); |
643 | } | 643 | } |
644 | 644 | ||
645 | int usnic_ib_dereg_mr(struct ib_mr *ibmr) | 645 | int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
646 | { | 646 | { |
647 | struct usnic_ib_mr *mr = to_umr(ibmr); | 647 | struct usnic_ib_mr *mr = to_umr(ibmr); |
648 | 648 | ||
@@ -731,4 +731,3 @@ int usnic_ib_mmap(struct ib_ucontext *context, | |||
731 | return -EINVAL; | 731 | return -EINVAL; |
732 | } | 732 | } |
733 | 733 | ||
734 | /* End of ib callbacks section */ | ||
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index c40e89b6246f..349c8dc13a12 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h | |||
@@ -52,22 +52,22 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, | |||
52 | u16 *pkey); | 52 | u16 *pkey); |
53 | int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | 53 | int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, |
54 | struct ib_udata *udata); | 54 | struct ib_udata *udata); |
55 | void usnic_ib_dealloc_pd(struct ib_pd *pd); | 55 | void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); |
56 | struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, | 56 | struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, |
57 | struct ib_qp_init_attr *init_attr, | 57 | struct ib_qp_init_attr *init_attr, |
58 | struct ib_udata *udata); | 58 | struct ib_udata *udata); |
59 | int usnic_ib_destroy_qp(struct ib_qp *qp); | 59 | int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); |
60 | int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 60 | int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
61 | int attr_mask, struct ib_udata *udata); | 61 | int attr_mask, struct ib_udata *udata); |
62 | struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, | 62 | struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, |
63 | const struct ib_cq_init_attr *attr, | 63 | const struct ib_cq_init_attr *attr, |
64 | struct ib_ucontext *context, | 64 | struct ib_ucontext *context, |
65 | struct ib_udata *udata); | 65 | struct ib_udata *udata); |
66 | int usnic_ib_destroy_cq(struct ib_cq *cq); | 66 | int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); |
67 | struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, | 67 | struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, |
68 | u64 virt_addr, int access_flags, | 68 | u64 virt_addr, int access_flags, |
69 | struct ib_udata *udata); | 69 | struct ib_udata *udata); |
70 | int usnic_ib_dereg_mr(struct ib_mr *ibmr); | 70 | int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); |
71 | int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); | 71 | int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); |
72 | void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); | 72 | void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); |
73 | int usnic_ib_mmap(struct ib_ucontext *context, | 73 | int usnic_ib_mmap(struct ib_ucontext *context, |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 104c7db4704f..5ba278324134 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | |||
@@ -210,7 +210,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
210 | if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { | 210 | if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { |
211 | dev_warn(&dev->pdev->dev, | 211 | dev_warn(&dev->pdev->dev, |
212 | "failed to copy back udata\n"); | 212 | "failed to copy back udata\n"); |
213 | pvrdma_destroy_cq(&cq->ibcq); | 213 | pvrdma_destroy_cq(&cq->ibcq, udata); |
214 | return ERR_PTR(-EINVAL); | 214 | return ERR_PTR(-EINVAL); |
215 | } | 215 | } |
216 | } | 216 | } |
@@ -245,10 +245,11 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq) | |||
245 | /** | 245 | /** |
246 | * pvrdma_destroy_cq - destroy completion queue | 246 | * pvrdma_destroy_cq - destroy completion queue |
247 | * @cq: the completion queue to destroy. | 247 | * @cq: the completion queue to destroy. |
248 | * @udata: user data or null for kernel object | ||
248 | * | 249 | * |
249 | * @return: 0 for success. | 250 | * @return: 0 for success. |
250 | */ | 251 | */ |
251 | int pvrdma_destroy_cq(struct ib_cq *cq) | 252 | int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) |
252 | { | 253 | { |
253 | struct pvrdma_cq *vcq = to_vcq(cq); | 254 | struct pvrdma_cq *vcq = to_vcq(cq); |
254 | union pvrdma_cmd_req req; | 255 | union pvrdma_cmd_req req; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c index a85884e90e84..9e6c44ebaf54 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | |||
@@ -201,7 +201,7 @@ err_umem: | |||
201 | * @return: ib_mr pointer on success, otherwise returns an errno. | 201 | * @return: ib_mr pointer on success, otherwise returns an errno. |
202 | */ | 202 | */ |
203 | struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, | 203 | struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
204 | u32 max_num_sg) | 204 | u32 max_num_sg, struct ib_udata *udata) |
205 | { | 205 | { |
206 | struct pvrdma_dev *dev = to_vdev(pd->device); | 206 | struct pvrdma_dev *dev = to_vdev(pd->device); |
207 | struct pvrdma_user_mr *mr; | 207 | struct pvrdma_user_mr *mr; |
@@ -272,7 +272,7 @@ freemr: | |||
272 | * | 272 | * |
273 | * @return: 0 on success. | 273 | * @return: 0 on success. |
274 | */ | 274 | */ |
275 | int pvrdma_dereg_mr(struct ib_mr *ibmr) | 275 | int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
276 | { | 276 | { |
277 | struct pvrdma_user_mr *mr = to_vmr(ibmr); | 277 | struct pvrdma_user_mr *mr = to_vmr(ibmr); |
278 | struct pvrdma_dev *dev = to_vdev(ibmr->device); | 278 | struct pvrdma_dev *dev = to_vdev(ibmr->device); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 08f4257169bd..0eaaead5baec 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | |||
@@ -446,10 +446,11 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp) | |||
446 | /** | 446 | /** |
447 | * pvrdma_destroy_qp - destroy a queue pair | 447 | * pvrdma_destroy_qp - destroy a queue pair |
448 | * @qp: the queue pair to destroy | 448 | * @qp: the queue pair to destroy |
449 | * @udata: user data or null for kernel object | ||
449 | * | 450 | * |
450 | * @return: 0 on success. | 451 | * @return: 0 on success. |
451 | */ | 452 | */ |
452 | int pvrdma_destroy_qp(struct ib_qp *qp) | 453 | int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
453 | { | 454 | { |
454 | struct pvrdma_qp *vqp = to_vqp(qp); | 455 | struct pvrdma_qp *vqp = to_vqp(qp); |
455 | union pvrdma_cmd_req req; | 456 | union pvrdma_cmd_req req; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 951d9d68107a..21a95780e0ea 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | |||
@@ -204,7 +204,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | |||
204 | /* Copy udata back. */ | 204 | /* Copy udata back. */ |
205 | if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) { | 205 | if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) { |
206 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); | 206 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); |
207 | pvrdma_destroy_srq(&srq->ibsrq); | 207 | pvrdma_destroy_srq(&srq->ibsrq, udata); |
208 | return ERR_PTR(-EINVAL); | 208 | return ERR_PTR(-EINVAL); |
209 | } | 209 | } |
210 | 210 | ||
@@ -246,10 +246,11 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq) | |||
246 | /** | 246 | /** |
247 | * pvrdma_destroy_srq - destroy shared receive queue | 247 | * pvrdma_destroy_srq - destroy shared receive queue |
248 | * @srq: the shared receive queue to destroy | 248 | * @srq: the shared receive queue to destroy |
249 | * @udata: user data or null for kernel object | ||
249 | * | 250 | * |
250 | * @return: 0 for success. | 251 | * @return: 0 for success. |
251 | */ | 252 | */ |
252 | int pvrdma_destroy_srq(struct ib_srq *srq) | 253 | int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) |
253 | { | 254 | { |
254 | struct pvrdma_srq *vsrq = to_vsrq(srq); | 255 | struct pvrdma_srq *vsrq = to_vsrq(srq); |
255 | union pvrdma_cmd_req req; | 256 | union pvrdma_cmd_req req; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 8a32e1e435a9..19ff6004b477 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | |||
@@ -460,7 +460,7 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
460 | if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { | 460 | if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { |
461 | dev_warn(&dev->pdev->dev, | 461 | dev_warn(&dev->pdev->dev, |
462 | "failed to copy back protection domain\n"); | 462 | "failed to copy back protection domain\n"); |
463 | pvrdma_dealloc_pd(&pd->ibpd); | 463 | pvrdma_dealloc_pd(&pd->ibpd, udata); |
464 | return -EFAULT; | 464 | return -EFAULT; |
465 | } | 465 | } |
466 | } | 466 | } |
@@ -476,10 +476,11 @@ err: | |||
476 | /** | 476 | /** |
477 | * pvrdma_dealloc_pd - deallocate protection domain | 477 | * pvrdma_dealloc_pd - deallocate protection domain |
478 | * @pd: the protection domain to be released | 478 | * @pd: the protection domain to be released |
479 | * @udata: user data or null for kernel object | ||
479 | * | 480 | * |
480 | * @return: 0 on success, otherwise errno. | 481 | * @return: 0 on success, otherwise errno. |
481 | */ | 482 | */ |
482 | void pvrdma_dealloc_pd(struct ib_pd *pd) | 483 | void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
483 | { | 484 | { |
484 | struct pvrdma_dev *dev = to_vdev(pd->device); | 485 | struct pvrdma_dev *dev = to_vdev(pd->device); |
485 | union pvrdma_cmd_req req = {}; | 486 | union pvrdma_cmd_req req = {}; |
@@ -556,7 +557,7 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, | |||
556 | * | 557 | * |
557 | * @return: 0 on success. | 558 | * @return: 0 on success. |
558 | */ | 559 | */ |
559 | int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags) | 560 | int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
560 | { | 561 | { |
561 | struct pvrdma_dev *dev = to_vdev(ah->device); | 562 | struct pvrdma_dev *dev = to_vdev(ah->device); |
562 | 563 | ||
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 607aa131d67c..2c8ba5bf8d0f 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | |||
@@ -400,26 +400,26 @@ int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); | |||
400 | void pvrdma_dealloc_ucontext(struct ib_ucontext *context); | 400 | void pvrdma_dealloc_ucontext(struct ib_ucontext *context); |
401 | int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, | 401 | int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, |
402 | struct ib_udata *udata); | 402 | struct ib_udata *udata); |
403 | void pvrdma_dealloc_pd(struct ib_pd *ibpd); | 403 | void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); |
404 | struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); | 404 | struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); |
405 | struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | 405 | struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
406 | u64 virt_addr, int access_flags, | 406 | u64 virt_addr, int access_flags, |
407 | struct ib_udata *udata); | 407 | struct ib_udata *udata); |
408 | int pvrdma_dereg_mr(struct ib_mr *mr); | 408 | int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata); |
409 | struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, | 409 | struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
410 | u32 max_num_sg); | 410 | u32 max_num_sg, struct ib_udata *udata); |
411 | int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, | 411 | int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, |
412 | int sg_nents, unsigned int *sg_offset); | 412 | int sg_nents, unsigned int *sg_offset); |
413 | struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | 413 | struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, |
414 | const struct ib_cq_init_attr *attr, | 414 | const struct ib_cq_init_attr *attr, |
415 | struct ib_ucontext *context, | 415 | struct ib_ucontext *context, |
416 | struct ib_udata *udata); | 416 | struct ib_udata *udata); |
417 | int pvrdma_destroy_cq(struct ib_cq *cq); | 417 | int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); |
418 | int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 418 | int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
419 | int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); | 419 | int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); |
420 | struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, | 420 | struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
421 | u32 flags, struct ib_udata *udata); | 421 | u32 flags, struct ib_udata *udata); |
422 | int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags); | 422 | int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
423 | 423 | ||
424 | struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | 424 | struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, |
425 | struct ib_srq_init_attr *init_attr, | 425 | struct ib_srq_init_attr *init_attr, |
@@ -427,7 +427,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | |||
427 | int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 427 | int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
428 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | 428 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
429 | int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); | 429 | int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); |
430 | int pvrdma_destroy_srq(struct ib_srq *srq); | 430 | int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); |
431 | 431 | ||
432 | struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | 432 | struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, |
433 | struct ib_qp_init_attr *init_attr, | 433 | struct ib_qp_init_attr *init_attr, |
@@ -436,7 +436,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
436 | int attr_mask, struct ib_udata *udata); | 436 | int attr_mask, struct ib_udata *udata); |
437 | int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | 437 | int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, |
438 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); | 438 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); |
439 | int pvrdma_destroy_qp(struct ib_qp *qp); | 439 | int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); |
440 | int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, | 440 | int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
441 | const struct ib_send_wr **bad_wr); | 441 | const struct ib_send_wr **bad_wr); |
442 | int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, | 442 | int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, |
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c index fc10e4e26ca7..001a5c052580 100644 --- a/drivers/infiniband/sw/rdmavt/ah.c +++ b/drivers/infiniband/sw/rdmavt/ah.c | |||
@@ -138,10 +138,12 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd, | |||
138 | * rvt_destroy_ah - Destroy an address handle | 138 | * rvt_destroy_ah - Destroy an address handle |
139 | * @ibah: address handle | 139 | * @ibah: address handle |
140 | * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags) | 140 | * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags) |
141 | * @udata: user data or NULL for kernel object | ||
141 | * | 142 | * |
142 | * Return: 0 on success | 143 | * Return: 0 on success |
143 | */ | 144 | */ |
144 | int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags) | 145 | int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags, |
146 | struct ib_udata *udata) | ||
145 | { | 147 | { |
146 | struct rvt_dev_info *dev = ib_to_rvt(ibah->device); | 148 | struct rvt_dev_info *dev = ib_to_rvt(ibah->device); |
147 | struct rvt_ah *ah = ibah_to_rvtah(ibah); | 149 | struct rvt_ah *ah = ibah_to_rvtah(ibah); |
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h index 72431a618d5d..7b27b82d8a90 100644 --- a/drivers/infiniband/sw/rdmavt/ah.h +++ b/drivers/infiniband/sw/rdmavt/ah.h | |||
@@ -54,7 +54,8 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd, | |||
54 | struct rdma_ah_attr *ah_attr, | 54 | struct rdma_ah_attr *ah_attr, |
55 | u32 create_flags, | 55 | u32 create_flags, |
56 | struct ib_udata *udata); | 56 | struct ib_udata *udata); |
57 | int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags); | 57 | int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags, |
58 | struct ib_udata *udata); | ||
58 | int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); | 59 | int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
59 | int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); | 60 | int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
60 | 61 | ||
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 4f1544ad4aff..6f7ff2384506 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c | |||
@@ -299,12 +299,13 @@ done: | |||
299 | /** | 299 | /** |
300 | * rvt_destroy_cq - destroy a completion queue | 300 | * rvt_destroy_cq - destroy a completion queue |
301 | * @ibcq: the completion queue to destroy. | 301 | * @ibcq: the completion queue to destroy. |
302 | * @udata: user data or NULL for kernel object | ||
302 | * | 303 | * |
303 | * Called by ib_destroy_cq() in the generic verbs code. | 304 | * Called by ib_destroy_cq() in the generic verbs code. |
304 | * | 305 | * |
305 | * Return: always 0 | 306 | * Return: always 0 |
306 | */ | 307 | */ |
307 | int rvt_destroy_cq(struct ib_cq *ibcq) | 308 | int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) |
308 | { | 309 | { |
309 | struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); | 310 | struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); |
310 | struct rvt_dev_info *rdi = cq->rdi; | 311 | struct rvt_dev_info *rdi = cq->rdi; |
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h index 72184b1c176b..e42661ecdef8 100644 --- a/drivers/infiniband/sw/rdmavt/cq.h +++ b/drivers/infiniband/sw/rdmavt/cq.h | |||
@@ -55,7 +55,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, | |||
55 | const struct ib_cq_init_attr *attr, | 55 | const struct ib_cq_init_attr *attr, |
56 | struct ib_ucontext *context, | 56 | struct ib_ucontext *context, |
57 | struct ib_udata *udata); | 57 | struct ib_udata *udata); |
58 | int rvt_destroy_cq(struct ib_cq *ibcq); | 58 | int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); |
59 | int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); | 59 | int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); |
60 | int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); | 60 | int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); |
61 | int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); | 61 | int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); |
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index e8b03ae54914..2d1b88a34f8e 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c | |||
@@ -548,7 +548,7 @@ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey) | |||
548 | * | 548 | * |
549 | * Returns 0 on success. | 549 | * Returns 0 on success. |
550 | */ | 550 | */ |
551 | int rvt_dereg_mr(struct ib_mr *ibmr) | 551 | int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
552 | { | 552 | { |
553 | struct rvt_mr *mr = to_imr(ibmr); | 553 | struct rvt_mr *mr = to_imr(ibmr); |
554 | int ret; | 554 | int ret; |
@@ -575,9 +575,8 @@ out: | |||
575 | * | 575 | * |
576 | * Return: the memory region on success, otherwise return an errno. | 576 | * Return: the memory region on success, otherwise return an errno. |
577 | */ | 577 | */ |
578 | struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, | 578 | struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
579 | enum ib_mr_type mr_type, | 579 | u32 max_num_sg, struct ib_udata *udata) |
580 | u32 max_num_sg) | ||
581 | { | 580 | { |
582 | struct rvt_mr *mr; | 581 | struct rvt_mr *mr; |
583 | 582 | ||
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h index 132800ee0205..2c8d0752e8e3 100644 --- a/drivers/infiniband/sw/rdmavt/mr.h +++ b/drivers/infiniband/sw/rdmavt/mr.h | |||
@@ -78,10 +78,9 @@ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc); | |||
78 | struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | 78 | struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
79 | u64 virt_addr, int mr_access_flags, | 79 | u64 virt_addr, int mr_access_flags, |
80 | struct ib_udata *udata); | 80 | struct ib_udata *udata); |
81 | int rvt_dereg_mr(struct ib_mr *ibmr); | 81 | int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); |
82 | struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, | 82 | struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
83 | enum ib_mr_type mr_type, | 83 | u32 max_num_sg, struct ib_udata *udata); |
84 | u32 max_num_sg); | ||
85 | int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, | 84 | int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, |
86 | int sg_nents, unsigned int *sg_offset); | 85 | int sg_nents, unsigned int *sg_offset); |
87 | struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | 86 | struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags, |
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c index 6033054b22fa..e84341282374 100644 --- a/drivers/infiniband/sw/rdmavt/pd.c +++ b/drivers/infiniband/sw/rdmavt/pd.c | |||
@@ -93,10 +93,11 @@ bail: | |||
93 | /** | 93 | /** |
94 | * rvt_dealloc_pd - Free PD | 94 | * rvt_dealloc_pd - Free PD |
95 | * @ibpd: Free up PD | 95 | * @ibpd: Free up PD |
96 | * @udata: Valid user data or NULL for kernel object | ||
96 | * | 97 | * |
97 | * Return: always 0 | 98 | * Return: always 0 |
98 | */ | 99 | */ |
99 | void rvt_dealloc_pd(struct ib_pd *ibpd) | 100 | void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
100 | { | 101 | { |
101 | struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); | 102 | struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); |
102 | 103 | ||
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h index 7a887e4a45e7..d0368a625e03 100644 --- a/drivers/infiniband/sw/rdmavt/pd.h +++ b/drivers/infiniband/sw/rdmavt/pd.h | |||
@@ -52,6 +52,6 @@ | |||
52 | 52 | ||
53 | int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, | 53 | int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, |
54 | struct ib_udata *udata); | 54 | struct ib_udata *udata); |
55 | void rvt_dealloc_pd(struct ib_pd *ibpd); | 55 | void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); |
56 | 56 | ||
57 | #endif /* DEF_RDMAVTPD_H */ | 57 | #endif /* DEF_RDMAVTPD_H */ |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index a34b9a2a32b6..e8bba7e56c29 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -1617,7 +1617,7 @@ inval: | |||
1617 | * | 1617 | * |
1618 | * Return: 0 on success. | 1618 | * Return: 0 on success. |
1619 | */ | 1619 | */ |
1620 | int rvt_destroy_qp(struct ib_qp *ibqp) | 1620 | int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
1621 | { | 1621 | { |
1622 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); | 1622 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
1623 | struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); | 1623 | struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); |
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h index 6d883972e0b8..450b27ea1fa4 100644 --- a/drivers/infiniband/sw/rdmavt/qp.h +++ b/drivers/infiniband/sw/rdmavt/qp.h | |||
@@ -57,7 +57,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, | |||
57 | struct ib_udata *udata); | 57 | struct ib_udata *udata); |
58 | int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 58 | int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
59 | int attr_mask, struct ib_udata *udata); | 59 | int attr_mask, struct ib_udata *udata); |
60 | int rvt_destroy_qp(struct ib_qp *ibqp); | 60 | int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); |
61 | int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 61 | int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
62 | int attr_mask, struct ib_qp_init_attr *init_attr); | 62 | int attr_mask, struct ib_qp_init_attr *init_attr); |
63 | int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, | 63 | int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, |
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c index 895b3fabd0bf..3090b0935714 100644 --- a/drivers/infiniband/sw/rdmavt/srq.c +++ b/drivers/infiniband/sw/rdmavt/srq.c | |||
@@ -340,7 +340,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | |||
340 | * | 340 | * |
341 | * Return always 0 | 341 | * Return always 0 |
342 | */ | 342 | */ |
343 | int rvt_destroy_srq(struct ib_srq *ibsrq) | 343 | int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) |
344 | { | 344 | { |
345 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); | 345 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); |
346 | struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); | 346 | struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); |
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h index bf0eaaf56465..69cad2f65408 100644 --- a/drivers/infiniband/sw/rdmavt/srq.h +++ b/drivers/infiniband/sw/rdmavt/srq.h | |||
@@ -57,6 +57,6 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |||
57 | enum ib_srq_attr_mask attr_mask, | 57 | enum ib_srq_attr_mask attr_mask, |
58 | struct ib_udata *udata); | 58 | struct ib_udata *udata); |
59 | int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | 59 | int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); |
60 | int rvt_destroy_srq(struct ib_srq *ibsrq); | 60 | int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); |
61 | 61 | ||
62 | #endif /* DEF_RVTSRQ_H */ | 62 | #endif /* DEF_RVTSRQ_H */ |
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 6ecf28570ff0..e625731ae42d 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c | |||
@@ -185,7 +185,7 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, | |||
185 | return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem); | 185 | return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void rxe_dealloc_pd(struct ib_pd *ibpd) | 188 | static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
189 | { | 189 | { |
190 | struct rxe_pd *pd = to_rpd(ibpd); | 190 | struct rxe_pd *pd = to_rpd(ibpd); |
191 | 191 | ||
@@ -242,7 +242,7 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) | |||
242 | return 0; | 242 | return 0; |
243 | } | 243 | } |
244 | 244 | ||
245 | static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) | 245 | static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata) |
246 | { | 246 | { |
247 | struct rxe_ah *ah = to_rah(ibah); | 247 | struct rxe_ah *ah = to_rah(ibah); |
248 | 248 | ||
@@ -389,7 +389,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | |||
389 | return 0; | 389 | return 0; |
390 | } | 390 | } |
391 | 391 | ||
392 | static int rxe_destroy_srq(struct ib_srq *ibsrq) | 392 | static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) |
393 | { | 393 | { |
394 | struct rxe_srq *srq = to_rsrq(ibsrq); | 394 | struct rxe_srq *srq = to_rsrq(ibsrq); |
395 | 395 | ||
@@ -509,7 +509,7 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
509 | return 0; | 509 | return 0; |
510 | } | 510 | } |
511 | 511 | ||
512 | static int rxe_destroy_qp(struct ib_qp *ibqp) | 512 | static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
513 | { | 513 | { |
514 | struct rxe_qp *qp = to_rqp(ibqp); | 514 | struct rxe_qp *qp = to_rqp(ibqp); |
515 | 515 | ||
@@ -839,7 +839,7 @@ err1: | |||
839 | return ERR_PTR(err); | 839 | return ERR_PTR(err); |
840 | } | 840 | } |
841 | 841 | ||
842 | static int rxe_destroy_cq(struct ib_cq *ibcq) | 842 | static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) |
843 | { | 843 | { |
844 | struct rxe_cq *cq = to_rcq(ibcq); | 844 | struct rxe_cq *cq = to_rcq(ibcq); |
845 | 845 | ||
@@ -990,7 +990,7 @@ err2: | |||
990 | return ERR_PTR(err); | 990 | return ERR_PTR(err); |
991 | } | 991 | } |
992 | 992 | ||
993 | static int rxe_dereg_mr(struct ib_mr *ibmr) | 993 | static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
994 | { | 994 | { |
995 | struct rxe_mem *mr = to_rmr(ibmr); | 995 | struct rxe_mem *mr = to_rmr(ibmr); |
996 | 996 | ||
@@ -1001,9 +1001,8 @@ static int rxe_dereg_mr(struct ib_mr *ibmr) | |||
1001 | return 0; | 1001 | return 0; |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, | 1004 | static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, |
1005 | enum ib_mr_type mr_type, | 1005 | u32 max_num_sg, struct ib_udata *udata) |
1006 | u32 max_num_sg) | ||
1007 | { | 1006 | { |
1008 | struct rxe_dev *rxe = to_rdev(ibpd->device); | 1007 | struct rxe_dev *rxe = to_rdev(ibpd->device); |
1009 | struct rxe_pd *pd = to_rpd(ibpd); | 1008 | struct rxe_pd *pd = to_rpd(ibpd); |