diff options
author | Bryan Tan <bryantan@vmware.com> | 2017-11-06 14:48:53 -0500 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2017-11-13 16:18:33 -0500 |
commit | 8b10ba783c9d0c69d53e7d78ff7f2cd921f80729 (patch) | |
tree | d8030172091d36e8f17f79d9f6b14abc90ce8845 | |
parent | cb9fd89f91337aaca9c96d265930f22b31462e5e (diff) |
RDMA/vmw_pvrdma: Add shared receive queue support
Add the required functions needed to support SRQs. Currently, kernel
clients are not supported. SRQs will only be available in userspace.
Reviewed-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Reviewed-by: Nitish Bhat <bnitish@vmware.com>
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/Makefile | 2 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 25 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h | 54 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 59 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 55 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 319 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 3 | ||||
-rw-r--r-- | drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 18 | ||||
-rw-r--r-- | include/uapi/rdma/vmw_pvrdma-abi.h | 2 |
9 files changed, 523 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile index 0194ed19f542..2f52e0a044a0 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/Makefile +++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o | 1 | obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o |
2 | 2 | ||
3 | vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o | 3 | vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_srq.o pvrdma_verbs.o |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 984aa3484928..63bc2efc34eb 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | |||
@@ -162,6 +162,22 @@ struct pvrdma_ah { | |||
162 | struct pvrdma_av av; | 162 | struct pvrdma_av av; |
163 | }; | 163 | }; |
164 | 164 | ||
165 | struct pvrdma_srq { | ||
166 | struct ib_srq ibsrq; | ||
167 | int offset; | ||
168 | spinlock_t lock; /* SRQ lock. */ | ||
169 | int wqe_cnt; | ||
170 | int wqe_size; | ||
171 | int max_gs; | ||
172 | struct ib_umem *umem; | ||
173 | struct pvrdma_ring_state *ring; | ||
174 | struct pvrdma_page_dir pdir; | ||
175 | u32 srq_handle; | ||
176 | int npages; | ||
177 | refcount_t refcnt; | ||
178 | wait_queue_head_t wait; | ||
179 | }; | ||
180 | |||
165 | struct pvrdma_qp { | 181 | struct pvrdma_qp { |
166 | struct ib_qp ibqp; | 182 | struct ib_qp ibqp; |
167 | u32 qp_handle; | 183 | u32 qp_handle; |
@@ -171,6 +187,7 @@ struct pvrdma_qp { | |||
171 | struct ib_umem *rumem; | 187 | struct ib_umem *rumem; |
172 | struct ib_umem *sumem; | 188 | struct ib_umem *sumem; |
173 | struct pvrdma_page_dir pdir; | 189 | struct pvrdma_page_dir pdir; |
190 | struct pvrdma_srq *srq; | ||
174 | int npages; | 191 | int npages; |
175 | int npages_send; | 192 | int npages_send; |
176 | int npages_recv; | 193 | int npages_recv; |
@@ -210,6 +227,8 @@ struct pvrdma_dev { | |||
210 | struct pvrdma_page_dir cq_pdir; | 227 | struct pvrdma_page_dir cq_pdir; |
211 | struct pvrdma_cq **cq_tbl; | 228 | struct pvrdma_cq **cq_tbl; |
212 | spinlock_t cq_tbl_lock; | 229 | spinlock_t cq_tbl_lock; |
230 | struct pvrdma_srq **srq_tbl; | ||
231 | spinlock_t srq_tbl_lock; | ||
213 | struct pvrdma_qp **qp_tbl; | 232 | struct pvrdma_qp **qp_tbl; |
214 | spinlock_t qp_tbl_lock; | 233 | spinlock_t qp_tbl_lock; |
215 | struct pvrdma_uar_table uar_table; | 234 | struct pvrdma_uar_table uar_table; |
@@ -221,6 +240,7 @@ struct pvrdma_dev { | |||
221 | bool ib_active; | 240 | bool ib_active; |
222 | atomic_t num_qps; | 241 | atomic_t num_qps; |
223 | atomic_t num_cqs; | 242 | atomic_t num_cqs; |
243 | atomic_t num_srqs; | ||
224 | atomic_t num_pds; | 244 | atomic_t num_pds; |
225 | atomic_t num_ahs; | 245 | atomic_t num_ahs; |
226 | 246 | ||
@@ -256,6 +276,11 @@ static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq) | |||
256 | return container_of(ibcq, struct pvrdma_cq, ibcq); | 276 | return container_of(ibcq, struct pvrdma_cq, ibcq); |
257 | } | 277 | } |
258 | 278 | ||
279 | static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq) | ||
280 | { | ||
281 | return container_of(ibsrq, struct pvrdma_srq, ibsrq); | ||
282 | } | ||
283 | |||
259 | static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr) | 284 | static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr) |
260 | { | 285 | { |
261 | return container_of(ibmr, struct pvrdma_user_mr, ibmr); | 286 | return container_of(ibmr, struct pvrdma_user_mr, ibmr); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h index df0a6b525021..6fd5a8f4e2f6 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h | |||
@@ -339,6 +339,10 @@ enum { | |||
339 | PVRDMA_CMD_DESTROY_UC, | 339 | PVRDMA_CMD_DESTROY_UC, |
340 | PVRDMA_CMD_CREATE_BIND, | 340 | PVRDMA_CMD_CREATE_BIND, |
341 | PVRDMA_CMD_DESTROY_BIND, | 341 | PVRDMA_CMD_DESTROY_BIND, |
342 | PVRDMA_CMD_CREATE_SRQ, | ||
343 | PVRDMA_CMD_MODIFY_SRQ, | ||
344 | PVRDMA_CMD_QUERY_SRQ, | ||
345 | PVRDMA_CMD_DESTROY_SRQ, | ||
342 | PVRDMA_CMD_MAX, | 346 | PVRDMA_CMD_MAX, |
343 | }; | 347 | }; |
344 | 348 | ||
@@ -361,6 +365,10 @@ enum { | |||
361 | PVRDMA_CMD_DESTROY_UC_RESP_NOOP, | 365 | PVRDMA_CMD_DESTROY_UC_RESP_NOOP, |
362 | PVRDMA_CMD_CREATE_BIND_RESP_NOOP, | 366 | PVRDMA_CMD_CREATE_BIND_RESP_NOOP, |
363 | PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, | 367 | PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, |
368 | PVRDMA_CMD_CREATE_SRQ_RESP, | ||
369 | PVRDMA_CMD_MODIFY_SRQ_RESP, | ||
370 | PVRDMA_CMD_QUERY_SRQ_RESP, | ||
371 | PVRDMA_CMD_DESTROY_SRQ_RESP, | ||
364 | PVRDMA_CMD_MAX_RESP, | 372 | PVRDMA_CMD_MAX_RESP, |
365 | }; | 373 | }; |
366 | 374 | ||
@@ -495,6 +503,46 @@ struct pvrdma_cmd_destroy_cq { | |||
495 | u8 reserved[4]; | 503 | u8 reserved[4]; |
496 | }; | 504 | }; |
497 | 505 | ||
506 | struct pvrdma_cmd_create_srq { | ||
507 | struct pvrdma_cmd_hdr hdr; | ||
508 | u64 pdir_dma; | ||
509 | u32 pd_handle; | ||
510 | u32 nchunks; | ||
511 | struct pvrdma_srq_attr attrs; | ||
512 | u8 srq_type; | ||
513 | u8 reserved[7]; | ||
514 | }; | ||
515 | |||
516 | struct pvrdma_cmd_create_srq_resp { | ||
517 | struct pvrdma_cmd_resp_hdr hdr; | ||
518 | u32 srqn; | ||
519 | u8 reserved[4]; | ||
520 | }; | ||
521 | |||
522 | struct pvrdma_cmd_modify_srq { | ||
523 | struct pvrdma_cmd_hdr hdr; | ||
524 | u32 srq_handle; | ||
525 | u32 attr_mask; | ||
526 | struct pvrdma_srq_attr attrs; | ||
527 | }; | ||
528 | |||
529 | struct pvrdma_cmd_query_srq { | ||
530 | struct pvrdma_cmd_hdr hdr; | ||
531 | u32 srq_handle; | ||
532 | u8 reserved[4]; | ||
533 | }; | ||
534 | |||
535 | struct pvrdma_cmd_query_srq_resp { | ||
536 | struct pvrdma_cmd_resp_hdr hdr; | ||
537 | struct pvrdma_srq_attr attrs; | ||
538 | }; | ||
539 | |||
540 | struct pvrdma_cmd_destroy_srq { | ||
541 | struct pvrdma_cmd_hdr hdr; | ||
542 | u32 srq_handle; | ||
543 | u8 reserved[4]; | ||
544 | }; | ||
545 | |||
498 | struct pvrdma_cmd_create_qp { | 546 | struct pvrdma_cmd_create_qp { |
499 | struct pvrdma_cmd_hdr hdr; | 547 | struct pvrdma_cmd_hdr hdr; |
500 | u64 pdir_dma; | 548 | u64 pdir_dma; |
@@ -594,6 +642,10 @@ union pvrdma_cmd_req { | |||
594 | struct pvrdma_cmd_destroy_qp destroy_qp; | 642 | struct pvrdma_cmd_destroy_qp destroy_qp; |
595 | struct pvrdma_cmd_create_bind create_bind; | 643 | struct pvrdma_cmd_create_bind create_bind; |
596 | struct pvrdma_cmd_destroy_bind destroy_bind; | 644 | struct pvrdma_cmd_destroy_bind destroy_bind; |
645 | struct pvrdma_cmd_create_srq create_srq; | ||
646 | struct pvrdma_cmd_modify_srq modify_srq; | ||
647 | struct pvrdma_cmd_query_srq query_srq; | ||
648 | struct pvrdma_cmd_destroy_srq destroy_srq; | ||
597 | }; | 649 | }; |
598 | 650 | ||
599 | union pvrdma_cmd_resp { | 651 | union pvrdma_cmd_resp { |
@@ -608,6 +660,8 @@ union pvrdma_cmd_resp { | |||
608 | struct pvrdma_cmd_create_qp_resp create_qp_resp; | 660 | struct pvrdma_cmd_create_qp_resp create_qp_resp; |
609 | struct pvrdma_cmd_query_qp_resp query_qp_resp; | 661 | struct pvrdma_cmd_query_qp_resp query_qp_resp; |
610 | struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp; | 662 | struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp; |
663 | struct pvrdma_cmd_create_srq_resp create_srq_resp; | ||
664 | struct pvrdma_cmd_query_srq_resp query_srq_resp; | ||
611 | }; | 665 | }; |
612 | 666 | ||
613 | #endif /* __PVRDMA_DEV_API_H__ */ | 667 | #endif /* __PVRDMA_DEV_API_H__ */ |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 6ce709a67959..1f4e18717a00 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | |||
@@ -118,6 +118,7 @@ static int pvrdma_init_device(struct pvrdma_dev *dev) | |||
118 | spin_lock_init(&dev->cmd_lock); | 118 | spin_lock_init(&dev->cmd_lock); |
119 | sema_init(&dev->cmd_sema, 1); | 119 | sema_init(&dev->cmd_sema, 1); |
120 | atomic_set(&dev->num_qps, 0); | 120 | atomic_set(&dev->num_qps, 0); |
121 | atomic_set(&dev->num_srqs, 0); | ||
121 | atomic_set(&dev->num_cqs, 0); | 122 | atomic_set(&dev->num_cqs, 0); |
122 | atomic_set(&dev->num_pds, 0); | 123 | atomic_set(&dev->num_pds, 0); |
123 | atomic_set(&dev->num_ahs, 0); | 124 | atomic_set(&dev->num_ahs, 0); |
@@ -254,9 +255,32 @@ static int pvrdma_register_device(struct pvrdma_dev *dev) | |||
254 | goto err_cq_free; | 255 | goto err_cq_free; |
255 | spin_lock_init(&dev->qp_tbl_lock); | 256 | spin_lock_init(&dev->qp_tbl_lock); |
256 | 257 | ||
258 | /* Check if SRQ is supported by backend */ | ||
259 | if (dev->dsr->caps.max_srq) { | ||
260 | dev->ib_dev.uverbs_cmd_mask |= | ||
261 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | | ||
262 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | | ||
263 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | ||
264 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | | ||
265 | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); | ||
266 | |||
267 | dev->ib_dev.create_srq = pvrdma_create_srq; | ||
268 | dev->ib_dev.modify_srq = pvrdma_modify_srq; | ||
269 | dev->ib_dev.query_srq = pvrdma_query_srq; | ||
270 | dev->ib_dev.destroy_srq = pvrdma_destroy_srq; | ||
271 | dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv; | ||
272 | |||
273 | dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq, | ||
274 | sizeof(struct pvrdma_srq *), | ||
275 | GFP_KERNEL); | ||
276 | if (!dev->srq_tbl) | ||
277 | goto err_qp_free; | ||
278 | } | ||
279 | spin_lock_init(&dev->srq_tbl_lock); | ||
280 | |||
257 | ret = ib_register_device(&dev->ib_dev, NULL); | 281 | ret = ib_register_device(&dev->ib_dev, NULL); |
258 | if (ret) | 282 | if (ret) |
259 | goto err_qp_free; | 283 | goto err_srq_free; |
260 | 284 | ||
261 | for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) { | 285 | for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) { |
262 | ret = device_create_file(&dev->ib_dev.dev, | 286 | ret = device_create_file(&dev->ib_dev.dev, |
@@ -271,6 +295,8 @@ static int pvrdma_register_device(struct pvrdma_dev *dev) | |||
271 | 295 | ||
272 | err_class: | 296 | err_class: |
273 | ib_unregister_device(&dev->ib_dev); | 297 | ib_unregister_device(&dev->ib_dev); |
298 | err_srq_free: | ||
299 | kfree(dev->srq_tbl); | ||
274 | err_qp_free: | 300 | err_qp_free: |
275 | kfree(dev->qp_tbl); | 301 | kfree(dev->qp_tbl); |
276 | err_cq_free: | 302 | err_cq_free: |
@@ -353,6 +379,35 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type) | |||
353 | } | 379 | } |
354 | } | 380 | } |
355 | 381 | ||
382 | static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type) | ||
383 | { | ||
384 | struct pvrdma_srq *srq; | ||
385 | unsigned long flags; | ||
386 | |||
387 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); | ||
388 | if (dev->srq_tbl) | ||
389 | srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq]; | ||
390 | else | ||
391 | srq = NULL; | ||
392 | if (srq) | ||
393 | refcount_inc(&srq->refcnt); | ||
394 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); | ||
395 | |||
396 | if (srq && srq->ibsrq.event_handler) { | ||
397 | struct ib_srq *ibsrq = &srq->ibsrq; | ||
398 | struct ib_event e; | ||
399 | |||
400 | e.device = ibsrq->device; | ||
401 | e.element.srq = ibsrq; | ||
402 | e.event = type; /* 1:1 mapping for now. */ | ||
403 | ibsrq->event_handler(&e, ibsrq->srq_context); | ||
404 | } | ||
405 | if (srq) { | ||
406 | if (refcount_dec_and_test(&srq->refcnt)) | ||
407 | wake_up(&srq->wait); | ||
408 | } | ||
409 | } | ||
410 | |||
356 | static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port, | 411 | static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port, |
357 | enum ib_event_type event) | 412 | enum ib_event_type event) |
358 | { | 413 | { |
@@ -423,6 +478,7 @@ static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id) | |||
423 | 478 | ||
424 | case PVRDMA_EVENT_SRQ_ERR: | 479 | case PVRDMA_EVENT_SRQ_ERR: |
425 | case PVRDMA_EVENT_SRQ_LIMIT_REACHED: | 480 | case PVRDMA_EVENT_SRQ_LIMIT_REACHED: |
481 | pvrdma_srq_event(dev, eqe->info, eqe->type); | ||
426 | break; | 482 | break; |
427 | 483 | ||
428 | case PVRDMA_EVENT_PORT_ACTIVE: | 484 | case PVRDMA_EVENT_PORT_ACTIVE: |
@@ -1059,6 +1115,7 @@ static void pvrdma_pci_remove(struct pci_dev *pdev) | |||
1059 | iounmap(dev->regs); | 1115 | iounmap(dev->regs); |
1060 | kfree(dev->sgid_tbl); | 1116 | kfree(dev->sgid_tbl); |
1061 | kfree(dev->cq_tbl); | 1117 | kfree(dev->cq_tbl); |
1118 | kfree(dev->srq_tbl); | ||
1062 | kfree(dev->qp_tbl); | 1119 | kfree(dev->qp_tbl); |
1063 | pvrdma_uar_table_cleanup(dev); | 1120 | pvrdma_uar_table_cleanup(dev); |
1064 | iounmap(dev->driver_uar.map); | 1121 | iounmap(dev->driver_uar.map); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index ed34d5a581fa..10420a18d02f 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | |||
@@ -198,6 +198,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
198 | struct pvrdma_create_qp ucmd; | 198 | struct pvrdma_create_qp ucmd; |
199 | unsigned long flags; | 199 | unsigned long flags; |
200 | int ret; | 200 | int ret; |
201 | bool is_srq = !!init_attr->srq; | ||
201 | 202 | ||
202 | if (init_attr->create_flags) { | 203 | if (init_attr->create_flags) { |
203 | dev_warn(&dev->pdev->dev, | 204 | dev_warn(&dev->pdev->dev, |
@@ -214,6 +215,12 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
214 | return ERR_PTR(-EINVAL); | 215 | return ERR_PTR(-EINVAL); |
215 | } | 216 | } |
216 | 217 | ||
218 | if (is_srq && !dev->dsr->caps.max_srq) { | ||
219 | dev_warn(&dev->pdev->dev, | ||
220 | "SRQs not supported by device\n"); | ||
221 | return ERR_PTR(-EINVAL); | ||
222 | } | ||
223 | |||
217 | if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp)) | 224 | if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp)) |
218 | return ERR_PTR(-ENOMEM); | 225 | return ERR_PTR(-ENOMEM); |
219 | 226 | ||
@@ -252,26 +259,36 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
252 | goto err_qp; | 259 | goto err_qp; |
253 | } | 260 | } |
254 | 261 | ||
255 | /* set qp->sq.wqe_cnt, shift, buf_size.. */ | 262 | if (!is_srq) { |
256 | qp->rumem = ib_umem_get(pd->uobject->context, | 263 | /* set qp->sq.wqe_cnt, shift, buf_size.. */ |
257 | ucmd.rbuf_addr, | 264 | qp->rumem = ib_umem_get(pd->uobject->context, |
258 | ucmd.rbuf_size, 0, 0); | 265 | ucmd.rbuf_addr, |
259 | if (IS_ERR(qp->rumem)) { | 266 | ucmd.rbuf_size, 0, 0); |
260 | ret = PTR_ERR(qp->rumem); | 267 | if (IS_ERR(qp->rumem)) { |
261 | goto err_qp; | 268 | ret = PTR_ERR(qp->rumem); |
269 | goto err_qp; | ||
270 | } | ||
271 | qp->srq = NULL; | ||
272 | } else { | ||
273 | qp->rumem = NULL; | ||
274 | qp->srq = to_vsrq(init_attr->srq); | ||
262 | } | 275 | } |
263 | 276 | ||
264 | qp->sumem = ib_umem_get(pd->uobject->context, | 277 | qp->sumem = ib_umem_get(pd->uobject->context, |
265 | ucmd.sbuf_addr, | 278 | ucmd.sbuf_addr, |
266 | ucmd.sbuf_size, 0, 0); | 279 | ucmd.sbuf_size, 0, 0); |
267 | if (IS_ERR(qp->sumem)) { | 280 | if (IS_ERR(qp->sumem)) { |
268 | ib_umem_release(qp->rumem); | 281 | if (!is_srq) |
282 | ib_umem_release(qp->rumem); | ||
269 | ret = PTR_ERR(qp->sumem); | 283 | ret = PTR_ERR(qp->sumem); |
270 | goto err_qp; | 284 | goto err_qp; |
271 | } | 285 | } |
272 | 286 | ||
273 | qp->npages_send = ib_umem_page_count(qp->sumem); | 287 | qp->npages_send = ib_umem_page_count(qp->sumem); |
274 | qp->npages_recv = ib_umem_page_count(qp->rumem); | 288 | if (!is_srq) |
289 | qp->npages_recv = ib_umem_page_count(qp->rumem); | ||
290 | else | ||
291 | qp->npages_recv = 0; | ||
275 | qp->npages = qp->npages_send + qp->npages_recv; | 292 | qp->npages = qp->npages_send + qp->npages_recv; |
276 | } else { | 293 | } else { |
277 | qp->is_kernel = true; | 294 | qp->is_kernel = true; |
@@ -312,12 +329,14 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
312 | 329 | ||
313 | if (!qp->is_kernel) { | 330 | if (!qp->is_kernel) { |
314 | pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0); | 331 | pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0); |
315 | pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem, | 332 | if (!is_srq) |
316 | qp->npages_send); | 333 | pvrdma_page_dir_insert_umem(&qp->pdir, |
334 | qp->rumem, | ||
335 | qp->npages_send); | ||
317 | } else { | 336 | } else { |
318 | /* Ring state is always the first page. */ | 337 | /* Ring state is always the first page. */ |
319 | qp->sq.ring = qp->pdir.pages[0]; | 338 | qp->sq.ring = qp->pdir.pages[0]; |
320 | qp->rq.ring = &qp->sq.ring[1]; | 339 | qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1]; |
321 | } | 340 | } |
322 | break; | 341 | break; |
323 | default: | 342 | default: |
@@ -333,6 +352,10 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
333 | cmd->pd_handle = to_vpd(pd)->pd_handle; | 352 | cmd->pd_handle = to_vpd(pd)->pd_handle; |
334 | cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; | 353 | cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; |
335 | cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle; | 354 | cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle; |
355 | if (is_srq) | ||
356 | cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle; | ||
357 | else | ||
358 | cmd->srq_handle = 0; | ||
336 | cmd->max_send_wr = init_attr->cap.max_send_wr; | 359 | cmd->max_send_wr = init_attr->cap.max_send_wr; |
337 | cmd->max_recv_wr = init_attr->cap.max_recv_wr; | 360 | cmd->max_recv_wr = init_attr->cap.max_recv_wr; |
338 | cmd->max_send_sge = init_attr->cap.max_send_sge; | 361 | cmd->max_send_sge = init_attr->cap.max_send_sge; |
@@ -340,6 +363,8 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
340 | cmd->max_inline_data = init_attr->cap.max_inline_data; | 363 | cmd->max_inline_data = init_attr->cap.max_inline_data; |
341 | cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; | 364 | cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; |
342 | cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); | 365 | cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); |
366 | cmd->is_srq = is_srq; | ||
367 | cmd->lkey = 0; | ||
343 | cmd->access_flags = IB_ACCESS_LOCAL_WRITE; | 368 | cmd->access_flags = IB_ACCESS_LOCAL_WRITE; |
344 | cmd->total_chunks = qp->npages; | 369 | cmd->total_chunks = qp->npages; |
345 | cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES; | 370 | cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES; |
@@ -815,6 +840,12 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
815 | return -EINVAL; | 840 | return -EINVAL; |
816 | } | 841 | } |
817 | 842 | ||
843 | if (qp->srq) { | ||
844 | dev_warn(&dev->pdev->dev, "QP associated with SRQ\n"); | ||
845 | *bad_wr = wr; | ||
846 | return -EINVAL; | ||
847 | } | ||
848 | |||
818 | spin_lock_irqsave(&qp->rq.lock, flags); | 849 | spin_lock_irqsave(&qp->rq.lock, flags); |
819 | 850 | ||
820 | while (wr) { | 851 | while (wr) { |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c new file mode 100644 index 000000000000..826ccb864596 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016-2017 VMware, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of EITHER the GNU General Public License | ||
6 | * version 2 as published by the Free Software Foundation or the BSD | ||
7 | * 2-Clause License. This program is distributed in the hope that it | ||
8 | * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED | ||
9 | * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. | ||
10 | * See the GNU General Public License version 2 for more details at | ||
11 | * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program available in the file COPYING in the main | ||
15 | * directory of this source tree. | ||
16 | * | ||
17 | * The BSD 2-Clause License | ||
18 | * | ||
19 | * Redistribution and use in source and binary forms, with or | ||
20 | * without modification, are permitted provided that the following | ||
21 | * conditions are met: | ||
22 | * | ||
23 | * - Redistributions of source code must retain the above | ||
24 | * copyright notice, this list of conditions and the following | ||
25 | * disclaimer. | ||
26 | * | ||
27 | * - Redistributions in binary form must reproduce the above | ||
28 | * copyright notice, this list of conditions and the following | ||
29 | * disclaimer in the documentation and/or other materials | ||
30 | * provided with the distribution. | ||
31 | * | ||
32 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
33 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
34 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
35 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
36 | * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, | ||
37 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
38 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | ||
39 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
40 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
41 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
42 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED | ||
43 | * OF THE POSSIBILITY OF SUCH DAMAGE. | ||
44 | */ | ||
45 | |||
46 | #include <asm/page.h> | ||
47 | #include <linux/io.h> | ||
48 | #include <linux/wait.h> | ||
49 | #include <rdma/ib_addr.h> | ||
50 | #include <rdma/ib_smi.h> | ||
51 | #include <rdma/ib_user_verbs.h> | ||
52 | |||
53 | #include "pvrdma.h" | ||
54 | |||
55 | int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
56 | struct ib_recv_wr **bad_wr) | ||
57 | { | ||
58 | /* No support for kernel clients. */ | ||
59 | return -EOPNOTSUPP; | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * pvrdma_query_srq - query shared receive queue | ||
64 | * @ibsrq: the shared receive queue to query | ||
65 | * @srq_attr: attributes to query and return to client | ||
66 | * | ||
67 | * @return: 0 for success, otherwise returns an errno. | ||
68 | */ | ||
69 | int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | ||
70 | { | ||
71 | struct pvrdma_dev *dev = to_vdev(ibsrq->device); | ||
72 | struct pvrdma_srq *srq = to_vsrq(ibsrq); | ||
73 | union pvrdma_cmd_req req; | ||
74 | union pvrdma_cmd_resp rsp; | ||
75 | struct pvrdma_cmd_query_srq *cmd = &req.query_srq; | ||
76 | struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp; | ||
77 | int ret; | ||
78 | |||
79 | memset(cmd, 0, sizeof(*cmd)); | ||
80 | cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ; | ||
81 | cmd->srq_handle = srq->srq_handle; | ||
82 | |||
83 | ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP); | ||
84 | if (ret < 0) { | ||
85 | dev_warn(&dev->pdev->dev, | ||
86 | "could not query shared receive queue, error: %d\n", | ||
87 | ret); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | |||
91 | srq_attr->srq_limit = resp->attrs.srq_limit; | ||
92 | srq_attr->max_wr = resp->attrs.max_wr; | ||
93 | srq_attr->max_sge = resp->attrs.max_sge; | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * pvrdma_create_srq - create shared receive queue | ||
100 | * @pd: protection domain | ||
101 | * @init_attr: shared receive queue attributes | ||
102 | * @udata: user data | ||
103 | * | ||
104 | * @return: the ib_srq pointer on success, otherwise returns an errno. | ||
105 | */ | ||
106 | struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | ||
107 | struct ib_srq_init_attr *init_attr, | ||
108 | struct ib_udata *udata) | ||
109 | { | ||
110 | struct pvrdma_srq *srq = NULL; | ||
111 | struct pvrdma_dev *dev = to_vdev(pd->device); | ||
112 | union pvrdma_cmd_req req; | ||
113 | union pvrdma_cmd_resp rsp; | ||
114 | struct pvrdma_cmd_create_srq *cmd = &req.create_srq; | ||
115 | struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; | ||
116 | struct pvrdma_create_srq ucmd; | ||
117 | unsigned long flags; | ||
118 | int ret; | ||
119 | |||
120 | if (!(pd->uobject && udata)) { | ||
121 | /* No support for kernel clients. */ | ||
122 | dev_warn(&dev->pdev->dev, | ||
123 | "no shared receive queue support for kernel client\n"); | ||
124 | return ERR_PTR(-EOPNOTSUPP); | ||
125 | } | ||
126 | |||
127 | if (init_attr->srq_type != IB_SRQT_BASIC) { | ||
128 | dev_warn(&dev->pdev->dev, | ||
129 | "shared receive queue type %d not supported\n", | ||
130 | init_attr->srq_type); | ||
131 | return ERR_PTR(-EINVAL); | ||
132 | } | ||
133 | |||
134 | if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr || | ||
135 | init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) { | ||
136 | dev_warn(&dev->pdev->dev, | ||
137 | "shared receive queue size invalid\n"); | ||
138 | return ERR_PTR(-EINVAL); | ||
139 | } | ||
140 | |||
141 | if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq)) | ||
142 | return ERR_PTR(-ENOMEM); | ||
143 | |||
144 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); | ||
145 | if (!srq) { | ||
146 | ret = -ENOMEM; | ||
147 | goto err_srq; | ||
148 | } | ||
149 | |||
150 | spin_lock_init(&srq->lock); | ||
151 | refcount_set(&srq->refcnt, 1); | ||
152 | init_waitqueue_head(&srq->wait); | ||
153 | |||
154 | dev_dbg(&dev->pdev->dev, | ||
155 | "create shared receive queue from user space\n"); | ||
156 | |||
157 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { | ||
158 | ret = -EFAULT; | ||
159 | goto err_srq; | ||
160 | } | ||
161 | |||
162 | srq->umem = ib_umem_get(pd->uobject->context, | ||
163 | ucmd.buf_addr, | ||
164 | ucmd.buf_size, 0, 0); | ||
165 | if (IS_ERR(srq->umem)) { | ||
166 | ret = PTR_ERR(srq->umem); | ||
167 | goto err_srq; | ||
168 | } | ||
169 | |||
170 | srq->npages = ib_umem_page_count(srq->umem); | ||
171 | |||
172 | if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) { | ||
173 | dev_warn(&dev->pdev->dev, | ||
174 | "overflow pages in shared receive queue\n"); | ||
175 | ret = -EINVAL; | ||
176 | goto err_umem; | ||
177 | } | ||
178 | |||
179 | ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false); | ||
180 | if (ret) { | ||
181 | dev_warn(&dev->pdev->dev, | ||
182 | "could not allocate page directory\n"); | ||
183 | goto err_umem; | ||
184 | } | ||
185 | |||
186 | pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); | ||
187 | |||
188 | memset(cmd, 0, sizeof(*cmd)); | ||
189 | cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ; | ||
190 | cmd->srq_type = init_attr->srq_type; | ||
191 | cmd->nchunks = srq->npages; | ||
192 | cmd->pd_handle = to_vpd(pd)->pd_handle; | ||
193 | cmd->attrs.max_wr = init_attr->attr.max_wr; | ||
194 | cmd->attrs.max_sge = init_attr->attr.max_sge; | ||
195 | cmd->attrs.srq_limit = init_attr->attr.srq_limit; | ||
196 | cmd->pdir_dma = srq->pdir.dir_dma; | ||
197 | |||
198 | ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP); | ||
199 | if (ret < 0) { | ||
200 | dev_warn(&dev->pdev->dev, | ||
201 | "could not create shared receive queue, error: %d\n", | ||
202 | ret); | ||
203 | goto err_page_dir; | ||
204 | } | ||
205 | |||
206 | srq->srq_handle = resp->srqn; | ||
207 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); | ||
208 | dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq; | ||
209 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); | ||
210 | |||
211 | /* Copy udata back. */ | ||
212 | if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) { | ||
213 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); | ||
214 | pvrdma_destroy_srq(&srq->ibsrq); | ||
215 | return ERR_PTR(-EINVAL); | ||
216 | } | ||
217 | |||
218 | return &srq->ibsrq; | ||
219 | |||
220 | err_page_dir: | ||
221 | pvrdma_page_dir_cleanup(dev, &srq->pdir); | ||
222 | err_umem: | ||
223 | ib_umem_release(srq->umem); | ||
224 | err_srq: | ||
225 | kfree(srq); | ||
226 | atomic_dec(&dev->num_srqs); | ||
227 | |||
228 | return ERR_PTR(ret); | ||
229 | } | ||
230 | |||
231 | static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq) | ||
232 | { | ||
233 | unsigned long flags; | ||
234 | |||
235 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); | ||
236 | dev->srq_tbl[srq->srq_handle] = NULL; | ||
237 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); | ||
238 | |||
239 | refcount_dec(&srq->refcnt); | ||
240 | wait_event(srq->wait, !refcount_read(&srq->refcnt)); | ||
241 | |||
242 | /* There is no support for kernel clients, so this is safe. */ | ||
243 | ib_umem_release(srq->umem); | ||
244 | |||
245 | pvrdma_page_dir_cleanup(dev, &srq->pdir); | ||
246 | |||
247 | kfree(srq); | ||
248 | |||
249 | atomic_dec(&dev->num_srqs); | ||
250 | } | ||
251 | |||
252 | /** | ||
253 | * pvrdma_destroy_srq - destroy shared receive queue | ||
254 | * @srq: the shared receive queue to destroy | ||
255 | * | ||
256 | * @return: 0 for success. | ||
257 | */ | ||
258 | int pvrdma_destroy_srq(struct ib_srq *srq) | ||
259 | { | ||
260 | struct pvrdma_srq *vsrq = to_vsrq(srq); | ||
261 | union pvrdma_cmd_req req; | ||
262 | struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq; | ||
263 | struct pvrdma_dev *dev = to_vdev(srq->device); | ||
264 | int ret; | ||
265 | |||
266 | memset(cmd, 0, sizeof(*cmd)); | ||
267 | cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ; | ||
268 | cmd->srq_handle = vsrq->srq_handle; | ||
269 | |||
270 | ret = pvrdma_cmd_post(dev, &req, NULL, 0); | ||
271 | if (ret < 0) | ||
272 | dev_warn(&dev->pdev->dev, | ||
273 | "destroy shared receive queue failed, error: %d\n", | ||
274 | ret); | ||
275 | |||
276 | pvrdma_free_srq(dev, vsrq); | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * pvrdma_modify_srq - modify shared receive queue attributes | ||
283 | * @ibsrq: the shared receive queue to modify | ||
284 | * @attr: the shared receive queue's new attributes | ||
285 | * @attr_mask: attributes mask | ||
286 | * @udata: user data | ||
287 | * | ||
288 | * @returns 0 on success, otherwise returns an errno. | ||
289 | */ | ||
290 | int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||
291 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) | ||
292 | { | ||
293 | struct pvrdma_srq *vsrq = to_vsrq(ibsrq); | ||
294 | union pvrdma_cmd_req req; | ||
295 | struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq; | ||
296 | struct pvrdma_dev *dev = to_vdev(ibsrq->device); | ||
297 | int ret; | ||
298 | |||
299 | /* Only support SRQ limit. */ | ||
300 | if (!(attr_mask & IB_SRQ_LIMIT)) | ||
301 | return -EINVAL; | ||
302 | |||
303 | memset(cmd, 0, sizeof(*cmd)); | ||
304 | cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ; | ||
305 | cmd->srq_handle = vsrq->srq_handle; | ||
306 | cmd->attrs.srq_limit = attr->srq_limit; | ||
307 | cmd->attr_mask = attr_mask; | ||
308 | |||
309 | ret = pvrdma_cmd_post(dev, &req, NULL, 0); | ||
310 | if (ret < 0) { | ||
311 | dev_warn(&dev->pdev->dev, | ||
312 | "could not modify shared receive queue, error: %d\n", | ||
313 | ret); | ||
314 | |||
315 | return -EINVAL; | ||
316 | } | ||
317 | |||
318 | return ret; | ||
319 | } | ||
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 48776f5ffb0e..16b96616ef7e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | |||
@@ -85,6 +85,9 @@ int pvrdma_query_device(struct ib_device *ibdev, | |||
85 | props->max_sge = dev->dsr->caps.max_sge; | 85 | props->max_sge = dev->dsr->caps.max_sge; |
86 | props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge, | 86 | props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge, |
87 | dev->dsr->caps.max_sge_rd); | 87 | dev->dsr->caps.max_sge_rd); |
88 | props->max_srq = dev->dsr->caps.max_srq; | ||
89 | props->max_srq_wr = dev->dsr->caps.max_srq_wr; | ||
90 | props->max_srq_sge = dev->dsr->caps.max_srq_sge; | ||
88 | props->max_cq = dev->dsr->caps.max_cq; | 91 | props->max_cq = dev->dsr->caps.max_cq; |
89 | props->max_cqe = dev->dsr->caps.max_cqe; | 92 | props->max_cqe = dev->dsr->caps.max_cqe; |
90 | props->max_mr = dev->dsr->caps.max_mr; | 93 | props->max_mr = dev->dsr->caps.max_mr; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 002a9b066e70..b7b25728a7e5 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | |||
@@ -324,6 +324,13 @@ enum pvrdma_mw_type { | |||
324 | PVRDMA_MW_TYPE_2 = 2, | 324 | PVRDMA_MW_TYPE_2 = 2, |
325 | }; | 325 | }; |
326 | 326 | ||
/*
 * Device-visible SRQ attributes; mirrors the fields of struct ib_srq_attr.
 * Layout is part of the device ABI — do not reorder or resize fields.
 */
struct pvrdma_srq_attr {
	u32 max_wr;	/* max outstanding receive work requests */
	u32 max_sge;	/* max scatter/gather entries per work request */
	u32 srq_limit;	/* limit watermark (copied from ib_srq_attr on modify) */
	u32 reserved;	/* explicit pad to keep the struct 16 bytes */
};
333 | |||
327 | struct pvrdma_qp_attr { | 334 | struct pvrdma_qp_attr { |
328 | enum pvrdma_qp_state qp_state; | 335 | enum pvrdma_qp_state qp_state; |
329 | enum pvrdma_qp_state cur_qp_state; | 336 | enum pvrdma_qp_state cur_qp_state; |
@@ -420,6 +427,17 @@ int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); | |||
420 | struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, | 427 | struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
421 | struct ib_udata *udata); | 428 | struct ib_udata *udata); |
422 | int pvrdma_destroy_ah(struct ib_ah *ah); | 429 | int pvrdma_destroy_ah(struct ib_ah *ah); |
430 | |||
431 | struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | ||
432 | struct ib_srq_init_attr *init_attr, | ||
433 | struct ib_udata *udata); | ||
434 | int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||
435 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | ||
436 | int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); | ||
437 | int pvrdma_destroy_srq(struct ib_srq *srq); | ||
438 | int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
439 | struct ib_recv_wr **bad_wr); | ||
440 | |||
423 | struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | 441 | struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, |
424 | struct ib_qp_init_attr *init_attr, | 442 | struct ib_qp_init_attr *init_attr, |
425 | struct ib_udata *udata); | 443 | struct ib_udata *udata); |
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index c6569b0032ec..846c6f4859db 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h | |||
@@ -158,6 +158,8 @@ struct pvrdma_resize_cq { | |||
158 | 158 | ||
/* Userspace → kernel parameters for SRQ creation (uapi; layout is ABI). */
struct pvrdma_create_srq {
	__u64 buf_addr;	/* userspace address of the SRQ ring buffer */
	__u32 buf_size;	/* size of that buffer in bytes */
	__u32 reserved;	/* pad so struct size matches on 32/64-bit ABIs */
};
162 | 164 | ||
163 | struct pvrdma_create_srq_resp { | 165 | struct pvrdma_create_srq_resp { |