-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c   |  13
-rw-r--r--  drivers/infiniband/core/uverbs_main.c  |   4
-rw-r--r--  drivers/infiniband/core/verbs.c        | 163
-rw-r--r--  include/rdma/ib_verbs.h                |  30
4 files changed, 162 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 9058e38ca4cd..c4c308cd2034 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1463,6 +1463,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	}
 
 	if (cmd.qp_type != IB_QPT_XRC_TGT) {
+		qp->real_qp = qp;
 		qp->device = device;
 		qp->pd = pd;
 		qp->send_cq = attr.send_cq;
@@ -1729,8 +1730,12 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
 	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
 
-	ret = qp->device->modify_qp(qp, attr,
-		modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+	if (qp->real_qp == qp) {
+		ret = qp->device->modify_qp(qp, attr,
+			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+	} else {
+		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+	}
 
 	put_qp_read(qp);
 
@@ -1927,7 +1932,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 	}
 
 	resp.bad_wr = 0;
-	ret = qp->device->post_send(qp, wr, &bad_wr);
+	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
 	if (ret)
 		for (next = wr; next; next = next->next) {
 			++resp.bad_wr;
@@ -2065,7 +2070,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
 		goto out;
 
 	resp.bad_wr = 0;
-	ret = qp->device->post_recv(qp, wr, &bad_wr);
+	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
 
 	put_qp_read(qp);
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0cb69e039f75..9c877e24eb60 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -206,8 +206,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 				container_of(uobj, struct ib_uqp_object, uevent.uobject);
 
 			idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-			if (qp->qp_type == IB_QPT_XRC_TGT) {
-				ib_release_qp(qp);
+			if (qp != qp->real_qp) {
+				ib_close_qp(qp);
 			} else {
 				ib_uverbs_detach_umcast(qp, uqp);
 				ib_destroy_qp(qp);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a6d95e635699..e02898bcc991 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -39,6 +39,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
@@ -316,6 +317,14 @@ EXPORT_SYMBOL(ib_destroy_srq);
 
 /* Queue pairs */
 
+static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
+{
+	struct ib_qp *qp = context;
+
+	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
+		event->element.qp->event_handler(event, event->element.qp->qp_context);
+}
+
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
 {
 	mutex_lock(&xrcd->tgt_qp_mutex);
@@ -323,33 +332,90 @@ static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
 	mutex_unlock(&xrcd->tgt_qp_mutex);
 }
 
-static void __ib_remove_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
+				  void (*event_handler)(struct ib_event *, void *),
+				  void *qp_context)
 {
+	struct ib_qp *qp;
+	unsigned long flags;
+
+	qp = kzalloc(sizeof *qp, GFP_KERNEL);
+	if (!qp)
+		return ERR_PTR(-ENOMEM);
+
+	qp->real_qp = real_qp;
+	atomic_inc(&real_qp->usecnt);
+	qp->device = real_qp->device;
+	qp->event_handler = event_handler;
+	qp->qp_context = qp_context;
+	qp->qp_num = real_qp->qp_num;
+	qp->qp_type = real_qp->qp_type;
+
+	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+	list_add(&qp->open_list, &real_qp->open_list);
+	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+
+	return qp;
+}
+
+struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
+			 struct ib_qp_open_attr *qp_open_attr)
+{
+	struct ib_qp *qp, *real_qp;
+
+	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
+		return ERR_PTR(-EINVAL);
+
+	qp = ERR_PTR(-EINVAL);
 	mutex_lock(&xrcd->tgt_qp_mutex);
-	list_del(&qp->xrcd_list);
+	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
+		if (real_qp->qp_num == qp_open_attr->qp_num) {
+			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
+					  qp_open_attr->qp_context);
+			break;
+		}
+	}
 	mutex_unlock(&xrcd->tgt_qp_mutex);
+	return qp;
 }
+EXPORT_SYMBOL(ib_open_qp);
 
 struct ib_qp *ib_create_qp(struct ib_pd *pd,
 			   struct ib_qp_init_attr *qp_init_attr)
 {
-	struct ib_qp *qp;
+	struct ib_qp *qp, *real_qp;
 	struct ib_device *device;
 
 	device = pd ? pd->device : qp_init_attr->xrcd->device;
 	qp = device->create_qp(pd, qp_init_attr, NULL);
 
 	if (!IS_ERR(qp)) {
 		qp->device = device;
+		qp->real_qp = qp;
+		qp->uobject = NULL;
+		qp->qp_type = qp_init_attr->qp_type;
 
 		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+			qp->event_handler = __ib_shared_qp_event_handler;
+			qp->qp_context = qp;
 			qp->pd = NULL;
 			qp->send_cq = qp->recv_cq = NULL;
 			qp->srq = NULL;
 			qp->xrcd = qp_init_attr->xrcd;
 			atomic_inc(&qp_init_attr->xrcd->usecnt);
-			__ib_insert_xrcd_qp(qp_init_attr->xrcd, qp);
+			INIT_LIST_HEAD(&qp->open_list);
+			atomic_set(&qp->usecnt, 0);
+
+			real_qp = qp;
+			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+					  qp_init_attr->qp_context);
+			if (!IS_ERR(qp))
+				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
+			else
+				real_qp->device->destroy_qp(real_qp);
 		} else {
+			qp->event_handler = qp_init_attr->event_handler;
+			qp->qp_context = qp_init_attr->qp_context;
 			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
 				qp->recv_cq = NULL;
 				qp->srq = NULL;
@@ -368,11 +434,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 			atomic_inc(&pd->usecnt);
 			atomic_inc(&qp_init_attr->send_cq->usecnt);
 		}
-
-		qp->uobject = NULL;
-		qp->event_handler = qp_init_attr->event_handler;
-		qp->qp_context = qp_init_attr->qp_context;
-		qp->qp_type = qp_init_attr->qp_type;
 	}
 
 	return qp;
@@ -717,7 +778,7 @@ int ib_modify_qp(struct ib_qp *qp,
 		 struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask)
 {
-	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
+	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
 
@@ -727,26 +788,76 @@ int ib_query_qp(struct ib_qp *qp,
 		struct ib_qp_init_attr *qp_init_attr)
 {
 	return qp->device->query_qp ?
-		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
+		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
 		-ENOSYS;
 }
 EXPORT_SYMBOL(ib_query_qp);
 
+int ib_close_qp(struct ib_qp *qp)
+{
+	struct ib_qp *real_qp;
+	unsigned long flags;
+
+	real_qp = qp->real_qp;
+	if (real_qp == qp)
+		return -EINVAL;
+
+	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+	list_del(&qp->open_list);
+	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+
+	atomic_dec(&real_qp->usecnt);
+	kfree(qp);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_close_qp);
+
+static int __ib_destroy_shared_qp(struct ib_qp *qp)
+{
+	struct ib_xrcd *xrcd;
+	struct ib_qp *real_qp;
+	int ret;
+
+	real_qp = qp->real_qp;
+	xrcd = real_qp->xrcd;
+
+	mutex_lock(&xrcd->tgt_qp_mutex);
+	ib_close_qp(qp);
+	if (atomic_read(&real_qp->usecnt) == 0)
+		list_del(&real_qp->xrcd_list);
+	else
+		real_qp = NULL;
+	mutex_unlock(&xrcd->tgt_qp_mutex);
+
+	if (real_qp) {
+		ret = ib_destroy_qp(real_qp);
+		if (!ret)
+			atomic_dec(&xrcd->usecnt);
+		else
+			__ib_insert_xrcd_qp(xrcd, real_qp);
+	}
+
+	return 0;
+}
+
 int ib_destroy_qp(struct ib_qp *qp)
 {
 	struct ib_pd *pd;
 	struct ib_cq *scq, *rcq;
 	struct ib_srq *srq;
-	struct ib_xrcd *xrcd;
 	int ret;
 
+	if (atomic_read(&qp->usecnt))
+		return -EBUSY;
+
+	if (qp->real_qp != qp)
+		return __ib_destroy_shared_qp(qp);
+
 	pd = qp->pd;
 	scq = qp->send_cq;
 	rcq = qp->recv_cq;
 	srq = qp->srq;
-	xrcd = qp->xrcd;
-	if (xrcd)
-		__ib_remove_xrcd_qp(xrcd, qp);
 
 	ret = qp->device->destroy_qp(qp);
 	if (!ret) {
@@ -758,32 +869,12 @@ int ib_destroy_qp(struct ib_qp *qp)
 			atomic_dec(&rcq->usecnt);
 		if (srq)
 			atomic_dec(&srq->usecnt);
-		if (xrcd)
-			atomic_dec(&xrcd->usecnt);
-	} else if (xrcd) {
-		__ib_insert_xrcd_qp(xrcd, qp);
 	}
 
 	return ret;
 }
 EXPORT_SYMBOL(ib_destroy_qp);
 
-int ib_release_qp(struct ib_qp *qp)
-{
-	unsigned long flags;
-
-	if (qp->qp_type != IB_QPT_XRC_TGT)
-		return -EINVAL;
-
-	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
-	qp->event_handler = NULL;
-	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
-
-	atomic_dec(&qp->xrcd->usecnt);
-	return 0;
-}
-EXPORT_SYMBOL(ib_release_qp);
-
 /* Completion queues */
 
 struct ib_cq *ib_create_cq(struct ib_device *device,
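For orientation, here is a minimal, hypothetical sketch (not part of the patch) of how an in-kernel consumer could drive the reworked verbs above: create a shareable XRC TGT QP, attach a second handle to it by QP number, and release the handles independently. The names my_event_handler and my_ctx, and the surrounding xrcd setup, are assumptions; error handling is abbreviated.

/* Illustrative only -- not from the patch.  Assumes my_event_handler() and
 * my_ctx are provided by the caller. */
static int example_share_xrc_tgt_qp(struct ib_xrcd *xrcd)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_open_attr open_attr = {};
	struct ib_qp *qp, *second;

	/* ib_create_qp(IB_QPT_XRC_TGT) now returns an *opened* handle on the
	 * underlying shared QP; pd is NULL and the device comes from the xrcd. */
	init_attr.qp_type = IB_QPT_XRC_TGT;
	init_attr.xrcd = xrcd;
	init_attr.event_handler = my_event_handler;
	init_attr.qp_context = my_ctx;
	qp = ib_create_qp(NULL, &init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	/* A second consumer attaches to the same shared QP by number. */
	open_attr.qp_type = IB_QPT_XRC_TGT;
	open_attr.qp_num = qp->qp_num;
	open_attr.event_handler = my_event_handler;
	open_attr.qp_context = my_ctx;
	second = ib_open_qp(xrcd, &open_attr);
	if (IS_ERR(second)) {
		ib_destroy_qp(qp);
		return PTR_ERR(second);
	}

	/* Handles are released independently; the real QP is torn down only
	 * after the last opened handle is gone. */
	ib_close_qp(second);
	return ib_destroy_qp(qp);
}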
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index dfd9b87b7ffd..8705539bce75 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -605,6 +605,13 @@ struct ib_qp_init_attr {
 	u8			port_num; /* special QP types only */
 };
 
+struct ib_qp_open_attr {
+	void		      (*event_handler)(struct ib_event *, void *);
+	void		       *qp_context;
+	u32			qp_num;
+	enum ib_qp_type		qp_type;
+};
+
 enum ib_rnr_timeout {
 	IB_RNR_TIMER_655_36 = 0,
 	IB_RNR_TIMER_000_01 = 1,
@@ -932,6 +939,9 @@ struct ib_qp {
 	struct ib_srq	       *srq;
 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
 	struct list_head	xrcd_list;
+	atomic_t		usecnt; /* count times opened */
+	struct list_head	open_list;
+	struct ib_qp	       *real_qp;
 	struct ib_uobject      *uobject;
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
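The three new ib_qp fields carry the sharing state. As a rough aid, the hypothetical helpers below (not part of the patch) only restate the invariants they imply.

/* Illustrative helpers, not from the patch: they restate the invariants
 * behind usecnt, open_list and real_qp. */
static inline bool ib_qp_is_open_handle(struct ib_qp *qp)
{
	/* An opened handle returned by ib_open_qp() points at the shared QP;
	 * a real QP is its own real_qp. */
	return qp->real_qp != qp;
}

static inline bool ib_qp_may_be_destroyed(struct ib_qp *qp)
{
	/* A real QP can be torn down only once every opened handle on it has
	 * been closed; usecnt counts the opens. */
	return ib_qp_is_open_handle(qp) || atomic_read(&qp->usecnt) == 0;
}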
@@ -1488,15 +1498,23 @@ int ib_query_qp(struct ib_qp *qp,
 int ib_destroy_qp(struct ib_qp *qp);
 
 /**
- * ib_release_qp - Release an external reference to a QP.
+ * ib_open_qp - Obtain a reference to an existing sharable QP.
+ * @xrcd - XRC domain
+ * @qp_open_attr: Attributes identifying the QP to open.
+ *
+ * Returns a reference to a sharable QP.
+ */
+struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
+			 struct ib_qp_open_attr *qp_open_attr);
+
+/**
+ * ib_close_qp - Release an external reference to a QP.
  * @qp: The QP handle to release
  *
- * The specified QP handle is released by the caller.  If the QP is
- * referenced internally, it is not destroyed until all internal
- * references are released.  After releasing the qp, the caller
- * can no longer access it and all events on the QP are discarded.
+ * The opened QP handle is released by the caller.  The underlying
+ * shared QP is not destroyed until all internal references are released.
  */
-int ib_release_qp(struct ib_qp *qp);
+int ib_close_qp(struct ib_qp *qp);
 
 /**
  * ib_post_send - Posts a list of work requests to the send queue of