path: root/drivers/infiniband/core/verbs.c
author	Sean Hefty <sean.hefty@intel.com>	2011-08-08 18:31:51 -0400
committer	Roland Dreier <roland@purestorage.com>	2011-10-13 12:49:51 -0400
commit	0e0ec7e0638ef48e0c661873dfcc8caccab984c6 (patch)
tree	54314a25a402244036a5417f098c70af441a56a8 /drivers/infiniband/core/verbs.c
parent	0a1405da9952a72dd587829a3321695adde7dca1 (diff)
RDMA/core: Export ib_open_qp() to share XRC TGT QPs
XRC TGT QPs are shared resources among multiple processes. Since the creating
process may exit, allow other processes which share the same XRC domain to
open an existing QP. This allows us to transfer ownership of an XRC TGT QP
to another process.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
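For illustration only (not part of this patch): a minimal sketch of how a
second kernel consumer sharing the XRC domain could adopt an existing TGT QP
through the newly exported call. Here xrcd, shared_qp_num, my_event_handler,
and my_context are hypothetical, assumed to be supplied by the caller (e.g.
the QP number published by the creating process):

	struct ib_qp_open_attr open_attr = {
		.event_handler	= my_event_handler,	/* hypothetical per-opener callback */
		.qp_context	= my_context,		/* hypothetical per-opener data */
		.qp_num		= shared_qp_num,	/* QP number of the shared XRC TGT QP */
		.qp_type	= IB_QPT_XRC_TGT,	/* ib_open_qp() rejects any other type */
	};
	struct ib_qp *qp;

	qp = ib_open_qp(xrcd, &open_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);	/* -EINVAL if the domain has no TGT QP with that number */

Each successful open takes a reference on the real QP; the opener drops it
later with ib_close_qp() (or ib_destroy_qp(), which also tears down the
underlying QP once the last reference is gone).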
Diffstat (limited to 'drivers/infiniband/core/verbs.c')
-rw-r--r--	drivers/infiniband/core/verbs.c	163
1 file changed, 127 insertions(+), 36 deletions(-)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a6d95e63569..e02898bcc99 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -39,6 +39,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
@@ -316,6 +317,14 @@ EXPORT_SYMBOL(ib_destroy_srq);
 
 /* Queue pairs */
 
+static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
+{
+	struct ib_qp *qp = context;
+
+	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
+		event->element.qp->event_handler(event, event->element.qp->qp_context);
+}
+
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
 {
 	mutex_lock(&xrcd->tgt_qp_mutex);
@@ -323,33 +332,90 @@ static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
 	mutex_unlock(&xrcd->tgt_qp_mutex);
 }
 
-static void __ib_remove_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
+				  void (*event_handler)(struct ib_event *, void *),
+				  void *qp_context)
 {
+	struct ib_qp *qp;
+	unsigned long flags;
+
+	qp = kzalloc(sizeof *qp, GFP_KERNEL);
+	if (!qp)
+		return ERR_PTR(-ENOMEM);
+
+	qp->real_qp = real_qp;
+	atomic_inc(&real_qp->usecnt);
+	qp->device = real_qp->device;
+	qp->event_handler = event_handler;
+	qp->qp_context = qp_context;
+	qp->qp_num = real_qp->qp_num;
+	qp->qp_type = real_qp->qp_type;
+
+	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+	list_add(&qp->open_list, &real_qp->open_list);
+	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+
+	return qp;
+}
+
+struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
+			 struct ib_qp_open_attr *qp_open_attr)
+{
+	struct ib_qp *qp, *real_qp;
+
+	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
+		return ERR_PTR(-EINVAL);
+
+	qp = ERR_PTR(-EINVAL);
 	mutex_lock(&xrcd->tgt_qp_mutex);
-	list_del(&qp->xrcd_list);
+	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
+		if (real_qp->qp_num == qp_open_attr->qp_num) {
+			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
+					  qp_open_attr->qp_context);
+			break;
+		}
+	}
 	mutex_unlock(&xrcd->tgt_qp_mutex);
+	return qp;
 }
+EXPORT_SYMBOL(ib_open_qp);
 
 struct ib_qp *ib_create_qp(struct ib_pd *pd,
 			   struct ib_qp_init_attr *qp_init_attr)
 {
-	struct ib_qp *qp;
+	struct ib_qp *qp, *real_qp;
 	struct ib_device *device;
 
 	device = pd ? pd->device : qp_init_attr->xrcd->device;
 	qp = device->create_qp(pd, qp_init_attr, NULL);
 
 	if (!IS_ERR(qp)) {
 		qp->device = device;
+		qp->real_qp = qp;
+		qp->uobject = NULL;
+		qp->qp_type = qp_init_attr->qp_type;
 
 		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+			qp->event_handler = __ib_shared_qp_event_handler;
+			qp->qp_context = qp;
 			qp->pd = NULL;
 			qp->send_cq = qp->recv_cq = NULL;
 			qp->srq = NULL;
 			qp->xrcd = qp_init_attr->xrcd;
 			atomic_inc(&qp_init_attr->xrcd->usecnt);
-			__ib_insert_xrcd_qp(qp_init_attr->xrcd, qp);
+			INIT_LIST_HEAD(&qp->open_list);
+			atomic_set(&qp->usecnt, 0);
+
+			real_qp = qp;
+			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+					  qp_init_attr->qp_context);
+			if (!IS_ERR(qp))
+				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
+			else
+				real_qp->device->destroy_qp(real_qp);
 		} else {
+			qp->event_handler = qp_init_attr->event_handler;
+			qp->qp_context = qp_init_attr->qp_context;
 			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
 				qp->recv_cq = NULL;
 				qp->srq = NULL;
@@ -368,11 +434,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 			atomic_inc(&pd->usecnt);
 			atomic_inc(&qp_init_attr->send_cq->usecnt);
 		}
-
-		qp->uobject = NULL;
-		qp->event_handler = qp_init_attr->event_handler;
-		qp->qp_context = qp_init_attr->qp_context;
-		qp->qp_type = qp_init_attr->qp_type;
 	}
 
 	return qp;
@@ -717,7 +778,7 @@ int ib_modify_qp(struct ib_qp *qp,
 		 struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask)
 {
-	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
+	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
 
@@ -727,26 +788,76 @@ int ib_query_qp(struct ib_qp *qp,
 		struct ib_qp_init_attr *qp_init_attr)
 {
 	return qp->device->query_qp ?
-		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
+		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
 		-ENOSYS;
 }
 EXPORT_SYMBOL(ib_query_qp);
 
+int ib_close_qp(struct ib_qp *qp)
+{
+	struct ib_qp *real_qp;
+	unsigned long flags;
+
+	real_qp = qp->real_qp;
+	if (real_qp == qp)
+		return -EINVAL;
+
+	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+	list_del(&qp->open_list);
+	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+
+	atomic_dec(&real_qp->usecnt);
+	kfree(qp);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_close_qp);
+
+static int __ib_destroy_shared_qp(struct ib_qp *qp)
+{
+	struct ib_xrcd *xrcd;
+	struct ib_qp *real_qp;
+	int ret;
+
+	real_qp = qp->real_qp;
+	xrcd = real_qp->xrcd;
+
+	mutex_lock(&xrcd->tgt_qp_mutex);
+	ib_close_qp(qp);
+	if (atomic_read(&real_qp->usecnt) == 0)
+		list_del(&real_qp->xrcd_list);
+	else
+		real_qp = NULL;
+	mutex_unlock(&xrcd->tgt_qp_mutex);
+
+	if (real_qp) {
+		ret = ib_destroy_qp(real_qp);
+		if (!ret)
+			atomic_dec(&xrcd->usecnt);
+		else
+			__ib_insert_xrcd_qp(xrcd, real_qp);
+	}
+
+	return 0;
+}
+
 int ib_destroy_qp(struct ib_qp *qp)
 {
 	struct ib_pd *pd;
 	struct ib_cq *scq, *rcq;
 	struct ib_srq *srq;
-	struct ib_xrcd *xrcd;
 	int ret;
 
+	if (atomic_read(&qp->usecnt))
+		return -EBUSY;
+
+	if (qp->real_qp != qp)
+		return __ib_destroy_shared_qp(qp);
+
 	pd = qp->pd;
 	scq = qp->send_cq;
 	rcq = qp->recv_cq;
 	srq = qp->srq;
-	xrcd = qp->xrcd;
-	if (xrcd)
-		__ib_remove_xrcd_qp(xrcd, qp);
 
 	ret = qp->device->destroy_qp(qp);
 	if (!ret) {
@@ -758,32 +869,12 @@ int ib_destroy_qp(struct ib_qp *qp)
 			atomic_dec(&rcq->usecnt);
 		if (srq)
 			atomic_dec(&srq->usecnt);
-		if (xrcd)
-			atomic_dec(&xrcd->usecnt);
-	} else if (xrcd) {
-		__ib_insert_xrcd_qp(xrcd, qp);
 	}
 
 	return ret;
 }
 EXPORT_SYMBOL(ib_destroy_qp);
 
-int ib_release_qp(struct ib_qp *qp)
-{
-	unsigned long flags;
-
-	if (qp->qp_type != IB_QPT_XRC_TGT)
-		return -EINVAL;
-
-	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
-	qp->event_handler = NULL;
-	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
-
-	atomic_dec(&qp->xrcd->usecnt);
-	return 0;
-}
-EXPORT_SYMBOL(ib_release_qp);
-
 /* Completion queues */
 
 struct ib_cq *ib_create_cq(struct ib_device *device,
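
A closing usage note, again as a hedged sketch rather than part of the patch,
assuming qp is a handle returned by ib_open_qp():

	/*
	 * ib_close_qp() drops only this opener's handle and returns -EINVAL
	 * if called on the real QP itself.  ib_destroy_qp() on an opened
	 * handle goes through __ib_destroy_shared_qp(), which frees the
	 * underlying XRC TGT QP only once the last handle is gone; calling
	 * it on a real QP that still has openers fails with -EBUSY.
	 */
	ret = ib_close_qp(qp);		/* leave the shared QP alive for others */

	/* or */
	ret = ib_destroy_qp(qp);	/* may also destroy the underlying QP */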