author		Sean Hefty <sean.hefty@intel.com>	2011-05-27 02:06:44 -0400
committer	Roland Dreier <roland@purestorage.com>	2011-10-13 12:20:27 -0400
commit		d3d72d909e783d048ee39046aa7b4fa798a4dda8 (patch)
tree		b773014148c09565b02ad542295b5c6caa7fd498 /drivers
parent		b42b63cf0dde2af6eec462b2d6cca7d938702a28 (diff)
RDMA/verbs: Cleanup XRC TGT QPs when destroying XRCD
XRC TGT QPs are intended to be shared among multiple users and processes.  Allow the destruction of an XRC TGT QP to be done explicitly through ib_destroy_qp() or when the XRCD is destroyed.

To support destroying an XRC TGT QP, we need to track TGT QPs with the XRCD.  When the XRCD is destroyed, all tracked XRC TGT QPs are also cleaned up.

To avoid stale reference issues, if a user is holding a reference on a TGT QP, we increment a reference count on the QP.  The user releases the reference by calling ib_release_qp().  This releases any access to the QP from a user above verbs, but allows the QP to continue to exist until destroyed by the XRCD.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
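As a rough sketch of the lifecycle this enables (hypothetical consumer code, not part of this patch: the device and pd variables, QP attribute setup, and all error handling are assumed or elided):

	struct ib_qp_init_attr attr = {};
	struct ib_xrcd *xrcd;
	struct ib_qp *qp;

	xrcd = ib_alloc_xrcd(device);	/* also sets up the TGT QP tracking list */

	attr.qp_type = IB_QPT_XRC_TGT;
	attr.xrcd = xrcd;
	qp = ib_create_qp(pd, &attr);	/* QP goes onto xrcd's TGT QP list */

	/* ... share the TGT QP with other users/processes ... */

	ib_release_qp(qp);	/* drop this user's access; the QP itself survives */
	ib_dealloc_xrcd(xrcd);	/* any TGT QPs still tracked are destroyed here */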
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/core/verbs.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 89277e5129be..8c6da5bda4c6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -316,6 +316,20 @@ EXPORT_SYMBOL(ib_destroy_srq);
 
 /* Queue pairs */
 
+static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+	mutex_lock(&xrcd->tgt_qp_mutex);
+	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
+	mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
+static void __ib_remove_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+	mutex_lock(&xrcd->tgt_qp_mutex);
+	list_del(&qp->xrcd_list);
+	mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
 struct ib_qp *ib_create_qp(struct ib_pd *pd,
 			   struct ib_qp_init_attr *qp_init_attr)
 {
@@ -334,6 +348,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 		qp->srq = NULL;
 		qp->xrcd = qp_init_attr->xrcd;
 		atomic_inc(&qp_init_attr->xrcd->usecnt);
+		__ib_insert_xrcd_qp(qp_init_attr->xrcd, qp);
 	} else {
 		if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
 			qp->recv_cq = NULL;
@@ -730,6 +745,8 @@ int ib_destroy_qp(struct ib_qp *qp)
 	rcq  = qp->recv_cq;
 	srq  = qp->srq;
 	xrcd = qp->xrcd;
+	if (xrcd)
+		__ib_remove_xrcd_qp(xrcd, qp);
 
 	ret = qp->device->destroy_qp(qp);
 	if (!ret) {
@@ -743,12 +760,30 @@ int ib_destroy_qp(struct ib_qp *qp)
 			atomic_dec(&srq->usecnt);
 		if (xrcd)
 			atomic_dec(&xrcd->usecnt);
+	} else if (xrcd) {
+		__ib_insert_xrcd_qp(xrcd, qp);
 	}
 
 	return ret;
 }
 EXPORT_SYMBOL(ib_destroy_qp);
 
+int ib_release_qp(struct ib_qp *qp)
+{
+	unsigned long flags;
+
+	if (qp->qp_type != IB_QPT_XRC_TGT)
+		return -EINVAL;
+
+	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+	qp->event_handler = NULL;
+	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
+
+	atomic_dec(&qp->xrcd->usecnt);
+	return 0;
+}
+EXPORT_SYMBOL(ib_release_qp);
+
 /* Completion queues */
 
 struct ib_cq *ib_create_cq(struct ib_device *device,
@@ -1062,6 +1097,8 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
 	if (!IS_ERR(xrcd)) {
 		xrcd->device = device;
 		atomic_set(&xrcd->usecnt, 0);
+		mutex_init(&xrcd->tgt_qp_mutex);
+		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
 	}
 
 	return xrcd;
@@ -1070,9 +1107,19 @@ EXPORT_SYMBOL(ib_alloc_xrcd);
 
 int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 {
+	struct ib_qp *qp;
+	int ret;
+
 	if (atomic_read(&xrcd->usecnt))
 		return -EBUSY;
 
+	while (!list_empty(&xrcd->tgt_qp_list)) {
+		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
+		ret = ib_destroy_qp(qp);
+		if (ret)
+			return ret;
+	}
+
 	return xrcd->device->dealloc_xrcd(xrcd);
 }
 EXPORT_SYMBOL(ib_dealloc_xrcd);
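Note that the new helpers and the cleanup loop rely on fields added to the verbs structures by this same commit; those hunks live in include/rdma/ib_verbs.h and are outside this view, since the diffstat is limited to 'drivers'.  Presumably they amount to something like the following (existing members elided):

	struct ib_xrcd {
		/* ... existing members ... */
		struct mutex		tgt_qp_mutex;	/* guards tgt_qp_list */
		struct list_head	tgt_qp_list;	/* XRC TGT QPs owned by this XRCD */
	};

	struct ib_qp {
		/* ... existing members ... */
		struct list_head	xrcd_list;	/* entry on xrcd->tgt_qp_list */
	};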