author     Sean Hefty <sean.hefty@intel.com>          2013-05-29 13:09:23 -0400
committer  Roland Dreier <roland@purestorage.com>     2013-06-21 02:35:37 -0400
commit     5c438135adf90b33cb00e5351becf1e557bbdd9d (patch)
tree       abf0d3ed1ba29fffe24a5d14735ba571b4e8e43d
parent     e8160e15930969de709ba9b46df9571448b78ce5 (diff)
RDMA/cma: Set qkey for AF_IB
Allow the user to specify the qkey when using AF_IB.  The qkey is added
to struct rdma_ucm_conn_param in place of a reserved field, but for
backwards compatibility, it is only accessed if the associated
rdma_cm_id is using AF_IB.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
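For context only (not part of this patch): a minimal userspace sketch of how the new field might be used, assuming a librdmacm build whose struct rdma_conn_param already exposes the qkey member and a UD rdma_cm_id that was bound and resolved with AF_IB addresses. On the passive side, the qkey handed to rdma_accept() is forwarded by the kernel through cma_send_sidr_rep() into the SIDR REP; for AF_INET/AF_INET6 ids the field is ignored and the port-space default (RDMA_UDP_QKEY) still applies.

	/*
	 * Illustrative sketch, not from the patch: accept a SIDR REQ on a UD
	 * rdma_cm_id while supplying an explicit qkey.  The helper name is
	 * hypothetical; rdma_accept() and struct rdma_conn_param are the
	 * existing librdmacm interfaces.
	 */
	#include <string.h>
	#include <stdint.h>
	#include <rdma/rdma_cma.h>

	static int accept_ud_with_qkey(struct rdma_cm_id *id, uint32_t qkey)
	{
		struct rdma_conn_param param;

		memset(&param, 0, sizeof(param));
		param.qkey = qkey;	/* honoured only when the id uses AF_IB */

		return rdma_accept(id, &param);
	}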
-rw-r--r--  drivers/infiniband/core/cma.c      35
-rw-r--r--  drivers/infiniband/core/ucma.c      8
-rw-r--r--  include/rdma/rdma_cm.h              1
-rw-r--r--  include/uapi/rdma/rdma_user_cm.h    2
4 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 7a9b033e37ce..96d0b9a6e15e 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -293,16 +293,25 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
 	return id_priv->id.route.addr.src_addr.ss_family;
 }
 
-static int cma_set_qkey(struct rdma_id_private *id_priv)
+static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
 {
 	struct ib_sa_mcmember_rec rec;
 	int ret = 0;
 
-	if (id_priv->qkey)
+	if (id_priv->qkey) {
+		if (qkey && id_priv->qkey != qkey)
+			return -EINVAL;
 		return 0;
+	}
+
+	if (qkey) {
+		id_priv->qkey = qkey;
+		return 0;
+	}
 
 	switch (id_priv->id.ps) {
 	case RDMA_PS_UDP:
+	case RDMA_PS_IB:
 		id_priv->qkey = RDMA_UDP_QKEY;
 		break;
 	case RDMA_PS_IPOIB:
@@ -689,7 +698,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
 	if (id_priv->id.qp_type == IB_QPT_UD) {
-		ret = cma_set_qkey(id_priv);
+		ret = cma_set_qkey(id_priv, 0);
 		if (ret)
 			return ret;
 
@@ -2624,15 +2633,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 			event.status = ib_event->param.sidr_rep_rcvd.status;
 			break;
 		}
-		ret = cma_set_qkey(id_priv);
+		ret = cma_set_qkey(id_priv, rep->qkey);
 		if (ret) {
 			event.event = RDMA_CM_EVENT_ADDR_ERROR;
-			event.status = -EINVAL;
-			break;
-		}
-		if (id_priv->qkey != rep->qkey) {
-			event.event = RDMA_CM_EVENT_UNREACHABLE;
-			event.status = -EINVAL;
+			event.status = ret;
 			break;
 		}
 		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
@@ -2922,7 +2926,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
 }
 
 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
-			     enum ib_cm_sidr_status status,
+			     enum ib_cm_sidr_status status, u32 qkey,
 			     const void *private_data, int private_data_len)
 {
 	struct ib_cm_sidr_rep_param rep;
@@ -2931,7 +2935,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
 	memset(&rep, 0, sizeof rep);
 	rep.status = status;
 	if (status == IB_SIDR_SUCCESS) {
-		ret = cma_set_qkey(id_priv);
+		ret = cma_set_qkey(id_priv, qkey);
 		if (ret)
 			return ret;
 		rep.qp_num = id_priv->qp_num;
@@ -2965,11 +2969,12 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	if (id->qp_type == IB_QPT_UD) {
 		if (conn_param)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+						conn_param->qkey,
 						conn_param->private_data,
 						conn_param->private_data_len);
 		else
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
-						NULL, 0);
+						0, NULL, 0);
 	} else {
 		if (conn_param)
 			ret = cma_accept_ib(id_priv, conn_param);
@@ -3030,7 +3035,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
 		if (id->qp_type == IB_QPT_UD)
-			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
 						private_data, private_data_len);
 		else
 			ret = ib_send_cm_rej(id_priv->cm_id.ib,
@@ -3091,6 +3096,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
+	if (!status)
+		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
 	mutex_lock(&id_priv->qp_mutex);
 	if (!status && id_priv->id.qp)
 		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5ca44cd9b00c..e813774bf7a7 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -709,7 +709,8 @@ out:
 	return ret;
 }
 
-static void ucma_copy_conn_param(struct rdma_conn_param *dst,
+static void ucma_copy_conn_param(struct rdma_cm_id *id,
+				 struct rdma_conn_param *dst,
 				 struct rdma_ucm_conn_param *src)
 {
 	dst->private_data = src->private_data;
@@ -721,6 +722,7 @@ static void ucma_copy_conn_param(struct rdma_conn_param *dst,
 	dst->rnr_retry_count = src->rnr_retry_count;
 	dst->srq = src->srq;
 	dst->qp_num = src->qp_num;
+	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
 }
 
 static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
@@ -741,7 +743,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 	ret = rdma_connect(ctx->cm_id, &conn_param);
 	ucma_put_ctx(ctx);
 	return ret;
@@ -784,7 +786,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 		return PTR_ERR(ctx);
 
 	if (cmd.conn_param.valid) {
-		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 		mutex_lock(&file->mut);
 		ret = rdma_accept(ctx->cm_id, &conn_param);
 		if (!ret)
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 1e6c3c7af898..966f90ba8d8a 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -98,6 +98,7 @@ struct rdma_conn_param {
 	/* Fields below ignored if a QP is created on the rdma_cm_id. */
 	u8 srq;
 	u32 qp_num;
+	u32 qkey;
 };
 
 struct rdma_ud_param {
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 1ee9239ff8c2..29de08f603ac 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -131,7 +131,7 @@ struct rdma_ucm_query_route_resp {
 
 struct rdma_ucm_conn_param {
 	__u32 qp_num;
-	__u32 reserved;
+	__u32 qkey;
 	__u8 private_data[RDMA_MAX_PRIVATE_DATA];
 	__u8 private_data_len;
 	__u8 srq;