aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/core/cma.c
diff options
context:
space:
mode:
authorSean Hefty <sean.hefty@intel.com>2010-04-01 13:08:41 -0400
committerRoland Dreier <roland@purestorage.com>2011-05-25 16:46:23 -0400
commitb26f9b9949013fec31b23c426fc463164ae08891 (patch)
tree072bbf5abb93baea33a4aebaad2381ff69563a0b /drivers/infiniband/core/cma.c
parent9a7147b506ccae8552b0cf218b3c02982012eb4d (diff)
RDMA/cma: Pass QP type into rdma_create_id()
The RDMA CM currently infers the QP type from the port space selected by the user. In the future (e.g. with RDMA_PS_IB or XRC), there may not be a 1-1 correspondence between port space and QP type. For netlink export of RDMA CM state, we want to export the QP type to userspace, so it is cleaner to explicitly associate a QP type to an ID. Modify rdma_create_id() to allow the user to specify the QP type, and use it to make our selections of datagram versus connected mode. Signed-off-by: Sean Hefty <sean.hefty@intel.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--drivers/infiniband/core/cma.c35
1 file changed, 16 insertions, 19 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 451d39e19cb4..44be1c9ed05b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -265,11 +265,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
265 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); 265 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
266} 266}
267 267
268static inline int cma_is_ud_ps(enum rdma_port_space ps)
269{
270 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
271}
272
273static void cma_attach_to_dev(struct rdma_id_private *id_priv, 268static void cma_attach_to_dev(struct rdma_id_private *id_priv,
274 struct cma_device *cma_dev) 269 struct cma_device *cma_dev)
275{ 270{
@@ -415,7 +410,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
415} 410}
416 411
417struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 412struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
418 void *context, enum rdma_port_space ps) 413 void *context, enum rdma_port_space ps,
414 enum ib_qp_type qp_type)
419{ 415{
420 struct rdma_id_private *id_priv; 416 struct rdma_id_private *id_priv;
421 417
@@ -427,6 +423,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
427 id_priv->id.context = context; 423 id_priv->id.context = context;
428 id_priv->id.event_handler = event_handler; 424 id_priv->id.event_handler = event_handler;
429 id_priv->id.ps = ps; 425 id_priv->id.ps = ps;
426 id_priv->id.qp_type = qp_type;
430 spin_lock_init(&id_priv->lock); 427 spin_lock_init(&id_priv->lock);
431 mutex_init(&id_priv->qp_mutex); 428 mutex_init(&id_priv->qp_mutex);
432 init_completion(&id_priv->comp); 429 init_completion(&id_priv->comp);
@@ -494,7 +491,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
494 if (IS_ERR(qp)) 491 if (IS_ERR(qp))
495 return PTR_ERR(qp); 492 return PTR_ERR(qp);
496 493
497 if (cma_is_ud_ps(id_priv->id.ps)) 494 if (id->qp_type == IB_QPT_UD)
498 ret = cma_init_ud_qp(id_priv, qp); 495 ret = cma_init_ud_qp(id_priv, qp);
499 else 496 else
500 ret = cma_init_conn_qp(id_priv, qp); 497 ret = cma_init_conn_qp(id_priv, qp);
@@ -622,7 +619,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
622 qp_attr->port_num = id_priv->id.port_num; 619 qp_attr->port_num = id_priv->id.port_num;
623 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 620 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
624 621
625 if (cma_is_ud_ps(id_priv->id.ps)) { 622 if (id_priv->id.qp_type == IB_QPT_UD) {
626 ret = cma_set_qkey(id_priv); 623 ret = cma_set_qkey(id_priv);
627 if (ret) 624 if (ret)
628 return ret; 625 return ret;
@@ -645,7 +642,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
645 id_priv = container_of(id, struct rdma_id_private, id); 642 id_priv = container_of(id, struct rdma_id_private, id);
646 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 643 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
647 case RDMA_TRANSPORT_IB: 644 case RDMA_TRANSPORT_IB:
648 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) 645 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
649 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 646 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
650 else 647 else
651 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 648 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -1088,7 +1085,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1088 goto err; 1085 goto err;
1089 1086
1090 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1087 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1091 listen_id->ps); 1088 listen_id->ps, ib_event->param.req_rcvd.qp_type);
1092 if (IS_ERR(id)) 1089 if (IS_ERR(id))
1093 goto err; 1090 goto err;
1094 1091
@@ -1139,7 +1136,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1139 int ret; 1136 int ret;
1140 1137
1141 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1138 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1142 listen_id->ps); 1139 listen_id->ps, IB_QPT_UD);
1143 if (IS_ERR(id)) 1140 if (IS_ERR(id))
1144 return NULL; 1141 return NULL;
1145 1142
@@ -1194,7 +1191,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1194 memset(&event, 0, sizeof event); 1191 memset(&event, 0, sizeof event);
1195 offset = cma_user_data_offset(listen_id->id.ps); 1192 offset = cma_user_data_offset(listen_id->id.ps);
1196 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1193 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1197 if (cma_is_ud_ps(listen_id->id.ps)) { 1194 if (listen_id->id.qp_type == IB_QPT_UD) {
1198 conn_id = cma_new_udp_id(&listen_id->id, ib_event); 1195 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1199 event.param.ud.private_data = ib_event->private_data + offset; 1196 event.param.ud.private_data = ib_event->private_data + offset;
1200 event.param.ud.private_data_len = 1197 event.param.ud.private_data_len =
@@ -1230,8 +1227,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1230 * while we're accessing the cm_id. 1227 * while we're accessing the cm_id.
1231 */ 1228 */
1232 mutex_lock(&lock); 1229 mutex_lock(&lock);
1233 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 1230 if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
1234 !cma_is_ud_ps(conn_id->id.ps))
1235 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1231 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1236 mutex_unlock(&lock); 1232 mutex_unlock(&lock);
1237 mutex_unlock(&conn_id->handler_mutex); 1233 mutex_unlock(&conn_id->handler_mutex);
@@ -1386,7 +1382,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1386 /* Create a new RDMA id for the new IW CM ID */ 1382 /* Create a new RDMA id for the new IW CM ID */
1387 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1383 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1388 listen_id->id.context, 1384 listen_id->id.context,
1389 RDMA_PS_TCP); 1385 RDMA_PS_TCP, IB_QPT_RC);
1390 if (IS_ERR(new_cm_id)) { 1386 if (IS_ERR(new_cm_id)) {
1391 ret = -ENOMEM; 1387 ret = -ENOMEM;
1392 goto out; 1388 goto out;
@@ -1535,7 +1531,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1535 struct rdma_cm_id *id; 1531 struct rdma_cm_id *id;
1536 int ret; 1532 int ret;
1537 1533
1538 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); 1534 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
1535 id_priv->id.qp_type);
1539 if (IS_ERR(id)) 1536 if (IS_ERR(id))
1540 return; 1537 return;
1541 1538
@@ -2645,7 +2642,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2645 2642
2646 switch (rdma_node_get_transport(id->device->node_type)) { 2643 switch (rdma_node_get_transport(id->device->node_type)) {
2647 case RDMA_TRANSPORT_IB: 2644 case RDMA_TRANSPORT_IB:
2648 if (cma_is_ud_ps(id->ps)) 2645 if (id->qp_type == IB_QPT_UD)
2649 ret = cma_resolve_ib_udp(id_priv, conn_param); 2646 ret = cma_resolve_ib_udp(id_priv, conn_param);
2650 else 2647 else
2651 ret = cma_connect_ib(id_priv, conn_param); 2648 ret = cma_connect_ib(id_priv, conn_param);
@@ -2758,7 +2755,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2758 2755
2759 switch (rdma_node_get_transport(id->device->node_type)) { 2756 switch (rdma_node_get_transport(id->device->node_type)) {
2760 case RDMA_TRANSPORT_IB: 2757 case RDMA_TRANSPORT_IB:
2761 if (cma_is_ud_ps(id->ps)) 2758 if (id->qp_type == IB_QPT_UD)
2762 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2759 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2763 conn_param->private_data, 2760 conn_param->private_data,
2764 conn_param->private_data_len); 2761 conn_param->private_data_len);
@@ -2819,7 +2816,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2819 2816
2820 switch (rdma_node_get_transport(id->device->node_type)) { 2817 switch (rdma_node_get_transport(id->device->node_type)) {
2821 case RDMA_TRANSPORT_IB: 2818 case RDMA_TRANSPORT_IB:
2822 if (cma_is_ud_ps(id->ps)) 2819 if (id->qp_type == IB_QPT_UD)
2823 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2820 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2824 private_data, private_data_len); 2821 private_data, private_data_len);
2825 else 2822 else