about summary refs log tree commit diff stats
path: root/drivers/infiniband
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c             | 35
-rw-r--r--  drivers/infiniband/core/ucma.c            | 27
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c  |  2
3 files changed, 40 insertions, 24 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 451d39e19cb4..44be1c9ed05b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -265,11 +265,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
265 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); 265 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
266} 266}
267 267
268static inline int cma_is_ud_ps(enum rdma_port_space ps)
269{
270 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
271}
272
273static void cma_attach_to_dev(struct rdma_id_private *id_priv, 268static void cma_attach_to_dev(struct rdma_id_private *id_priv,
274 struct cma_device *cma_dev) 269 struct cma_device *cma_dev)
275{ 270{
@@ -415,7 +410,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
415} 410}
416 411
417struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 412struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
418 void *context, enum rdma_port_space ps) 413 void *context, enum rdma_port_space ps,
414 enum ib_qp_type qp_type)
419{ 415{
420 struct rdma_id_private *id_priv; 416 struct rdma_id_private *id_priv;
421 417
@@ -427,6 +423,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
427 id_priv->id.context = context; 423 id_priv->id.context = context;
428 id_priv->id.event_handler = event_handler; 424 id_priv->id.event_handler = event_handler;
429 id_priv->id.ps = ps; 425 id_priv->id.ps = ps;
426 id_priv->id.qp_type = qp_type;
430 spin_lock_init(&id_priv->lock); 427 spin_lock_init(&id_priv->lock);
431 mutex_init(&id_priv->qp_mutex); 428 mutex_init(&id_priv->qp_mutex);
432 init_completion(&id_priv->comp); 429 init_completion(&id_priv->comp);
@@ -494,7 +491,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
494 if (IS_ERR(qp)) 491 if (IS_ERR(qp))
495 return PTR_ERR(qp); 492 return PTR_ERR(qp);
496 493
497 if (cma_is_ud_ps(id_priv->id.ps)) 494 if (id->qp_type == IB_QPT_UD)
498 ret = cma_init_ud_qp(id_priv, qp); 495 ret = cma_init_ud_qp(id_priv, qp);
499 else 496 else
500 ret = cma_init_conn_qp(id_priv, qp); 497 ret = cma_init_conn_qp(id_priv, qp);
@@ -622,7 +619,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
622 qp_attr->port_num = id_priv->id.port_num; 619 qp_attr->port_num = id_priv->id.port_num;
623 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 620 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
624 621
625 if (cma_is_ud_ps(id_priv->id.ps)) { 622 if (id_priv->id.qp_type == IB_QPT_UD) {
626 ret = cma_set_qkey(id_priv); 623 ret = cma_set_qkey(id_priv);
627 if (ret) 624 if (ret)
628 return ret; 625 return ret;
@@ -645,7 +642,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
645 id_priv = container_of(id, struct rdma_id_private, id); 642 id_priv = container_of(id, struct rdma_id_private, id);
646 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 643 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
647 case RDMA_TRANSPORT_IB: 644 case RDMA_TRANSPORT_IB:
648 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) 645 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
649 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 646 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
650 else 647 else
651 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 648 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -1088,7 +1085,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1088 goto err; 1085 goto err;
1089 1086
1090 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1087 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1091 listen_id->ps); 1088 listen_id->ps, ib_event->param.req_rcvd.qp_type);
1092 if (IS_ERR(id)) 1089 if (IS_ERR(id))
1093 goto err; 1090 goto err;
1094 1091
@@ -1139,7 +1136,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1139 int ret; 1136 int ret;
1140 1137
1141 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1138 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1142 listen_id->ps); 1139 listen_id->ps, IB_QPT_UD);
1143 if (IS_ERR(id)) 1140 if (IS_ERR(id))
1144 return NULL; 1141 return NULL;
1145 1142
@@ -1194,7 +1191,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1194 memset(&event, 0, sizeof event); 1191 memset(&event, 0, sizeof event);
1195 offset = cma_user_data_offset(listen_id->id.ps); 1192 offset = cma_user_data_offset(listen_id->id.ps);
1196 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1193 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1197 if (cma_is_ud_ps(listen_id->id.ps)) { 1194 if (listen_id->id.qp_type == IB_QPT_UD) {
1198 conn_id = cma_new_udp_id(&listen_id->id, ib_event); 1195 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1199 event.param.ud.private_data = ib_event->private_data + offset; 1196 event.param.ud.private_data = ib_event->private_data + offset;
1200 event.param.ud.private_data_len = 1197 event.param.ud.private_data_len =
@@ -1230,8 +1227,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1230 * while we're accessing the cm_id. 1227 * while we're accessing the cm_id.
1231 */ 1228 */
1232 mutex_lock(&lock); 1229 mutex_lock(&lock);
1233 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 1230 if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
1234 !cma_is_ud_ps(conn_id->id.ps))
1235 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1231 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1236 mutex_unlock(&lock); 1232 mutex_unlock(&lock);
1237 mutex_unlock(&conn_id->handler_mutex); 1233 mutex_unlock(&conn_id->handler_mutex);
@@ -1386,7 +1382,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1386 /* Create a new RDMA id for the new IW CM ID */ 1382 /* Create a new RDMA id for the new IW CM ID */
1387 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1383 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1388 listen_id->id.context, 1384 listen_id->id.context,
1389 RDMA_PS_TCP); 1385 RDMA_PS_TCP, IB_QPT_RC);
1390 if (IS_ERR(new_cm_id)) { 1386 if (IS_ERR(new_cm_id)) {
1391 ret = -ENOMEM; 1387 ret = -ENOMEM;
1392 goto out; 1388 goto out;
@@ -1535,7 +1531,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1535 struct rdma_cm_id *id; 1531 struct rdma_cm_id *id;
1536 int ret; 1532 int ret;
1537 1533
1538 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); 1534 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
1535 id_priv->id.qp_type);
1539 if (IS_ERR(id)) 1536 if (IS_ERR(id))
1540 return; 1537 return;
1541 1538
@@ -2645,7 +2642,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2645 2642
2646 switch (rdma_node_get_transport(id->device->node_type)) { 2643 switch (rdma_node_get_transport(id->device->node_type)) {
2647 case RDMA_TRANSPORT_IB: 2644 case RDMA_TRANSPORT_IB:
2648 if (cma_is_ud_ps(id->ps)) 2645 if (id->qp_type == IB_QPT_UD)
2649 ret = cma_resolve_ib_udp(id_priv, conn_param); 2646 ret = cma_resolve_ib_udp(id_priv, conn_param);
2650 else 2647 else
2651 ret = cma_connect_ib(id_priv, conn_param); 2648 ret = cma_connect_ib(id_priv, conn_param);
@@ -2758,7 +2755,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2758 2755
2759 switch (rdma_node_get_transport(id->device->node_type)) { 2756 switch (rdma_node_get_transport(id->device->node_type)) {
2760 case RDMA_TRANSPORT_IB: 2757 case RDMA_TRANSPORT_IB:
2761 if (cma_is_ud_ps(id->ps)) 2758 if (id->qp_type == IB_QPT_UD)
2762 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2759 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2763 conn_param->private_data, 2760 conn_param->private_data,
2764 conn_param->private_data_len); 2761 conn_param->private_data_len);
@@ -2819,7 +2816,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2819 2816
2820 switch (rdma_node_get_transport(id->device->node_type)) { 2817 switch (rdma_node_get_transport(id->device->node_type)) {
2821 case RDMA_TRANSPORT_IB: 2818 case RDMA_TRANSPORT_IB:
2822 if (cma_is_ud_ps(id->ps)) 2819 if (id->qp_type == IB_QPT_UD)
2823 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2820 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2824 private_data, private_data_len); 2821 private_data, private_data_len);
2825 else 2822 else
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b3fa798525b2..7109d5d23ba5 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -367,13 +367,28 @@ done:
367 return ret; 367 return ret;
368} 368}
369 369
370static ssize_t ucma_create_id(struct ucma_file *file, 370static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
371 const char __user *inbuf, 371{
372 int in_len, int out_len) 372 switch (cmd->ps) {
373 case RDMA_PS_TCP:
374 *qp_type = IB_QPT_RC;
375 return 0;
376 case RDMA_PS_UDP:
377 case RDMA_PS_IPOIB:
378 *qp_type = IB_QPT_UD;
379 return 0;
380 default:
381 return -EINVAL;
382 }
383}
384
385static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
386 int in_len, int out_len)
373{ 387{
374 struct rdma_ucm_create_id cmd; 388 struct rdma_ucm_create_id cmd;
375 struct rdma_ucm_create_id_resp resp; 389 struct rdma_ucm_create_id_resp resp;
376 struct ucma_context *ctx; 390 struct ucma_context *ctx;
391 enum ib_qp_type qp_type;
377 int ret; 392 int ret;
378 393
379 if (out_len < sizeof(resp)) 394 if (out_len < sizeof(resp))
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file,
382 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 397 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
383 return -EFAULT; 398 return -EFAULT;
384 399
400 ret = ucma_get_qp_type(&cmd, &qp_type);
401 if (ret)
402 return ret;
403
385 mutex_lock(&file->mut); 404 mutex_lock(&file->mut);
386 ctx = ucma_alloc_ctx(file); 405 ctx = ucma_alloc_ctx(file);
387 mutex_unlock(&file->mut); 406 mutex_unlock(&file->mut);
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file,
389 return -ENOMEM; 408 return -ENOMEM;
390 409
391 ctx->uid = cmd.uid; 410 ctx->uid = cmd.uid;
392 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); 411 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
393 if (IS_ERR(ctx->cm_id)) { 412 if (IS_ERR(ctx->cm_id)) {
394 ret = PTR_ERR(ctx->cm_id); 413 ret = PTR_ERR(ctx->cm_id);
395 goto err1; 414 goto err1;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 9876865732f7..ede1475bee09 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn *ib_conn,
548 iser_conn_get(ib_conn); /* ref ib conn's cma id */ 548 iser_conn_get(ib_conn); /* ref ib conn's cma id */
549 ib_conn->cma_id = rdma_create_id(iser_cma_handler, 549 ib_conn->cma_id = rdma_create_id(iser_cma_handler,
550 (void *)ib_conn, 550 (void *)ib_conn,
551 RDMA_PS_TCP); 551 RDMA_PS_TCP, IB_QPT_RC);
552 if (IS_ERR(ib_conn->cma_id)) { 552 if (IS_ERR(ib_conn->cma_id)) {
553 err = PTR_ERR(ib_conn->cma_id); 553 err = PTR_ERR(ib_conn->cma_id);
554 iser_err("rdma_create_id failed: %d\n", err); 554 iser_err("rdma_create_id failed: %d\n", err);