about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/infiniband/core
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/cache.c   |  3
-rw-r--r--  drivers/infiniband/core/cma.c     | 84
-rw-r--r--  drivers/infiniband/core/device.c  |  6
3 files changed, 52 insertions(+), 41 deletions(-)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f9ba7d74dfc..9353992f9ee 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -302,7 +302,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
 	    event->event == IB_EVENT_LID_CHANGE    ||
 	    event->event == IB_EVENT_PKEY_CHANGE   ||
 	    event->event == IB_EVENT_SM_CHANGE     ||
-	    event->event == IB_EVENT_CLIENT_REREGISTER) {
+	    event->event == IB_EVENT_CLIENT_REREGISTER ||
+	    event->event == IB_EVENT_GID_CHANGE) {
 		work = kmalloc(sizeof *work, GFP_ATOMIC);
 		if (work) {
 			INIT_WORK(&work->work, ib_cache_task);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b6a33b3c516..ca4c5dcd713 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -359,6 +359,10 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
+	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+	    id_priv->id.ps == RDMA_PS_IPOIB)
+		return -EINVAL;
+
 	mutex_lock(&lock);
 	iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
@@ -406,11 +410,6 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
 	return 0;
 }
 
-static int cma_has_cm_dev(struct rdma_id_private *id_priv)
-{
-	return (id_priv->id.device && id_priv->cm_id.ib);
-}
-
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 				  void *context, enum rdma_port_space ps,
 				  enum ib_qp_type qp_type)
@@ -920,11 +919,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	if (id_priv->cma_dev) {
 		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
-			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+			if (id_priv->cm_id.ib)
 				ib_destroy_cm_id(id_priv->cm_id.ib);
 			break;
 		case RDMA_TRANSPORT_IWARP:
-			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+			if (id_priv->cm_id.iw)
 				iw_destroy_cm_id(id_priv->cm_id.iw);
 			break;
 		default:
@@ -1085,12 +1084,12 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 
 	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
 			     &ip_ver, &port, &src, &dst))
-		goto err;
+		return NULL;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
 			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
-		goto err;
+		return NULL;
 
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);
@@ -1100,7 +1099,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
 				       GFP_KERNEL);
 		if (!rt->path_rec)
-			goto destroy_id;
+			goto err;
 
 		rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
 		if (rt->num_paths == 2)
@@ -1114,7 +1113,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
 					&rt->addr.dev_addr);
 		if (ret)
-			goto destroy_id;
+			goto err;
 	}
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
@@ -1122,9 +1121,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
-destroy_id:
-	rdma_destroy_id(id);
 err:
+	rdma_destroy_id(id);
 	return NULL;
 }
 
@@ -1468,13 +1466,15 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 {
 	struct ib_cm_compare_data compare_data;
 	struct sockaddr *addr;
+	struct ib_cm_id	*id;
 	__be64 svc_id;
 	int ret;
 
-	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
-					    id_priv);
-	if (IS_ERR(id_priv->cm_id.ib))
-		return PTR_ERR(id_priv->cm_id.ib);
+	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
+	if (IS_ERR(id))
+		return PTR_ERR(id);
+
+	id_priv->cm_id.ib = id;
 
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
@@ -1497,12 +1497,15 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 {
 	int ret;
 	struct sockaddr_in *sin;
+	struct iw_cm_id	*id;
+
+	id = iw_create_cm_id(id_priv->id.device,
+			     iw_conn_req_handler,
+			     id_priv);
+	if (IS_ERR(id))
+		return PTR_ERR(id);
 
-	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
-					    iw_conn_req_handler,
-					    id_priv);
-	if (IS_ERR(id_priv->cm_id.iw))
-		return PTR_ERR(id_priv->cm_id.iw);
+	id_priv->cm_id.iw = id;
 
 	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
 	id_priv->cm_id.iw->local_addr = *sin;
@@ -2484,6 +2487,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 {
 	struct ib_cm_sidr_req_param req;
 	struct rdma_route *route;
+	struct ib_cm_id	*id;
 	int ret;
 
 	req.private_data_len = sizeof(struct cma_hdr) +
@@ -2501,12 +2505,13 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
-					    cma_sidr_rep_handler, id_priv);
-	if (IS_ERR(id_priv->cm_id.ib)) {
-		ret = PTR_ERR(id_priv->cm_id.ib);
+	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
+			     id_priv);
+	if (IS_ERR(id)) {
+		ret = PTR_ERR(id);
 		goto out;
 	}
+	id_priv->cm_id.ib = id;
 
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
@@ -2530,6 +2535,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	struct ib_cm_req_param req;
 	struct rdma_route *route;
 	void *private_data;
+	struct ib_cm_id	*id;
 	int offset, ret;
 
 	memset(&req, 0, sizeof req);
@@ -2543,12 +2549,12 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	memcpy(private_data + offset, conn_param->private_data,
 	       conn_param->private_data_len);
 
-	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
-					    id_priv);
-	if (IS_ERR(id_priv->cm_id.ib)) {
-		ret = PTR_ERR(id_priv->cm_id.ib);
+	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
+	if (IS_ERR(id)) {
+		ret = PTR_ERR(id);
 		goto out;
 	}
+	id_priv->cm_id.ib = id;
 
 	route = &id_priv->id.route;
 	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
@@ -2577,8 +2583,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 
 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
 out:
-	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
-		ib_destroy_cm_id(id_priv->cm_id.ib);
+	if (ret && !IS_ERR(id)) {
+		ib_destroy_cm_id(id);
 		id_priv->cm_id.ib = NULL;
 	}
 
@@ -2595,10 +2601,8 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	struct iw_cm_conn_param iw_param;
 
 	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
-	if (IS_ERR(cm_id)) {
-		ret = PTR_ERR(cm_id);
-		goto out;
-	}
+	if (IS_ERR(cm_id))
+		return PTR_ERR(cm_id);
 
 	id_priv->cm_id.iw = cm_id;
 
@@ -2622,7 +2626,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	iw_param.qpn = conn_param->qp_num;
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
-	if (ret && !IS_ERR(cm_id)) {
+	if (ret) {
 		iw_destroy_cm_id(cm_id);
 		id_priv->cm_id.iw = NULL;
 	}
@@ -2795,7 +2799,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_has_cm_dev(id_priv))
+	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
 	switch (id->device->node_type) {
@@ -2817,7 +2821,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_has_cm_dev(id_priv))
+	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2848,7 +2852,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_has_cm_dev(id_priv))
+	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 4007f721d25..e711de400a0 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -627,6 +627,9 @@ int ib_modify_device(struct ib_device *device,
 		     int device_modify_mask,
 		     struct ib_device_modify *device_modify)
 {
+	if (!device->modify_device)
+		return -ENOSYS;
+
 	return device->modify_device(device, device_modify_mask,
 				     device_modify);
 }
@@ -647,6 +650,9 @@ int ib_modify_port(struct ib_device *device,
 		   u8 port_num, int port_modify_mask,
 		   struct ib_port_modify *port_modify)
 {
+	if (!device->modify_port)
+		return -ENOSYS;
+
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 