Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/cma.c   37
-rw-r--r--  drivers/infiniband/core/ucma.c  14
2 files changed, 24 insertions(+), 27 deletions(-)
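Note: struct cma_multicast and struct ucma_multicast previously stored a bare struct sockaddr followed by a manual pad sized to hold a struct sockaddr_in6. The diff below replaces that pair with a single struct sockaddr_storage, which is defined to be large and aligned enough for any address family, and adds explicit (struct sockaddr *) casts at the call sites that still take a struct sockaddr pointer. A minimal userspace sketch of the layout change follows; it is illustrative only (not kernel code), and the struct and variable names are invented for the example:

/*
 * Illustrative only -- userspace sketch of the layout change, not kernel code.
 * Before: a bare sockaddr plus a manual pad sized for sockaddr_in6.
 * After: sockaddr_storage, which is guaranteed large and aligned enough for
 * any address family, so the pad (and its size arithmetic) goes away.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct mc_before {
	struct sockaddr addr;
	unsigned char pad[sizeof(struct sockaddr_in6) - sizeof(struct sockaddr)];
};

struct mc_after {
	struct sockaddr_storage addr;
};

int main(void)
{
	/* Both layouts can hold an IPv6 address, but only the second says so directly. */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct mc_before), sizeof(struct mc_after));

	struct mc_after mc = { 0 };
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &mc.addr;
	sin6->sin6_family = AF_INET6;

	/* Helpers that take struct sockaddr * now need an explicit cast,
	 * as the patched call sites in cma.c and ucma.c show. */
	struct sockaddr *sa = (struct sockaddr *) &mc.addr;
	return sa->sa_family == AF_INET6 ? 0 : 1;
}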
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e980ff3335db..d951896ff7fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -155,9 +155,7 @@ struct cma_multicast {
 	} multicast;
 	struct list_head list;
 	void *context;
-	struct sockaddr addr;
-	u8 pad[sizeof(struct sockaddr_in6) -
-	       sizeof(struct sockaddr)];
+	struct sockaddr_storage addr;
 };
 
 struct cma_work {
@@ -786,8 +784,8 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 		cma_cancel_route(id_priv);
 		break;
 	case CMA_LISTEN:
-		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
-		    !id_priv->cma_dev)
+		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
+		    && !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
 		break;
 	default:
@@ -1026,7 +1024,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
 	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto destroy_id;
@@ -1064,7 +1062,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);
 
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto err;
@@ -1377,7 +1375,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 	if (IS_ERR(id_priv->cm_id.ib))
 		return PTR_ERR(id_priv->cm_id.ib);
 
-	addr = &id_priv->id.route.addr.src_addr;
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
 	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
@@ -1443,7 +1441,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	dev_id_priv->state = CMA_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
-	       ip_addr_size(&id_priv->id.route.addr.src_addr));
+	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
@@ -1563,13 +1561,14 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
+						 (struct sockaddr *) &addr->dst_addr);
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
 
-	if (addr->src_addr.sa_family == AF_INET) {
+	if (addr->src_addr.ss_family == AF_INET) {
 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
 	} else {
@@ -1848,7 +1847,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
 
-	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
+	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
 		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
 		src_in->sin_family = dst_in->sin_family;
@@ -1897,7 +1896,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
 				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
@@ -2021,11 +2020,11 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
 	 * We don't support binding to any address if anyone is bound to
 	 * a specific address on the same port.
 	 */
-	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
+	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		return -EADDRNOTAVAIL;
 
 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
+		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
 			return -EADDRNOTAVAIL;
 
 		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
@@ -2060,7 +2059,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	}
 
 	mutex_lock(&lock);
-	if (cma_any_port(&id_priv->id.route.addr.src_addr))
+	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
@@ -2232,7 +2231,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
 
@@ -2283,7 +2282,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 		req.alternate_path = &route->path_rec[1];
 
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
 	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
@@ -2667,7 +2666,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	if (ret)
 		return ret;
 
-	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
+	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	if (id_priv->id.ps == RDMA_PS_UDP)
 		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 	ib_addr_get_sgid(dev_addr, &rec.port_gid);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b41dd26bbfa1..3ddacf39b7ba 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -81,9 +81,7 @@ struct ucma_multicast {
 
 	u64 uid;
 	struct list_head list;
-	struct sockaddr addr;
-	u8 pad[sizeof(struct sockaddr_in6) -
-	       sizeof(struct sockaddr)];
+	struct sockaddr_storage addr;
 };
 
 struct ucma_event {
@@ -603,11 +601,11 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		return PTR_ERR(ctx);
 
 	memset(&resp, 0, sizeof resp);
-	addr = &ctx->cm_id->route.addr.src_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
-	addr = &ctx->cm_id->route.addr.dst_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
@@ -913,7 +911,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 
 	mc->uid = cmd.uid;
 	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
-	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
 	if (ret)
 		goto err2;
 
@@ -929,7 +927,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	return 0;
 
 err3:
-	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
 	ucma_cleanup_mc_events(mc);
 err2:
 	mutex_lock(&mut);
@@ -975,7 +973,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}
 
-	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);