Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c  173
1 file changed, 117 insertions, 56 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 36b12d560e17..944cd90417bc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -44,6 +44,8 @@
 #include <linux/module.h>
 #include <net/route.h>
 
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/tcp.h>
 #include <net/ipv6.h>
 #include <net/ip_fib.h>
@@ -86,7 +88,7 @@ static const char * const cma_events[] = {
         [RDMA_CM_EVENT_TIMEWAIT_EXIT]   = "timewait exit",
 };
 
-const char *rdma_event_msg(enum rdma_cm_event_type event)
+const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
 {
         size_t index = event;
 
@@ -110,22 +112,33 @@ static LIST_HEAD(dev_list);
 static LIST_HEAD(listen_any_list);
 static DEFINE_MUTEX(lock);
 static struct workqueue_struct *cma_wq;
-static DEFINE_IDR(tcp_ps);
-static DEFINE_IDR(udp_ps);
-static DEFINE_IDR(ipoib_ps);
-static DEFINE_IDR(ib_ps);
+static int cma_pernet_id;
 
-static struct idr *cma_idr(enum rdma_port_space ps)
+struct cma_pernet {
+        struct idr tcp_ps;
+        struct idr udp_ps;
+        struct idr ipoib_ps;
+        struct idr ib_ps;
+};
+
+static struct cma_pernet *cma_pernet(struct net *net)
+{
+        return net_generic(net, cma_pernet_id);
+}
+
+static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
 {
+        struct cma_pernet *pernet = cma_pernet(net);
+
         switch (ps) {
         case RDMA_PS_TCP:
-                return &tcp_ps;
+                return &pernet->tcp_ps;
         case RDMA_PS_UDP:
-                return &udp_ps;
+                return &pernet->udp_ps;
         case RDMA_PS_IPOIB:
-                return &ipoib_ps;
+                return &pernet->ipoib_ps;
         case RDMA_PS_IB:
-                return &ib_ps;
+                return &pernet->ib_ps;
         default:
                 return NULL;
         }
@@ -145,24 +158,25 @@ struct rdma_bind_list {
         unsigned short          port;
 };
 
-static int cma_ps_alloc(enum rdma_port_space ps,
+static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
                         struct rdma_bind_list *bind_list, int snum)
 {
-        struct idr *idr = cma_idr(ps);
+        struct idr *idr = cma_pernet_idr(net, ps);
 
         return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
 }
 
-static struct rdma_bind_list *cma_ps_find(enum rdma_port_space ps, int snum)
+static struct rdma_bind_list *cma_ps_find(struct net *net,
+                                          enum rdma_port_space ps, int snum)
 {
-        struct idr *idr = cma_idr(ps);
+        struct idr *idr = cma_pernet_idr(net, ps);
 
         return idr_find(idr, snum);
 }
 
-static void cma_ps_remove(enum rdma_port_space ps, int snum)
+static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
 {
-        struct idr *idr = cma_idr(ps);
+        struct idr *idr = cma_pernet_idr(net, ps);
 
         idr_remove(idr, snum);
 }
@@ -427,10 +441,11 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
 }
 
 static inline int cma_validate_port(struct ib_device *device, u8 port,
-                                    union ib_gid *gid, int dev_type)
+                                    union ib_gid *gid, int dev_type,
+                                    int bound_if_index)
 {
-        u8 found_port;
         int ret = -ENODEV;
+        struct net_device *ndev = NULL;
 
         if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
                 return ret;
@@ -438,9 +453,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
         if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
                 return ret;
 
-        ret = ib_find_cached_gid(device, gid, &found_port, NULL);
-        if (port != found_port)
-                return -ENODEV;
+        if (dev_type == ARPHRD_ETHER)
+                ndev = dev_get_by_index(&init_net, bound_if_index);
+
+        ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
+
+        if (ndev)
+                dev_put(ndev);
 
         return ret;
 }
@@ -472,7 +491,8 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
                          &iboe_gid : &gid;
 
                 ret = cma_validate_port(cma_dev->device, port, gidp,
-                                        dev_addr->dev_type);
+                                        dev_addr->dev_type,
+                                        dev_addr->bound_dev_if);
                 if (!ret) {
                         id_priv->id.port_num = port;
                         goto out;
@@ -490,7 +510,8 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
                                  &iboe_gid : &gid;
 
                         ret = cma_validate_port(cma_dev->device, port, gidp,
-                                                dev_addr->dev_type);
+                                                dev_addr->dev_type,
+                                                dev_addr->bound_dev_if);
                         if (!ret) {
                                 id_priv->id.port_num = port;
                                 goto out;
@@ -531,7 +552,9 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                         continue;
 
-                for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
+                for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
+                                               &gid, NULL);
+                     i++) {
                         if (!memcmp(&gid, dgid, sizeof(gid))) {
                                 cma_dev = cur_dev;
                                 sgid = gid;
@@ -577,7 +600,8 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
         return 0;
 }
 
-struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
+struct rdma_cm_id *rdma_create_id(struct net *net,
+                                  rdma_cm_event_handler event_handler,
                                   void *context, enum rdma_port_space ps,
                                   enum ib_qp_type qp_type)
 {
@@ -601,6 +625,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
         INIT_LIST_HEAD(&id_priv->listen_list);
         INIT_LIST_HEAD(&id_priv->mc_list);
         get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
+        id_priv->id.route.addr.dev_addr.net = get_net(net);
 
         return &id_priv->id;
 }
@@ -718,18 +743,12 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
                 goto out;
 
         ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
-                           qp_attr.ah_attr.grh.sgid_index, &sgid);
+                           qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
         if (ret)
                 goto out;
 
         BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
 
-        if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
-                ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
-
-                if (ret)
-                        goto out;
-        }
         if (conn_param)
                 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
         ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -1260,7 +1279,7 @@ static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
                        cma_protocol_roce(&id_priv->id);
 
         return !addr->dev_addr.bound_dev_if ||
-               (net_eq(dev_net(net_dev), &init_net) &&
+               (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
                 addr->dev_addr.bound_dev_if == net_dev->ifindex);
 }
 
@@ -1321,7 +1340,8 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                 }
         }
 
-        bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
+        bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
+                                rdma_ps_from_service_id(req.service_id),
                                 cma_port_from_service_id(req.service_id));
         id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
         if (IS_ERR(id_priv) && *net_dev) {
@@ -1392,6 +1412,7 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 static void cma_release_port(struct rdma_id_private *id_priv)
 {
         struct rdma_bind_list *bind_list = id_priv->bind_list;
+        struct net *net = id_priv->id.route.addr.dev_addr.net;
 
         if (!bind_list)
                 return;
@@ -1399,7 +1420,7 @@ static void cma_release_port(struct rdma_id_private *id_priv)
         mutex_lock(&lock);
         hlist_del(&id_priv->node);
         if (hlist_empty(&bind_list->owners)) {
-                cma_ps_remove(bind_list->ps, bind_list->port);
+                cma_ps_remove(net, bind_list->ps, bind_list->port);
                 kfree(bind_list);
         }
         mutex_unlock(&lock);
@@ -1458,6 +1479,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
                 cma_deref_id(id_priv->id.context);
 
         kfree(id_priv->id.route.path_rec);
+        put_net(id_priv->id.route.addr.dev_addr.net);
         kfree(id_priv);
 }
 EXPORT_SYMBOL(rdma_destroy_id);
@@ -1588,7 +1610,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                 ib_event->param.req_rcvd.primary_path->service_id;
         int ret;
 
-        id = rdma_create_id(listen_id->event_handler, listen_id->context,
+        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
+                            listen_id->event_handler, listen_id->context,
                             listen_id->ps, ib_event->param.req_rcvd.qp_type);
         if (IS_ERR(id))
                 return NULL;
@@ -1643,9 +1666,10 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
         struct rdma_id_private *id_priv;
         struct rdma_cm_id *id;
         const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
+        struct net *net = listen_id->route.addr.dev_addr.net;
         int ret;
 
-        id = rdma_create_id(listen_id->event_handler, listen_id->context,
+        id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
                             listen_id->ps, IB_QPT_UD);
         if (IS_ERR(id))
                 return NULL;
@@ -1882,7 +1906,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                 return -ECONNABORTED;
 
         /* Create a new RDMA id for the new IW CM ID */
-        new_cm_id = rdma_create_id(listen_id->id.event_handler,
+        new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
+                                   listen_id->id.event_handler,
                                    listen_id->id.context,
                                    RDMA_PS_TCP, IB_QPT_RC);
         if (IS_ERR(new_cm_id)) {
@@ -2010,12 +2035,13 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 {
         struct rdma_id_private *dev_id_priv;
         struct rdma_cm_id *id;
+        struct net *net = id_priv->id.route.addr.dev_addr.net;
         int ret;
 
         if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                 return;
 
-        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+        id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
                             id_priv->id.qp_type);
         if (IS_ERR(id))
                 return;
@@ -2294,16 +2320,17 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
         route->num_paths = 1;
 
-        if (addr->dev_addr.bound_dev_if)
+        if (addr->dev_addr.bound_dev_if) {
                 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+                route->path_rec->net = &init_net;
+                route->path_rec->ifindex = addr->dev_addr.bound_dev_if;
+        }
         if (!ndev) {
                 ret = -ENODEV;
                 goto err2;
         }
 
-        route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
         memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
-        memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
 
         rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
                     &route->path_rec->sgid);
@@ -2426,7 +2453,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
         p = 1;
 
 port_found:
-        ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
+        ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
         if (ret)
                 goto out;
 
@@ -2688,7 +2715,8 @@ static int cma_alloc_port(enum rdma_port_space ps,
         if (!bind_list)
                 return -ENOMEM;
 
-        ret = cma_ps_alloc(ps, bind_list, snum);
+        ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
+                           snum);
         if (ret < 0)
                 goto err;
 
@@ -2707,13 +2735,14 @@ static int cma_alloc_any_port(enum rdma_port_space ps,
         static unsigned int last_used_port;
         int low, high, remaining;
         unsigned int rover;
+        struct net *net = id_priv->id.route.addr.dev_addr.net;
 
-        inet_get_local_port_range(&init_net, &low, &high);
+        inet_get_local_port_range(net, &low, &high);
         remaining = (high - low) + 1;
         rover = prandom_u32() % remaining + low;
 retry:
         if (last_used_port != rover &&
-            !cma_ps_find(ps, (unsigned short)rover)) {
+            !cma_ps_find(net, ps, (unsigned short)rover)) {
                 int ret = cma_alloc_port(ps, id_priv, rover);
                 /*
                  * Remember previously used port number in order to avoid
@@ -2779,7 +2808,7 @@ static int cma_use_port(enum rdma_port_space ps,
         if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                 return -EACCES;
 
-        bind_list = cma_ps_find(ps, snum);
+        bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
         if (!bind_list) {
                 ret = cma_alloc_port(ps, id_priv, snum);
         } else {
@@ -2971,8 +3000,11 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
                 if (addr->sa_family == AF_INET)
                         id_priv->afonly = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-                else if (addr->sa_family == AF_INET6)
-                        id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
+                else if (addr->sa_family == AF_INET6) {
+                        struct net *net = id_priv->id.route.addr.dev_addr.net;
+
+                        id_priv->afonly = net->ipv6.sysctl.bindv6only;
+                }
 #endif
         }
         ret = cma_get_port(id_priv);
@@ -3777,6 +3809,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
         dev_addr = &id_priv->id.route.addr.dev_addr;
 
         if ((dev_addr->bound_dev_if == ndev->ifindex) &&
+            (net_eq(dev_net(ndev), dev_addr->net)) &&
             memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
                 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
                        ndev->name, &id_priv->id);
@@ -3802,9 +3835,6 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
         struct rdma_id_private *id_priv;
         int ret = NOTIFY_DONE;
 
-        if (dev_net(ndev) != &init_net)
-                return NOTIFY_DONE;
-
         if (event != NETDEV_BONDING_FAILOVER)
                 return NOTIFY_DONE;
 
@@ -3999,6 +4029,35 @@ static const struct ibnl_client_cbs cma_cb_table[] = {
          .module = THIS_MODULE },
 };
 
+static int cma_init_net(struct net *net)
+{
+        struct cma_pernet *pernet = cma_pernet(net);
+
+        idr_init(&pernet->tcp_ps);
+        idr_init(&pernet->udp_ps);
+        idr_init(&pernet->ipoib_ps);
+        idr_init(&pernet->ib_ps);
+
+        return 0;
+}
+
+static void cma_exit_net(struct net *net)
+{
+        struct cma_pernet *pernet = cma_pernet(net);
+
+        idr_destroy(&pernet->tcp_ps);
+        idr_destroy(&pernet->udp_ps);
+        idr_destroy(&pernet->ipoib_ps);
+        idr_destroy(&pernet->ib_ps);
+}
+
+static struct pernet_operations cma_pernet_operations = {
+        .init = cma_init_net,
+        .exit = cma_exit_net,
+        .id = &cma_pernet_id,
+        .size = sizeof(struct cma_pernet),
+};
+
 static int __init cma_init(void)
 {
         int ret;
@@ -4007,6 +4066,10 @@ static int __init cma_init(void)
         if (!cma_wq)
                 return -ENOMEM;
 
+        ret = register_pernet_subsys(&cma_pernet_operations);
+        if (ret)
+                goto err_wq;
+
         ib_sa_register_client(&sa_client);
         rdma_addr_register_client(&addr_client);
         register_netdevice_notifier(&cma_nb);
@@ -4024,6 +4087,7 @@ err:
         unregister_netdevice_notifier(&cma_nb);
         rdma_addr_unregister_client(&addr_client);
         ib_sa_unregister_client(&sa_client);
+err_wq:
         destroy_workqueue(cma_wq);
         return ret;
 }
@@ -4035,11 +4099,8 @@ static void __exit cma_cleanup(void)
         unregister_netdevice_notifier(&cma_nb);
         rdma_addr_unregister_client(&addr_client);
         ib_sa_unregister_client(&sa_client);
+        unregister_pernet_subsys(&cma_pernet_operations);
         destroy_workqueue(cma_wq);
-        idr_destroy(&tcp_ps);
-        idr_destroy(&udp_ps);
-        idr_destroy(&ipoib_ps);
-        idr_destroy(&ib_ps);
 }
 
 module_init(cma_init);
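
The hunks above change rdma_create_id() to take the caller's network namespace as a new first argument, and RDMA CM port numbers are now allocated in per-namespace IDRs. As a minimal sketch of what a consumer of the changed API looks like (not part of this patch; my_cm_handler and my_create_example are hypothetical names, and &init_net merely stands in for whichever namespace the caller actually runs in):

#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        /* React to CM events (address resolved, connect request, ...). */
        return 0;
}

static int my_create_example(void)
{
        struct rdma_cm_id *id;

        /*
         * Ports bound on this ID are looked up in the IDRs of the
         * namespace passed here (see cma_pernet_idr() above).
         */
        id = rdma_create_id(&init_net, my_cm_handler, NULL,
                            RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id))
                return PTR_ERR(id);

        /* ... bind, resolve and connect as before ... */

        rdma_destroy_id(id);    /* drops the get_net() reference taken at creation */
        return 0;
}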