diff options
author     Roland Dreier <roland@purestorage.com>   2014-01-23 02:24:21 -0500
committer  Roland Dreier <roland@purestorage.com>   2014-01-23 02:24:21 -0500
commit     fb1b5034e4987b158179a62732fb6dfb8f7ec88e (patch)
tree       93d02ff7f0f530286fd54d03b632c6eaabc9dcc9 /drivers/infiniband/core/cma.c
parent     8f399921ea9a562bc8221258c4b8a7bd69577939 (diff)
parent     27cdef637c25705b433d5c4deeef4cf8dcb75d6a (diff)
Merge branch 'ip-roce' into for-next
Conflicts:
drivers/infiniband/hw/mlx4/main.c
Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c | 74 ++++++++++++++++++++++++++++++++++++-------------
1 file changed, 61 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 4173a2ad6d08..9b079a7ea29c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
 	int ret;
 
 	if (addr->sa_family != AF_IB) {
-		ret = rdma_translate_ip(addr, dev_addr);
+		ret = rdma_translate_ip(addr, dev_addr, NULL);
 	} else {
 		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
 		ret = 0;
@@ -365,7 +365,9 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 		return -EINVAL;
 
 	mutex_lock(&lock);
-	iboe_addr_get_sgid(dev_addr, &iboe_gid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+		    &iboe_gid);
+
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
 	if (listen_id_priv &&
@@ -603,6 +605,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
+	union ib_gid sgid;
 
 	mutex_lock(&id_priv->qp_mutex);
 	if (!id_priv->id.qp) {
@@ -625,6 +628,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
+	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+			   qp_attr.ah_attr.grh.sgid_index, &sgid);
+	if (ret)
+		goto out;
+
+	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+	    == RDMA_TRANSPORT_IB &&
+	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+	    == IB_LINK_LAYER_ETHERNET) {
+		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+		if (ret)
+			goto out;
+	}
 	if (conn_param)
 		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +742,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
 						 qp_attr_mask);
+
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
 		break;
@@ -1266,6 +1284,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_id_private *listen_id, *conn_id;
 	struct rdma_cm_event event;
 	int offset, ret;
+	u8 smac[ETH_ALEN];
+	u8 alt_smac[ETH_ALEN];
+	u8 *psmac = smac;
+	u8 *palt_smac = alt_smac;
+	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+			RDMA_TRANSPORT_IB) &&
+		       (rdma_port_get_link_layer(cm_id->device,
+			ib_event->param.req_rcvd.port) ==
+			IB_LINK_LAYER_ETHERNET));
 
 	listen_id = cm_id->context;
 	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1337,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret)
 		goto err3;
 
+	if (is_iboe) {
+		if (ib_event->param.req_rcvd.primary_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.primary_path->sgid,
+				psmac, NULL);
+		else
+			psmac = NULL;
+		if (ib_event->param.req_rcvd.alternate_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.alternate_path->sgid,
+				palt_smac, NULL);
+		else
+			palt_smac = NULL;
+	}
 	/*
 	 * Acquire mutex to prevent user executing rdma_destroy_id()
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+	if (is_iboe)
+		ib_update_cm_av(cm_id, psmac, palt_smac);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+	    (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1518,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = RDMA_CM_CONNECT;
 
-	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1873,7 +1917,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	struct cma_work *work;
 	int ret;
 	struct net_device *ndev = NULL;
-	u16 vid;
+
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (!work)
@@ -1897,10 +1941,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	vid = rdma_vlan_dev_vlan_id(ndev);
+	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
 
-	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
-	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+		    &route->path_rec->sgid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
+		    &route->path_rec->dgid);
 
 	route->path_rec->hop_limit = 1;
 	route->path_rec->reversible = 1;
@@ -2063,6 +2111,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
+	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv, NULL);
 
@@ -2072,10 +2121,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
-	} else {
-		memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+	} else
 		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
-	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, RDMA_CM_DESTROYING);
@@ -2559,6 +2606,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	if (ret)
 		goto err1;
 
+	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
 	if (!cma_any_addr(addr)) {
 		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
 		if (ret)
@@ -2569,7 +2617,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 			goto err1;
 	}
 
-	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
 	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
 		if (addr->sa_family == AF_INET)
 			id_priv->afonly = 1;
@@ -3298,7 +3345,8 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 		err = -EINVAL;
 		goto out2;
 	}
-	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+		    &mc->multicast.ib->rec.port_gid);
 	work->id = id_priv;
 	work->mc = mc;
 	INIT_WORK(&work->work, iboe_mcast_work_handler);