 drivers/infiniband/core/cma.c | 220
 include/rdma/rdma_cm.h        |  15
 include/rdma/rdma_cm_ib.h     |   3
 3 files changed, 222 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9b036706f4d1..533193d4e5df 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
 static struct workqueue_struct *cma_wq;
 static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);
 
 struct cma_device {
 	struct list_head list;
@@ -508,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }
 
+static inline __be16 cma_port(struct sockaddr *addr)
+{
+	if (addr->sa_family == AF_INET)
+		return ((struct sockaddr_in *) addr)->sin_port;
+	else
+		return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
 static inline int cma_any_port(struct sockaddr *addr)
 {
-	return !((struct sockaddr_in *) addr)->sin_port;
+	return !cma_port(addr);
 }
 
 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -847,8 +856,8 @@ out:
 	return ret;
 }
 
-static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
-					  struct ib_cm_event *ib_event)
+static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
+					       struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv;
 	struct rdma_cm_id *id;
@@ -895,6 +904,42 @@ err:
 	return NULL;
 }
 
+static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
+					      struct ib_cm_event *ib_event)
+{
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id;
+	union cma_ip_addr *src, *dst;
+	__u16 port;
+	u8 ip_ver;
+	int ret;
+
+	id = rdma_create_id(listen_id->event_handler, listen_id->context,
+			    listen_id->ps);
+	if (IS_ERR(id))
+		return NULL;
+
+
+	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+			     &ip_ver, &port, &src, &dst))
+		goto err;
+
+	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+			  ip_ver, port, src, dst);
+
+	ret = rdma_translate_ip(&id->route.addr.src_addr,
+				&id->route.addr.dev_addr);
+	if (ret)
+		goto err;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	id_priv->state = CMA_CONNECT;
+	return id_priv;
+err:
+	rdma_destroy_id(id);
+	return NULL;
+}
+
 static void cma_set_req_event_data(struct rdma_cm_event *event,
 				   struct ib_cm_req_event_param *req_data,
 				   void *private_data, int offset)
@@ -923,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		goto out;
 	}
 
-	conn_id = cma_new_id(&listen_id->id, ib_event);
+	memset(&event, 0, sizeof event);
+	offset = cma_user_data_offset(listen_id->id.ps);
+	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+	if (listen_id->id.ps == RDMA_PS_UDP) {
+		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+		event.param.ud.private_data = ib_event->private_data + offset;
+		event.param.ud.private_data_len =
+				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
+	} else {
+		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+				       ib_event->private_data, offset);
+	}
 	if (!conn_id) {
 		ret = -ENOMEM;
 		goto out;
@@ -940,11 +997,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	cm_id->context = conn_id;
 	cm_id->cm_handler = cma_ib_handler;
 
-	offset = cma_user_data_offset(listen_id->id.ps);
-	memset(&event, 0, sizeof event);
-	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
-			       ib_event->private_data, offset);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (!ret)
 		goto out;
@@ -964,8 +1016,7 @@ out:
 
 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
 {
-	return cpu_to_be64(((u64)ps << 16) +
-		be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
 }
 
 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1740,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	case RDMA_PS_TCP:
 		ps = &tcp_ps;
 		break;
+	case RDMA_PS_UDP:
+		ps = &udp_ps;
+		break;
 	default:
 		return -EPROTONOSUPPORT;
 	}
@@ -1828,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
 	return 0;
 }
 
+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+				struct ib_cm_event *ib_event)
+{
+	struct rdma_id_private *id_priv = cm_id->context;
+	struct rdma_cm_event event;
+	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+	int ret = 0;
+
+	memset(&event, 0, sizeof event);
+	atomic_inc(&id_priv->dev_remove);
+	if (!cma_comp(id_priv, CMA_CONNECT))
+		goto out;
+
+	switch (ib_event->event) {
+	case IB_CM_SIDR_REQ_ERROR:
+		event.event = RDMA_CM_EVENT_UNREACHABLE;
+		event.status = -ETIMEDOUT;
+		break;
+	case IB_CM_SIDR_REP_RECEIVED:
+		event.param.ud.private_data = ib_event->private_data;
+		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
+		if (rep->status != IB_SIDR_SUCCESS) {
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			event.status = ib_event->param.sidr_rep_rcvd.status;
+			break;
+		}
+		if (rep->qkey != RDMA_UD_QKEY) {
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			event.status = -EINVAL;
+			break;
+		}
+		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
+				     id_priv->id.route.path_rec,
+				     &event.param.ud.ah_attr);
+		event.param.ud.qp_num = rep->qpn;
+		event.param.ud.qkey = rep->qkey;
+		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.status = 0;
+		break;
+	default:
+		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+		       ib_event->event);
+		goto out;
+	}
+
+	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	if (ret) {
+		/* Destroy the CM ID by returning a non-zero value. */
+		id_priv->cm_id.ib = NULL;
+		cma_exch(id_priv, CMA_DESTROYING);
+		cma_release_remove(id_priv);
+		rdma_destroy_id(&id_priv->id);
+		return ret;
+	}
+out:
+	cma_release_remove(id_priv);
+	return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+			      struct rdma_conn_param *conn_param)
+{
+	struct ib_cm_sidr_req_param req;
+	struct rdma_route *route;
+	int ret;
+
+	req.private_data_len = sizeof(struct cma_hdr) +
+			       conn_param->private_data_len;
+	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+	if (!req.private_data)
+		return -ENOMEM;
+
+	if (conn_param->private_data && conn_param->private_data_len)
+		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+		       conn_param->private_data, conn_param->private_data_len);
+
+	route = &id_priv->id.route;
+	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
+	if (ret)
+		goto out;
+
+	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+					    cma_sidr_rep_handler, id_priv);
+	if (IS_ERR(id_priv->cm_id.ib)) {
+		ret = PTR_ERR(id_priv->cm_id.ib);
+		goto out;
+	}
+
+	req.path = route->path_rec;
+	req.service_id = cma_get_service_id(id_priv->id.ps,
+					    &route->addr.dst_addr);
+	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
+	req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+	if (ret) {
+		ib_destroy_cm_id(id_priv->cm_id.ib);
+		id_priv->cm_id.ib = NULL;
+	}
+out:
+	kfree(req.private_data);
+	return ret;
+}
+
 static int cma_connect_ib(struct rdma_id_private *id_priv,
 			  struct rdma_conn_param *conn_param)
 {
@@ -1949,7 +2107,10 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_connect_ib(id_priv, conn_param);
+		if (id->ps == RDMA_PS_UDP)
+			ret = cma_resolve_ib_udp(id_priv, conn_param);
+		else
+			ret = cma_connect_ib(id_priv, conn_param);
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_connect_iw(id_priv, conn_param);
@@ -2032,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
 	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
 }
 
+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+			     enum ib_cm_sidr_status status,
+			     const void *private_data, int private_data_len)
+{
+	struct ib_cm_sidr_rep_param rep;
+
+	memset(&rep, 0, sizeof rep);
+	rep.status = status;
+	if (status == IB_SIDR_SUCCESS) {
+		rep.qp_num = id_priv->qp_num;
+		rep.qkey = RDMA_UD_QKEY;
+	}
+	rep.private_data = private_data;
+	rep.private_data_len = private_data_len;
+
+	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
 	struct rdma_id_private *id_priv;
@@ -2048,7 +2227,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (conn_param)
+		if (id->ps == RDMA_PS_UDP)
+			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+						conn_param->private_data,
+						conn_param->private_data_len);
+		else if (conn_param)
 			ret = cma_accept_ib(id_priv, conn_param);
 		else
 			ret = cma_rep_recv(id_priv);
@@ -2105,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = ib_send_cm_rej(id_priv->cm_id.ib,
-				     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-				     private_data, private_data_len);
+		if (id->ps == RDMA_PS_UDP)
+			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+						private_data, private_data_len);
+		else
+			ret = ib_send_cm_rej(id_priv->cm_id.ib,
+					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
+					     0, private_data, private_data_len);
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2277,6 +2464,7 @@ static void cma_cleanup(void)
 	destroy_workqueue(cma_wq);
 	idr_destroy(&sdp_ps);
 	idr_destroy(&tcp_ps);
+	idr_destroy(&udp_ps);
 }
 
 module_init(cma_init);
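
Note on the listen-side flow above: for an id bound to RDMA_PS_UDP, cma_req_handler() turns an incoming SIDR REQ into an RDMA_CM_EVENT_CONNECT_REQUEST whose data is delivered through the new event->param.ud member, and rdma_accept() on the child id ends up in cma_send_sidr_rep(). The fragment below is only a sketch of what a passive-side consumer's event handler might look like; the handler name and reply payload are made up, and it assumes the listener has already bound a UD QP to the id (e.g. via rdma_create_qp()) so a valid QPN is reported in the SIDR REP.

static int my_udp_listen_handler(struct rdma_cm_id *id,
				 struct rdma_cm_event *event)
{
	static const char rep_data[] = "hello";	/* hypothetical REP payload */
	struct rdma_conn_param conn_param;

	if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
		return 0;

	/* SIDR REQ private data arrives in the new ud union member. */
	printk(KERN_INFO "UD connect request, %d bytes of private data\n",
	       event->param.ud.private_data_len);

	/* On an RDMA_PS_UDP id, rdma_accept() sends the SIDR REP. */
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.private_data = rep_data;
	conn_param.private_data_len = sizeof(rep_data);
	return rdma_accept(id, &conn_param);	/* non-zero return destroys the id */
}
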
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 6b8ec486d06b..36cd8a8526a0 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -90,11 +90,20 @@ struct rdma_conn_param {
 	u32 qp_num;
 };
 
+struct rdma_ud_param {
+	const void *private_data;
+	u8 private_data_len;
+	struct ib_ah_attr ah_attr;
+	u32 qp_num;
+	u32 qkey;
+};
+
 struct rdma_cm_event {
 	enum rdma_cm_event_type event;
 	int status;
 	union {
 		struct rdma_conn_param conn;
+		struct rdma_ud_param ud;
 	} param;
 };
 
@@ -220,9 +229,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 
 /**
  * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
  *
  * Users must have resolved a route for the rdma_cm_id to connect with
  * by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's.  The actual operation is
+ * based on the rdma_cm_id's port space.
  */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
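
The added kernel-doc spells out the dual behaviour of rdma_connect(): on a connected port space it sends a CM REQ, while on an unconnected (RDMA_PS_UDP) id it only resolves the remote QP via a SIDR exchange. Below is a hedged sketch of the active side; the handler, my_pd (assumed to be a PD allocated on id->device) and the per-peer my_ud_dest structure are illustrative, not part of the API. rdma_connect() itself is called exactly as for RDMA_PS_TCP, typically from the RDMA_CM_EVENT_ROUTE_RESOLVED handler; only the port space decides whether cma_connect_ib() or cma_resolve_ib_udp() runs.

struct my_ud_dest {			/* illustrative per-destination state */
	struct ib_ah *ah;
	u32 remote_qpn;
	u32 remote_qkey;
};

static int my_udp_active_handler(struct rdma_cm_id *id,
				 struct rdma_cm_event *event)
{
	struct my_ud_dest *dest = id->context;

	if (event->event != RDMA_CM_EVENT_ESTABLISHED)
		return 0;

	/* cma_sidr_rep_handler() filled these in from the SIDR REP. */
	dest->ah = ib_create_ah(my_pd, &event->param.ud.ah_attr);
	if (IS_ERR(dest->ah))
		return PTR_ERR(dest->ah);	/* non-zero return destroys the id */
	dest->remote_qpn = event->param.ud.qp_num;
	dest->remote_qkey = event->param.ud.qkey;	/* == RDMA_UD_QKEY */
	return 0;
}
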
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d4..9b176df1d667 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
 int rdma_set_ib_paths(struct rdma_cm_id *id,
 		      struct ib_sa_path_rec *path_rec, int num_paths);
 
+/* Global qkey for UD QPs and multicast groups. */
+#define RDMA_UD_QKEY 0x01234567
+
 #endif /* RDMA_CM_IB_H */
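
RDMA_UD_QKEY is the well-known Q_Key both ends of a UDP port-space id agree on: cma_send_sidr_rep() advertises it in the SIDR REP and cma_sidr_rep_handler() rejects replies carrying anything else. A consumer that manages its own UD QP therefore has to program the same value into the QP. The fragment below is a minimal sketch of that RESET-to-INIT transition, assuming qp and port come from the caller; it is not something this patch adds, and a QP created through rdma_create_qp() would normally be initialized by the CMA instead.

	struct ib_qp_attr attr = {
		.qp_state   = IB_QPS_INIT,
		.pkey_index = 0,
		.port_num   = port,		/* assumed: active port for this id */
		.qkey       = RDMA_UD_QKEY,	/* must match what the CMA expects */
	};
	int ret = ib_modify_qp(qp, &attr,
			       IB_QP_STATE | IB_QP_PKEY_INDEX |
			       IB_QP_PORT | IB_QP_QKEY);

Sends to the peer then use the same value as the remote qkey in the UD work request, together with the QPN and address handle reported in event->param.ud.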