author     Sean Hefty <sean.hefty@intel.com>        2006-11-30 19:44:16 -0500
committer  Roland Dreier <rolandd@cisco.com>        2006-12-12 14:50:21 -0500
commit     628e5f6d39d5a6be96c1272a6709f2dd3ec8b7ce
tree       91cd315e0bbc7873366fd589d69b4f182a1fd854  /drivers/infiniband
parent     0fe313b000b6a699afbbb59ef9c47a2b22146f1e
RDMA/cma: Add support for RDMA_PS_UDP
Allow the use of UD QPs through the rdma_cm, in order to provide
address translation services for resolving IB addresses for datagram
messages using SIDR.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
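
For context, a minimal consumer-side sketch of the new port space (illustrative only, not part of this patch): an ID created with RDMA_PS_UDP goes through the usual address and route resolution, but rdma_connect() now issues a SIDR REQ rather than a CM REQ, and a successful exchange surfaces as RDMA_CM_EVENT_ESTABLISHED carrying the remote UD QPN, Q_Key, and a ready-to-use address handle attribute in event->param.ud. The names my_cma_handler(), my_start_udp_resolution(), and use_remote_ud_dest() below are hypothetical consumer code; the rdma_cm calls follow the in-kernel API this file exports.

#include <rdma/rdma_cm.h>

/* Hypothetical consumer helper: create an AH and address the UD sends. */
static void use_remote_ud_dest(u32 qpn, u32 qkey, struct ib_ah_attr *ah_attr);

static int my_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rdma_conn_param param;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                return rdma_resolve_route(id, 2000);
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                /* For RDMA_PS_UDP this sends a SIDR REQ, not a CM REQ. */
                memset(&param, 0, sizeof param);
                return rdma_connect(id, &param);
        case RDMA_CM_EVENT_ESTABLISHED:
                /*
                 * The SIDR REP arrived: param.ud holds the remote QPN, the
                 * well-known RDMA_UD_QKEY, and an ah_attr initialized from
                 * the resolved path record.
                 */
                use_remote_ud_dest(event->param.ud.qp_num,
                                   event->param.ud.qkey,
                                   &event->param.ud.ah_attr);
                return 0;
        default:
                return 0;
        }
}

static int my_start_udp_resolution(struct sockaddr *src, struct sockaddr *dst)
{
        struct rdma_cm_id *id;

        id = rdma_create_id(my_cma_handler, NULL, RDMA_PS_UDP);
        if (IS_ERR(id))
                return PTR_ERR(id);

        /* Kicks off ADDR_RESOLVED -> ROUTE_RESOLVED -> the SIDR exchange. */
        return rdma_resolve_addr(id, src, dst, 2000);
}

On the passive side, a listener bound in RDMA_PS_UDP sees the SIDR REQ as RDMA_CM_EVENT_CONNECT_REQUEST and answers with rdma_accept() or rdma_reject(), which the patch routes to cma_send_sidr_rep().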
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c | 220
1 file changed, 204 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9b036706f4d..533193d4e5d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
 static struct workqueue_struct *cma_wq;
 static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);
 
 struct cma_device {
         struct list_head list;
@@ -508,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
         return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }
 
+static inline __be16 cma_port(struct sockaddr *addr)
+{
+        if (addr->sa_family == AF_INET)
+                return ((struct sockaddr_in *) addr)->sin_port;
+        else
+                return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
 static inline int cma_any_port(struct sockaddr *addr)
 {
-        return !((struct sockaddr_in *) addr)->sin_port;
+        return !cma_port(addr);
 }
 
 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -847,8 +856,8 @@ out:
         return ret;
 }
 
-static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
-                                          struct ib_cm_event *ib_event)
+static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
+                                               struct ib_cm_event *ib_event)
 {
         struct rdma_id_private *id_priv;
         struct rdma_cm_id *id;
@@ -895,6 +904,42 @@ err:
         return NULL;
 }
 
+static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
+                                              struct ib_cm_event *ib_event)
+{
+        struct rdma_id_private *id_priv;
+        struct rdma_cm_id *id;
+        union cma_ip_addr *src, *dst;
+        __u16 port;
+        u8 ip_ver;
+        int ret;
+
+        id = rdma_create_id(listen_id->event_handler, listen_id->context,
+                            listen_id->ps);
+        if (IS_ERR(id))
+                return NULL;
+
+
+        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+                             &ip_ver, &port, &src, &dst))
+                goto err;
+
+        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+                          ip_ver, port, src, dst);
+
+        ret = rdma_translate_ip(&id->route.addr.src_addr,
+                                &id->route.addr.dev_addr);
+        if (ret)
+                goto err;
+
+        id_priv = container_of(id, struct rdma_id_private, id);
+        id_priv->state = CMA_CONNECT;
+        return id_priv;
+err:
+        rdma_destroy_id(id);
+        return NULL;
+}
+
 static void cma_set_req_event_data(struct rdma_cm_event *event,
                                    struct ib_cm_req_event_param *req_data,
                                    void *private_data, int offset)
@@ -923,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                 goto out;
         }
 
-        conn_id = cma_new_id(&listen_id->id, ib_event);
+        memset(&event, 0, sizeof event);
+        offset = cma_user_data_offset(listen_id->id.ps);
+        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+        if (listen_id->id.ps == RDMA_PS_UDP) {
+                conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+                event.param.ud.private_data = ib_event->private_data + offset;
+                event.param.ud.private_data_len =
+                                IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
+        } else {
+                conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+                                       ib_event->private_data, offset);
+        }
         if (!conn_id) {
                 ret = -ENOMEM;
                 goto out;
@@ -940,11 +997,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         cm_id->context = conn_id;
         cm_id->cm_handler = cma_ib_handler;
 
-        offset = cma_user_data_offset(listen_id->id.ps);
-        memset(&event, 0, sizeof event);
-        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-        cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
-                               ib_event->private_data, offset);
         ret = conn_id->id.event_handler(&conn_id->id, &event);
         if (!ret)
                 goto out;
@@ -964,8 +1016,7 @@ out:
 
 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
 {
-        return cpu_to_be64(((u64)ps << 16) +
-                           be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+        return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
 }
 
 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1740,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
         case RDMA_PS_TCP:
                 ps = &tcp_ps;
                 break;
+        case RDMA_PS_UDP:
+                ps = &udp_ps;
+                break;
         default:
                 return -EPROTONOSUPPORT;
         }
@@ -1828,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
         return 0;
 }
 
+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+                                struct ib_cm_event *ib_event)
+{
+        struct rdma_id_private *id_priv = cm_id->context;
+        struct rdma_cm_event event;
+        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+        int ret = 0;
+
+        memset(&event, 0, sizeof event);
+        atomic_inc(&id_priv->dev_remove);
+        if (!cma_comp(id_priv, CMA_CONNECT))
+                goto out;
+
+        switch (ib_event->event) {
+        case IB_CM_SIDR_REQ_ERROR:
+                event.event = RDMA_CM_EVENT_UNREACHABLE;
+                event.status = -ETIMEDOUT;
+                break;
+        case IB_CM_SIDR_REP_RECEIVED:
+                event.param.ud.private_data = ib_event->private_data;
+                event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
+                if (rep->status != IB_SIDR_SUCCESS) {
+                        event.event = RDMA_CM_EVENT_UNREACHABLE;
+                        event.status = ib_event->param.sidr_rep_rcvd.status;
+                        break;
+                }
+                if (rep->qkey != RDMA_UD_QKEY) {
+                        event.event = RDMA_CM_EVENT_UNREACHABLE;
+                        event.status = -EINVAL;
+                        break;
+                }
+                ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
+                                     id_priv->id.route.path_rec,
+                                     &event.param.ud.ah_attr);
+                event.param.ud.qp_num = rep->qpn;
+                event.param.ud.qkey = rep->qkey;
+                event.event = RDMA_CM_EVENT_ESTABLISHED;
+                event.status = 0;
+                break;
+        default:
+                printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+                       ib_event->event);
+                goto out;
+        }
+
+        ret = id_priv->id.event_handler(&id_priv->id, &event);
+        if (ret) {
+                /* Destroy the CM ID by returning a non-zero value. */
+                id_priv->cm_id.ib = NULL;
+                cma_exch(id_priv, CMA_DESTROYING);
+                cma_release_remove(id_priv);
+                rdma_destroy_id(&id_priv->id);
+                return ret;
+        }
+out:
+        cma_release_remove(id_priv);
+        return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+                              struct rdma_conn_param *conn_param)
+{
+        struct ib_cm_sidr_req_param req;
+        struct rdma_route *route;
+        int ret;
+
+        req.private_data_len = sizeof(struct cma_hdr) +
+                               conn_param->private_data_len;
+        req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+        if (!req.private_data)
+                return -ENOMEM;
+
+        if (conn_param->private_data && conn_param->private_data_len)
+                memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+                       conn_param->private_data, conn_param->private_data_len);
+
+        route = &id_priv->id.route;
+        ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
+        if (ret)
+                goto out;
+
+        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+                                            cma_sidr_rep_handler, id_priv);
+        if (IS_ERR(id_priv->cm_id.ib)) {
+                ret = PTR_ERR(id_priv->cm_id.ib);
+                goto out;
+        }
+
+        req.path = route->path_rec;
+        req.service_id = cma_get_service_id(id_priv->id.ps,
+                                            &route->addr.dst_addr);
+        req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
+        req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+        ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+        if (ret) {
+                ib_destroy_cm_id(id_priv->cm_id.ib);
+                id_priv->cm_id.ib = NULL;
+        }
+out:
+        kfree(req.private_data);
+        return ret;
+}
+
 static int cma_connect_ib(struct rdma_id_private *id_priv,
                           struct rdma_conn_param *conn_param)
 {
@@ -1949,7 +2107,10 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
         switch (rdma_node_get_transport(id->device->node_type)) {
         case RDMA_TRANSPORT_IB:
-                ret = cma_connect_ib(id_priv, conn_param);
+                if (id->ps == RDMA_PS_UDP)
+                        ret = cma_resolve_ib_udp(id_priv, conn_param);
+                else
+                        ret = cma_connect_ib(id_priv, conn_param);
                 break;
         case RDMA_TRANSPORT_IWARP:
                 ret = cma_connect_iw(id_priv, conn_param);
@@ -2032,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
         return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
 }
 
+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+                             enum ib_cm_sidr_status status,
+                             const void *private_data, int private_data_len)
+{
+        struct ib_cm_sidr_rep_param rep;
+
+        memset(&rep, 0, sizeof rep);
+        rep.status = status;
+        if (status == IB_SIDR_SUCCESS) {
+                rep.qp_num = id_priv->qp_num;
+                rep.qkey = RDMA_UD_QKEY;
+        }
+        rep.private_data = private_data;
+        rep.private_data_len = private_data_len;
+
+        return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
         struct rdma_id_private *id_priv;
@@ -2048,7 +2227,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
         switch (rdma_node_get_transport(id->device->node_type)) {
         case RDMA_TRANSPORT_IB:
-                if (conn_param)
+                if (id->ps == RDMA_PS_UDP)
+                        ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+                                                conn_param->private_data,
+                                                conn_param->private_data_len);
+                else if (conn_param)
                         ret = cma_accept_ib(id_priv, conn_param);
                 else
                         ret = cma_rep_recv(id_priv);
@@ -2105,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
         switch (rdma_node_get_transport(id->device->node_type)) {
         case RDMA_TRANSPORT_IB:
-                ret = ib_send_cm_rej(id_priv->cm_id.ib,
-                                     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-                                     private_data, private_data_len);
+                if (id->ps == RDMA_PS_UDP)
+                        ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+                                                private_data, private_data_len);
+                else
+                        ret = ib_send_cm_rej(id_priv->cm_id.ib,
+                                             IB_CM_REJ_CONSUMER_DEFINED, NULL,
+                                             0, private_data, private_data_len);
                 break;
         case RDMA_TRANSPORT_IWARP:
                 ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2277,6 +2464,7 @@ static void cma_cleanup(void)
         destroy_workqueue(cma_wq);
         idr_destroy(&sdp_ps);
         idr_destroy(&tcp_ps);
+        idr_destroy(&udp_ps);
 }
 
 module_init(cma_init);
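
A closing note on the service-ID mapping exercised above (annotation, not part of the commit): cma_get_service_id() packs the port space into the bits above 16 and the IP port into the low 16 bits before converting to big-endian, which is how a SIDR REQ in the UDP port space reaches the right listener on the remote ib_cm. A quick worked example in host order, assuming RDMA_PS_UDP's value of 0x0111 from <rdma/rdma_cm.h> and an arbitrary example port:

/* Standalone worked example of the service-ID layout (host order). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ps   = 0x0111;  /* RDMA_PS_UDP, per rdma/rdma_cm.h */
        uint16_t port = 4791;    /* example UDP port, host order */
        uint64_t sid  = (ps << 16) + port;

        /* Prints 0x00000000011112b7: port space above, port below. */
        printf("service id: 0x%016llx\n", (unsigned long long) sid);
        return 0;
}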