author    Sean Hefty <sean.hefty@intel.com>    2006-11-30 19:33:14 -0500
committer Roland Dreier <rolandd@cisco.com>    2006-12-12 14:50:21 -0500
commit    a1b1b61f80aba49f1e0f32b0e4b1c35be91c57fa (patch)
tree      8bd7253546835791ff5a74bf17464992ea038cbf /drivers/infiniband
parent    9b2e9c0c241e532d923fff23d9a7c0bd31bd96b1 (diff)
RDMA/cma: Report connect info with connect events
Connection information was never given to the recipient of a connection request or reply message; only the event was delivered. Report the connection data with the event, allowing the user to reject the connection based on the requested parameters or adjust their resources to match the request.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
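As an illustration of what the richer event enables (this is not part of the patch), a ULP's rdma_cm event handler could inspect event->param.conn when it receives RDMA_CM_EVENT_CONNECT_REQUEST and either size its rdma_accept() parameters to the request or reject it outright with rdma_reject(). The handler name and the MY_MAX_RESPONDER_RESOURCES limit below are invented for the sketch; rdma_accept(), rdma_reject(), and the rdma_conn_param fields are the existing RDMA CM API.

#include <linux/errno.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>

#define MY_MAX_RESPONDER_RESOURCES 4    /* hypothetical local limit */

static int my_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rdma_conn_param conn_param;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                /* Reject if the peer asks for more resources than we offer. */
                if (event->param.conn.responder_resources >
                    MY_MAX_RESPONDER_RESOURCES) {
                        rdma_reject(id, NULL, 0);
                        /* A non-zero return asks the CMA to destroy this id. */
                        return -ECONNREFUSED;
                }

                /* Match our side of the connection to what was requested. */
                memset(&conn_param, 0, sizeof conn_param);
                conn_param.responder_resources =
                                event->param.conn.responder_resources;
                conn_param.initiator_depth = event->param.conn.initiator_depth;
                return rdma_accept(id, &conn_param);
        default:
                return 0;
        }
}

Before this change the handler saw only the event type and status, so a decision like the one above could not be made at connect-request time.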
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--    drivers/infiniband/core/cma.c    142
1 file changed, 85 insertions(+), 57 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 914a5a4c7f1e..8187349fb25f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -592,20 +592,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
         }
 }
 
-static int cma_notify_user(struct rdma_id_private *id_priv,
-                           enum rdma_cm_event_type type, int status,
-                           void *data, u8 data_len)
-{
-        struct rdma_cm_event event;
-
-        event.event = type;
-        event.status = status;
-        event.private_data = data;
-        event.private_data_len = data_len;
-
-        return id_priv->id.event_handler(&id_priv->id, &event);
-}
-
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
         switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
@@ -790,47 +776,62 @@ reject:
         return ret;
 }
 
+static void cma_set_rep_event_data(struct rdma_cm_event *event,
+                                   struct ib_cm_rep_event_param *rep_data,
+                                   void *private_data)
+{
+        event->param.conn.private_data = private_data;
+        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+        event->param.conn.responder_resources = rep_data->responder_resources;
+        event->param.conn.initiator_depth = rep_data->initiator_depth;
+        event->param.conn.flow_control = rep_data->flow_control;
+        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
+        event->param.conn.srq = rep_data->srq;
+        event->param.conn.qp_num = rep_data->remote_qpn;
+}
+
 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
         struct rdma_id_private *id_priv = cm_id->context;
-        enum rdma_cm_event_type event;
-        u8 private_data_len = 0;
-        int ret = 0, status = 0;
+        struct rdma_cm_event event;
+        int ret = 0;
 
         atomic_inc(&id_priv->dev_remove);
         if (!cma_comp(id_priv, CMA_CONNECT))
                 goto out;
 
+        memset(&event, 0, sizeof event);
         switch (ib_event->event) {
         case IB_CM_REQ_ERROR:
         case IB_CM_REP_ERROR:
-                event = RDMA_CM_EVENT_UNREACHABLE;
-                status = -ETIMEDOUT;
+                event.event = RDMA_CM_EVENT_UNREACHABLE;
+                event.status = -ETIMEDOUT;
                 break;
         case IB_CM_REP_RECEIVED:
-                status = cma_verify_rep(id_priv, ib_event->private_data);
-                if (status)
-                        event = RDMA_CM_EVENT_CONNECT_ERROR;
+                event.status = cma_verify_rep(id_priv, ib_event->private_data);
+                if (event.status)
+                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
-                        status = cma_rep_recv(id_priv);
-                        event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
+                        event.status = cma_rep_recv(id_priv);
+                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                          RDMA_CM_EVENT_ESTABLISHED;
                 } else
-                        event = RDMA_CM_EVENT_CONNECT_RESPONSE;
-                private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
+                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
+                                       ib_event->private_data);
                 break;
         case IB_CM_RTU_RECEIVED:
-                status = cma_rtu_recv(id_priv);
-                event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
+                event.status = cma_rtu_recv(id_priv);
+                event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                  RDMA_CM_EVENT_ESTABLISHED;
                 break;
         case IB_CM_DREQ_ERROR:
-                status = -ETIMEDOUT; /* fall through */
+                event.status = -ETIMEDOUT; /* fall through */
         case IB_CM_DREQ_RECEIVED:
         case IB_CM_DREP_RECEIVED:
                 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                         goto out;
-                event = RDMA_CM_EVENT_DISCONNECTED;
+                event.event = RDMA_CM_EVENT_DISCONNECTED;
                 break;
         case IB_CM_TIMEWAIT_EXIT:
         case IB_CM_MRA_RECEIVED:
@@ -838,9 +839,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                 goto out;
         case IB_CM_REJ_RECEIVED:
                 cma_modify_qp_err(&id_priv->id);
-                status = ib_event->param.rej_rcvd.reason;
-                event = RDMA_CM_EVENT_REJECTED;
-                private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
+                event.status = ib_event->param.rej_rcvd.reason;
+                event.event = RDMA_CM_EVENT_REJECTED;
+                event.param.conn.private_data = ib_event->private_data;
+                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                 break;
         default:
                 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -848,8 +850,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                 goto out;
         }
 
-        ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
-                              private_data_len);
+        ret = id_priv->id.event_handler(&id_priv->id, &event);
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.ib = NULL;
@@ -911,9 +912,25 @@ err:
         return NULL;
 }
 
+static void cma_set_req_event_data(struct rdma_cm_event *event,
+                                   struct ib_cm_req_event_param *req_data,
+                                   void *private_data, int offset)
+{
+        event->param.conn.private_data = private_data + offset;
+        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
+        event->param.conn.responder_resources = req_data->responder_resources;
+        event->param.conn.initiator_depth = req_data->initiator_depth;
+        event->param.conn.flow_control = req_data->flow_control;
+        event->param.conn.retry_count = req_data->retry_count;
+        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
+        event->param.conn.srq = req_data->srq;
+        event->param.conn.qp_num = req_data->remote_qpn;
+}
+
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
         struct rdma_id_private *listen_id, *conn_id;
+        struct rdma_cm_event event;
         int offset, ret;
 
         listen_id = cm_id->context;
@@ -941,9 +958,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         cm_id->cm_handler = cma_ib_handler;
 
         offset = cma_user_data_offset(listen_id->id.ps);
-        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
-                              ib_event->private_data + offset,
-                              IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
+        memset(&event, 0, sizeof event);
+        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+        cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+                               ib_event->private_data, offset);
+        ret = conn_id->id.event_handler(&conn_id->id, &event);
         if (!ret)
                 goto out;
 
@@ -1019,15 +1038,16 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
         struct rdma_id_private *id_priv = iw_id->context;
-        enum rdma_cm_event_type event = 0;
+        struct rdma_cm_event event;
         struct sockaddr_in *sin;
         int ret = 0;
 
+        memset(&event, 0, sizeof event);
         atomic_inc(&id_priv->dev_remove);
 
         switch (iw_event->event) {
         case IW_CM_EVENT_CLOSE:
-                event = RDMA_CM_EVENT_DISCONNECTED;
+                event.event = RDMA_CM_EVENT_DISCONNECTED;
                 break;
         case IW_CM_EVENT_CONNECT_REPLY:
                 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
@@ -1035,20 +1055,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                 *sin = iw_event->remote_addr;
                 if (iw_event->status)
-                        event = RDMA_CM_EVENT_REJECTED;
+                        event.event = RDMA_CM_EVENT_REJECTED;
                 else
-                        event = RDMA_CM_EVENT_ESTABLISHED;
+                        event.event = RDMA_CM_EVENT_ESTABLISHED;
                 break;
         case IW_CM_EVENT_ESTABLISHED:
-                event = RDMA_CM_EVENT_ESTABLISHED;
+                event.event = RDMA_CM_EVENT_ESTABLISHED;
                 break;
         default:
                 BUG_ON(1);
         }
 
-        ret = cma_notify_user(id_priv, event, iw_event->status,
-                              iw_event->private_data,
-                              iw_event->private_data_len);
+        event.status = iw_event->status;
+        event.param.conn.private_data = iw_event->private_data;
+        event.param.conn.private_data_len = iw_event->private_data_len;
+        ret = id_priv->id.event_handler(&id_priv->id, &event);
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.iw = NULL;
@@ -1069,6 +1090,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         struct rdma_id_private *listen_id, *conn_id;
         struct sockaddr_in *sin;
         struct net_device *dev = NULL;
+        struct rdma_cm_event event;
         int ret;
 
         listen_id = cm_id->context;
@@ -1122,9 +1144,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
         *sin = iw_event->remote_addr;
 
-        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
-                              iw_event->private_data,
-                              iw_event->private_data_len);
+        memset(&event, 0, sizeof event);
+        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+        event.param.conn.private_data = iw_event->private_data;
+        event.param.conn.private_data_len = iw_event->private_data_len;
+        ret = conn_id->id.event_handler(&conn_id->id, &event);
         if (ret) {
                 /* User wants to destroy the CM ID */
                 conn_id->cm_id.iw = NULL;
@@ -1513,8 +1537,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
                          struct rdma_dev_addr *dev_addr, void *context)
 {
         struct rdma_id_private *id_priv = context;
-        enum rdma_cm_event_type event;
+        struct rdma_cm_event event;
 
+        memset(&event, 0, sizeof event);
         atomic_inc(&id_priv->dev_remove);
 
         /*
@@ -1534,14 +1559,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
         if (status) {
                 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
                         goto out;
-                event = RDMA_CM_EVENT_ADDR_ERROR;
+                event.event = RDMA_CM_EVENT_ADDR_ERROR;
+                event.status = status;
         } else {
                 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
                        ip_addr_size(src_addr));
-                event = RDMA_CM_EVENT_ADDR_RESOLVED;
+                event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
         }
 
-        if (cma_notify_user(id_priv, event, status, NULL, 0)) {
+        if (id_priv->id.event_handler(&id_priv->id, &event)) {
                 cma_exch(id_priv, CMA_DESTROYING);
                 cma_release_remove(id_priv);
                 cma_deref_id(id_priv);
@@ -2132,6 +2158,7 @@ static void cma_add_one(struct ib_device *device)
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
+        struct rdma_cm_event event;
         enum cma_state state;
 
         /* Record that we want to remove the device */
@@ -2146,8 +2173,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
         if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
                 return 0;
 
-        return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
-                               0, NULL, 0);
+        memset(&event, 0, sizeof event);
+        event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
+        return id_priv->id.event_handler(&id_priv->id, &event);
 }
 
 static void cma_process_remove(struct cma_device *cma_dev)