author     Sean Hefty <sean.hefty@intel.com>     2006-11-30 19:37:15 -0500
committer  Roland Dreier <rolandd@cisco.com>     2006-12-12 14:50:21 -0500
commit     0fe313b000b6a699afbbb59ef9c47a2b22146f1e (patch)
tree       d3a9a066218cdd07caa13b6b646e0fe140e4901d /drivers/infiniband
parent     a1b1b61f80aba49f1e0f32b0e4b1c35be91c57fa (diff)
RDMA/cma: Allow early transition to RTS to handle lost CM messages
During connection establishment, the passive side of a connection can
receive messages from the active side before the connection event has
been delivered to the user.  Allow the passive side to send messages
in response to received data before the event is delivered.  To handle
the case where the connection messages are lost, a new rdma_notify()
function is added that users may invoke to force a connection into the
established state.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
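For illustration, a minimal sketch of how a passive-side consumer might
drive the new rdma_notify() call.  It assumes a consumer-created QP whose
async event handler context was set to the owning rdma_cm_id when the QP
was created; the handler name and that plumbing are assumptions for this
example, not part of the patch.  If the RTU is lost but data from the
active side arrives, the HCA raises IB_EVENT_COMM_EST on the QP, and
forwarding that event through rdma_notify() forces the connection into
the established state:

        /* Illustrative QP async event handler (names assumed, not from
         * this patch).  qp_init_attr.qp_context is presumed to have been
         * set to the owning rdma_cm_id when the QP was created, so it
         * arrives here as the context argument. */
        static void example_qp_event_handler(struct ib_event *event, void *context)
        {
                struct rdma_cm_id *id = context;

                if (event->event == IB_EVENT_COMM_EST)
                        rdma_notify(id, IB_EVENT_COMM_EST);
        }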
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c    4
-rw-r--r--  drivers/infiniband/core/cma.c  68
2 files changed, 48 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 79c937bf6962..d446998b12a4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 
         spin_lock_irqsave(&cm_id_priv->lock, flags);
         switch (cm_id_priv->id.state) {
+        /* Allow transition to RTS before sending REP */
+        case IB_CM_REQ_RCVD:
+        case IB_CM_MRA_REQ_SENT:
+
         case IB_CM_REP_RCVD:
         case IB_CM_MRA_REP_SENT:
         case IB_CM_REP_SENT:
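The relaxed state check above is what permits the early transition:
while the passive side is still in IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT,
i.e. before the REP goes out, ib_cm_init_qp_attr() can now return RTS
attributes instead of failing.  As a bare call sequence (error handling
elided; the cm_id and qp variables are assumed), this is the pattern
cma_accept_ib() adopts in the cma.c hunks below:

        struct ib_qp_attr qp_attr;
        int qp_attr_mask;

        qp_attr.qp_state = IB_QPS_RTS;
        if (!ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask))
                ib_modify_qp(qp, &qp_attr, qp_attr_mask);  /* QP to RTS before REP */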
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 8187349fb25f..9b036706f4d1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -760,22 +760,6 @@ static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
         return 0;
 }
 
-static int cma_rtu_recv(struct rdma_id_private *id_priv)
-{
-        int ret;
-
-        ret = cma_modify_qp_rts(&id_priv->id);
-        if (ret)
-                goto reject;
-
-        return 0;
-reject:
-        cma_modify_qp_err(&id_priv->id);
-        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
-                       NULL, 0, NULL, 0);
-        return ret;
-}
-
 static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                    struct ib_cm_rep_event_param *rep_data,
                                    void *private_data)
@@ -821,9 +805,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                                        ib_event->private_data);
                 break;
         case IB_CM_RTU_RECEIVED:
-                event.status = cma_rtu_recv(id_priv);
-                event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
-                                             RDMA_CM_EVENT_ESTABLISHED;
+        case IB_CM_USER_ESTABLISHED:
+                event.event = RDMA_CM_EVENT_ESTABLISHED;
                 break;
         case IB_CM_DREQ_ERROR:
                 event.status = -ETIMEDOUT; /* fall through */
@@ -1989,11 +1972,25 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
                          struct rdma_conn_param *conn_param)
 {
         struct ib_cm_rep_param rep;
-        int ret;
+        struct ib_qp_attr qp_attr;
+        int qp_attr_mask, ret;
 
-        ret = cma_modify_qp_rtr(&id_priv->id);
-        if (ret)
-                return ret;
+        if (id_priv->id.qp) {
+                ret = cma_modify_qp_rtr(&id_priv->id);
+                if (ret)
+                        goto out;
+
+                qp_attr.qp_state = IB_QPS_RTS;
+                ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+                                         &qp_attr_mask);
+                if (ret)
+                        goto out;
+
+                qp_attr.max_rd_atomic = conn_param->initiator_depth;
+                ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+                if (ret)
+                        goto out;
+        }
 
         memset(&rep, 0, sizeof rep);
         rep.qp_num = id_priv->qp_num;
@@ -2008,7 +2005,9 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
         rep.rnr_retry_count = conn_param->rnr_retry_count;
         rep.srq = id_priv->srq ? 1 : 0;
 
-        return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+        ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+out:
+        return ret;
 }
 
 static int cma_accept_iw(struct rdma_id_private *id_priv,
@@ -2073,6 +2072,27 @@ reject:
 }
 EXPORT_SYMBOL(rdma_accept);
 
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
+{
+        struct rdma_id_private *id_priv;
+        int ret;
+
+        id_priv = container_of(id, struct rdma_id_private, id);
+        if (!cma_comp(id_priv, CMA_CONNECT))
+                return -EINVAL;
+
+        switch (id->device->node_type) {
+        case RDMA_NODE_IB_CA:
+                ret = ib_cm_notify(id_priv->cm_id.ib, event);
+                break;
+        default:
+                ret = 0;
+                break;
+        }
+        return ret;
+}
+EXPORT_SYMBOL(rdma_notify);
+
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
                 u8 private_data_len)
 {