about summary refs log tree commit diff stats
path: root/drivers/infiniband
diff options
context:
space:
mode:
author Sean Hefty <sean.hefty@intel.com> 2011-05-13 13:46:20 -0400
committer Roland Dreier <roland@purestorage.com> 2011-10-13 12:37:55 -0400
commit d26a360b776d527429cf13300837711b0b2fde20 (patch)
tree 70462d3c1f74f18ac212960deebd1edba307530e /drivers/infiniband
parent b93f3c18727634a2e847f067e549762d096921cf (diff)
IB/cm: Update protocol to support XRC
Update the REQ and REP messages to support XRC connection setup
according to the XRC Annex. Several existing fields must be set to 0
or 1 when connecting XRC QPs, and a reserved field is changed to an
extended transport type.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c       | 46
-rw-r--r--  drivers/infiniband/core/cm_msgs.h  | 13
2 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index fc0f2bd9ca8..d2e1cfb206b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1008,7 +1008,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
1008 req_msg->service_id = param->service_id; 1008 req_msg->service_id = param->service_id;
1009 req_msg->local_ca_guid = cm_id_priv->id.device->node_guid; 1009 req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1010 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); 1010 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
1011 cm_req_set_resp_res(req_msg, param->responder_resources);
1012 cm_req_set_init_depth(req_msg, param->initiator_depth); 1011 cm_req_set_init_depth(req_msg, param->initiator_depth);
1013 cm_req_set_remote_resp_timeout(req_msg, 1012 cm_req_set_remote_resp_timeout(req_msg,
1014 param->remote_cm_response_timeout); 1013 param->remote_cm_response_timeout);
@@ -1017,12 +1016,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
1017 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); 1016 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
1018 cm_req_set_local_resp_timeout(req_msg, 1017 cm_req_set_local_resp_timeout(req_msg,
1019 param->local_cm_response_timeout); 1018 param->local_cm_response_timeout);
1020 cm_req_set_retry_count(req_msg, param->retry_count);
1021 req_msg->pkey = param->primary_path->pkey; 1019 req_msg->pkey = param->primary_path->pkey;
1022 cm_req_set_path_mtu(req_msg, param->primary_path->mtu); 1020 cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
1023 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
1024 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); 1021 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
1025 cm_req_set_srq(req_msg, param->srq); 1022
1023 if (param->qp_type != IB_QPT_XRC_INI) {
1024 cm_req_set_resp_res(req_msg, param->responder_resources);
1025 cm_req_set_retry_count(req_msg, param->retry_count);
1026 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
1027 cm_req_set_srq(req_msg, param->srq);
1028 }
1026 1029
1027 if (pri_path->hop_limit <= 1) { 1030 if (pri_path->hop_limit <= 1) {
1028 req_msg->primary_local_lid = pri_path->slid; 1031 req_msg->primary_local_lid = pri_path->slid;
@@ -1080,7 +1083,8 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
1080 if (!param->primary_path) 1083 if (!param->primary_path)
1081 return -EINVAL; 1084 return -EINVAL;
1082 1085
1083 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC) 1086 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1087 param->qp_type != IB_QPT_XRC_INI)
1084 return -EINVAL; 1088 return -EINVAL;
1085 1089
1086 if (param->private_data && 1090 if (param->private_data &&
@@ -1604,15 +1608,20 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
1604 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num)); 1608 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1605 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn)); 1609 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1606 rep_msg->resp_resources = param->responder_resources; 1610 rep_msg->resp_resources = param->responder_resources;
1607 rep_msg->initiator_depth = param->initiator_depth;
1608 cm_rep_set_target_ack_delay(rep_msg, 1611 cm_rep_set_target_ack_delay(rep_msg,
1609 cm_id_priv->av.port->cm_dev->ack_delay); 1612 cm_id_priv->av.port->cm_dev->ack_delay);
1610 cm_rep_set_failover(rep_msg, param->failover_accepted); 1613 cm_rep_set_failover(rep_msg, param->failover_accepted);
1611 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1612 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); 1614 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1613 cm_rep_set_srq(rep_msg, param->srq);
1614 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid; 1615 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1615 1616
1617 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
1618 rep_msg->initiator_depth = param->initiator_depth;
1619 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1620 cm_rep_set_srq(rep_msg, param->srq);
1621 } else {
1622 cm_rep_set_srq(rep_msg, 1);
1623 }
1624
1616 if (param->private_data && param->private_data_len) 1625 if (param->private_data && param->private_data_len)
1617 memcpy(rep_msg->private_data, param->private_data, 1626 memcpy(rep_msg->private_data, param->private_data,
1618 param->private_data_len); 1627 param->private_data_len);
@@ -3492,7 +3501,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3492 qp_attr->path_mtu = cm_id_priv->path_mtu; 3501 qp_attr->path_mtu = cm_id_priv->path_mtu;
3493 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 3502 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3494 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 3503 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3495 if (cm_id_priv->qp_type == IB_QPT_RC) { 3504 if (cm_id_priv->qp_type == IB_QPT_RC ||
3505 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3496 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 3506 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3497 IB_QP_MIN_RNR_TIMER; 3507 IB_QP_MIN_RNR_TIMER;
3498 qp_attr->max_dest_rd_atomic = 3508 qp_attr->max_dest_rd_atomic =
@@ -3537,15 +3547,21 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3537 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { 3547 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3538 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 3548 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3539 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 3549 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3540 if (cm_id_priv->qp_type == IB_QPT_RC) { 3550 switch (cm_id_priv->qp_type) {
3541 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | 3551 case IB_QPT_RC:
3542 IB_QP_RNR_RETRY | 3552 case IB_QPT_XRC_INI:
3553 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3543 IB_QP_MAX_QP_RD_ATOMIC; 3554 IB_QP_MAX_QP_RD_ATOMIC;
3544 qp_attr->timeout = cm_id_priv->av.timeout;
3545 qp_attr->retry_cnt = cm_id_priv->retry_count; 3555 qp_attr->retry_cnt = cm_id_priv->retry_count;
3546 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 3556 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3547 qp_attr->max_rd_atomic = 3557 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3548 cm_id_priv->initiator_depth; 3558 /* fall through */
3559 case IB_QPT_XRC_TGT:
3560 *qp_attr_mask |= IB_QP_TIMEOUT;
3561 qp_attr->timeout = cm_id_priv->av.timeout;
3562 break;
3563 default:
3564 break;
3549 } 3565 }
3550 if (cm_id_priv->alt_av.ah_attr.dlid) { 3566 if (cm_id_priv->alt_av.ah_attr.dlid) {
3551 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 3567 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 7e63c08f697..3ade3202597 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -86,7 +86,7 @@ struct cm_req_msg {
86 __be16 pkey; 86 __be16 pkey;
87 /* path MTU:4, RDC exists:1, RNR retry count:3. */ 87 /* path MTU:4, RDC exists:1, RNR retry count:3. */
88 u8 offset50; 88 u8 offset50;
89 /* max CM Retries:4, SRQ:1, rsvd:3 */ 89 /* max CM Retries:4, SRQ:1, extended transport type:3 */
90 u8 offset51; 90 u8 offset51;
91 91
92 __be16 primary_local_lid; 92 __be16 primary_local_lid;
@@ -175,6 +175,11 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
175 switch(transport_type) { 175 switch(transport_type) {
176 case 0: return IB_QPT_RC; 176 case 0: return IB_QPT_RC;
177 case 1: return IB_QPT_UC; 177 case 1: return IB_QPT_UC;
178 case 3:
179 switch (req_msg->offset51 & 0x7) {
180 case 1: return IB_QPT_XRC_TGT;
181 default: return 0;
182 }
178 default: return 0; 183 default: return 0;
179 } 184 }
180} 185}
@@ -188,6 +193,12 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
188 req_msg->offset40) & 193 req_msg->offset40) &
189 0xFFFFFFF9) | 0x2); 194 0xFFFFFFF9) | 0x2);
190 break; 195 break;
196 case IB_QPT_XRC_INI:
197 req_msg->offset40 = cpu_to_be32((be32_to_cpu(
198 req_msg->offset40) &
199 0xFFFFFFF9) | 0x6);
200 req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
201 break;
191 default: 202 default:
192 req_msg->offset40 = cpu_to_be32(be32_to_cpu( 203 req_msg->offset40 = cpu_to_be32(be32_to_cpu(
193 req_msg->offset40) & 204 req_msg->offset40) &