74 files changed, 3282 insertions, 925 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index fc0f2bd9ca82..4104ea2427c2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -889,6 +889,8 @@ retest:
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irq(&cm_id_priv->lock);
+		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+			break;
 		ib_send_cm_dreq(cm_id, NULL, 0);
 		goto retest;
 	case IB_CM_DREQ_SENT:
@@ -1008,7 +1010,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	req_msg->service_id = param->service_id;
 	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
-	cm_req_set_resp_res(req_msg, param->responder_resources);
 	cm_req_set_init_depth(req_msg, param->initiator_depth);
 	cm_req_set_remote_resp_timeout(req_msg,
 				       param->remote_cm_response_timeout);
@@ -1017,12 +1018,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
 	cm_req_set_local_resp_timeout(req_msg,
 				      param->local_cm_response_timeout);
-	cm_req_set_retry_count(req_msg, param->retry_count);
 	req_msg->pkey = param->primary_path->pkey;
 	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
-	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
 	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
-	cm_req_set_srq(req_msg, param->srq);
+
+	if (param->qp_type != IB_QPT_XRC_INI) {
+		cm_req_set_resp_res(req_msg, param->responder_resources);
+		cm_req_set_retry_count(req_msg, param->retry_count);
+		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
+		cm_req_set_srq(req_msg, param->srq);
+	}
 
 	if (pri_path->hop_limit <= 1) {
 		req_msg->primary_local_lid = pri_path->slid;
@@ -1080,7 +1085,8 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
 	if (!param->primary_path)
 		return -EINVAL;
 
-	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
+	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
+	    param->qp_type != IB_QPT_XRC_INI)
 		return -EINVAL;
 
 	if (param->private_data &&
@@ -1601,18 +1607,24 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
 	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
 	rep_msg->local_comm_id = cm_id_priv->id.local_id;
 	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
 	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
 	rep_msg->resp_resources = param->responder_resources;
-	rep_msg->initiator_depth = param->initiator_depth;
 	cm_rep_set_target_ack_delay(rep_msg,
 				    cm_id_priv->av.port->cm_dev->ack_delay);
 	cm_rep_set_failover(rep_msg, param->failover_accepted);
-	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
-	cm_rep_set_srq(rep_msg, param->srq);
 	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 
+	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
+		rep_msg->initiator_depth = param->initiator_depth;
+		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
+		cm_rep_set_srq(rep_msg, param->srq);
+		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+	} else {
+		cm_rep_set_srq(rep_msg, 1);
+		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+	}
+
 	if (param->private_data && param->private_data_len)
 		memcpy(rep_msg->private_data, param->private_data,
 		       param->private_data_len);
@@ -1660,7 +1672,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
 	cm_id_priv->initiator_depth = param->initiator_depth;
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
-	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
 
 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 	return ret;
@@ -1731,7 +1743,7 @@ error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_rtu);
 
-static void cm_format_rep_event(struct cm_work *work)
+static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
 {
 	struct cm_rep_msg *rep_msg;
 	struct ib_cm_rep_event_param *param;
@@ -1740,7 +1752,7 @@ static void cm_format_rep_event(struct cm_work *work)
 	param = &work->cm_event.param.rep_rcvd;
 	param->remote_ca_guid = rep_msg->local_ca_guid;
 	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
-	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
+	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
 	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
 	param->responder_resources = rep_msg->initiator_depth;
 	param->initiator_depth = rep_msg->resp_resources;
@@ -1808,7 +1820,7 @@ static int cm_rep_handler(struct cm_work *work)
 		return -EINVAL;
 	}
 
-	cm_format_rep_event(work);
+	cm_format_rep_event(work, cm_id_priv->qp_type);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
@@ -1823,7 +1835,7 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
 	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
-	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 
 	spin_lock(&cm.lock);
 	/* Check for duplicate REP. */
@@ -1850,7 +1862,7 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_id_priv->id.state = IB_CM_REP_RCVD;
 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
-	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 	cm_id_priv->initiator_depth = rep_msg->resp_resources;
 	cm_id_priv->responder_resources = rep_msg->initiator_depth;
 	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
@@ -3492,7 +3504,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
+		if (cm_id_priv->qp_type == IB_QPT_RC ||
+		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
 			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
 					 IB_QP_MIN_RNR_TIMER;
 			qp_attr->max_dest_rd_atomic =
@@ -3537,15 +3550,21 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
 			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
 			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-			if (cm_id_priv->qp_type == IB_QPT_RC) {
-				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-						 IB_QP_RNR_RETRY |
+			switch (cm_id_priv->qp_type) {
+			case IB_QPT_RC:
+			case IB_QPT_XRC_INI:
+				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
 						 IB_QP_MAX_QP_RD_ATOMIC;
-				qp_attr->timeout = cm_id_priv->av.timeout;
 				qp_attr->retry_cnt = cm_id_priv->retry_count;
 				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-				qp_attr->max_rd_atomic =
-					cm_id_priv->initiator_depth;
+				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
+				/* fall through */
+			case IB_QPT_XRC_TGT:
+				*qp_attr_mask |= IB_QP_TIMEOUT;
+				qp_attr->timeout = cm_id_priv->av.timeout;
+				break;
+			default:
+				break;
 			}
 			if (cm_id_priv->alt_av.ah_attr.dlid) {
 				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
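The new switch deliberately falls through from the RC/XRC INI cases into IB_QPT_XRC_TGT, so every connected type picks up IB_QP_TIMEOUT while only initiators get the retry/atomic attributes. A stand-alone sketch of that selection (the mask bits and type names below are illustrative stand-ins, not the kernel's IB_QP_* definitions):

#include <stdio.h>

enum { QP_TIMEOUT = 1, QP_RETRY_CNT = 2, QP_RNR_RETRY = 4,
       QP_MAX_QP_RD_ATOMIC = 8 };
enum qp_type { QPT_RC, QPT_UC, QPT_XRC_INI, QPT_XRC_TGT };

static int rts_mask(enum qp_type type)
{
	int mask = 0;

	switch (type) {
	case QPT_RC:
	case QPT_XRC_INI:
		mask |= QP_RETRY_CNT | QP_RNR_RETRY | QP_MAX_QP_RD_ATOMIC;
		/* fall through: initiators also need the timeout */
	case QPT_XRC_TGT:
		mask |= QP_TIMEOUT;
		break;
	default:
		break;
	}
	return mask;
}

int main(void)
{
	printf("RC=0x%x INI=0x%x TGT=0x%x UC=0x%x\n",
	       rts_mask(QPT_RC), rts_mask(QPT_XRC_INI),
	       rts_mask(QPT_XRC_TGT), rts_mask(QPT_UC));
	return 0;	/* prints RC=0xf INI=0xf TGT=0x1 UC=0x0 */
}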
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 7e63c08f697c..505db2a59e7f 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
  *
@@ -86,7 +86,7 @@ struct cm_req_msg {
 	__be16 pkey;
 	/* path MTU:4, RDC exists:1, RNR retry count:3. */
 	u8 offset50;
-	/* max CM Retries:4, SRQ:1, rsvd:3 */
+	/* max CM Retries:4, SRQ:1, extended transport type:3 */
 	u8 offset51;
 
 	__be16 primary_local_lid;
@@ -175,6 +175,11 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 	switch(transport_type) {
 	case 0: return IB_QPT_RC;
 	case 1: return IB_QPT_UC;
+	case 3:
+		switch (req_msg->offset51 & 0x7) {
+		case 1: return IB_QPT_XRC_TGT;
+		default: return 0;
+		}
 	default: return 0;
 	}
 }
@@ -188,6 +193,12 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 					req_msg->offset40) &
 					0xFFFFFFF9) | 0x2);
 		break;
+	case IB_QPT_XRC_INI:
+		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
+					req_msg->offset40) &
+					0xFFFFFFF9) | 0x6);
+		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
 					req_msg->offset40) &
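An XRC REQ therefore carries transport type 3 in bits 1-2 of offset40 plus subtype 1 in the low three bits of offset51; the passive side decodes that combination as IB_QPT_XRC_TGT. A minimal user-space round-trip of the same bit packing, assuming htonl()/ntohl() as stand-ins for cpu_to_be32()/be32_to_cpu() and a struct reduced to just the two fields involved:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct req_msg {
	uint32_t offset40;	/* transport type lives in bits 1-2 */
	uint8_t  offset51;	/* max CM retries:4, SRQ:1, ext transport:3 */
};

static void set_xrc_ini(struct req_msg *m)
{
	m->offset40 = htonl((ntohl(m->offset40) & 0xFFFFFFF9) | 0x6);
	m->offset51 = (m->offset51 & 0xF8) | 1;
}

/* the receiver maps this encoding to its target-side QP type */
static int decodes_as_xrc_tgt(const struct req_msg *m)
{
	return ((ntohl(m->offset40) & 0x06) >> 1) == 3 &&
	       (m->offset51 & 0x7) == 1;
}

int main(void)
{
	struct req_msg m = { 0, 0 };

	set_xrc_ini(&m);
	printf("decodes as XRC TGT: %d\n", decodes_as_xrc_tgt(&m)); /* 1 */
	return 0;
}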
@@ -527,6 +538,23 @@ static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 				  (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }
 
+static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
+{
+	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
+}
+
+static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
+{
+	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
+					(be32_to_cpu(rep_msg->offset16) & 0x000000FF));
+}
+
+static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
+{
+	return (qp_type == IB_QPT_XRC_INI) ?
+		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
+}
+
 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
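An XRC REP returns the target's EECN in the top 24 bits of offset16 rather than a QPN in offset12, and cm_rep_get_qpn() picks the right field from the requester's QP type. The same shift-and-mask packing, reduced to a self-contained round-trip (htonl()/ntohl() stand in for the kernel byte-order helpers, and the struct keeps only the field the accessors touch):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct rep_msg { uint32_t offset16; };	/* local EECN:24, rsvd:8 */

static uint32_t rep_get_local_eecn(const struct rep_msg *m)
{
	return htonl(ntohl(m->offset16) >> 8);
}

static void rep_set_local_eecn(struct rep_msg *m, uint32_t eecn_be)
{
	m->offset16 = htonl((ntohl(eecn_be) << 8) |
			    (ntohl(m->offset16) & 0x000000FF));
}

int main(void)
{
	struct rep_msg m = { .offset16 = htonl(0xAA) }; /* low byte kept */

	rep_set_local_eecn(&m, htonl(0x123456));
	printf("eecn=0x%06x low_byte=0x%02x\n",
	       ntohl(rep_get_local_eecn(&m)), ntohl(m.offset16) & 0xFF);
	return 0;	/* prints eecn=0x123456 low_byte=0xaa */
}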
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ca4c5dcd7133..872b1842598a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -81,6 +81,7 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
+static DEFINE_IDR(ib_ps);
 
 struct cma_device {
 	struct list_head list;
@@ -1179,6 +1180,15 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 	event->param.conn.qp_num = req_data->remote_qpn;
 }
 
+static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+{
+	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
+		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
+		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
+		 (id->qp_type == IB_QPT_UD)) ||
+		(!id->qp_type));
+}
+
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id;
@@ -1186,13 +1196,16 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
+	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
+		return -EINVAL;
+
 	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (listen_id->id.qp_type == IB_QPT_UD) {
+	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
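cma_check_req_qp_type() boils down to a small truth table: a connection REQ must match the listener's QP type, a SIDR REQ requires a UD listener, and an untyped listener accepts anything. A stand-alone rendering of that predicate with illustrative enum values:

#include <stdio.h>

enum ev { REQ_RECEIVED, SIDR_REQ_RECEIVED };
enum qpt { QPT_NONE = 0, QPT_RC, QPT_UD, QPT_XRC_TGT };

static int check_req_qp_type(enum qpt listener, enum ev event, enum qpt req)
{
	return ((event == REQ_RECEIVED && req == listener) ||
		(event == SIDR_REQ_RECEIVED && listener == QPT_UD) ||
		!listener);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       check_req_qp_type(QPT_RC, REQ_RECEIVED, QPT_RC),	/* 1 */
	       check_req_qp_type(QPT_RC, REQ_RECEIVED, QPT_XRC_TGT),	/* 0 */
	       check_req_qp_type(QPT_UD, SIDR_REQ_RECEIVED, QPT_NONE),	/* 1 */
	       check_req_qp_type(QPT_NONE, REQ_RECEIVED, QPT_RC));	/* 1 */
	return 0;
}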
@@ -1328,6 +1341,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	switch (iw_event->status) {
 	case 0:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.param.conn.initiator_depth = iw_event->ird;
+		event.param.conn.responder_resources = iw_event->ord;
 		break;
 	case -ECONNRESET:
 	case -ECONNREFUSED:
@@ -1343,6 +1358,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.param.conn.initiator_depth = iw_event->ird;
+		event.param.conn.responder_resources = iw_event->ord;
 		break;
 	default:
 		BUG_ON(1);
@@ -1433,8 +1450,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	event.param.conn.private_data = iw_event->private_data;
 	event.param.conn.private_data_len = iw_event->private_data_len;
-	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
-	event.param.conn.responder_resources = attr.max_qp_rd_atom;
+	event.param.conn.initiator_depth = iw_event->ird;
+	event.param.conn.responder_resources = iw_event->ord;
 
 	/*
 	 * Protect against the user destroying conn_id from another thread
@@ -2234,6 +2251,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	case RDMA_PS_IPOIB:
 		ps = &ipoib_ps;
 		break;
+	case RDMA_PS_IB:
+		ps = &ib_ps;
+		break;
 	default:
 		return -EPROTONOSUPPORT;
 	}
@@ -2569,7 +2589,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.service_id = cma_get_service_id(id_priv->id.ps,
 					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
-	req.qp_type = IB_QPT_RC;
+	req.qp_type = id_priv->id.qp_type;
 	req.starting_psn = id_priv->seq_num;
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
@@ -2616,14 +2636,16 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	iw_param.ord = conn_param->initiator_depth;
-	iw_param.ird = conn_param->responder_resources;
-	iw_param.private_data = conn_param->private_data;
-	iw_param.private_data_len = conn_param->private_data_len;
-	if (id_priv->id.qp)
+	if (conn_param) {
+		iw_param.ord = conn_param->initiator_depth;
+		iw_param.ird = conn_param->responder_resources;
+		iw_param.private_data = conn_param->private_data;
+		iw_param.private_data_len = conn_param->private_data_len;
+		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
+	} else {
+		memset(&iw_param, 0, sizeof iw_param);
 		iw_param.qpn = id_priv->qp_num;
-	else
-		iw_param.qpn = conn_param->qp_num;
+	}
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
 	if (ret) {
@@ -2765,14 +2787,20 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (id->qp_type == IB_QPT_UD)
-			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
-						conn_param->private_data,
-						conn_param->private_data_len);
-		else if (conn_param)
-			ret = cma_accept_ib(id_priv, conn_param);
-		else
-			ret = cma_rep_recv(id_priv);
+		if (id->qp_type == IB_QPT_UD) {
+			if (conn_param)
+				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+							conn_param->private_data,
+							conn_param->private_data_len);
+			else
+				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+							NULL, 0);
+		} else {
+			if (conn_param)
+				ret = cma_accept_ib(id_priv, conn_param);
+			else
+				ret = cma_rep_recv(id_priv);
+		}
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_accept_iw(id_priv, conn_param);
@@ -3460,6 +3488,7 @@ static void __exit cma_cleanup(void)
 	idr_destroy(&tcp_ps);
 	idr_destroy(&udp_ps);
 	idr_destroy(&ipoib_ps);
+	idr_destroy(&ib_ps);
 }
 
 module_init(cma_init);
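User space reaches the new ib_ps port space through librdmacm by creating an id with RDMA_PS_IB. A minimal sketch, with no address resolution or connection logic and error handling trimmed:

#include <stdio.h>
#include <rdma/rdma_cma.h>

int main(void)
{
	struct rdma_event_channel *ch = rdma_create_event_channel();
	struct rdma_cm_id *id;

	if (!ch)
		return 1;
	if (rdma_create_id(ch, &id, NULL, RDMA_PS_IB)) {
		perror("rdma_create_id");
		return 1;
	}
	puts("created RDMA_PS_IB id");	/* backed by the ib_ps IDR above */
	rdma_destroy_id(id);
	rdma_destroy_event_channel(ch);
	return 0;
}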
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b4d8672a3e4e..056389229ea7 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1596,6 +1596,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 			    mad->mad_hdr.class_version].class;
 		if (!class)
 			goto out;
+		if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+		    IB_MGMT_MAX_METHODS)
+			goto out;
 		method = class->method_table[convert_mgmt_class(
 						mad->mad_hdr.mgmt_class)];
 		if (method)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 9ab5df72df7b..2b59b72b57f9 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -185,17 +185,35 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 	if (ret)
 		return ret;
 
+	rate = (25 * attr.active_speed) / 10;
+
 	switch (attr.active_speed) {
-	case 2: speed = " DDR"; break;
-	case 4: speed = " QDR"; break;
+	case 2:
+		speed = " DDR";
+		break;
+	case 4:
+		speed = " QDR";
+		break;
+	case 8:
+		speed = " FDR10";
+		rate = 10;
+		break;
+	case 16:
+		speed = " FDR";
+		rate = 14;
+		break;
+	case 32:
+		speed = " EDR";
+		rate = 25;
+		break;
 	}
 
-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+	rate *= ib_width_enum_to_int(attr.active_width);
 	if (rate < 0)
 		return -EINVAL;
 
 	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
-		       rate / 10, rate % 10 ? ".5" : "",
+		       rate, (attr.active_speed == 1) ? ".5" : "",
 		       ib_width_enum_to_int(attr.active_width), speed);
 }
 
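The reworked arithmetic starts from 2.5 Gb/s per speed unit, overrides the rates that are not clean multiples (FDR10/FDR/EDR), multiplies by the lane count, and appends ".5" only for SDR. A user-space sketch of the same computation, assuming the speed encoding shown above (1=SDR, 2=DDR, 4=QDR, 8=FDR10, 16=FDR, 32=EDR):

#include <stdio.h>

static void show_rate(int active_speed, int lanes)
{
	const char *speed = "";
	int rate = (25 * active_speed) / 10;	/* 2.5 Gb/s units, truncated */

	switch (active_speed) {
	case 2:  speed = " DDR";   break;
	case 4:  speed = " QDR";   break;
	case 8:  speed = " FDR10"; rate = 10; break;	/* 10.3125 -> 10 */
	case 16: speed = " FDR";   rate = 14; break;	/* 14.0625 -> 14 */
	case 32: speed = " EDR";   rate = 25; break;	/* 25.78   -> 25 */
	}

	rate *= lanes;
	printf("%d%s Gb/sec (%dX%s)\n",
	       rate, (active_speed == 1) ? ".5" : "", lanes, speed);
}

int main(void)
{
	show_rate(1, 1);	/* 2.5 Gb/sec (1X) */
	show_rate(16, 4);	/* 56 Gb/sec (4X FDR) */
	return 0;
}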
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 08f948df8fa9..b8a0b4a7811b 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1122,7 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
 		return -EFAULT;
 
-	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
+	if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
 		return -EINVAL;
 
 	if (hdr.in + sizeof(hdr) > len)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 71be5eebd683..b69307f4f6d0 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -276,7 +276,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	ucma_set_event_context(ctx, event, uevent);
 	uevent->resp.event = event->event;
 	uevent->resp.status = event->status;
-	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
+	if (cm_id->qp_type == IB_QPT_UD)
 		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
 	else
 		ucma_copy_conn_event(&uevent->resp.param.conn,
@@ -377,6 +377,9 @@ static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_
 	case RDMA_PS_IPOIB:
 		*qp_type = IB_QPT_UD;
 		return 0;
+	case RDMA_PS_IB:
+		*qp_type = cmd->qp_type;
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -1270,7 +1273,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
 		return -EFAULT;
 
-	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
 		return -EINVAL;
 
 	if (hdr.in + sizeof(hdr) > len)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8d261b6ea5fe..07db22997e97 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -458,8 +458,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err;
 	}
 
-	if (packet->mad.hdr.id < 0 ||
-	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
+	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -703,7 +702,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 	mutex_lock(&file->port->file_mutex);
 	mutex_lock(&file->mutex);
 
-	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
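The checks dropped in ucm.c, ucma.c, and here all remove a `< 0` comparison that can never be true because the command field is unsigned; a single upper-bound test is sufficient and also quiets compiler warnings. A tiny demonstration of why the bound alone is safe:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int (*cmd_table[4])(void);	/* stand-in dispatch table */

static int check_cmd(uint32_t cmd)
{
	return cmd >= ARRAY_SIZE(cmd_table) ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
	/* a "negative" value wraps to a huge unsigned and is rejected */
	printf("%d %d\n", check_cmd((uint32_t)-1), check_cmd(3));
	return 0;	/* prints -22 0 */
}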
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index a078e5624d22..5bcb2afd3dcb 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -76,6 +76,8 @@ struct ib_uverbs_device {
 	struct ib_device *ib_dev;
 	int devnum;
 	struct cdev cdev;
+	struct rb_root xrcd_tree;
+	struct mutex xrcd_tree_mutex;
 };
 
 struct ib_uverbs_event_file {
@@ -120,6 +122,16 @@ struct ib_uevent_object {
 	u32 events_reported;
 };
 
+struct ib_uxrcd_object {
+	struct ib_uobject uobject;
+	atomic_t refcnt;
+};
+
+struct ib_usrq_object {
+	struct ib_uevent_object uevent;
+	struct ib_uxrcd_object *uxrcd;
+};
+
 struct ib_uqp_object {
 	struct ib_uevent_object uevent;
 	struct list_head mcast_list;
@@ -142,6 +154,7 @@ extern struct idr ib_uverbs_ah_idr;
 extern struct idr ib_uverbs_cq_idr;
 extern struct idr ib_uverbs_qp_idr;
 extern struct idr ib_uverbs_srq_idr;
+extern struct idr ib_uverbs_xrcd_idr;
 
 void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
 
@@ -161,6 +174,7 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_event_handler(struct ib_event_handler *handler,
 			     struct ib_event *event);
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
 
 #define IB_UVERBS_DECLARE_CMD(name)					\
 	ssize_t ib_uverbs_##name(struct ib_uverbs_file *file,		\
@@ -181,6 +195,7 @@ IB_UVERBS_DECLARE_CMD(poll_cq);
 IB_UVERBS_DECLARE_CMD(req_notify_cq);
 IB_UVERBS_DECLARE_CMD(destroy_cq);
 IB_UVERBS_DECLARE_CMD(create_qp);
+IB_UVERBS_DECLARE_CMD(open_qp);
 IB_UVERBS_DECLARE_CMD(query_qp);
 IB_UVERBS_DECLARE_CMD(modify_qp);
 IB_UVERBS_DECLARE_CMD(destroy_qp);
@@ -195,5 +210,8 @@ IB_UVERBS_DECLARE_CMD(create_srq);
 IB_UVERBS_DECLARE_CMD(modify_srq);
 IB_UVERBS_DECLARE_CMD(query_srq);
 IB_UVERBS_DECLARE_CMD(destroy_srq);
+IB_UVERBS_DECLARE_CMD(create_xsrq);
+IB_UVERBS_DECLARE_CMD(open_xrcd);
+IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index c42699285f8e..254f1649c734 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -47,6 +47,7 @@ static struct lock_class_key cq_lock_key;
 static struct lock_class_key qp_lock_key;
 static struct lock_class_key ah_lock_key;
 static struct lock_class_key srq_lock_key;
+static struct lock_class_key xrcd_lock_key;
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
@@ -255,6 +256,18 @@ static void put_srq_read(struct ib_srq *srq)
 	put_uobj_read(srq->uobject);
 }
 
+static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
+				     struct ib_uobject **uobj)
+{
+	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
+	return *uobj ? (*uobj)->object : NULL;
+}
+
+static void put_xrcd_read(struct ib_uobject *uobj)
+{
+	put_uobj_read(uobj);
+}
+
 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 			      const char __user *buf,
 			      int in_len, int out_len)
@@ -298,6 +311,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&ucontext->qp_list);
 	INIT_LIST_HEAD(&ucontext->srq_list);
 	INIT_LIST_HEAD(&ucontext->ah_list);
+	INIT_LIST_HEAD(&ucontext->xrcd_list);
 	ucontext->closing = 0;
 
 	resp.num_comp_vectors = file->device->num_comp_vectors;
@@ -579,6 +593,310 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
 	return in_len;
 }
 
+struct xrcd_table_entry {
+	struct rb_node node;
+	struct ib_xrcd *xrcd;
+	struct inode *inode;
+};
+
+static int xrcd_table_insert(struct ib_uverbs_device *dev,
+			     struct inode *inode,
+			     struct ib_xrcd *xrcd)
+{
+	struct xrcd_table_entry *entry, *scan;
+	struct rb_node **p = &dev->xrcd_tree.rb_node;
+	struct rb_node *parent = NULL;
+
+	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->xrcd = xrcd;
+	entry->inode = inode;
+
+	while (*p) {
+		parent = *p;
+		scan = rb_entry(parent, struct xrcd_table_entry, node);
+
+		if (inode < scan->inode) {
+			p = &(*p)->rb_left;
+		} else if (inode > scan->inode) {
+			p = &(*p)->rb_right;
+		} else {
+			kfree(entry);
+			return -EEXIST;
+		}
+	}
+
+	rb_link_node(&entry->node, parent, p);
+	rb_insert_color(&entry->node, &dev->xrcd_tree);
+	igrab(inode);
+	return 0;
+}
+
+static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
+						  struct inode *inode)
+{
+	struct xrcd_table_entry *entry;
+	struct rb_node *p = dev->xrcd_tree.rb_node;
+
+	while (p) {
+		entry = rb_entry(p, struct xrcd_table_entry, node);
+
+		if (inode < entry->inode)
+			p = p->rb_left;
+		else if (inode > entry->inode)
+			p = p->rb_right;
+		else
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
+{
+	struct xrcd_table_entry *entry;
+
+	entry = xrcd_table_search(dev, inode);
+	if (!entry)
+		return NULL;
+
+	return entry->xrcd;
+}
+
+static void xrcd_table_delete(struct ib_uverbs_device *dev,
+			      struct inode *inode)
+{
+	struct xrcd_table_entry *entry;
+
+	entry = xrcd_table_search(dev, inode);
+	if (entry) {
+		iput(inode);
+		rb_erase(&entry->node, &dev->xrcd_tree);
+		kfree(entry);
+	}
+}
+
+ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
+			    const char __user *buf, int in_len,
+			    int out_len)
+{
+	struct ib_uverbs_open_xrcd cmd;
+	struct ib_uverbs_open_xrcd_resp resp;
+	struct ib_udata udata;
+	struct ib_uxrcd_object *obj;
+	struct ib_xrcd *xrcd = NULL;
+	struct file *f = NULL;
+	struct inode *inode = NULL;
+	int ret = 0;
+	int new_xrcd = 0;
+
+	if (out_len < sizeof resp)
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, buf, sizeof cmd))
+		return -EFAULT;
+
+	INIT_UDATA(&udata, buf + sizeof cmd,
+		   (unsigned long) cmd.response + sizeof resp,
+		   in_len - sizeof cmd, out_len - sizeof resp);
+
+	mutex_lock(&file->device->xrcd_tree_mutex);
+
+	if (cmd.fd != -1) {
+		/* search for file descriptor */
+		f = fget(cmd.fd);
+		if (!f) {
+			ret = -EBADF;
+			goto err_tree_mutex_unlock;
+		}
+
+		inode = f->f_dentry->d_inode;
+		if (!inode) {
+			ret = -EBADF;
+			goto err_tree_mutex_unlock;
+		}
+
+		xrcd = find_xrcd(file->device, inode);
+		if (!xrcd && !(cmd.oflags & O_CREAT)) {
+			/* no file descriptor. Need CREATE flag */
+			ret = -EAGAIN;
+			goto err_tree_mutex_unlock;
+		}
+
+		if (xrcd && cmd.oflags & O_EXCL) {
+			ret = -EINVAL;
+			goto err_tree_mutex_unlock;
+		}
+	}
+
+	obj = kmalloc(sizeof *obj, GFP_KERNEL);
+	if (!obj) {
+		ret = -ENOMEM;
+		goto err_tree_mutex_unlock;
+	}
+
+	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
+
+	down_write(&obj->uobject.mutex);
+
+	if (!xrcd) {
+		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
+							file->ucontext, &udata);
+		if (IS_ERR(xrcd)) {
+			ret = PTR_ERR(xrcd);
+			goto err;
+		}
+
+		xrcd->inode = inode;
+		xrcd->device = file->device->ib_dev;
+		atomic_set(&xrcd->usecnt, 0);
+		mutex_init(&xrcd->tgt_qp_mutex);
+		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
+		new_xrcd = 1;
+	}
+
+	atomic_set(&obj->refcnt, 0);
+	obj->uobject.object = xrcd;
+	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+	if (ret)
+		goto err_idr;
+
+	memset(&resp, 0, sizeof resp);
+	resp.xrcd_handle = obj->uobject.id;
+
+	if (inode) {
+		if (new_xrcd) {
+			/* create new inode/xrcd table entry */
+			ret = xrcd_table_insert(file->device, inode, xrcd);
+			if (ret)
+				goto err_insert_xrcd;
+		}
+		atomic_inc(&xrcd->usecnt);
+	}
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp)) {
+		ret = -EFAULT;
+		goto err_copy;
+	}
+
+	if (f)
+		fput(f);
+
+	mutex_lock(&file->mutex);
+	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
+	mutex_unlock(&file->mutex);
+
+	obj->uobject.live = 1;
+	up_write(&obj->uobject.mutex);
+
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+	return in_len;
+
+err_copy:
+	if (inode) {
+		if (new_xrcd)
+			xrcd_table_delete(file->device, inode);
+		atomic_dec(&xrcd->usecnt);
+	}
+
+err_insert_xrcd:
+	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+
+err_idr:
+	ib_dealloc_xrcd(xrcd);
+
+err:
+	put_uobj_write(&obj->uobject);
+
+err_tree_mutex_unlock:
+	if (f)
+		fput(f);
+
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+
+	return ret;
+}
+
+ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
+			     const char __user *buf, int in_len,
+			     int out_len)
+{
+	struct ib_uverbs_close_xrcd cmd;
+	struct ib_uobject *uobj;
+	struct ib_xrcd *xrcd = NULL;
+	struct inode *inode = NULL;
+	struct ib_uxrcd_object *obj;
+	int live;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, buf, sizeof cmd))
+		return -EFAULT;
+
+	mutex_lock(&file->device->xrcd_tree_mutex);
+	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
+	if (!uobj) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	xrcd = uobj->object;
+	inode = xrcd->inode;
+	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
+	if (atomic_read(&obj->refcnt)) {
+		put_uobj_write(uobj);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
+		ret = ib_dealloc_xrcd(uobj->object);
+		if (!ret)
+			uobj->live = 0;
+	}
+
+	live = uobj->live;
+	if (inode && ret)
+		atomic_inc(&xrcd->usecnt);
+
+	put_uobj_write(uobj);
+
+	if (ret)
+		goto out;
+
+	if (inode && !live)
+		xrcd_table_delete(file->device, inode);
+
+	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
+	mutex_lock(&file->mutex);
+	list_del(&uobj->list);
+	mutex_unlock(&file->mutex);
+
+	put_uobj(uobj);
+	ret = in_len;
+
+out:
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+	return ret;
+}
+
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
+			    struct ib_xrcd *xrcd)
+{
+	struct inode *inode;
+
+	inode = xrcd->inode;
+	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
+		return;
+
+	ib_dealloc_xrcd(xrcd);
+
+	if (inode)
+		xrcd_table_delete(dev, inode);
+}
+
 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 			 const char __user *buf, int in_len,
 			 int out_len)
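From user space this path is exercised through libibverbs' ibv_open_xrcd() (added in later libibverbs releases): the fd's inode is what keys the rbtree lookup above, so unrelated processes opening the same file share one XRC domain. A hedged sketch; device selection and error handling are trimmed, and the file name is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	struct ibv_context *ctx = dev_list ? ibv_open_device(dev_list[0]) : NULL;
	struct ibv_xrcd_init_attr attr = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD | IBV_XRCD_INIT_ATTR_OFLAGS,
		/* the inode behind this fd identifies the shared domain */
		.fd	= open("/tmp/xrcd", O_RDONLY | O_CREAT, 0600),
		.oflags	= O_CREAT,
	};
	struct ibv_xrcd *xrcd;

	if (!ctx)
		return 1;
	xrcd = ibv_open_xrcd(ctx, &attr);
	if (!xrcd)
		perror("ibv_open_xrcd");
	else
		ibv_close_xrcd(xrcd);
	return 0;
}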
@@ -1052,9 +1370,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata udata;
 	struct ib_uqp_object *obj;
-	struct ib_pd *pd;
-	struct ib_cq *scq, *rcq;
-	struct ib_srq *srq;
+	struct ib_device *device;
+	struct ib_pd *pd = NULL;
+	struct ib_xrcd *xrcd = NULL;
+	struct ib_uobject *uninitialized_var(xrcd_uobj);
+	struct ib_cq *scq = NULL, *rcq = NULL;
+	struct ib_srq *srq = NULL;
 	struct ib_qp *qp;
 	struct ib_qp_init_attr attr;
 	int ret;
@@ -1076,15 +1397,39 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
 	down_write(&obj->uevent.uobject.mutex);
 
-	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
-	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
-	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
-	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
-		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
+	if (cmd.qp_type == IB_QPT_XRC_TGT) {
+		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
+		if (!xrcd) {
+			ret = -EINVAL;
+			goto err_put;
+		}
+		device = xrcd->device;
+	} else {
+		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
+		if (!pd || !scq) {
+			ret = -EINVAL;
+			goto err_put;
+		}
 
-	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
-		ret = -EINVAL;
-		goto err_put;
+		if (cmd.qp_type == IB_QPT_XRC_INI) {
+			cmd.max_recv_wr = cmd.max_recv_sge = 0;
+		} else {
+			if (cmd.is_srq) {
+				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
+				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
+					ret = -EINVAL;
+					goto err_put;
+				}
+			}
+			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
+			      scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
+			if (!rcq) {
+				ret = -EINVAL;
+				goto err_put;
+			}
+		}
+		device = pd->device;
 	}
 
 	attr.event_handler = ib_uverbs_qp_event_handler;
1090 | attr.event_handler = ib_uverbs_qp_event_handler; | 1435 | attr.event_handler = ib_uverbs_qp_event_handler; |
@@ -1092,6 +1437,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1092 | attr.send_cq = scq; | 1437 | attr.send_cq = scq; |
1093 | attr.recv_cq = rcq; | 1438 | attr.recv_cq = rcq; |
1094 | attr.srq = srq; | 1439 | attr.srq = srq; |
1440 | attr.xrcd = xrcd; | ||
1095 | attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; | 1441 | attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; |
1096 | attr.qp_type = cmd.qp_type; | 1442 | attr.qp_type = cmd.qp_type; |
1097 | attr.create_flags = 0; | 1443 | attr.create_flags = 0; |
@@ -1106,26 +1452,34 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 	INIT_LIST_HEAD(&obj->mcast_list);
 
-	qp = pd->device->create_qp(pd, &attr, &udata);
+	if (cmd.qp_type == IB_QPT_XRC_TGT)
+		qp = ib_create_qp(pd, &attr);
+	else
+		qp = device->create_qp(pd, &attr, &udata);
+
 	if (IS_ERR(qp)) {
 		ret = PTR_ERR(qp);
 		goto err_put;
 	}
 
-	qp->device = pd->device;
-	qp->pd = pd;
-	qp->send_cq = attr.send_cq;
-	qp->recv_cq = attr.recv_cq;
-	qp->srq = attr.srq;
-	qp->uobject = &obj->uevent.uobject;
-	qp->event_handler = attr.event_handler;
-	qp->qp_context = attr.qp_context;
-	qp->qp_type = attr.qp_type;
-	atomic_inc(&pd->usecnt);
-	atomic_inc(&attr.send_cq->usecnt);
-	atomic_inc(&attr.recv_cq->usecnt);
-	if (attr.srq)
-		atomic_inc(&attr.srq->usecnt);
+	if (cmd.qp_type != IB_QPT_XRC_TGT) {
+		qp->real_qp = qp;
+		qp->device = device;
+		qp->pd = pd;
+		qp->send_cq = attr.send_cq;
+		qp->recv_cq = attr.recv_cq;
+		qp->srq = attr.srq;
+		qp->event_handler = attr.event_handler;
+		qp->qp_context = attr.qp_context;
+		qp->qp_type = attr.qp_type;
+		atomic_inc(&pd->usecnt);
+		atomic_inc(&attr.send_cq->usecnt);
+		if (attr.recv_cq)
+			atomic_inc(&attr.recv_cq->usecnt);
+		if (attr.srq)
+			atomic_inc(&attr.srq->usecnt);
+	}
+	qp->uobject = &obj->uevent.uobject;
 
 	obj->uevent.uobject.object = qp;
 	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
@@ -1147,9 +1501,13 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		goto err_copy;
 	}
 
-	put_pd_read(pd);
-	put_cq_read(scq);
-	if (rcq != scq)
+	if (xrcd)
+		put_xrcd_read(xrcd_uobj);
+	if (pd)
+		put_pd_read(pd);
+	if (scq)
+		put_cq_read(scq);
+	if (rcq && rcq != scq)
 		put_cq_read(rcq);
 	if (srq)
 		put_srq_read(srq);
@@ -1171,6 +1529,8 @@ err_destroy:
 	ib_destroy_qp(qp);
 
 err_put:
+	if (xrcd)
+		put_xrcd_read(xrcd_uobj);
 	if (pd)
 		put_pd_read(pd);
 	if (scq)
@@ -1184,6 +1544,98 @@ err_put: | |||
1184 | return ret; | 1544 | return ret; |
1185 | } | 1545 | } |
1186 | 1546 | ||
1547 | ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, | ||
1548 | const char __user *buf, int in_len, int out_len) | ||
1549 | { | ||
1550 | struct ib_uverbs_open_qp cmd; | ||
1551 | struct ib_uverbs_create_qp_resp resp; | ||
1552 | struct ib_udata udata; | ||
1553 | struct ib_uqp_object *obj; | ||
1554 | struct ib_xrcd *xrcd; | ||
1555 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
1556 | struct ib_qp *qp; | ||
1557 | struct ib_qp_open_attr attr; | ||
1558 | int ret; | ||
1559 | |||
1560 | if (out_len < sizeof resp) | ||
1561 | return -ENOSPC; | ||
1562 | |||
1563 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
1564 | return -EFAULT; | ||
1565 | |||
1566 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
1567 | (unsigned long) cmd.response + sizeof resp, | ||
1568 | in_len - sizeof cmd, out_len - sizeof resp); | ||
1569 | |||
1570 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | ||
1571 | if (!obj) | ||
1572 | return -ENOMEM; | ||
1573 | |||
1574 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); | ||
1575 | down_write(&obj->uevent.uobject.mutex); | ||
1576 | |||
1577 | xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj); | ||
1578 | if (!xrcd) { | ||
1579 | ret = -EINVAL; | ||
1580 | goto err_put; | ||
1581 | } | ||
1582 | |||
1583 | attr.event_handler = ib_uverbs_qp_event_handler; | ||
1584 | attr.qp_context = file; | ||
1585 | attr.qp_num = cmd.qpn; | ||
1586 | attr.qp_type = cmd.qp_type; | ||
1587 | |||
1588 | obj->uevent.events_reported = 0; | ||
1589 | INIT_LIST_HEAD(&obj->uevent.event_list); | ||
1590 | INIT_LIST_HEAD(&obj->mcast_list); | ||
1591 | |||
1592 | qp = ib_open_qp(xrcd, &attr); | ||
1593 | if (IS_ERR(qp)) { | ||
1594 | ret = PTR_ERR(qp); | ||
1595 | goto err_put; | ||
1596 | } | ||
1597 | |||
1598 | qp->uobject = &obj->uevent.uobject; | ||
1599 | |||
1600 | obj->uevent.uobject.object = qp; | ||
1601 | ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | ||
1602 | if (ret) | ||
1603 | goto err_destroy; | ||
1604 | |||
1605 | memset(&resp, 0, sizeof resp); | ||
1606 | resp.qpn = qp->qp_num; | ||
1607 | resp.qp_handle = obj->uevent.uobject.id; | ||
1608 | |||
1609 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | ||
1610 | &resp, sizeof resp)) { | ||
1611 | ret = -EFAULT; | ||
1612 | goto err_remove; | ||
1613 | } | ||
1614 | |||
1615 | put_xrcd_read(xrcd_uobj); | ||
1616 | |||
1617 | mutex_lock(&file->mutex); | ||
1618 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); | ||
1619 | mutex_unlock(&file->mutex); | ||
1620 | |||
1621 | obj->uevent.uobject.live = 1; | ||
1622 | |||
1623 | up_write(&obj->uevent.uobject.mutex); | ||
1624 | |||
1625 | return in_len; | ||
1626 | |||
1627 | err_remove: | ||
1628 | idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | ||
1629 | |||
1630 | err_destroy: | ||
1631 | ib_destroy_qp(qp); | ||
1632 | |||
1633 | err_put: | ||
1634 | put_xrcd_read(xrcd_uobj); | ||
1635 | put_uobj_write(&obj->uevent.uobject); | ||
1636 | return ret; | ||
1637 | } | ||
1638 | |||
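Note the handle reuse in this command: the XRCD to open against travels in cmd.pd_handle, and the reply reuses the create_qp response layout. For reference, the request struct as defined alongside the other uverbs commands in ib_user_verbs.h by this series:

    struct ib_uverbs_open_qp {
            __u64 response;
            __u64 user_handle;
            __u32 pd_handle;        /* carries the XRCD handle here */
            __u32 qpn;
            __u8  qp_type;
            __u8  reserved[7];
            __u64 driver_data[0];
    };
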
1187 | ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, | 1639 | ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, |
1188 | const char __user *buf, int in_len, | 1640 | const char __user *buf, int in_len, |
1189 | int out_len) | 1641 | int out_len) |
@@ -1284,6 +1736,20 @@ out: | |||
1284 | return ret ? ret : in_len; | 1736 | return ret ? ret : in_len; |
1285 | } | 1737 | } |
1286 | 1738 | ||
1739 | /* Remove ignored fields set in the attribute mask */ | ||
1740 | static int modify_qp_mask(enum ib_qp_type qp_type, int mask) | ||
1741 | { | ||
1742 | switch (qp_type) { | ||
1743 | case IB_QPT_XRC_INI: | ||
1744 | return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); | ||
1745 | case IB_QPT_XRC_TGT: | ||
1746 | return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | | ||
1747 | IB_QP_RNR_RETRY); | ||
1748 | default: | ||
1749 | return mask; | ||
1750 | } | ||
1751 | } | ||
1752 | |||
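An XRC initiator QP has no receive side and an XRC target QP never initiates transfers, so attribute bits that only make sense for the missing half are silently dropped rather than rejected; existing userspace that blindly sets a full RC-style mask keeps working. A minimal sketch of the effect (the mask value is illustrative):

    int mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
               IB_QP_MIN_RNR_TIMER | IB_QP_MAX_DEST_RD_ATOMIC;

    mask = modify_qp_mask(IB_QPT_XRC_INI, mask);
    /* The two responder-only bits are gone; mask is now
     * IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT. */
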
1287 | ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | 1753 | ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, |
1288 | const char __user *buf, int in_len, | 1754 | const char __user *buf, int in_len, |
1289 | int out_len) | 1755 | int out_len) |
@@ -1356,7 +1822,12 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | |||
1356 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; | 1822 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; |
1357 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; | 1823 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; |
1358 | 1824 | ||
1359 | ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); | 1825 | if (qp->real_qp == qp) { |
1826 | ret = qp->device->modify_qp(qp, attr, | ||
1827 | modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); | ||
1828 | } else { | ||
1829 | ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); | ||
1830 | } | ||
1360 | 1831 | ||
1361 | put_qp_read(qp); | 1832 | put_qp_read(qp); |
1362 | 1833 | ||
@@ -1553,7 +2024,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | |||
1553 | } | 2024 | } |
1554 | 2025 | ||
1555 | resp.bad_wr = 0; | 2026 | resp.bad_wr = 0; |
1556 | ret = qp->device->post_send(qp, wr, &bad_wr); | 2027 | ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); |
1557 | if (ret) | 2028 | if (ret) |
1558 | for (next = wr; next; next = next->next) { | 2029 | for (next = wr; next; next = next->next) { |
1559 | ++resp.bad_wr; | 2030 | ++resp.bad_wr; |
@@ -1691,7 +2162,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, | |||
1691 | goto out; | 2162 | goto out; |
1692 | 2163 | ||
1693 | resp.bad_wr = 0; | 2164 | resp.bad_wr = 0; |
1694 | ret = qp->device->post_recv(qp, wr, &bad_wr); | 2165 | ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); |
1695 | 2166 | ||
1696 | put_qp_read(qp); | 2167 | put_qp_read(qp); |
1697 | 2168 | ||
@@ -1975,107 +2446,199 @@ out_put: | |||
1975 | return ret ? ret : in_len; | 2446 | return ret ? ret : in_len; |
1976 | } | 2447 | } |
1977 | 2448 | ||
1978 | ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | 2449 | int __uverbs_create_xsrq(struct ib_uverbs_file *file, |
1979 | const char __user *buf, int in_len, | 2450 | struct ib_uverbs_create_xsrq *cmd, |
1980 | int out_len) | 2451 | struct ib_udata *udata) |
1981 | { | 2452 | { |
1982 | struct ib_uverbs_create_srq cmd; | ||
1983 | struct ib_uverbs_create_srq_resp resp; | 2453 | struct ib_uverbs_create_srq_resp resp; |
1984 | struct ib_udata udata; | 2454 | struct ib_usrq_object *obj; |
1985 | struct ib_uevent_object *obj; | ||
1986 | struct ib_pd *pd; | 2455 | struct ib_pd *pd; |
1987 | struct ib_srq *srq; | 2456 | struct ib_srq *srq; |
2457 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
1988 | struct ib_srq_init_attr attr; | 2458 | struct ib_srq_init_attr attr; |
1989 | int ret; | 2459 | int ret; |
1990 | 2460 | ||
1991 | if (out_len < sizeof resp) | ||
1992 | return -ENOSPC; | ||
1993 | |||
1994 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
1995 | return -EFAULT; | ||
1996 | |||
1997 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
1998 | (unsigned long) cmd.response + sizeof resp, | ||
1999 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2000 | |||
2001 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | 2461 | obj = kmalloc(sizeof *obj, GFP_KERNEL); |
2002 | if (!obj) | 2462 | if (!obj) |
2003 | return -ENOMEM; | 2463 | return -ENOMEM; |
2004 | 2464 | ||
2005 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); | 2465 | init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key); |
2006 | down_write(&obj->uobject.mutex); | 2466 | down_write(&obj->uevent.uobject.mutex); |
2007 | 2467 | ||
2008 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 2468 | pd = idr_read_pd(cmd->pd_handle, file->ucontext); |
2009 | if (!pd) { | 2469 | if (!pd) { |
2010 | ret = -EINVAL; | 2470 | ret = -EINVAL; |
2011 | goto err; | 2471 | goto err; |
2012 | } | 2472 | } |
2013 | 2473 | ||
2474 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2475 | attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0); | ||
2476 | if (!attr.ext.xrc.cq) { | ||
2477 | ret = -EINVAL; | ||
2478 | goto err_put_pd; | ||
2479 | } | ||
2480 | |||
2481 | attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); | ||
2482 | if (!attr.ext.xrc.xrcd) { | ||
2483 | ret = -EINVAL; | ||
2484 | goto err_put_cq; | ||
2485 | } | ||
2486 | |||
2487 | obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); | ||
2488 | atomic_inc(&obj->uxrcd->refcnt); | ||
2489 | } | ||
2490 | |||
2014 | attr.event_handler = ib_uverbs_srq_event_handler; | 2491 | attr.event_handler = ib_uverbs_srq_event_handler; |
2015 | attr.srq_context = file; | 2492 | attr.srq_context = file; |
2016 | attr.attr.max_wr = cmd.max_wr; | 2493 | attr.srq_type = cmd->srq_type; |
2017 | attr.attr.max_sge = cmd.max_sge; | 2494 | attr.attr.max_wr = cmd->max_wr; |
2018 | attr.attr.srq_limit = cmd.srq_limit; | 2495 | attr.attr.max_sge = cmd->max_sge; |
2496 | attr.attr.srq_limit = cmd->srq_limit; | ||
2019 | 2497 | ||
2020 | obj->events_reported = 0; | 2498 | obj->uevent.events_reported = 0; |
2021 | INIT_LIST_HEAD(&obj->event_list); | 2499 | INIT_LIST_HEAD(&obj->uevent.event_list); |
2022 | 2500 | ||
2023 | srq = pd->device->create_srq(pd, &attr, &udata); | 2501 | srq = pd->device->create_srq(pd, &attr, udata); |
2024 | if (IS_ERR(srq)) { | 2502 | if (IS_ERR(srq)) { |
2025 | ret = PTR_ERR(srq); | 2503 | ret = PTR_ERR(srq); |
2026 | goto err_put; | 2504 | goto err_put; |
2027 | } | 2505 | } |
2028 | 2506 | ||
2029 | srq->device = pd->device; | 2507 | srq->device = pd->device; |
2030 | srq->pd = pd; | 2508 | srq->pd = pd; |
2031 | srq->uobject = &obj->uobject; | 2509 | srq->srq_type = cmd->srq_type; |
2510 | srq->uobject = &obj->uevent.uobject; | ||
2032 | srq->event_handler = attr.event_handler; | 2511 | srq->event_handler = attr.event_handler; |
2033 | srq->srq_context = attr.srq_context; | 2512 | srq->srq_context = attr.srq_context; |
2513 | |||
2514 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2515 | srq->ext.xrc.cq = attr.ext.xrc.cq; | ||
2516 | srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; | ||
2517 | atomic_inc(&attr.ext.xrc.cq->usecnt); | ||
2518 | atomic_inc(&attr.ext.xrc.xrcd->usecnt); | ||
2519 | } | ||
2520 | |||
2034 | atomic_inc(&pd->usecnt); | 2521 | atomic_inc(&pd->usecnt); |
2035 | atomic_set(&srq->usecnt, 0); | 2522 | atomic_set(&srq->usecnt, 0); |
2036 | 2523 | ||
2037 | obj->uobject.object = srq; | 2524 | obj->uevent.uobject.object = srq; |
2038 | ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject); | 2525 | ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); |
2039 | if (ret) | 2526 | if (ret) |
2040 | goto err_destroy; | 2527 | goto err_destroy; |
2041 | 2528 | ||
2042 | memset(&resp, 0, sizeof resp); | 2529 | memset(&resp, 0, sizeof resp); |
2043 | resp.srq_handle = obj->uobject.id; | 2530 | resp.srq_handle = obj->uevent.uobject.id; |
2044 | resp.max_wr = attr.attr.max_wr; | 2531 | resp.max_wr = attr.attr.max_wr; |
2045 | resp.max_sge = attr.attr.max_sge; | 2532 | resp.max_sge = attr.attr.max_sge; |
2533 | if (cmd->srq_type == IB_SRQT_XRC) | ||
2534 | resp.srqn = srq->ext.xrc.srq_num; | ||
2046 | 2535 | ||
2047 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 2536 | if (copy_to_user((void __user *) (unsigned long) cmd->response, |
2048 | &resp, sizeof resp)) { | 2537 | &resp, sizeof resp)) { |
2049 | ret = -EFAULT; | 2538 | ret = -EFAULT; |
2050 | goto err_copy; | 2539 | goto err_copy; |
2051 | } | 2540 | } |
2052 | 2541 | ||
2542 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2543 | put_uobj_read(xrcd_uobj); | ||
2544 | put_cq_read(attr.ext.xrc.cq); | ||
2545 | } | ||
2053 | put_pd_read(pd); | 2546 | put_pd_read(pd); |
2054 | 2547 | ||
2055 | mutex_lock(&file->mutex); | 2548 | mutex_lock(&file->mutex); |
2056 | list_add_tail(&obj->uobject.list, &file->ucontext->srq_list); | 2549 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list); |
2057 | mutex_unlock(&file->mutex); | 2550 | mutex_unlock(&file->mutex); |
2058 | 2551 | ||
2059 | obj->uobject.live = 1; | 2552 | obj->uevent.uobject.live = 1; |
2060 | 2553 | ||
2061 | up_write(&obj->uobject.mutex); | 2554 | up_write(&obj->uevent.uobject.mutex); |
2062 | 2555 | ||
2063 | return in_len; | 2556 | return 0; |
2064 | 2557 | ||
2065 | err_copy: | 2558 | err_copy: |
2066 | idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject); | 2559 | idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); |
2067 | 2560 | ||
2068 | err_destroy: | 2561 | err_destroy: |
2069 | ib_destroy_srq(srq); | 2562 | ib_destroy_srq(srq); |
2070 | 2563 | ||
2071 | err_put: | 2564 | err_put: |
2565 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2566 | atomic_dec(&obj->uxrcd->refcnt); | ||
2567 | put_uobj_read(xrcd_uobj); | ||
2568 | } | ||
2569 | |||
2570 | err_put_cq: | ||
2571 | if (cmd->srq_type == IB_SRQT_XRC) | ||
2572 | put_cq_read(attr.ext.xrc.cq); | ||
2573 | |||
2574 | err_put_pd: | ||
2072 | put_pd_read(pd); | 2575 | put_pd_read(pd); |
2073 | 2576 | ||
2074 | err: | 2577 | err: |
2075 | put_uobj_write(&obj->uobject); | 2578 | put_uobj_write(&obj->uevent.uobject); |
2076 | return ret; | 2579 | return ret; |
2077 | } | 2580 | } |
2078 | 2581 | ||
2582 | ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | ||
2583 | const char __user *buf, int in_len, | ||
2584 | int out_len) | ||
2585 | { | ||
2586 | struct ib_uverbs_create_srq cmd; | ||
2587 | struct ib_uverbs_create_xsrq xcmd; | ||
2588 | struct ib_uverbs_create_srq_resp resp; | ||
2589 | struct ib_udata udata; | ||
2590 | int ret; | ||
2591 | |||
2592 | if (out_len < sizeof resp) | ||
2593 | return -ENOSPC; | ||
2594 | |||
2595 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
2596 | return -EFAULT; | ||
2597 | |||
2598 | xcmd.response = cmd.response; | ||
2599 | xcmd.user_handle = cmd.user_handle; | ||
2600 | xcmd.srq_type = IB_SRQT_BASIC; | ||
2601 | xcmd.pd_handle = cmd.pd_handle; | ||
2602 | xcmd.max_wr = cmd.max_wr; | ||
2603 | xcmd.max_sge = cmd.max_sge; | ||
2604 | xcmd.srq_limit = cmd.srq_limit; | ||
2605 | |||
2606 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
2607 | (unsigned long) cmd.response + sizeof resp, | ||
2608 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2609 | |||
2610 | ret = __uverbs_create_xsrq(file, &xcmd, &udata); | ||
2611 | if (ret) | ||
2612 | return ret; | ||
2613 | |||
2614 | return in_len; | ||
2615 | } | ||
2616 | |||
2617 | ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, | ||
2618 | const char __user *buf, int in_len, int out_len) | ||
2619 | { | ||
2620 | struct ib_uverbs_create_xsrq cmd; | ||
2621 | struct ib_uverbs_create_srq_resp resp; | ||
2622 | struct ib_udata udata; | ||
2623 | int ret; | ||
2624 | |||
2625 | if (out_len < sizeof resp) | ||
2626 | return -ENOSPC; | ||
2627 | |||
2628 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
2629 | return -EFAULT; | ||
2630 | |||
2631 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
2632 | (unsigned long) cmd.response + sizeof resp, | ||
2633 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2634 | |||
2635 | ret = __uverbs_create_xsrq(file, &cmd, &udata); | ||
2636 | if (ret) | ||
2637 | return ret; | ||
2638 | |||
2639 | return in_len; | ||
2640 | } | ||
2641 | |||
2079 | ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | 2642 | ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, |
2080 | const char __user *buf, int in_len, | 2643 | const char __user *buf, int in_len, |
2081 | int out_len) | 2644 | int out_len) |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 56898b6578a4..879636746373 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -72,6 +72,7 @@ DEFINE_IDR(ib_uverbs_ah_idr); | |||
72 | DEFINE_IDR(ib_uverbs_cq_idr); | 72 | DEFINE_IDR(ib_uverbs_cq_idr); |
73 | DEFINE_IDR(ib_uverbs_qp_idr); | 73 | DEFINE_IDR(ib_uverbs_qp_idr); |
74 | DEFINE_IDR(ib_uverbs_srq_idr); | 74 | DEFINE_IDR(ib_uverbs_srq_idr); |
75 | DEFINE_IDR(ib_uverbs_xrcd_idr); | ||
75 | 76 | ||
76 | static DEFINE_SPINLOCK(map_lock); | 77 | static DEFINE_SPINLOCK(map_lock); |
77 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); | 78 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); |
@@ -107,6 +108,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, | |||
107 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, | 108 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, |
108 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, | 109 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, |
109 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, | 110 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, |
111 | [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd, | ||
112 | [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, | ||
113 | [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, | ||
114 | [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp | ||
110 | }; | 115 | }; |
111 | 116 | ||
112 | static void ib_uverbs_add_one(struct ib_device *device); | 117 | static void ib_uverbs_add_one(struct ib_device *device); |
@@ -202,8 +207,12 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
202 | container_of(uobj, struct ib_uqp_object, uevent.uobject); | 207 | container_of(uobj, struct ib_uqp_object, uevent.uobject); |
203 | 208 | ||
204 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); | 209 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); |
205 | ib_uverbs_detach_umcast(qp, uqp); | 210 | if (qp != qp->real_qp) { |
206 | ib_destroy_qp(qp); | 211 | ib_close_qp(qp); |
212 | } else { | ||
213 | ib_uverbs_detach_umcast(qp, uqp); | ||
214 | ib_destroy_qp(qp); | ||
215 | } | ||
207 | ib_uverbs_release_uevent(file, &uqp->uevent); | 216 | ib_uverbs_release_uevent(file, &uqp->uevent); |
208 | kfree(uqp); | 217 | kfree(uqp); |
209 | } | 218 | } |
@@ -241,6 +250,18 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
241 | kfree(uobj); | 250 | kfree(uobj); |
242 | } | 251 | } |
243 | 252 | ||
253 | mutex_lock(&file->device->xrcd_tree_mutex); | ||
254 | list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) { | ||
255 | struct ib_xrcd *xrcd = uobj->object; | ||
256 | struct ib_uxrcd_object *uxrcd = | ||
257 | container_of(uobj, struct ib_uxrcd_object, uobject); | ||
258 | |||
259 | idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); | ||
260 | ib_uverbs_dealloc_xrcd(file->device, xrcd); | ||
261 | kfree(uxrcd); | ||
262 | } | ||
263 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
264 | |||
244 | list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { | 265 | list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { |
245 | struct ib_pd *pd = uobj->object; | 266 | struct ib_pd *pd = uobj->object; |
246 | 267 | ||
@@ -557,8 +578,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
557 | if (hdr.in_words * 4 != count) | 578 | if (hdr.in_words * 4 != count) |
558 | return -EINVAL; | 579 | return -EINVAL; |
559 | 580 | ||
560 | if (hdr.command < 0 || | 581 | if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || |
561 | hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || | ||
562 | !uverbs_cmd_table[hdr.command]) | 582 | !uverbs_cmd_table[hdr.command]) |
563 | return -EINVAL; | 583 | return -EINVAL; |
564 | 584 | ||
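The dropped arm was dead code: command is an unsigned field in the ABI header, so it can never be negative, and the remaining bounds/NULL checks also gate the four new XRC commands registered above. For reference, from ib_user_verbs.h:

    struct ib_uverbs_cmd_hdr {
            __u32 command;
            __u16 in_words;
            __u16 out_words;
    };
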
@@ -741,6 +761,8 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
741 | 761 | ||
742 | kref_init(&uverbs_dev->ref); | 762 | kref_init(&uverbs_dev->ref); |
743 | init_completion(&uverbs_dev->comp); | 763 | init_completion(&uverbs_dev->comp); |
764 | uverbs_dev->xrcd_tree = RB_ROOT; | ||
765 | mutex_init(&uverbs_dev->xrcd_tree_mutex); | ||
744 | 766 | ||
745 | spin_lock(&map_lock); | 767 | spin_lock(&map_lock); |
746 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); | 768 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index af7a8b08b2e9..42517500b223 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/errno.h> | 39 | #include <linux/errno.h> |
40 | #include <linux/err.h> | 40 | #include <linux/err.h> |
41 | #include <linux/string.h> | 41 | #include <linux/string.h> |
42 | #include <linux/slab.h> | ||
42 | 43 | ||
43 | #include <rdma/ib_verbs.h> | 44 | #include <rdma/ib_verbs.h> |
44 | #include <rdma/ib_cache.h> | 45 | #include <rdma/ib_cache.h> |
@@ -77,6 +78,31 @@ enum ib_rate mult_to_ib_rate(int mult) | |||
77 | } | 78 | } |
78 | EXPORT_SYMBOL(mult_to_ib_rate); | 79 | EXPORT_SYMBOL(mult_to_ib_rate); |
79 | 80 | ||
81 | int ib_rate_to_mbps(enum ib_rate rate) | ||
82 | { | ||
83 | switch (rate) { | ||
84 | case IB_RATE_2_5_GBPS: return 2500; | ||
85 | case IB_RATE_5_GBPS: return 5000; | ||
86 | case IB_RATE_10_GBPS: return 10000; | ||
87 | case IB_RATE_20_GBPS: return 20000; | ||
88 | case IB_RATE_30_GBPS: return 30000; | ||
89 | case IB_RATE_40_GBPS: return 40000; | ||
90 | case IB_RATE_60_GBPS: return 60000; | ||
91 | case IB_RATE_80_GBPS: return 80000; | ||
92 | case IB_RATE_120_GBPS: return 120000; | ||
93 | case IB_RATE_14_GBPS: return 14062; | ||
94 | case IB_RATE_56_GBPS: return 56250; | ||
95 | case IB_RATE_112_GBPS: return 112500; | ||
96 | case IB_RATE_168_GBPS: return 168750; | ||
97 | case IB_RATE_25_GBPS: return 25781; | ||
98 | case IB_RATE_100_GBPS: return 103125; | ||
99 | case IB_RATE_200_GBPS: return 206250; | ||
100 | case IB_RATE_300_GBPS: return 309375; | ||
101 | default: return -1; | ||
102 | } | ||
103 | } | ||
104 | EXPORT_SYMBOL(ib_rate_to_mbps); | ||
105 | |||
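The newer entries are not round numbers because they are wire data rates: an FDR lane signals at 14.0625 Gb/s and an EDR lane at 25.78125 Gb/s, so the 4x and 12x widths come out as exact multiples (the 1x values are truncated to whole Mb/s). A quick check against the table:

    /* 4x FDR: 4 * 14062.5  = 56250  -> IB_RATE_56_GBPS
     * 4x EDR: 4 * 25781.25 = 103125 -> IB_RATE_100_GBPS */
    int mbps = ib_rate_to_mbps(IB_RATE_56_GBPS);    /* 56250 */
    if (mbps < 0)
            pr_warn("unrecognized IB rate\n");      /* illustrative */
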
80 | enum rdma_transport_type | 106 | enum rdma_transport_type |
81 | rdma_node_get_transport(enum rdma_node_type node_type) | 107 | rdma_node_get_transport(enum rdma_node_type node_type) |
82 | { | 108 | { |
@@ -250,6 +276,13 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd, | |||
250 | srq->uobject = NULL; | 276 | srq->uobject = NULL; |
251 | srq->event_handler = srq_init_attr->event_handler; | 277 | srq->event_handler = srq_init_attr->event_handler; |
252 | srq->srq_context = srq_init_attr->srq_context; | 278 | srq->srq_context = srq_init_attr->srq_context; |
279 | srq->srq_type = srq_init_attr->srq_type; | ||
280 | if (srq->srq_type == IB_SRQT_XRC) { | ||
281 | srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; | ||
282 | srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; | ||
283 | atomic_inc(&srq->ext.xrc.xrcd->usecnt); | ||
284 | atomic_inc(&srq->ext.xrc.cq->usecnt); | ||
285 | } | ||
253 | atomic_inc(&pd->usecnt); | 286 | atomic_inc(&pd->usecnt); |
254 | atomic_set(&srq->usecnt, 0); | 287 | atomic_set(&srq->usecnt, 0); |
255 | } | 288 | } |
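With the new srq_type plumbing, a kernel consumer creates an XRC SRQ by filling in the ext.xrc union; the core takes the extra CQ and XRCD references shown above. A minimal sketch, assuming pd, xrcd and cq were set up earlier and my_srq_event is a hypothetical handler:

    struct ib_srq_init_attr init_attr = {
            .event_handler = my_srq_event,
            .srq_type      = IB_SRQT_XRC,
            .attr          = { .max_wr = 128, .max_sge = 1 },
            .ext.xrc       = { .xrcd = xrcd, .cq = cq },
    };
    struct ib_srq *srq = ib_create_srq(pd, &init_attr);
    if (IS_ERR(srq))
            return PTR_ERR(srq);
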
@@ -279,16 +312,29 @@ EXPORT_SYMBOL(ib_query_srq); | |||
279 | int ib_destroy_srq(struct ib_srq *srq) | 312 | int ib_destroy_srq(struct ib_srq *srq) |
280 | { | 313 | { |
281 | struct ib_pd *pd; | 314 | struct ib_pd *pd; |
315 | enum ib_srq_type srq_type; | ||
316 | struct ib_xrcd *uninitialized_var(xrcd); | ||
317 | struct ib_cq *uninitialized_var(cq); | ||
282 | int ret; | 318 | int ret; |
283 | 319 | ||
284 | if (atomic_read(&srq->usecnt)) | 320 | if (atomic_read(&srq->usecnt)) |
285 | return -EBUSY; | 321 | return -EBUSY; |
286 | 322 | ||
287 | pd = srq->pd; | 323 | pd = srq->pd; |
324 | srq_type = srq->srq_type; | ||
325 | if (srq_type == IB_SRQT_XRC) { | ||
326 | xrcd = srq->ext.xrc.xrcd; | ||
327 | cq = srq->ext.xrc.cq; | ||
328 | } | ||
288 | 329 | ||
289 | ret = srq->device->destroy_srq(srq); | 330 | ret = srq->device->destroy_srq(srq); |
290 | if (!ret) | 331 | if (!ret) { |
291 | atomic_dec(&pd->usecnt); | 332 | atomic_dec(&pd->usecnt); |
333 | if (srq_type == IB_SRQT_XRC) { | ||
334 | atomic_dec(&xrcd->usecnt); | ||
335 | atomic_dec(&cq->usecnt); | ||
336 | } | ||
337 | } | ||
292 | 338 | ||
293 | return ret; | 339 | return ret; |
294 | } | 340 | } |
@@ -296,28 +342,123 @@ EXPORT_SYMBOL(ib_destroy_srq); | |||
296 | 342 | ||
297 | /* Queue pairs */ | 343 | /* Queue pairs */ |
298 | 344 | ||
345 | static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) | ||
346 | { | ||
347 | struct ib_qp *qp = context; | ||
348 | |||
349 | list_for_each_entry(event->element.qp, &qp->open_list, open_list) | ||
350 | event->element.qp->event_handler(event, event->element.qp->qp_context); | ||
351 | } | ||
352 | |||
353 | static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) | ||
354 | { | ||
355 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
356 | list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); | ||
357 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
358 | } | ||
359 | |||
360 | static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, | ||
361 | void (*event_handler)(struct ib_event *, void *), | ||
362 | void *qp_context) | ||
363 | { | ||
364 | struct ib_qp *qp; | ||
365 | unsigned long flags; | ||
366 | |||
367 | qp = kzalloc(sizeof *qp, GFP_KERNEL); | ||
368 | if (!qp) | ||
369 | return ERR_PTR(-ENOMEM); | ||
370 | |||
371 | qp->real_qp = real_qp; | ||
372 | atomic_inc(&real_qp->usecnt); | ||
373 | qp->device = real_qp->device; | ||
374 | qp->event_handler = event_handler; | ||
375 | qp->qp_context = qp_context; | ||
376 | qp->qp_num = real_qp->qp_num; | ||
377 | qp->qp_type = real_qp->qp_type; | ||
378 | |||
379 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | ||
380 | list_add(&qp->open_list, &real_qp->open_list); | ||
381 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | ||
382 | |||
383 | return qp; | ||
384 | } | ||
385 | |||
386 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, | ||
387 | struct ib_qp_open_attr *qp_open_attr) | ||
388 | { | ||
389 | struct ib_qp *qp, *real_qp; | ||
390 | |||
391 | if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) | ||
392 | return ERR_PTR(-EINVAL); | ||
393 | |||
394 | qp = ERR_PTR(-EINVAL); | ||
395 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
396 | list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { | ||
397 | if (real_qp->qp_num == qp_open_attr->qp_num) { | ||
398 | qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, | ||
399 | qp_open_attr->qp_context); | ||
400 | break; | ||
401 | } | ||
402 | } | ||
403 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
404 | return qp; | ||
405 | } | ||
406 | EXPORT_SYMBOL(ib_open_qp); | ||
407 | |||
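ib_open_qp() hands out an additional lightweight handle to an XRC target QP already parked on the domain's tgt_qp_list, looked up by QP number under tgt_qp_mutex; events on the real QP fan out to every opener via __ib_shared_qp_event_handler(). A minimal sketch, with my_qp_event, ctx and tgt_qpn as hypothetical caller-side names:

    struct ib_qp_open_attr open_attr = {
            .event_handler = my_qp_event,
            .qp_context    = ctx,
            .qp_num        = tgt_qpn,   /* QP number learned out of band */
            .qp_type       = IB_QPT_XRC_TGT,
    };
    struct ib_qp *qp = ib_open_qp(xrcd, &open_attr);
    if (IS_ERR(qp))
            return PTR_ERR(qp);
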
299 | struct ib_qp *ib_create_qp(struct ib_pd *pd, | 408 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
300 | struct ib_qp_init_attr *qp_init_attr) | 409 | struct ib_qp_init_attr *qp_init_attr) |
301 | { | 410 | { |
302 | struct ib_qp *qp; | 411 | struct ib_qp *qp, *real_qp; |
412 | struct ib_device *device; | ||
303 | 413 | ||
304 | qp = pd->device->create_qp(pd, qp_init_attr, NULL); | 414 | device = pd ? pd->device : qp_init_attr->xrcd->device; |
415 | qp = device->create_qp(pd, qp_init_attr, NULL); | ||
305 | 416 | ||
306 | if (!IS_ERR(qp)) { | 417 | if (!IS_ERR(qp)) { |
307 | qp->device = pd->device; | 418 | qp->device = device; |
308 | qp->pd = pd; | 419 | qp->real_qp = qp; |
309 | qp->send_cq = qp_init_attr->send_cq; | 420 | qp->uobject = NULL; |
310 | qp->recv_cq = qp_init_attr->recv_cq; | 421 | qp->qp_type = qp_init_attr->qp_type; |
311 | qp->srq = qp_init_attr->srq; | 422 | |
312 | qp->uobject = NULL; | 423 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { |
313 | qp->event_handler = qp_init_attr->event_handler; | 424 | qp->event_handler = __ib_shared_qp_event_handler; |
314 | qp->qp_context = qp_init_attr->qp_context; | 425 | qp->qp_context = qp; |
315 | qp->qp_type = qp_init_attr->qp_type; | 426 | qp->pd = NULL; |
316 | atomic_inc(&pd->usecnt); | 427 | qp->send_cq = qp->recv_cq = NULL; |
317 | atomic_inc(&qp_init_attr->send_cq->usecnt); | 428 | qp->srq = NULL; |
318 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | 429 | qp->xrcd = qp_init_attr->xrcd; |
319 | if (qp_init_attr->srq) | 430 | atomic_inc(&qp_init_attr->xrcd->usecnt); |
320 | atomic_inc(&qp_init_attr->srq->usecnt); | 431 | INIT_LIST_HEAD(&qp->open_list); |
432 | atomic_set(&qp->usecnt, 0); | ||
433 | |||
434 | real_qp = qp; | ||
435 | qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, | ||
436 | qp_init_attr->qp_context); | ||
437 | if (!IS_ERR(qp)) | ||
438 | __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); | ||
439 | else | ||
440 | real_qp->device->destroy_qp(real_qp); | ||
441 | } else { | ||
442 | qp->event_handler = qp_init_attr->event_handler; | ||
443 | qp->qp_context = qp_init_attr->qp_context; | ||
444 | if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { | ||
445 | qp->recv_cq = NULL; | ||
446 | qp->srq = NULL; | ||
447 | } else { | ||
448 | qp->recv_cq = qp_init_attr->recv_cq; | ||
449 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | ||
450 | qp->srq = qp_init_attr->srq; | ||
451 | if (qp->srq) | ||
452 | atomic_inc(&qp_init_attr->srq->usecnt); | ||
453 | } | ||
454 | |||
455 | qp->pd = pd; | ||
456 | qp->send_cq = qp_init_attr->send_cq; | ||
457 | qp->xrcd = NULL; | ||
458 | |||
459 | atomic_inc(&pd->usecnt); | ||
460 | atomic_inc(&qp_init_attr->send_cq->usecnt); | ||
461 | } | ||
321 | } | 462 | } |
322 | 463 | ||
323 | return qp; | 464 | return qp; |
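For IB_QPT_XRC_TGT the PD may be NULL: the device comes from the XRC domain, and there are no send/receive CQs or SRQ to reference. Note that even the creator gets back an opener handle (real_qp points at the shared QP), so event fan-out and reference counting work the same for everyone. A minimal sketch, with my_qp_event hypothetical:

    struct ib_qp_init_attr init_attr = {
            .event_handler = my_qp_event,
            .qp_type       = IB_QPT_XRC_TGT,
            .xrcd          = xrcd,
    };
    struct ib_qp *qp = ib_create_qp(NULL, &init_attr);
    if (IS_ERR(qp))
            return PTR_ERR(qp);
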
@@ -326,8 +467,8 @@ EXPORT_SYMBOL(ib_create_qp); | |||
326 | 467 | ||
327 | static const struct { | 468 | static const struct { |
328 | int valid; | 469 | int valid; |
329 | enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1]; | 470 | enum ib_qp_attr_mask req_param[IB_QPT_MAX]; |
330 | enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1]; | 471 | enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; |
331 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { | 472 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
332 | [IB_QPS_RESET] = { | 473 | [IB_QPS_RESET] = { |
333 | [IB_QPS_RESET] = { .valid = 1 }, | 474 | [IB_QPS_RESET] = { .valid = 1 }, |
@@ -343,6 +484,12 @@ static const struct { | |||
343 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | 484 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
344 | IB_QP_PORT | | 485 | IB_QP_PORT | |
345 | IB_QP_ACCESS_FLAGS), | 486 | IB_QP_ACCESS_FLAGS), |
487 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | | ||
488 | IB_QP_PORT | | ||
489 | IB_QP_ACCESS_FLAGS), | ||
490 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | ||
491 | IB_QP_PORT | | ||
492 | IB_QP_ACCESS_FLAGS), | ||
346 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 493 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
347 | IB_QP_QKEY), | 494 | IB_QP_QKEY), |
348 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 495 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -365,6 +512,12 @@ static const struct { | |||
365 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | 512 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
366 | IB_QP_PORT | | 513 | IB_QP_PORT | |
367 | IB_QP_ACCESS_FLAGS), | 514 | IB_QP_ACCESS_FLAGS), |
515 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | | ||
516 | IB_QP_PORT | | ||
517 | IB_QP_ACCESS_FLAGS), | ||
518 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | ||
519 | IB_QP_PORT | | ||
520 | IB_QP_ACCESS_FLAGS), | ||
368 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 521 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
369 | IB_QP_QKEY), | 522 | IB_QP_QKEY), |
370 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 523 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -384,6 +537,16 @@ static const struct { | |||
384 | IB_QP_RQ_PSN | | 537 | IB_QP_RQ_PSN | |
385 | IB_QP_MAX_DEST_RD_ATOMIC | | 538 | IB_QP_MAX_DEST_RD_ATOMIC | |
386 | IB_QP_MIN_RNR_TIMER), | 539 | IB_QP_MIN_RNR_TIMER), |
540 | [IB_QPT_XRC_INI] = (IB_QP_AV | | ||
541 | IB_QP_PATH_MTU | | ||
542 | IB_QP_DEST_QPN | | ||
543 | IB_QP_RQ_PSN), | ||
544 | [IB_QPT_XRC_TGT] = (IB_QP_AV | | ||
545 | IB_QP_PATH_MTU | | ||
546 | IB_QP_DEST_QPN | | ||
547 | IB_QP_RQ_PSN | | ||
548 | IB_QP_MAX_DEST_RD_ATOMIC | | ||
549 | IB_QP_MIN_RNR_TIMER), | ||
387 | }, | 550 | }, |
388 | .opt_param = { | 551 | .opt_param = { |
389 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | 552 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
@@ -394,6 +557,12 @@ static const struct { | |||
394 | [IB_QPT_RC] = (IB_QP_ALT_PATH | | 557 | [IB_QPT_RC] = (IB_QP_ALT_PATH | |
395 | IB_QP_ACCESS_FLAGS | | 558 | IB_QP_ACCESS_FLAGS | |
396 | IB_QP_PKEY_INDEX), | 559 | IB_QP_PKEY_INDEX), |
560 | [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | | ||
561 | IB_QP_ACCESS_FLAGS | | ||
562 | IB_QP_PKEY_INDEX), | ||
563 | [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | | ||
564 | IB_QP_ACCESS_FLAGS | | ||
565 | IB_QP_PKEY_INDEX), | ||
397 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 566 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
398 | IB_QP_QKEY), | 567 | IB_QP_QKEY), |
399 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 568 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -414,6 +583,13 @@ static const struct { | |||
414 | IB_QP_RNR_RETRY | | 583 | IB_QP_RNR_RETRY | |
415 | IB_QP_SQ_PSN | | 584 | IB_QP_SQ_PSN | |
416 | IB_QP_MAX_QP_RD_ATOMIC), | 585 | IB_QP_MAX_QP_RD_ATOMIC), |
586 | [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | | ||
587 | IB_QP_RETRY_CNT | | ||
588 | IB_QP_RNR_RETRY | | ||
589 | IB_QP_SQ_PSN | | ||
590 | IB_QP_MAX_QP_RD_ATOMIC), | ||
591 | [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | | ||
592 | IB_QP_SQ_PSN), | ||
417 | [IB_QPT_SMI] = IB_QP_SQ_PSN, | 593 | [IB_QPT_SMI] = IB_QP_SQ_PSN, |
418 | [IB_QPT_GSI] = IB_QP_SQ_PSN, | 594 | [IB_QPT_GSI] = IB_QP_SQ_PSN, |
419 | }, | 595 | }, |
@@ -429,6 +605,15 @@ static const struct { | |||
429 | IB_QP_ACCESS_FLAGS | | 605 | IB_QP_ACCESS_FLAGS | |
430 | IB_QP_MIN_RNR_TIMER | | 606 | IB_QP_MIN_RNR_TIMER | |
431 | IB_QP_PATH_MIG_STATE), | 607 | IB_QP_PATH_MIG_STATE), |
608 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
609 | IB_QP_ALT_PATH | | ||
610 | IB_QP_ACCESS_FLAGS | | ||
611 | IB_QP_PATH_MIG_STATE), | ||
612 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
613 | IB_QP_ALT_PATH | | ||
614 | IB_QP_ACCESS_FLAGS | | ||
615 | IB_QP_MIN_RNR_TIMER | | ||
616 | IB_QP_PATH_MIG_STATE), | ||
432 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 617 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
433 | IB_QP_QKEY), | 618 | IB_QP_QKEY), |
434 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 619 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -453,6 +638,15 @@ static const struct { | |||
453 | IB_QP_ALT_PATH | | 638 | IB_QP_ALT_PATH | |
454 | IB_QP_PATH_MIG_STATE | | 639 | IB_QP_PATH_MIG_STATE | |
455 | IB_QP_MIN_RNR_TIMER), | 640 | IB_QP_MIN_RNR_TIMER), |
641 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
642 | IB_QP_ACCESS_FLAGS | | ||
643 | IB_QP_ALT_PATH | | ||
644 | IB_QP_PATH_MIG_STATE), | ||
645 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
646 | IB_QP_ACCESS_FLAGS | | ||
647 | IB_QP_ALT_PATH | | ||
648 | IB_QP_PATH_MIG_STATE | | ||
649 | IB_QP_MIN_RNR_TIMER), | ||
456 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 650 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
457 | IB_QP_QKEY), | 651 | IB_QP_QKEY), |
458 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 652 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -465,6 +659,8 @@ static const struct { | |||
465 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 659 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
466 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 660 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
467 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 661 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
662 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, | ||
663 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ | ||
468 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 664 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
469 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY | 665 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY |
470 | } | 666 | } |
@@ -487,6 +683,15 @@ static const struct { | |||
487 | IB_QP_ACCESS_FLAGS | | 683 | IB_QP_ACCESS_FLAGS | |
488 | IB_QP_MIN_RNR_TIMER | | 684 | IB_QP_MIN_RNR_TIMER | |
489 | IB_QP_PATH_MIG_STATE), | 685 | IB_QP_PATH_MIG_STATE), |
686 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
687 | IB_QP_ALT_PATH | | ||
688 | IB_QP_ACCESS_FLAGS | | ||
689 | IB_QP_PATH_MIG_STATE), | ||
690 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
691 | IB_QP_ALT_PATH | | ||
692 | IB_QP_ACCESS_FLAGS | | ||
693 | IB_QP_MIN_RNR_TIMER | | ||
694 | IB_QP_PATH_MIG_STATE), | ||
490 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 695 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
491 | IB_QP_QKEY), | 696 | IB_QP_QKEY), |
492 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 697 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -515,6 +720,25 @@ static const struct { | |||
515 | IB_QP_PKEY_INDEX | | 720 | IB_QP_PKEY_INDEX | |
516 | IB_QP_MIN_RNR_TIMER | | 721 | IB_QP_MIN_RNR_TIMER | |
517 | IB_QP_PATH_MIG_STATE), | 722 | IB_QP_PATH_MIG_STATE), |
723 | [IB_QPT_XRC_INI] = (IB_QP_PORT | | ||
724 | IB_QP_AV | | ||
725 | IB_QP_TIMEOUT | | ||
726 | IB_QP_RETRY_CNT | | ||
727 | IB_QP_RNR_RETRY | | ||
728 | IB_QP_MAX_QP_RD_ATOMIC | | ||
729 | IB_QP_ALT_PATH | | ||
730 | IB_QP_ACCESS_FLAGS | | ||
731 | IB_QP_PKEY_INDEX | | ||
732 | IB_QP_PATH_MIG_STATE), | ||
733 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | | ||
734 | IB_QP_AV | | ||
735 | IB_QP_TIMEOUT | | ||
736 | IB_QP_MAX_DEST_RD_ATOMIC | | ||
737 | IB_QP_ALT_PATH | | ||
738 | IB_QP_ACCESS_FLAGS | | ||
739 | IB_QP_PKEY_INDEX | | ||
740 | IB_QP_MIN_RNR_TIMER | | ||
741 | IB_QP_PATH_MIG_STATE), | ||
518 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 742 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
519 | IB_QP_QKEY), | 743 | IB_QP_QKEY), |
520 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 744 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -579,7 +803,7 @@ int ib_modify_qp(struct ib_qp *qp, | |||
579 | struct ib_qp_attr *qp_attr, | 803 | struct ib_qp_attr *qp_attr, |
580 | int qp_attr_mask) | 804 | int qp_attr_mask) |
581 | { | 805 | { |
582 | return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); | 806 | return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); |
583 | } | 807 | } |
584 | EXPORT_SYMBOL(ib_modify_qp); | 808 | EXPORT_SYMBOL(ib_modify_qp); |
585 | 809 | ||
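Because ib_modify_qp() now forwards to qp->real_qp, transitions issued on an opened handle land on the shared QP. Per the state table above, driving an XRC target QP from RTR to RTS needs only a timeout and send PSN besides the state change itself; a sketch with illustrative values:

    struct ib_qp_attr attr = {
            .qp_state = IB_QPS_RTS,
            .timeout  = 14,     /* illustrative */
            .sq_psn   = 0,
    };
    int ret = ib_modify_qp(qp, &attr,
                           IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_SQ_PSN);
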
@@ -589,11 +813,59 @@ int ib_query_qp(struct ib_qp *qp, | |||
589 | struct ib_qp_init_attr *qp_init_attr) | 813 | struct ib_qp_init_attr *qp_init_attr) |
590 | { | 814 | { |
591 | return qp->device->query_qp ? | 815 | return qp->device->query_qp ? |
592 | qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : | 816 | qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : |
593 | -ENOSYS; | 817 | -ENOSYS; |
594 | } | 818 | } |
595 | EXPORT_SYMBOL(ib_query_qp); | 819 | EXPORT_SYMBOL(ib_query_qp); |
596 | 820 | ||
821 | int ib_close_qp(struct ib_qp *qp) | ||
822 | { | ||
823 | struct ib_qp *real_qp; | ||
824 | unsigned long flags; | ||
825 | |||
826 | real_qp = qp->real_qp; | ||
827 | if (real_qp == qp) | ||
828 | return -EINVAL; | ||
829 | |||
830 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | ||
831 | list_del(&qp->open_list); | ||
832 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | ||
833 | |||
834 | atomic_dec(&real_qp->usecnt); | ||
835 | kfree(qp); | ||
836 | |||
837 | return 0; | ||
838 | } | ||
839 | EXPORT_SYMBOL(ib_close_qp); | ||
840 | |||
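ib_close_qp() only detaches one opener: it drops the handle's reference and frees the wrapper, and returns -EINVAL if called on the real QP itself. Callers holding an opened handle normally go through ib_destroy_qp(), which detects the shared case (qp->real_qp != qp) and routes it through __ib_destroy_shared_qp() below, tearing down the real QP only once the last user is gone:

    /* Sketch: an opener is done with its handle. */
    ret = ib_destroy_qp(opened_qp);  /* shared case: closes this handle and
                                      * destroys the real QP only if this
                                      * was the last user */
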
841 | static int __ib_destroy_shared_qp(struct ib_qp *qp) | ||
842 | { | ||
843 | struct ib_xrcd *xrcd; | ||
844 | struct ib_qp *real_qp; | ||
845 | int ret; | ||
846 | |||
847 | real_qp = qp->real_qp; | ||
848 | xrcd = real_qp->xrcd; | ||
849 | |||
850 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
851 | ib_close_qp(qp); | ||
852 | if (atomic_read(&real_qp->usecnt) == 0) | ||
853 | list_del(&real_qp->xrcd_list); | ||
854 | else | ||
855 | real_qp = NULL; | ||
856 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
857 | |||
858 | if (real_qp) { | ||
859 | ret = ib_destroy_qp(real_qp); | ||
860 | if (!ret) | ||
861 | atomic_dec(&xrcd->usecnt); | ||
862 | else | ||
863 | __ib_insert_xrcd_qp(xrcd, real_qp); | ||
864 | } | ||
865 | |||
866 | return 0; | ||
867 | } | ||
868 | |||
597 | int ib_destroy_qp(struct ib_qp *qp) | 869 | int ib_destroy_qp(struct ib_qp *qp) |
598 | { | 870 | { |
599 | struct ib_pd *pd; | 871 | struct ib_pd *pd; |
@@ -601,16 +873,25 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
601 | struct ib_srq *srq; | 873 | struct ib_srq *srq; |
602 | int ret; | 874 | int ret; |
603 | 875 | ||
604 | pd = qp->pd; | 876 | if (atomic_read(&qp->usecnt)) |
605 | scq = qp->send_cq; | 877 | return -EBUSY; |
606 | rcq = qp->recv_cq; | 878 | |
607 | srq = qp->srq; | 879 | if (qp->real_qp != qp) |
880 | return __ib_destroy_shared_qp(qp); | ||
881 | |||
882 | pd = qp->pd; | ||
883 | scq = qp->send_cq; | ||
884 | rcq = qp->recv_cq; | ||
885 | srq = qp->srq; | ||
608 | 886 | ||
609 | ret = qp->device->destroy_qp(qp); | 887 | ret = qp->device->destroy_qp(qp); |
610 | if (!ret) { | 888 | if (!ret) { |
611 | atomic_dec(&pd->usecnt); | 889 | if (pd) |
612 | atomic_dec(&scq->usecnt); | 890 | atomic_dec(&pd->usecnt); |
613 | atomic_dec(&rcq->usecnt); | 891 | if (scq) |
892 | atomic_dec(&scq->usecnt); | ||
893 | if (rcq) | ||
894 | atomic_dec(&rcq->usecnt); | ||
614 | if (srq) | 895 | if (srq) |
615 | atomic_dec(&srq->usecnt); | 896 | atomic_dec(&srq->usecnt); |
616 | } | 897 | } |
@@ -920,3 +1201,42 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) | |||
920 | return qp->device->detach_mcast(qp, gid, lid); | 1201 | return qp->device->detach_mcast(qp, gid, lid); |
921 | } | 1202 | } |
922 | EXPORT_SYMBOL(ib_detach_mcast); | 1203 | EXPORT_SYMBOL(ib_detach_mcast); |
1204 | |||
1205 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) | ||
1206 | { | ||
1207 | struct ib_xrcd *xrcd; | ||
1208 | |||
1209 | if (!device->alloc_xrcd) | ||
1210 | return ERR_PTR(-ENOSYS); | ||
1211 | |||
1212 | xrcd = device->alloc_xrcd(device, NULL, NULL); | ||
1213 | if (!IS_ERR(xrcd)) { | ||
1214 | xrcd->device = device; | ||
1215 | xrcd->inode = NULL; | ||
1216 | atomic_set(&xrcd->usecnt, 0); | ||
1217 | mutex_init(&xrcd->tgt_qp_mutex); | ||
1218 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); | ||
1219 | } | ||
1220 | |||
1221 | return xrcd; | ||
1222 | } | ||
1223 | EXPORT_SYMBOL(ib_alloc_xrcd); | ||
1224 | |||
1225 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | ||
1226 | { | ||
1227 | struct ib_qp *qp; | ||
1228 | int ret; | ||
1229 | |||
1230 | if (atomic_read(&xrcd->usecnt)) | ||
1231 | return -EBUSY; | ||
1232 | |||
1233 | while (!list_empty(&xrcd->tgt_qp_list)) { | ||
1234 | qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); | ||
1235 | ret = ib_destroy_qp(qp); | ||
1236 | if (ret) | ||
1237 | return ret; | ||
1238 | } | ||
1239 | |||
1240 | return xrcd->device->dealloc_xrcd(xrcd); | ||
1241 | } | ||
1242 | EXPORT_SYMBOL(ib_dealloc_xrcd); | ||
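Taken together, these two exports give kernel consumers the whole XRCD lifecycle (the uverbs path layers inode-based cross-process sharing on top). A minimal sketch:

    struct ib_xrcd *xrcd = ib_alloc_xrcd(device);
    if (IS_ERR(xrcd))
            return PTR_ERR(xrcd);

    /* ... create IB_QPT_XRC_TGT QPs and XRC SRQs against xrcd ... */

    ret = ib_dealloc_xrcd(xrcd);    /* -EBUSY while anything still holds it;
                                     * otherwise reaps leftover target QPs */
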
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 24f9e3a90e8e..32d34e88d5cf 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -288,6 +288,11 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) | |||
288 | cm_event.private_data_len = | 288 | cm_event.private_data_len = |
289 | be32_to_cpu(req->private_data_length); | 289 | be32_to_cpu(req->private_data_length); |
290 | cm_event.private_data = req->private_data; | 290 | cm_event.private_data = req->private_data; |
291 | /* | ||
292 | * Until ird/ord negotiation via MPAv2 support is added, send | ||
293 | * max supported values | ||
294 | */ | ||
295 | cm_event.ird = cm_event.ord = 128; | ||
291 | 296 | ||
292 | if (cm_id->event_handler) | 297 | if (cm_id->event_handler) |
293 | cm_id->event_handler(cm_id, &cm_event); | 298 | cm_id->event_handler(cm_id, &cm_event); |
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 0ebe4e806b86..8951db4ae29d 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -183,6 +183,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index) | |||
183 | case IW_CM_EVENT_ESTABLISHED: | 183 | case IW_CM_EVENT_ESTABLISHED: |
184 | c2_set_qp_state(req->qp, | 184 | c2_set_qp_state(req->qp, |
185 | C2_QP_STATE_RTS); | 185 | C2_QP_STATE_RTS); |
186 | /* | ||
187 | * Until ird/ord negotiation via MPAv2 support is added, send | ||
188 | * max supported values | ||
189 | */ | ||
190 | cm_event.ird = cm_event.ord = 128; | ||
186 | case IW_CM_EVENT_CLOSE: | 191 | case IW_CM_EVENT_CLOSE: |
187 | 192 | ||
188 | /* | 193 | /* |
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f101bb73be63..12f923d64e42 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -753,10 +753,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) | |||
753 | memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6); | 753 | memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6); |
754 | 754 | ||
755 | /* Print out the MAC address */ | 755 | /* Print out the MAC address */ |
756 | pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n", | 756 | pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr); |
757 | netdev->name, | ||
758 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | ||
759 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); | ||
760 | 757 | ||
761 | #if 0 | 758 | #if 0 |
762 | /* Disable network packets */ | 759 | /* Disable network packets */ |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 6cd642aaa4de..de6d0774e609 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -753,6 +753,11 @@ static void connect_request_upcall(struct iwch_ep *ep) | |||
753 | event.private_data_len = ep->plen; | 753 | event.private_data_len = ep->plen; |
754 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | 754 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); |
755 | event.provider_data = ep; | 755 | event.provider_data = ep; |
756 | /* | ||
757 | * Until ird/ord negotiation via MPAv2 support is added, send max | ||
758 | * supported values | ||
759 | */ | ||
760 | event.ird = event.ord = 8; | ||
756 | if (state_read(&ep->parent_ep->com) != DEAD) { | 761 | if (state_read(&ep->parent_ep->com) != DEAD) { |
757 | get_ep(&ep->com); | 762 | get_ep(&ep->com); |
758 | ep->parent_ep->com.cm_id->event_handler( | 763 | ep->parent_ep->com.cm_id->event_handler( |
@@ -770,6 +775,11 @@ static void established_upcall(struct iwch_ep *ep) | |||
770 | PDBG("%s ep %p\n", __func__, ep); | 775 | PDBG("%s ep %p\n", __func__, ep); |
771 | memset(&event, 0, sizeof(event)); | 776 | memset(&event, 0, sizeof(event)); |
772 | event.event = IW_CM_EVENT_ESTABLISHED; | 777 | event.event = IW_CM_EVENT_ESTABLISHED; |
778 | /* | ||
779 | * Until ird/ord negotiation via MPAv2 support is added, send max | ||
780 | * supported values | ||
781 | */ | ||
782 | event.ird = event.ord = 8; | ||
773 | if (ep->com.cm_id) { | 783 | if (ep->com.cm_id) { |
774 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); | 784 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); |
775 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | 785 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 71e0d845da3d..abcc9e76962b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, | |||
46 | struct ib_event event; | 46 | struct ib_event event; |
47 | struct iwch_qp_attributes attrs; | 47 | struct iwch_qp_attributes attrs; |
48 | struct iwch_qp *qhp; | 48 | struct iwch_qp *qhp; |
49 | unsigned long flag; | ||
49 | 50 | ||
50 | spin_lock(&rnicp->lock); | 51 | spin_lock(&rnicp->lock); |
51 | qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); | 52 | qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); |
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, | |||
94 | if (qhp->ibqp.event_handler) | 95 | if (qhp->ibqp.event_handler) |
95 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); | 96 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); |
96 | 97 | ||
98 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
97 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 99 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
100 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | ||
98 | 101 | ||
99 | if (atomic_dec_and_test(&qhp->refcnt)) | 102 | if (atomic_dec_and_test(&qhp->refcnt)) |
100 | wake_up(&qhp->wait); | 103 | wake_up(&qhp->wait); |
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) | |||
107 | struct iwch_cq *chp; | 110 | struct iwch_cq *chp; |
108 | struct iwch_qp *qhp; | 111 | struct iwch_qp *qhp; |
109 | u32 cqid = RSPQ_CQID(rsp_msg); | 112 | u32 cqid = RSPQ_CQID(rsp_msg); |
113 | unsigned long flag; | ||
110 | 114 | ||
111 | rnicp = (struct iwch_dev *) rdev_p->ulp; | 115 | rnicp = (struct iwch_dev *) rdev_p->ulp; |
112 | spin_lock(&rnicp->lock); | 116 | spin_lock(&rnicp->lock); |
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) | |||
170 | */ | 174 | */ |
171 | if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) | 175 | if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) |
172 | dst_confirm(qhp->ep->dst); | 176 | dst_confirm(qhp->ep->dst); |
177 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
173 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 178 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
179 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | ||
174 | break; | 180 | break; |
175 | 181 | ||
176 | case TPT_ERR_STAG: | 182 | case TPT_ERR_STAG: |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index c7d9411f2954..37c224fc3ad9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
190 | chp->rhp = rhp; | 190 | chp->rhp = rhp; |
191 | chp->ibcq.cqe = 1 << chp->cq.size_log2; | 191 | chp->ibcq.cqe = 1 << chp->cq.size_log2; |
192 | spin_lock_init(&chp->lock); | 192 | spin_lock_init(&chp->lock); |
193 | spin_lock_init(&chp->comp_handler_lock); | ||
193 | atomic_set(&chp->refcnt, 1); | 194 | atomic_set(&chp->refcnt, 1); |
194 | init_waitqueue_head(&chp->wait); | 195 | init_waitqueue_head(&chp->wait); |
195 | if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { | 196 | if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 9a342c9b220d..87c14b0c5ac0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -103,6 +103,7 @@ struct iwch_cq { | |||
103 | struct iwch_dev *rhp; | 103 | struct iwch_dev *rhp; |
104 | struct t3_cq cq; | 104 | struct t3_cq cq; |
105 | spinlock_t lock; | 105 | spinlock_t lock; |
106 | spinlock_t comp_handler_lock; | ||
106 | atomic_t refcnt; | 107 | atomic_t refcnt; |
107 | wait_queue_head_t wait; | 108 | wait_queue_head_t wait; |
108 | u32 __user *user_rptr_addr; | 109 | u32 __user *user_rptr_addr; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ecd313f359a4..bea5839d89ee 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, | |||
822 | flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); | 822 | flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); |
823 | spin_unlock(&qhp->lock); | 823 | spin_unlock(&qhp->lock); |
824 | spin_unlock_irqrestore(&rchp->lock, *flag); | 824 | spin_unlock_irqrestore(&rchp->lock, *flag); |
825 | if (flushed) | 825 | if (flushed) { |
826 | spin_lock_irqsave(&rchp->comp_handler_lock, *flag); | ||
826 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 827 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
828 | spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); | ||
829 | } | ||
827 | 830 | ||
828 | /* locking hierarchy: cq lock first, then qp lock. */ | 831 | /* locking hierarchy: cq lock first, then qp lock. */ |
829 | spin_lock_irqsave(&schp->lock, *flag); | 832 | spin_lock_irqsave(&schp->lock, *flag); |
@@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, | |||
833 | flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); | 836 | flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); |
834 | spin_unlock(&qhp->lock); | 837 | spin_unlock(&qhp->lock); |
835 | spin_unlock_irqrestore(&schp->lock, *flag); | 838 | spin_unlock_irqrestore(&schp->lock, *flag); |
836 | if (flushed) | 839 | if (flushed) { |
840 | spin_lock_irqsave(&schp->comp_handler_lock, *flag); | ||
837 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 841 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); |
842 | spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); | ||
843 | } | ||
838 | 844 | ||
839 | /* deref */ | 845 | /* deref */ |
840 | if (atomic_dec_and_test(&qhp->refcnt)) | 846 | if (atomic_dec_and_test(&qhp->refcnt)) |
@@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
853 | if (qhp->ibqp.uobject) { | 859 | if (qhp->ibqp.uobject) { |
854 | cxio_set_wq_in_error(&qhp->wq); | 860 | cxio_set_wq_in_error(&qhp->wq); |
855 | cxio_set_cq_in_error(&rchp->cq); | 861 | cxio_set_cq_in_error(&rchp->cq); |
862 | spin_lock_irqsave(&rchp->comp_handler_lock, *flag); | ||
856 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 863 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
864 | spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); | ||
857 | if (schp != rchp) { | 865 | if (schp != rchp) { |
858 | cxio_set_cq_in_error(&schp->cq); | 866 | cxio_set_cq_in_error(&schp->cq); |
867 | spin_lock_irqsave(&schp->comp_handler_lock, *flag); | ||
859 | (*schp->ibcq.comp_handler)(&schp->ibcq, | 868 | (*schp->ibcq.comp_handler)(&schp->ibcq, |
860 | schp->ibcq.cq_context); | 869 | schp->ibcq.cq_context); |
870 | spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); | ||
861 | } | 871 | } |
862 | return; | 872 | return; |
863 | } | 873 | } |
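The new per-CQ comp_handler_lock closes a race where the ULP's completion callback could be invoked concurrently from the async event path (iwch_ev.c) and the flush paths above; every call site now brackets the upcall the same way:

    spin_lock_irqsave(&chp->comp_handler_lock, flag);
    (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
    spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
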
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 77f769d9227d..b36cdac9c558 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -103,7 +103,8 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " | |||
103 | static int mpa_rev = 1; | 103 | static int mpa_rev = 1; |
104 | module_param(mpa_rev, int, 0644); | 104 | module_param(mpa_rev, int, 0644); |
105 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " | 105 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " |
106 | "1 is spec compliant. (default=1)"); | 106 | "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" |
107 | " compliant (default=1)"); | ||
107 | 108 | ||
108 | static int markers_enabled; | 109 | static int markers_enabled; |
109 | module_param(markers_enabled, int, 0644); | 110 | module_param(markers_enabled, int, 0644); |
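With the extra revision, mpa_rev now selects how much of the enhanced-connection handshake the driver attempts, so MPAv2 (with its ird/ord exchange) can be requested at load time. A hedged example, assuming this file builds into the iw_cxgb4 module:

    modprobe iw_cxgb4 mpa_rev=2
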
@@ -497,17 +498,21 @@ static int send_connect(struct c4iw_ep *ep) | |||
497 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 498 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
498 | } | 499 | } |
499 | 500 | ||
500 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) | 501 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, |
502 | u8 mpa_rev_to_use) | ||
501 | { | 503 | { |
502 | int mpalen, wrlen; | 504 | int mpalen, wrlen; |
503 | struct fw_ofld_tx_data_wr *req; | 505 | struct fw_ofld_tx_data_wr *req; |
504 | struct mpa_message *mpa; | 506 | struct mpa_message *mpa; |
507 | struct mpa_v2_conn_params mpa_v2_params; | ||
505 | 508 | ||
506 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | 509 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); |
507 | 510 | ||
508 | BUG_ON(skb_cloned(skb)); | 511 | BUG_ON(skb_cloned(skb)); |
509 | 512 | ||
510 | mpalen = sizeof(*mpa) + ep->plen; | 513 | mpalen = sizeof(*mpa) + ep->plen; |
514 | if (mpa_rev_to_use == 2) | ||
515 | mpalen += sizeof(struct mpa_v2_conn_params); | ||
511 | wrlen = roundup(mpalen + sizeof *req, 16); | 516 | wrlen = roundup(mpalen + sizeof *req, 16); |
512 | skb = get_skb(skb, wrlen, GFP_KERNEL); | 517 | skb = get_skb(skb, wrlen, GFP_KERNEL); |
513 | if (!skb) { | 518 | if (!skb) { |
@@ -533,12 +538,39 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) | |||
533 | mpa = (struct mpa_message *)(req + 1); | 538 | mpa = (struct mpa_message *)(req + 1); |
534 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); | 539 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); |
535 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | | 540 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | |
536 | (markers_enabled ? MPA_MARKERS : 0); | 541 | (markers_enabled ? MPA_MARKERS : 0) | |
542 | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); | ||
537 | mpa->private_data_size = htons(ep->plen); | 543 | mpa->private_data_size = htons(ep->plen); |
538 | mpa->revision = mpa_rev; | 544 | mpa->revision = mpa_rev_to_use; |
545 | if (mpa_rev_to_use == 1) | ||
546 | ep->tried_with_mpa_v1 = 1; | ||
547 | |||
548 | if (mpa_rev_to_use == 2) { | ||
549 | mpa->private_data_size += | ||
550 | htons(sizeof(struct mpa_v2_conn_params)); | ||
551 | mpa_v2_params.ird = htons((u16)ep->ird); | ||
552 | mpa_v2_params.ord = htons((u16)ep->ord); | ||
553 | |||
554 | if (peer2peer) { | ||
555 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | ||
556 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | ||
557 | mpa_v2_params.ord |= | ||
558 | htons(MPA_V2_RDMA_WRITE_RTR); | ||
559 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | ||
560 | mpa_v2_params.ord |= | ||
561 | htons(MPA_V2_RDMA_READ_RTR); | ||
562 | } | ||
563 | memcpy(mpa->private_data, &mpa_v2_params, | ||
564 | sizeof(struct mpa_v2_conn_params)); | ||
539 | 565 | ||
540 | if (ep->plen) | 566 | if (ep->plen) |
541 | memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); | 567 | memcpy(mpa->private_data + |
568 | sizeof(struct mpa_v2_conn_params), | ||
569 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | ||
570 | } else | ||
571 | if (ep->plen) | ||
572 | memcpy(mpa->private_data, | ||
573 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | ||
542 | 574 | ||
543 | /* | 575 | /* |
544 | * Reference the mpa skb. This ensures the data area | 576 | * Reference the mpa skb. This ensures the data area |
@@ -562,10 +594,13 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
562 | struct fw_ofld_tx_data_wr *req; | 594 | struct fw_ofld_tx_data_wr *req; |
563 | struct mpa_message *mpa; | 595 | struct mpa_message *mpa; |
564 | struct sk_buff *skb; | 596 | struct sk_buff *skb; |
597 | struct mpa_v2_conn_params mpa_v2_params; | ||
565 | 598 | ||
566 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | 599 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); |
567 | 600 | ||
568 | mpalen = sizeof(*mpa) + plen; | 601 | mpalen = sizeof(*mpa) + plen; |
602 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) | ||
603 | mpalen += sizeof(struct mpa_v2_conn_params); | ||
569 | wrlen = roundup(mpalen + sizeof *req, 16); | 604 | wrlen = roundup(mpalen + sizeof *req, 16); |
570 | 605 | ||
571 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | 606 | skb = get_skb(NULL, wrlen, GFP_KERNEL); |
@@ -595,8 +630,29 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
595 | mpa->flags = MPA_REJECT; | 630 | mpa->flags = MPA_REJECT; |
596 | mpa->revision = mpa_rev; | 631 | mpa->revision = mpa_rev; |
597 | mpa->private_data_size = htons(plen); | 632 | mpa->private_data_size = htons(plen); |
598 | if (plen) | 633 | |
599 | memcpy(mpa->private_data, pdata, plen); | 634 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
635 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | ||
636 | mpa->private_data_size += | ||
637 | htons(sizeof(struct mpa_v2_conn_params)); | ||
638 | mpa_v2_params.ird = htons(((u16)ep->ird) | | ||
639 | (peer2peer ? MPA_V2_PEER2PEER_MODEL : | ||
640 | 0)); | ||
641 | mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? | ||
642 | (p2p_type == | ||
643 | FW_RI_INIT_P2PTYPE_RDMA_WRITE ? | ||
644 | MPA_V2_RDMA_WRITE_RTR : p2p_type == | ||
645 | FW_RI_INIT_P2PTYPE_READ_REQ ? | ||
646 | MPA_V2_RDMA_READ_RTR : 0) : 0)); | ||
647 | memcpy(mpa->private_data, &mpa_v2_params, | ||
648 | sizeof(struct mpa_v2_conn_params)); | ||
649 | |||
650 | if (ep->plen) | ||
651 | memcpy(mpa->private_data + | ||
652 | sizeof(struct mpa_v2_conn_params), pdata, plen); | ||
653 | } else | ||
654 | if (plen) | ||
655 | memcpy(mpa->private_data, pdata, plen); | ||
600 | 656 | ||
601 | /* | 657 | /* |
602 | * Reference the mpa skb again. This ensures the data area | 658 | * Reference the mpa skb again. This ensures the data area |
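
The nested conditional expressions that build mpa_v2_params.ird/ord in send_mpa_reject() above are dense. The following is a behaviourally equivalent, flattened sketch (mask values from the iw_cxgb4.h hunk in this patch; the enum stands in for the firmware FW_RI_INIT_P2PTYPE_* constants):

#include <stdint.h>

#define MPA_V2_PEER2PEER_MODEL	0x8000	/* values from the iw_cxgb4.h hunk */
#define MPA_V2_RDMA_WRITE_RTR	0x8000
#define MPA_V2_RDMA_READ_RTR	0x4000

enum p2ptype { P2P_DISABLED, P2P_RDMA_WRITE, P2P_READ_REQ };

/* Host-order encoding of the two halfwords; htons() is applied afterwards. */
static void encode_v2_ird_ord(uint16_t ird, uint16_t ord, int peer2peer,
			      enum p2ptype p2p_type,
			      uint16_t *ird_out, uint16_t *ord_out)
{
	*ird_out = ird;
	*ord_out = ord;
	if (!peer2peer)
		return;
	*ird_out |= MPA_V2_PEER2PEER_MODEL;	/* always set in p2p mode */
	if (p2p_type == P2P_RDMA_WRITE)
		*ord_out |= MPA_V2_RDMA_WRITE_RTR;
	else if (p2p_type == P2P_READ_REQ)
		*ord_out |= MPA_V2_RDMA_READ_RTR;
}
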
@@ -617,10 +673,13 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
617 | struct fw_ofld_tx_data_wr *req; | 673 | struct fw_ofld_tx_data_wr *req; |
618 | struct mpa_message *mpa; | 674 | struct mpa_message *mpa; |
619 | struct sk_buff *skb; | 675 | struct sk_buff *skb; |
676 | struct mpa_v2_conn_params mpa_v2_params; | ||
620 | 677 | ||
621 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | 678 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); |
622 | 679 | ||
623 | mpalen = sizeof(*mpa) + plen; | 680 | mpalen = sizeof(*mpa) + plen; |
681 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) | ||
682 | mpalen += sizeof(struct mpa_v2_conn_params); | ||
624 | wrlen = roundup(mpalen + sizeof *req, 16); | 683 | wrlen = roundup(mpalen + sizeof *req, 16); |
625 | 684 | ||
626 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | 685 | skb = get_skb(NULL, wrlen, GFP_KERNEL); |
@@ -649,10 +708,36 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
649 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | 708 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); |
650 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | | 709 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | |
651 | (markers_enabled ? MPA_MARKERS : 0); | 710 | (markers_enabled ? MPA_MARKERS : 0); |
652 | mpa->revision = mpa_rev; | 711 | mpa->revision = ep->mpa_attr.version; |
653 | mpa->private_data_size = htons(plen); | 712 | mpa->private_data_size = htons(plen); |
654 | if (plen) | 713 | |
655 | memcpy(mpa->private_data, pdata, plen); | 714 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
715 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | ||
716 | mpa->private_data_size += | ||
717 | htons(sizeof(struct mpa_v2_conn_params)); | ||
718 | mpa_v2_params.ird = htons((u16)ep->ird); | ||
719 | mpa_v2_params.ord = htons((u16)ep->ord); | ||
720 | if (peer2peer && (ep->mpa_attr.p2p_type != | ||
721 | FW_RI_INIT_P2PTYPE_DISABLED)) { | ||
722 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | ||
723 | |||
724 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | ||
725 | mpa_v2_params.ord |= | ||
726 | htons(MPA_V2_RDMA_WRITE_RTR); | ||
727 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | ||
728 | mpa_v2_params.ord |= | ||
729 | htons(MPA_V2_RDMA_READ_RTR); | ||
730 | } | ||
731 | |||
732 | memcpy(mpa->private_data, &mpa_v2_params, | ||
733 | sizeof(struct mpa_v2_conn_params)); | ||
734 | |||
735 | if (ep->plen) | ||
736 | memcpy(mpa->private_data + | ||
737 | sizeof(struct mpa_v2_conn_params), pdata, plen); | ||
738 | } else | ||
739 | if (plen) | ||
740 | memcpy(mpa->private_data, pdata, plen); | ||
656 | 741 | ||
657 | /* | 742 | /* |
658 | * Reference the mpa skb. This ensures the data area | 743 | * Reference the mpa skb. This ensures the data area |
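
Two details of the send_mpa_reply() hunk above are easy to miss: the reply advertises ep->mpa_attr.version, i.e. the revision negotiated from the peer's request, rather than the local mpa_rev module parameter, and the v2 fields are appended only when both negotiated conditions hold. A one-line restatement of that gate (sketch only):

#include <stdbool.h>

/* Both conditions must hold before any v2 fields go into the reply;
 * otherwise the frame stays plain MPA v1. */
static bool reply_uses_mpa_v2(unsigned int version, unsigned int enhanced_rdma_conn)
{
	return version == 2 && enhanced_rdma_conn;
}
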
@@ -695,7 +780,10 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |||
695 | 780 | ||
696 | /* start MPA negotiation */ | 781 | /* start MPA negotiation */ |
697 | send_flowc(ep, NULL); | 782 | send_flowc(ep, NULL); |
698 | send_mpa_req(ep, skb); | 783 | if (ep->retry_with_mpa_v1) |
784 | send_mpa_req(ep, skb, 1); | ||
785 | else | ||
786 | send_mpa_req(ep, skb, mpa_rev); | ||
699 | 787 | ||
700 | return 0; | 788 | return 0; |
701 | } | 789 | } |
@@ -769,8 +857,19 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status) | |||
769 | event.remote_addr = ep->com.remote_addr; | 857 | event.remote_addr = ep->com.remote_addr; |
770 | 858 | ||
771 | if ((status == 0) || (status == -ECONNREFUSED)) { | 859 | if ((status == 0) || (status == -ECONNREFUSED)) { |
772 | event.private_data_len = ep->plen; | 860 | if (!ep->tried_with_mpa_v1) { |
773 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | 861 | /* this means MPA_v2 is used */ |
862 | event.private_data_len = ep->plen - | ||
863 | sizeof(struct mpa_v2_conn_params); | ||
864 | event.private_data = ep->mpa_pkt + | ||
865 | sizeof(struct mpa_message) + | ||
866 | sizeof(struct mpa_v2_conn_params); | ||
867 | } else { | ||
868 | /* this means MPA_v1 is used */ | ||
869 | event.private_data_len = ep->plen; | ||
870 | event.private_data = ep->mpa_pkt + | ||
871 | sizeof(struct mpa_message); | ||
872 | } | ||
774 | } | 873 | } |
775 | 874 | ||
776 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, | 875 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, |
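
In the connect_reply_upcall() hunk above, ep->plen on a v2 exchange still includes the mpa_v2_conn_params header, so the upcall subtracts its size and advances the data pointer past it. A standalone sketch of the arithmetic, with an explicit bounds check added for clarity (the check itself is not part of the patch):

#include <stddef.h>
#include <stdint.h>

struct mpa_v2_conn_params { uint16_t ird, ord; };	/* as in iw_cxgb4.h */

/* Return the user-visible private data, skipping the v2 header. */
static const uint8_t *v2_private_data(const uint8_t *payload, size_t plen,
				      size_t *out_len)
{
	if (plen < sizeof(struct mpa_v2_conn_params))
		return NULL;			/* no room for the header */
	*out_len = plen - sizeof(struct mpa_v2_conn_params);
	return payload + sizeof(struct mpa_v2_conn_params);
}
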
@@ -793,9 +892,22 @@ static void connect_request_upcall(struct c4iw_ep *ep) | |||
793 | event.event = IW_CM_EVENT_CONNECT_REQUEST; | 892 | event.event = IW_CM_EVENT_CONNECT_REQUEST; |
794 | event.local_addr = ep->com.local_addr; | 893 | event.local_addr = ep->com.local_addr; |
795 | event.remote_addr = ep->com.remote_addr; | 894 | event.remote_addr = ep->com.remote_addr; |
796 | event.private_data_len = ep->plen; | ||
797 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | ||
798 | event.provider_data = ep; | 895 | event.provider_data = ep; |
896 | if (!ep->tried_with_mpa_v1) { | ||
897 | /* this means MPA_v2 is used */ | ||
898 | event.ord = ep->ord; | ||
899 | event.ird = ep->ird; | ||
900 | event.private_data_len = ep->plen - | ||
901 | sizeof(struct mpa_v2_conn_params); | ||
902 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + | ||
903 | sizeof(struct mpa_v2_conn_params); | ||
904 | } else { | ||
905 | /* this means MPA_v1 is used. Send max supported */ | ||
906 | event.ord = c4iw_max_read_depth; | ||
907 | event.ird = c4iw_max_read_depth; | ||
908 | event.private_data_len = ep->plen; | ||
909 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | ||
910 | } | ||
799 | if (state_read(&ep->parent_ep->com) != DEAD) { | 911 | if (state_read(&ep->parent_ep->com) != DEAD) { |
800 | c4iw_get_ep(&ep->com); | 912 | c4iw_get_ep(&ep->com); |
801 | ep->parent_ep->com.cm_id->event_handler( | 913 | ep->parent_ep->com.cm_id->event_handler( |
@@ -813,6 +925,8 @@ static void established_upcall(struct c4iw_ep *ep) | |||
813 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 925 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
814 | memset(&event, 0, sizeof(event)); | 926 | memset(&event, 0, sizeof(event)); |
815 | event.event = IW_CM_EVENT_ESTABLISHED; | 927 | event.event = IW_CM_EVENT_ESTABLISHED; |
928 | event.ird = ep->ird; | ||
929 | event.ord = ep->ord; | ||
816 | if (ep->com.cm_id) { | 930 | if (ep->com.cm_id) { |
817 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 931 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
818 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | 932 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); |
@@ -848,7 +962,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |||
848 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | 962 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) |
849 | { | 963 | { |
850 | struct mpa_message *mpa; | 964 | struct mpa_message *mpa; |
965 | struct mpa_v2_conn_params *mpa_v2_params; | ||
851 | u16 plen; | 966 | u16 plen; |
967 | u16 resp_ird, resp_ord; | ||
968 | u8 rtr_mismatch = 0, insuff_ird = 0; | ||
852 | struct c4iw_qp_attributes attrs; | 969 | struct c4iw_qp_attributes attrs; |
853 | enum c4iw_qp_attr_mask mask; | 970 | enum c4iw_qp_attr_mask mask; |
854 | int err; | 971 | int err; |
@@ -888,7 +1005,9 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
888 | mpa = (struct mpa_message *) ep->mpa_pkt; | 1005 | mpa = (struct mpa_message *) ep->mpa_pkt; |
889 | 1006 | ||
890 | /* Validate MPA header. */ | 1007 | /* Validate MPA header. */ |
891 | if (mpa->revision != mpa_rev) { | 1008 | if (mpa->revision > mpa_rev) { |
1009 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | ||
1010 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | ||
892 | err = -EPROTO; | 1011 | err = -EPROTO; |
893 | goto err; | 1012 | goto err; |
894 | } | 1013 | } |
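
The header validation above is relaxed from an exact match to a ceiling check, which is what lets a v2 initiator accept a v1 reply and negotiate downward. As a sketch:

/* The exact-match test becomes a ceiling: any peer revision up to our
 * own is acceptable, so a v2 initiator can take a v1 answer. */
static int revision_acceptable(unsigned char peer_rev, unsigned char local_max)
{
	return peer_rev <= local_max;
}
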
@@ -938,13 +1057,66 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
938 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | 1057 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; |
939 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | 1058 | ep->mpa_attr.recv_marker_enabled = markers_enabled; |
940 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | 1059 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; |
941 | ep->mpa_attr.version = mpa_rev; | 1060 | ep->mpa_attr.version = mpa->revision; |
942 | ep->mpa_attr.p2p_type = peer2peer ? p2p_type : | 1061 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; |
943 | FW_RI_INIT_P2PTYPE_DISABLED; | 1062 | |
1063 | if (mpa->revision == 2) { | ||
1064 | ep->mpa_attr.enhanced_rdma_conn = | ||
1065 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | ||
1066 | if (ep->mpa_attr.enhanced_rdma_conn) { | ||
1067 | mpa_v2_params = (struct mpa_v2_conn_params *) | ||
1068 | (ep->mpa_pkt + sizeof(*mpa)); | ||
1069 | resp_ird = ntohs(mpa_v2_params->ird) & | ||
1070 | MPA_V2_IRD_ORD_MASK; | ||
1071 | resp_ord = ntohs(mpa_v2_params->ord) & | ||
1072 | MPA_V2_IRD_ORD_MASK; | ||
1073 | |||
1074 | /* | ||
1075 | * This is a double-check. Ideally, the checks below are | ||
1076 | * not required, since the IRD/ORD negotiation has already | ||
1077 | * been handled in c4iw_accept_cr(). | ||
1078 | */ | ||
1079 | if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { | ||
1080 | err = -ENOMEM; | ||
1081 | ep->ird = resp_ord; | ||
1082 | ep->ord = resp_ird; | ||
1083 | insuff_ird = 1; | ||
1084 | } | ||
1085 | |||
1086 | if (ntohs(mpa_v2_params->ird) & | ||
1087 | MPA_V2_PEER2PEER_MODEL) { | ||
1088 | if (ntohs(mpa_v2_params->ord) & | ||
1089 | MPA_V2_RDMA_WRITE_RTR) | ||
1090 | ep->mpa_attr.p2p_type = | ||
1091 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | ||
1092 | else if (ntohs(mpa_v2_params->ord) & | ||
1093 | MPA_V2_RDMA_READ_RTR) | ||
1094 | ep->mpa_attr.p2p_type = | ||
1095 | FW_RI_INIT_P2PTYPE_READ_REQ; | ||
1096 | } | ||
1097 | } | ||
1098 | } else if (mpa->revision == 1) | ||
1099 | if (peer2peer) | ||
1100 | ep->mpa_attr.p2p_type = p2p_type; | ||
1101 | |||
944 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " | 1102 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
945 | "xmit_marker_enabled=%d, version=%d\n", __func__, | 1103 | "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " |
946 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | 1104 | "%d\n", __func__, ep->mpa_attr.crc_enabled, |
947 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); | 1105 | ep->mpa_attr.recv_marker_enabled, |
1106 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | ||
1107 | ep->mpa_attr.p2p_type, p2p_type); | ||
1108 | |||
1109 | /* | ||
1110 | * If the responder's RTR does not match the initiator's, assign | ||
1111 | * FW_RI_INIT_P2PTYPE_DISABLED in the MPA attributes so that no RTR | ||
1112 | * is generated when moving the QP to RTS state. | ||
1113 | * A TERM message will be sent after the QP has moved to RTS state. | ||
1114 | */ | ||
1115 | if ((ep->mpa_attr.version == 2) && | ||
1116 | (ep->mpa_attr.p2p_type != p2p_type)) { | ||
1117 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | ||
1118 | rtr_mismatch = 1; | ||
1119 | } | ||
948 | 1120 | ||
949 | attrs.mpa_attr = ep->mpa_attr; | 1121 | attrs.mpa_attr = ep->mpa_attr; |
950 | attrs.max_ird = ep->ird; | 1122 | attrs.max_ird = ep->ird; |
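
The sufficiency check in the hunk above compares the two directions asymmetrically: reads issued by the responder (its ORD) must fit within the local IRD, and reads issued locally (our ORD) must fit within the responder's IRD; on failure the endpoint adopts the responder's values and flags insuff_ird so a TERM goes out later. A standalone restatement (illustrative):

#include <stdint.h>

/* Initiator-side sufficiency rule from the hunk above:
 *  - the responder may issue resp_ord reads at us, so ird must cover it;
 *  - we may issue ord reads, so the responder's resp_ird must cover that. */
static int ird_ord_insufficient(uint16_t ird, uint16_t ord,
				uint16_t resp_ird, uint16_t resp_ord)
{
	return ird < resp_ord || ord > resp_ird;
}
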
@@ -961,6 +1133,39 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
961 | ep->com.qp, mask, &attrs, 1); | 1133 | ep->com.qp, mask, &attrs, 1); |
962 | if (err) | 1134 | if (err) |
963 | goto err; | 1135 | goto err; |
1136 | |||
1137 | /* | ||
1138 | * If responder's RTR requirement did not match with what initiator | ||
1139 | * supports, generate TERM message | ||
1140 | */ | ||
1141 | if (rtr_mismatch) { | ||
1142 | printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); | ||
1143 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | ||
1144 | attrs.ecode = MPA_NOMATCH_RTR; | ||
1145 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | ||
1146 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1147 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | ||
1148 | err = -ENOMEM; | ||
1149 | goto out; | ||
1150 | } | ||
1151 | |||
1152 | /* | ||
1153 | * Generate a TERM if the initiator's IRD is insufficient for the | ||
1154 | * responder-provided ORD. Currently, we behave the same way even | ||
1155 | * when the responder-provided IRD is insufficient for the | ||
1156 | * initiator's ORD. | ||
1157 | */ | ||
1158 | if (insuff_ird) { | ||
1159 | printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", | ||
1160 | __func__); | ||
1161 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | ||
1162 | attrs.ecode = MPA_INSUFF_IRD; | ||
1163 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | ||
1164 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1165 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | ||
1166 | err = -ENOMEM; | ||
1167 | goto out; | ||
1168 | } | ||
964 | goto out; | 1169 | goto out; |
965 | err: | 1170 | err: |
966 | state_set(&ep->com, ABORTING); | 1171 | state_set(&ep->com, ABORTING); |
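
Both TERM paths added above follow one pattern: stash the MPA layer/etype and error code in the QP attributes and drive the QP to TERMINATE, so that post_terminate() (see the qp.c hunk later in this patch) emits the stashed codes instead of deriving them from a CQE. Reduced to its essentials (simplified types; the real transition is made via c4iw_modify_qp()):

#include <stdint.h>

#define MPA_INSUFF_IRD	0x06	/* ecodes added in iw_cxgb4.h by this patch */
#define MPA_NOMATCH_RTR	0x07

struct term_attrs {		/* simplified view of c4iw_qp_attributes */
	uint8_t layer_etype;
	uint8_t ecode;
	int	next_state;
};

/* Record the TERM codes and request the TERMINATE transition; the
 * stashed codes are later copied into the wire message by post_terminate(). */
static void prep_mpa_term(struct term_attrs *a, uint8_t layer_etype,
			  uint8_t ecode, int terminate_state)
{
	a->layer_etype = layer_etype;	/* LAYER_MPA | DDP_LLP in the patch */
	a->ecode = ecode;		/* MPA_NOMATCH_RTR or MPA_INSUFF_IRD */
	a->next_state = terminate_state;
}
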
@@ -973,6 +1178,7 @@ out: | |||
973 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | 1178 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) |
974 | { | 1179 | { |
975 | struct mpa_message *mpa; | 1180 | struct mpa_message *mpa; |
1181 | struct mpa_v2_conn_params *mpa_v2_params; | ||
976 | u16 plen; | 1182 | u16 plen; |
977 | 1183 | ||
978 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1184 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
@@ -1013,7 +1219,9 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1013 | /* | 1219 | /* |
1014 | * Validate MPA Header. | 1220 | * Validate MPA Header. |
1015 | */ | 1221 | */ |
1016 | if (mpa->revision != mpa_rev) { | 1222 | if (mpa->revision > mpa_rev) { |
1223 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | ||
1224 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | ||
1017 | abort_connection(ep, skb, GFP_KERNEL); | 1225 | abort_connection(ep, skb, GFP_KERNEL); |
1018 | return; | 1226 | return; |
1019 | } | 1227 | } |
@@ -1056,9 +1264,37 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1056 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | 1264 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; |
1057 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | 1265 | ep->mpa_attr.recv_marker_enabled = markers_enabled; |
1058 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | 1266 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; |
1059 | ep->mpa_attr.version = mpa_rev; | 1267 | ep->mpa_attr.version = mpa->revision; |
1060 | ep->mpa_attr.p2p_type = peer2peer ? p2p_type : | 1268 | if (mpa->revision == 1) |
1061 | FW_RI_INIT_P2PTYPE_DISABLED; | 1269 | ep->tried_with_mpa_v1 = 1; |
1270 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | ||
1271 | |||
1272 | if (mpa->revision == 2) { | ||
1273 | ep->mpa_attr.enhanced_rdma_conn = | ||
1274 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | ||
1275 | if (ep->mpa_attr.enhanced_rdma_conn) { | ||
1276 | mpa_v2_params = (struct mpa_v2_conn_params *) | ||
1277 | (ep->mpa_pkt + sizeof(*mpa)); | ||
1278 | ep->ird = ntohs(mpa_v2_params->ird) & | ||
1279 | MPA_V2_IRD_ORD_MASK; | ||
1280 | ep->ord = ntohs(mpa_v2_params->ord) & | ||
1281 | MPA_V2_IRD_ORD_MASK; | ||
1282 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) | ||
1283 | if (peer2peer) { | ||
1284 | if (ntohs(mpa_v2_params->ord) & | ||
1285 | MPA_V2_RDMA_WRITE_RTR) | ||
1286 | ep->mpa_attr.p2p_type = | ||
1287 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | ||
1288 | else if (ntohs(mpa_v2_params->ord) & | ||
1289 | MPA_V2_RDMA_READ_RTR) | ||
1290 | ep->mpa_attr.p2p_type = | ||
1291 | FW_RI_INIT_P2PTYPE_READ_REQ; | ||
1292 | } | ||
1293 | } | ||
1294 | } else if (mpa->revision == 1) | ||
1295 | if (peer2peer) | ||
1296 | ep->mpa_attr.p2p_type = p2p_type; | ||
1297 | |||
1062 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " | 1298 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
1063 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, | 1299 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, |
1064 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | 1300 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, |
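
The brace-less nested ifs in the process_mpa_request() hunk above gate the peer-requested peer-to-peer model on the local peer2peer module parameter. An equivalent flattened sketch (constants from this patch; the enum stands in for the firmware FW_RI_INIT_P2PTYPE_* values):

#include <arpa/inet.h>	/* ntohs */
#include <stdint.h>

#define MPA_V2_PEER2PEER_MODEL	0x8000
#define MPA_V2_RDMA_WRITE_RTR	0x8000
#define MPA_V2_RDMA_READ_RTR	0x4000

enum p2ptype { P2P_DISABLED, P2P_RDMA_WRITE, P2P_READ_REQ };

/* Peer-to-peer is honoured only if the peer requested it AND the local
 * module parameter allows it; the RTR kind then comes from the ord bits. */
static enum p2ptype request_p2p_type(uint16_t be_ird, uint16_t be_ord,
				     int peer2peer)
{
	uint16_t ird = ntohs(be_ird), ord = ntohs(be_ord);

	if (!(ird & MPA_V2_PEER2PEER_MODEL) || !peer2peer)
		return P2P_DISABLED;
	if (ord & MPA_V2_RDMA_WRITE_RTR)
		return P2P_RDMA_WRITE;
	if (ord & MPA_V2_RDMA_READ_RTR)
		return P2P_READ_REQ;
	return P2P_DISABLED;
}
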
@@ -1550,6 +1786,112 @@ static int is_neg_adv_abort(unsigned int status) | |||
1550 | status == CPL_ERR_PERSIST_NEG_ADVICE; | 1786 | status == CPL_ERR_PERSIST_NEG_ADVICE; |
1551 | } | 1787 | } |
1552 | 1788 | ||
1789 | static int c4iw_reconnect(struct c4iw_ep *ep) | ||
1790 | { | ||
1791 | int err = 0; | ||
1792 | struct rtable *rt; | ||
1793 | struct net_device *pdev; | ||
1794 | struct neighbour *neigh; | ||
1795 | int step; | ||
1796 | |||
1797 | PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); | ||
1798 | init_timer(&ep->timer); | ||
1799 | |||
1800 | /* | ||
1801 | * Allocate an active TID to initiate a TCP connection. | ||
1802 | */ | ||
1803 | ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); | ||
1804 | if (ep->atid == -1) { | ||
1805 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | ||
1806 | err = -ENOMEM; | ||
1807 | goto fail2; | ||
1808 | } | ||
1809 | |||
1810 | /* find a route */ | ||
1811 | rt = find_route(ep->com.dev, | ||
1812 | ep->com.cm_id->local_addr.sin_addr.s_addr, | ||
1813 | ep->com.cm_id->remote_addr.sin_addr.s_addr, | ||
1814 | ep->com.cm_id->local_addr.sin_port, | ||
1815 | ep->com.cm_id->remote_addr.sin_port, 0); | ||
1816 | if (!rt) { | ||
1817 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | ||
1818 | err = -EHOSTUNREACH; | ||
1819 | goto fail3; | ||
1820 | } | ||
1821 | ep->dst = &rt->dst; | ||
1822 | |||
1823 | neigh = dst_get_neighbour(ep->dst); | ||
1824 | |||
1825 | /* get a l2t entry */ | ||
1826 | if (neigh->dev->flags & IFF_LOOPBACK) { | ||
1827 | PDBG("%s LOOPBACK\n", __func__); | ||
1828 | pdev = ip_dev_find(&init_net, | ||
1829 | ep->com.cm_id->remote_addr.sin_addr.s_addr); | ||
1830 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | ||
1831 | neigh, pdev, 0); | ||
1832 | ep->mtu = pdev->mtu; | ||
1833 | ep->tx_chan = cxgb4_port_chan(pdev); | ||
1834 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; | ||
1835 | step = ep->com.dev->rdev.lldi.ntxq / | ||
1836 | ep->com.dev->rdev.lldi.nchan; | ||
1837 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | ||
1838 | step = ep->com.dev->rdev.lldi.nrxq / | ||
1839 | ep->com.dev->rdev.lldi.nchan; | ||
1840 | ep->ctrlq_idx = cxgb4_port_idx(pdev); | ||
1841 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | ||
1842 | cxgb4_port_idx(pdev) * step]; | ||
1843 | dev_put(pdev); | ||
1844 | } else { | ||
1845 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | ||
1846 | neigh, neigh->dev, 0); | ||
1847 | ep->mtu = dst_mtu(ep->dst); | ||
1848 | ep->tx_chan = cxgb4_port_chan(neigh->dev); | ||
1849 | ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; | ||
1850 | step = ep->com.dev->rdev.lldi.ntxq / | ||
1851 | ep->com.dev->rdev.lldi.nchan; | ||
1852 | ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; | ||
1853 | ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); | ||
1854 | step = ep->com.dev->rdev.lldi.nrxq / | ||
1855 | ep->com.dev->rdev.lldi.nchan; | ||
1856 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | ||
1857 | cxgb4_port_idx(neigh->dev) * step]; | ||
1858 | } | ||
1859 | if (!ep->l2t) { | ||
1860 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | ||
1861 | err = -ENOMEM; | ||
1862 | goto fail4; | ||
1863 | } | ||
1864 | |||
1865 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | ||
1866 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | ||
1867 | ep->l2t->idx); | ||
1868 | |||
1869 | state_set(&ep->com, CONNECTING); | ||
1870 | ep->tos = 0; | ||
1871 | |||
1872 | /* send connect request to rnic */ | ||
1873 | err = send_connect(ep); | ||
1874 | if (!err) | ||
1875 | goto out; | ||
1876 | |||
1877 | cxgb4_l2t_release(ep->l2t); | ||
1878 | fail4: | ||
1879 | dst_release(ep->dst); | ||
1880 | fail3: | ||
1881 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | ||
1882 | fail2: | ||
1883 | /* | ||
1884 | * Remember to send a notification to the upper layer. | ||
1885 | * We reach here only on failure; the upper layer is not aware | ||
1886 | * that this is a reconnect attempt, so it is still waiting for | ||
1887 | * the response to the first connect request. | ||
1888 | */ | ||
1889 | connect_reply_upcall(ep, -ECONNRESET); | ||
1890 | c4iw_put_ep(&ep->com); | ||
1891 | out: | ||
1892 | return err; | ||
1893 | } | ||
1894 | |||
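
c4iw_reconnect() above acquires its resources in the order atid, route, l2t entry, and the fail2/fail3/fail4 labels unwind them in exact reverse order, with a send_connect() failure releasing the l2t entry first and then falling into the same chain. The control-flow shape, abstracted into compilable stubs (names illustrative):

#include <errno.h>

/* Compilable stand-ins; the real calls are cxgb4_alloc_atid(),
 * find_route(), cxgb4_l2t_get() and send_connect(). */
static int alloc_atid(void)		{ return 0; }
static int find_route_stub(void)	{ return 0; }
static int get_l2t(void)		{ return 0; }
static int send_connect_stub(void)	{ return 0; }
static void release_l2t(void)		{}
static void release_dst(void)		{}
static void free_atid(void)		{}
static void notify_upper_layer(int err)	{ (void)err; }

static int reconnect_shape(void)
{
	int err;

	if ((err = alloc_atid()))	/* failure: nothing to undo */
		goto fail2;
	if ((err = find_route_stub()))	/* failure: free the atid */
		goto fail3;
	if ((err = get_l2t()))		/* failure: also release the route */
		goto fail4;
	if (!(err = send_connect_stub()))
		return 0;

	release_l2t();			/* send failed: unwind everything */
fail4:
	release_dst();
fail3:
	free_atid();
fail2:
	notify_upper_layer(-ECONNRESET); /* ULP still awaits the 1st reply */
	return err;
}
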
1553 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | 1895 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) |
1554 | { | 1896 | { |
1555 | struct cpl_abort_req_rss *req = cplhdr(skb); | 1897 | struct cpl_abort_req_rss *req = cplhdr(skb); |
@@ -1573,8 +1915,11 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1573 | 1915 | ||
1574 | /* | 1916 | /* |
1575 | * Wake up any threads in rdma_init() or rdma_fini(). | 1917 | * Wake up any threads in rdma_init() or rdma_fini(). |
1918 | * However, this is not needed if the com state is still | ||
1919 | * MPA_REQ_SENT. | ||
1576 | */ | 1920 | */ |
1577 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | 1921 | if (ep->com.state != MPA_REQ_SENT) |
1922 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | ||
1578 | 1923 | ||
1579 | mutex_lock(&ep->com.mutex); | 1924 | mutex_lock(&ep->com.mutex); |
1580 | switch (ep->com.state) { | 1925 | switch (ep->com.state) { |
@@ -1585,7 +1930,21 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1585 | break; | 1930 | break; |
1586 | case MPA_REQ_SENT: | 1931 | case MPA_REQ_SENT: |
1587 | stop_ep_timer(ep); | 1932 | stop_ep_timer(ep); |
1588 | connect_reply_upcall(ep, -ECONNRESET); | 1933 | if (mpa_rev == 2 && ep->tried_with_mpa_v1) |
1934 | connect_reply_upcall(ep, -ECONNRESET); | ||
1935 | else { | ||
1936 | /* | ||
1937 | * We deliberately don't send a notification upwards, because | ||
1938 | * we want to retry with MPA v1 without the upper layers ever | ||
1939 | * knowing about it. | ||
1940 | * | ||
1941 | * Do some housekeeping so that the connection can be | ||
1942 | * re-initiated. | ||
1943 | */ | ||
1944 | PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, | ||
1945 | mpa_rev); | ||
1946 | ep->retry_with_mpa_v1 = 1; | ||
1947 | } | ||
1589 | break; | 1948 | break; |
1590 | case MPA_REP_SENT: | 1949 | case MPA_REP_SENT: |
1591 | break; | 1950 | break; |
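
The MPA_REQ_SENT arm above is the heart of the version fallback: an abort of a connection that already retried with v1 is reported to the ULP, while an abort of the first (v2) attempt is swallowed and merely arms retry_with_mpa_v1; the actual teardown and reconnect happen in the later "out:" hunk, which is also why release is suppressed in that case. A mirror of the branch (stub names; shape only):

#include <errno.h>

static void report_connect_error(int err) { (void)err; } /* connect_reply_upcall() stand-in */

/* Mirror of the MPA_REQ_SENT branch above. */
static void on_peer_abort_req_sent(int mpa_rev, int tried_with_mpa_v1,
				   int *retry_with_mpa_v1)
{
	if (mpa_rev == 2 && tried_with_mpa_v1)
		report_connect_error(-ECONNRESET); /* v1 retry failed too */
	else
		*retry_with_mpa_v1 = 1;	/* silent downgrade; reconnect later */
}
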
@@ -1621,7 +1980,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1621 | dst_confirm(ep->dst); | 1980 | dst_confirm(ep->dst); |
1622 | if (ep->com.state != ABORTING) { | 1981 | if (ep->com.state != ABORTING) { |
1623 | __state_set(&ep->com, DEAD); | 1982 | __state_set(&ep->com, DEAD); |
1624 | release = 1; | 1983 | /* we don't release if we want to retry with mpa_v1 */ |
1984 | if (!ep->retry_with_mpa_v1) | ||
1985 | release = 1; | ||
1625 | } | 1986 | } |
1626 | mutex_unlock(&ep->com.mutex); | 1987 | mutex_unlock(&ep->com.mutex); |
1627 | 1988 | ||
@@ -1641,6 +2002,15 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1641 | out: | 2002 | out: |
1642 | if (release) | 2003 | if (release) |
1643 | release_ep_resources(ep); | 2004 | release_ep_resources(ep); |
2005 | |||
2006 | /* retry with mpa-v1 */ | ||
2007 | if (ep && ep->retry_with_mpa_v1) { | ||
2008 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | ||
2009 | dst_release(ep->dst); | ||
2010 | cxgb4_l2t_release(ep->l2t); | ||
2011 | c4iw_reconnect(ep); | ||
2012 | } | ||
2013 | |||
1644 | return 0; | 2014 | return 0; |
1645 | } | 2015 | } |
1646 | 2016 | ||
@@ -1792,18 +2162,40 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1792 | goto err; | 2162 | goto err; |
1793 | } | 2163 | } |
1794 | 2164 | ||
1795 | cm_id->add_ref(cm_id); | 2165 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
1796 | ep->com.cm_id = cm_id; | 2166 | if (conn_param->ord > ep->ird) { |
1797 | ep->com.qp = qp; | 2167 | ep->ird = conn_param->ird; |
2168 | ep->ord = conn_param->ord; | ||
2169 | send_mpa_reject(ep, conn_param->private_data, | ||
2170 | conn_param->private_data_len); | ||
2171 | abort_connection(ep, NULL, GFP_KERNEL); | ||
2172 | err = -ENOMEM; | ||
2173 | goto err; | ||
2174 | } | ||
2175 | if (conn_param->ird > ep->ord) { | ||
2176 | if (!ep->ord) | ||
2177 | conn_param->ird = 1; | ||
2178 | else { | ||
2179 | abort_connection(ep, NULL, GFP_KERNEL); | ||
2180 | err = -ENOMEM; | ||
2181 | goto err; | ||
2182 | } | ||
2183 | } | ||
1798 | 2184 | ||
2185 | } | ||
1799 | ep->ird = conn_param->ird; | 2186 | ep->ird = conn_param->ird; |
1800 | ep->ord = conn_param->ord; | 2187 | ep->ord = conn_param->ord; |
1801 | 2188 | ||
1802 | if (peer2peer && ep->ird == 0) | 2189 | if (ep->mpa_attr.version != 2) |
1803 | ep->ird = 1; | 2190 | if (peer2peer && ep->ird == 0) |
2191 | ep->ird = 1; | ||
1804 | 2192 | ||
1805 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | 2193 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); |
1806 | 2194 | ||
2195 | cm_id->add_ref(cm_id); | ||
2196 | ep->com.cm_id = cm_id; | ||
2197 | ep->com.qp = qp; | ||
2198 | |||
1807 | /* bind QP to EP and move to RTS */ | 2199 | /* bind QP to EP and move to RTS */ |
1808 | attrs.mpa_attr = ep->mpa_attr; | 2200 | attrs.mpa_attr = ep->mpa_attr; |
1809 | attrs.max_ird = ep->ird; | 2201 | attrs.max_ird = ep->ird; |
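
The accept-side checks above enforce the v2 resource rule against the peer's advertised values (stored into ep->ird/ep->ord by process_mpa_request()): the local ORD may not exceed the peer's IRD, and a local IRD larger than the peer's ORD is tolerated only when that ORD is zero, by clamping the IRD to 1; the first violation additionally sends an MPA reject carrying the private data before aborting. A standalone restatement of the rule (illustrative; the patch reports both violations as -ENOMEM):

#include <errno.h>
#include <stdint.h>

/* peer_ird/peer_ord are the values the peer advertised in its request. */
static int validate_accept_ird_ord(uint32_t *our_ird, uint32_t our_ord,
				   uint32_t peer_ird, uint32_t peer_ord)
{
	if (our_ord > peer_ird)
		return -ENOMEM;		/* peer cannot absorb our reads */
	if (*our_ird > peer_ord) {
		if (peer_ord == 0)
			*our_ird = 1;	/* special case in the patch */
		else
			return -ENOMEM;
	}
	return 0;
}
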
@@ -1944,6 +2336,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1944 | ep->com.dev->rdev.lldi.nchan; | 2336 | ep->com.dev->rdev.lldi.nchan; |
1945 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | 2337 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ |
1946 | cxgb4_port_idx(neigh->dev) * step]; | 2338 | cxgb4_port_idx(neigh->dev) * step]; |
2339 | ep->retry_with_mpa_v1 = 0; | ||
2340 | ep->tried_with_mpa_v1 = 0; | ||
1947 | } | 2341 | } |
1948 | if (!ep->l2t) { | 2342 | if (!ep->l2t) { |
1949 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | 2343 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
@@ -2323,8 +2717,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2323 | 2717 | ||
2324 | /* | 2718 | /* |
2325 | * Wake up any threads in rdma_init() or rdma_fini(). | 2719 | * Wake up any threads in rdma_init() or rdma_fini(). |
2720 | * However, this is not needed if the com state is still | ||
2721 | * MPA_REQ_SENT. | ||
2326 | */ | 2722 | */ |
2327 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | 2723 | if (ep->com.state != MPA_REQ_SENT) |
2724 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | ||
2328 | sched(dev, skb); | 2725 | sched(dev, skb); |
2329 | return 0; | 2726 | return 0; |
2330 | } | 2727 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 1720dc790d13..f35a935267e7 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -185,7 +185,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) | |||
185 | V_CQE_OPCODE(FW_RI_SEND) | | 185 | V_CQE_OPCODE(FW_RI_SEND) | |
186 | V_CQE_TYPE(0) | | 186 | V_CQE_TYPE(0) | |
187 | V_CQE_SWCQE(1) | | 187 | V_CQE_SWCQE(1) | |
188 | V_CQE_QPID(wq->rq.qid)); | 188 | V_CQE_QPID(wq->sq.qid)); |
189 | cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); | 189 | cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); |
190 | cq->sw_queue[cq->sw_pidx] = cqe; | 190 | cq->sw_queue[cq->sw_pidx] = cqe; |
191 | t4_swcq_produce(cq); | 191 | t4_swcq_produce(cq); |
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
818 | chp->cq.size--; /* status page */ | 818 | chp->cq.size--; /* status page */ |
819 | chp->ibcq.cqe = entries - 2; | 819 | chp->ibcq.cqe = entries - 2; |
820 | spin_lock_init(&chp->lock); | 820 | spin_lock_init(&chp->lock); |
821 | spin_lock_init(&chp->comp_handler_lock); | ||
821 | atomic_set(&chp->refcnt, 1); | 822 | atomic_set(&chp->refcnt, 1); |
822 | init_waitqueue_head(&chp->wait); | 823 | init_waitqueue_head(&chp->wait); |
823 | ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); | 824 | ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 40a13cc633a3..6d0df6ec161b 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -376,10 +376,8 @@ struct uld_ctx { | |||
376 | struct c4iw_dev *dev; | 376 | struct c4iw_dev *dev; |
377 | }; | 377 | }; |
378 | 378 | ||
379 | static void c4iw_remove(struct uld_ctx *ctx) | 379 | static void c4iw_dealloc(struct uld_ctx *ctx) |
380 | { | 380 | { |
381 | PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); | ||
382 | c4iw_unregister_device(ctx->dev); | ||
383 | c4iw_rdev_close(&ctx->dev->rdev); | 381 | c4iw_rdev_close(&ctx->dev->rdev); |
384 | idr_destroy(&ctx->dev->cqidr); | 382 | idr_destroy(&ctx->dev->cqidr); |
385 | idr_destroy(&ctx->dev->qpidr); | 383 | idr_destroy(&ctx->dev->qpidr); |
@@ -389,11 +387,30 @@ static void c4iw_remove(struct uld_ctx *ctx) | |||
389 | ctx->dev = NULL; | 387 | ctx->dev = NULL; |
390 | } | 388 | } |
391 | 389 | ||
390 | static void c4iw_remove(struct uld_ctx *ctx) | ||
391 | { | ||
392 | PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); | ||
393 | c4iw_unregister_device(ctx->dev); | ||
394 | c4iw_dealloc(ctx); | ||
395 | } | ||
396 | |||
397 | static int rdma_supported(const struct cxgb4_lld_info *infop) | ||
398 | { | ||
399 | return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && | ||
400 | infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && | ||
401 | infop->vr->cq.size > 0 && infop->vr->ocq.size > 0; | ||
402 | } | ||
403 | |||
392 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | 404 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) |
393 | { | 405 | { |
394 | struct c4iw_dev *devp; | 406 | struct c4iw_dev *devp; |
395 | int ret; | 407 | int ret; |
396 | 408 | ||
409 | if (!rdma_supported(infop)) { | ||
410 | printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n", | ||
411 | pci_name(infop->pdev)); | ||
412 | return ERR_PTR(-ENOSYS); | ||
413 | } | ||
397 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); | 414 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); |
398 | if (!devp) { | 415 | if (!devp) { |
399 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); | 416 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); |
@@ -414,7 +431,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
414 | 431 | ||
415 | ret = c4iw_rdev_open(&devp->rdev); | 432 | ret = c4iw_rdev_open(&devp->rdev); |
416 | if (ret) { | 433 | if (ret) { |
417 | mutex_unlock(&dev_mutex); | ||
418 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); | 434 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); |
419 | ib_dealloc_device(&devp->ibdev); | 435 | ib_dealloc_device(&devp->ibdev); |
420 | return ERR_PTR(ret); | 436 | return ERR_PTR(ret); |
@@ -519,15 +535,24 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) | |||
519 | case CXGB4_STATE_UP: | 535 | case CXGB4_STATE_UP: |
520 | printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); | 536 | printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); |
521 | if (!ctx->dev) { | 537 | if (!ctx->dev) { |
522 | int ret = 0; | 538 | int ret; |
523 | 539 | ||
524 | ctx->dev = c4iw_alloc(&ctx->lldi); | 540 | ctx->dev = c4iw_alloc(&ctx->lldi); |
525 | if (!IS_ERR(ctx->dev)) | 541 | if (IS_ERR(ctx->dev)) { |
526 | ret = c4iw_register_device(ctx->dev); | 542 | printk(KERN_ERR MOD |
527 | if (IS_ERR(ctx->dev) || ret) | 543 | "%s: initialization failed: %ld\n", |
544 | pci_name(ctx->lldi.pdev), | ||
545 | PTR_ERR(ctx->dev)); | ||
546 | ctx->dev = NULL; | ||
547 | break; | ||
548 | } | ||
549 | ret = c4iw_register_device(ctx->dev); | ||
550 | if (ret) { | ||
528 | printk(KERN_ERR MOD | 551 | printk(KERN_ERR MOD |
529 | "%s: RDMA registration failed: %d\n", | 552 | "%s: RDMA registration failed: %d\n", |
530 | pci_name(ctx->lldi.pdev), ret); | 553 | pci_name(ctx->lldi.pdev), ret); |
554 | c4iw_dealloc(ctx); | ||
555 | } | ||
531 | } | 556 | } |
532 | break; | 557 | break; |
533 | case CXGB4_STATE_DOWN: | 558 | case CXGB4_STATE_DOWN: |
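
rdma_supported(), introduced above, refuses to bind iw_cxgb4 to a function whose firmware configuration left any of the RDMA virtual-resource regions empty; c4iw_alloc() then fails with -ENOSYS and the CXGB4_STATE_UP handler leaves ctx->dev NULL instead of half-initializing. The predicate, restated standalone (field names mirror the lldi.vr layout used in the hunk):

#include <stdbool.h>

struct vr_stub { unsigned int size; };
struct vr_regions {	/* mirrors the lldi.vr fields used above */
	struct vr_stub stag, pbl, rq, qp, cq, ocq;
};

/* Same predicate as rdma_supported(): every region must be non-empty. */
static bool rdma_supported_sketch(const struct vr_regions *v)
{
	return v->stag.size && v->pbl.size && v->rq.size &&
	       v->qp.size && v->cq.size && v->ocq.size;
}
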
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index c13041a0aeba..397cb36cf103 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c | |||
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, | |||
42 | { | 42 | { |
43 | struct ib_event event; | 43 | struct ib_event event; |
44 | struct c4iw_qp_attributes attrs; | 44 | struct c4iw_qp_attributes attrs; |
45 | unsigned long flag; | ||
45 | 46 | ||
46 | if ((qhp->attr.state == C4IW_QP_STATE_ERROR) || | 47 | if ((qhp->attr.state == C4IW_QP_STATE_ERROR) || |
47 | (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { | 48 | (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { |
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, | |||
72 | if (qhp->ibqp.event_handler) | 73 | if (qhp->ibqp.event_handler) |
73 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); | 74 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); |
74 | 75 | ||
76 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
75 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 77 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
78 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | ||
76 | } | 79 | } |
77 | 80 | ||
78 | void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) | 81 | void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) |
@@ -183,11 +186,14 @@ out: | |||
183 | int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) | 186 | int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) |
184 | { | 187 | { |
185 | struct c4iw_cq *chp; | 188 | struct c4iw_cq *chp; |
189 | unsigned long flag; | ||
186 | 190 | ||
187 | chp = get_chp(dev, qid); | 191 | chp = get_chp(dev, qid); |
188 | if (chp) | 192 | if (chp) { |
193 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
189 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 194 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
190 | else | 195 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); |
196 | } else | ||
191 | PDBG("%s unknown cqid 0x%x\n", __func__, qid); | 197 | PDBG("%s unknown cqid 0x%x\n", __func__, qid); |
192 | return 0; | 198 | return 0; |
193 | } | 199 | } |
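
The new comp_handler_lock (initialized in the cq.c hunk earlier in this patch) is now held around every invocation of ibcq.comp_handler: completion upcalls can fire from the interrupt event path above as well as from the QP flush paths in qp.c, and the lock serializes them per CQ. A userspace stand-in for the pattern (pthread spinlock in place of the kernel spinlock; initialization elided):

#include <pthread.h>

/* Every completion upcall, whatever context triggers it, is serialized
 * by a per-CQ lock. */
struct cq_stub {
	pthread_spinlock_t comp_handler_lock;	/* spinlock_t in the driver */
	void (*comp_handler)(struct cq_stub *cq, void *ctx);
	void *cq_context;
};

static void fire_comp_handler(struct cq_stub *chp)
{
	pthread_spin_lock(&chp->comp_handler_lock);   /* spin_lock_irqsave */
	chp->comp_handler(chp, chp->cq_context);
	pthread_spin_unlock(&chp->comp_handler_lock); /* spin_unlock_irqrestore */
}
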
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 4f045375c8e2..1357c5bf209b 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -309,6 +309,7 @@ struct c4iw_cq { | |||
309 | struct c4iw_dev *rhp; | 309 | struct c4iw_dev *rhp; |
310 | struct t4_cq cq; | 310 | struct t4_cq cq; |
311 | spinlock_t lock; | 311 | spinlock_t lock; |
312 | spinlock_t comp_handler_lock; | ||
312 | atomic_t refcnt; | 313 | atomic_t refcnt; |
313 | wait_queue_head_t wait; | 314 | wait_queue_head_t wait; |
314 | }; | 315 | }; |
@@ -323,6 +324,7 @@ struct c4iw_mpa_attributes { | |||
323 | u8 recv_marker_enabled; | 324 | u8 recv_marker_enabled; |
324 | u8 xmit_marker_enabled; | 325 | u8 xmit_marker_enabled; |
325 | u8 crc_enabled; | 326 | u8 crc_enabled; |
327 | u8 enhanced_rdma_conn; | ||
326 | u8 version; | 328 | u8 version; |
327 | u8 p2p_type; | 329 | u8 p2p_type; |
328 | }; | 330 | }; |
@@ -349,6 +351,8 @@ struct c4iw_qp_attributes { | |||
349 | u8 is_terminate_local; | 351 | u8 is_terminate_local; |
350 | struct c4iw_mpa_attributes mpa_attr; | 352 | struct c4iw_mpa_attributes mpa_attr; |
351 | struct c4iw_ep *llp_stream_handle; | 353 | struct c4iw_ep *llp_stream_handle; |
354 | u8 layer_etype; | ||
355 | u8 ecode; | ||
352 | }; | 356 | }; |
353 | 357 | ||
354 | struct c4iw_qp { | 358 | struct c4iw_qp { |
@@ -501,11 +505,18 @@ enum c4iw_mmid_state { | |||
501 | #define MPA_KEY_REP "MPA ID Rep Frame" | 505 | #define MPA_KEY_REP "MPA ID Rep Frame" |
502 | 506 | ||
503 | #define MPA_MAX_PRIVATE_DATA 256 | 507 | #define MPA_MAX_PRIVATE_DATA 256 |
508 | #define MPA_ENHANCED_RDMA_CONN 0x10 | ||
504 | #define MPA_REJECT 0x20 | 509 | #define MPA_REJECT 0x20 |
505 | #define MPA_CRC 0x40 | 510 | #define MPA_CRC 0x40 |
506 | #define MPA_MARKERS 0x80 | 511 | #define MPA_MARKERS 0x80 |
507 | #define MPA_FLAGS_MASK 0xE0 | 512 | #define MPA_FLAGS_MASK 0xE0 |
508 | 513 | ||
514 | #define MPA_V2_PEER2PEER_MODEL 0x8000 | ||
515 | #define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000 | ||
516 | #define MPA_V2_RDMA_WRITE_RTR 0x8000 | ||
517 | #define MPA_V2_RDMA_READ_RTR 0x4000 | ||
518 | #define MPA_V2_IRD_ORD_MASK 0x3FFF | ||
519 | |||
509 | #define c4iw_put_ep(ep) { \ | 520 | #define c4iw_put_ep(ep) { \ |
510 | PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ | 521 | PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ |
511 | ep, atomic_read(&((ep)->kref.refcount))); \ | 522 | ep, atomic_read(&((ep)->kref.refcount))); \ |
@@ -528,6 +539,11 @@ struct mpa_message { | |||
528 | u8 private_data[0]; | 539 | u8 private_data[0]; |
529 | }; | 540 | }; |
530 | 541 | ||
542 | struct mpa_v2_conn_params { | ||
543 | __be16 ird; | ||
544 | __be16 ord; | ||
545 | }; | ||
546 | |||
531 | struct terminate_message { | 547 | struct terminate_message { |
532 | u8 layer_etype; | 548 | u8 layer_etype; |
533 | u8 ecode; | 549 | u8 ecode; |
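
Taken together with mpa_v2_conn_params, the new defines describe two big-endian halfwords that each pack a 14-bit value plus two control bits, with bit 15 deliberately overloaded between the fields. A compact summary (mask values from the hunk above; the layout follows the MPA v2 enhanced-connection convention):

#include <stdint.h>

/* Bit layout implied by the defines above (fields are __be16 on the wire):
 *
 *   ird: [15] MPA_V2_PEER2PEER_MODEL   [14] MPA_V2_ZERO_LEN_FPDU_RTR
 *   ord: [15] MPA_V2_RDMA_WRITE_RTR    [14] MPA_V2_RDMA_READ_RTR
 *   both: [13:0] 14-bit value, i.e. MPA_V2_IRD_ORD_MASK (0x3FFF)
 *
 * Bit 15 is overloaded on purpose: "peer-to-peer model" in ird,
 * "RDMA Write RTR" in ord. */
static uint16_t mpa_v2_value(uint16_t host_order_field)
{
	return host_order_field & 0x3FFF;	/* MPA_V2_IRD_ORD_MASK */
}
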
@@ -580,7 +596,10 @@ enum c4iw_ddp_ecodes { | |||
580 | 596 | ||
581 | enum c4iw_mpa_ecodes { | 597 | enum c4iw_mpa_ecodes { |
582 | MPA_CRC_ERR = 0x02, | 598 | MPA_CRC_ERR = 0x02, |
583 | MPA_MARKER_ERR = 0x03 | 599 | MPA_MARKER_ERR = 0x03, |
600 | MPA_LOCAL_CATA = 0x05, | ||
601 | MPA_INSUFF_IRD = 0x06, | ||
602 | MPA_NOMATCH_RTR = 0x07, | ||
584 | }; | 603 | }; |
585 | 604 | ||
586 | enum c4iw_ep_state { | 605 | enum c4iw_ep_state { |
@@ -651,6 +670,8 @@ struct c4iw_ep { | |||
651 | u16 txq_idx; | 670 | u16 txq_idx; |
652 | u16 ctrlq_idx; | 671 | u16 ctrlq_idx; |
653 | u8 tos; | 672 | u8 tos; |
673 | u8 retry_with_mpa_v1; | ||
674 | u8 tried_with_mpa_v1; | ||
654 | }; | 675 | }; |
655 | 676 | ||
656 | static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) | 677 | static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index a41578e48c7b..d6ccc7e84802 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -917,7 +917,11 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, | |||
917 | wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; | 917 | wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; |
918 | wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); | 918 | wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); |
919 | term = (struct terminate_message *)wqe->u.terminate.termmsg; | 919 | term = (struct terminate_message *)wqe->u.terminate.termmsg; |
920 | build_term_codes(err_cqe, &term->layer_etype, &term->ecode); | 920 | if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { |
921 | term->layer_etype = qhp->attr.layer_etype; | ||
922 | term->ecode = qhp->attr.ecode; | ||
923 | } else | ||
924 | build_term_codes(err_cqe, &term->layer_etype, &term->ecode); | ||
921 | c4iw_ofld_send(&qhp->rhp->rdev, skb); | 925 | c4iw_ofld_send(&qhp->rhp->rdev, skb); |
922 | } | 926 | } |
923 | 927 | ||
@@ -941,8 +945,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
941 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); | 945 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); |
942 | spin_unlock(&qhp->lock); | 946 | spin_unlock(&qhp->lock); |
943 | spin_unlock_irqrestore(&rchp->lock, flag); | 947 | spin_unlock_irqrestore(&rchp->lock, flag); |
944 | if (flushed) | 948 | if (flushed) { |
949 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | ||
945 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 950 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
951 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
952 | } | ||
946 | 953 | ||
947 | /* locking hierarchy: cq lock first, then qp lock. */ | 954 | /* locking hierarchy: cq lock first, then qp lock. */ |
948 | spin_lock_irqsave(&schp->lock, flag); | 955 | spin_lock_irqsave(&schp->lock, flag); |
@@ -952,13 +959,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
952 | flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); | 959 | flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); |
953 | spin_unlock(&qhp->lock); | 960 | spin_unlock(&qhp->lock); |
954 | spin_unlock_irqrestore(&schp->lock, flag); | 961 | spin_unlock_irqrestore(&schp->lock, flag); |
955 | if (flushed) | 962 | if (flushed) { |
963 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | ||
956 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 964 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); |
965 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | ||
966 | } | ||
957 | } | 967 | } |
958 | 968 | ||
959 | static void flush_qp(struct c4iw_qp *qhp) | 969 | static void flush_qp(struct c4iw_qp *qhp) |
960 | { | 970 | { |
961 | struct c4iw_cq *rchp, *schp; | 971 | struct c4iw_cq *rchp, *schp; |
972 | unsigned long flag; | ||
962 | 973 | ||
963 | rchp = get_chp(qhp->rhp, qhp->attr.rcq); | 974 | rchp = get_chp(qhp->rhp, qhp->attr.rcq); |
964 | schp = get_chp(qhp->rhp, qhp->attr.scq); | 975 | schp = get_chp(qhp->rhp, qhp->attr.scq); |
@@ -966,8 +977,16 @@ static void flush_qp(struct c4iw_qp *qhp) | |||
966 | if (qhp->ibqp.uobject) { | 977 | if (qhp->ibqp.uobject) { |
967 | t4_set_wq_in_error(&qhp->wq); | 978 | t4_set_wq_in_error(&qhp->wq); |
968 | t4_set_cq_in_error(&rchp->cq); | 979 | t4_set_cq_in_error(&rchp->cq); |
969 | if (schp != rchp) | 980 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
981 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | ||
982 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
983 | if (schp != rchp) { | ||
970 | t4_set_cq_in_error(&schp->cq); | 984 | t4_set_cq_in_error(&schp->cq); |
985 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | ||
986 | (*schp->ibcq.comp_handler)(&schp->ibcq, | ||
987 | schp->ibcq.cq_context); | ||
988 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | ||
989 | } | ||
971 | return; | 990 | return; |
972 | } | 991 | } |
973 | __flush_qp(qhp, rchp, schp); | 992 | __flush_qp(qhp, rchp, schp); |
@@ -1012,6 +1031,7 @@ out: | |||
1012 | 1031 | ||
1013 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) | 1032 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) |
1014 | { | 1033 | { |
1034 | PDBG("%s p2p_type = %d\n", __func__, p2p_type); | ||
1015 | memset(&init->u, 0, sizeof init->u); | 1035 | memset(&init->u, 0, sizeof init->u); |
1016 | switch (p2p_type) { | 1036 | switch (p2p_type) { |
1017 | case FW_RI_INIT_P2PTYPE_RDMA_WRITE: | 1037 | case FW_RI_INIT_P2PTYPE_RDMA_WRITE: |
@@ -1206,12 +1226,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1206 | disconnect = 1; | 1226 | disconnect = 1; |
1207 | c4iw_get_ep(&qhp->ep->com); | 1227 | c4iw_get_ep(&qhp->ep->com); |
1208 | } | 1228 | } |
1229 | if (qhp->ibqp.uobject) | ||
1230 | t4_set_wq_in_error(&qhp->wq); | ||
1209 | ret = rdma_fini(rhp, qhp, ep); | 1231 | ret = rdma_fini(rhp, qhp, ep); |
1210 | if (ret) | 1232 | if (ret) |
1211 | goto err; | 1233 | goto err; |
1212 | break; | 1234 | break; |
1213 | case C4IW_QP_STATE_TERMINATE: | 1235 | case C4IW_QP_STATE_TERMINATE: |
1214 | set_state(qhp, C4IW_QP_STATE_TERMINATE); | 1236 | set_state(qhp, C4IW_QP_STATE_TERMINATE); |
1237 | qhp->attr.layer_etype = attrs->layer_etype; | ||
1238 | qhp->attr.ecode = attrs->ecode; | ||
1215 | if (qhp->ibqp.uobject) | 1239 | if (qhp->ibqp.uobject) |
1216 | t4_set_wq_in_error(&qhp->wq); | 1240 | t4_set_wq_in_error(&qhp->wq); |
1217 | ep = qhp->ep; | 1241 | ep = qhp->ep; |
@@ -1222,6 +1246,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1222 | break; | 1246 | break; |
1223 | case C4IW_QP_STATE_ERROR: | 1247 | case C4IW_QP_STATE_ERROR: |
1224 | set_state(qhp, C4IW_QP_STATE_ERROR); | 1248 | set_state(qhp, C4IW_QP_STATE_ERROR); |
1249 | if (qhp->ibqp.uobject) | ||
1250 | t4_set_wq_in_error(&qhp->wq); | ||
1225 | if (!internal) { | 1251 | if (!internal) { |
1226 | abort = 1; | 1252 | abort = 1; |
1227 | disconnect = 1; | 1253 | disconnect = 1; |
@@ -1334,7 +1360,10 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) | |||
1334 | rhp = qhp->rhp; | 1360 | rhp = qhp->rhp; |
1335 | 1361 | ||
1336 | attrs.next_state = C4IW_QP_STATE_ERROR; | 1362 | attrs.next_state = C4IW_QP_STATE_ERROR; |
1337 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1363 | if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) |
1364 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1365 | else | ||
1366 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | ||
1338 | wait_event(qhp->wait, !qhp->ep); | 1367 | wait_event(qhp->wait, !qhp->ep); |
1339 | 1368 | ||
1340 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); | 1369 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); |
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index d9b1bb40f480..818d721fc448 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c | |||
@@ -125,7 +125,7 @@ int ehca_create_eq(struct ehca_shca *shca, | |||
125 | tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); | 125 | tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); |
126 | 126 | ||
127 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, | 127 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, |
128 | IRQF_DISABLED, "ehca_eq", | 128 | 0, "ehca_eq", |
129 | (void *)shca); | 129 | (void *)shca); |
130 | if (ret < 0) | 130 | if (ret < 0) |
131 | ehca_err(ib_dev, "Can't map interrupt handler."); | 131 | ehca_err(ib_dev, "Can't map interrupt handler."); |
@@ -133,7 +133,7 @@ int ehca_create_eq(struct ehca_shca *shca, | |||
133 | tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); | 133 | tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); |
134 | 134 | ||
135 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, | 135 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, |
136 | IRQF_DISABLED, "ehca_neq", | 136 | 0, "ehca_neq", |
137 | (void *)shca); | 137 | (void *)shca); |
138 | if (ret < 0) | 138 | if (ret < 0) |
139 | ehca_err(ib_dev, "Can't map interrupt handler."); | 139 | ehca_err(ib_dev, "Can't map interrupt handler."); |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 32fb34201aba..964f85520798 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -977,6 +977,9 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd, | |||
977 | struct hcp_modify_qp_control_block *mqpcb; | 977 | struct hcp_modify_qp_control_block *mqpcb; |
978 | u64 hret, update_mask; | 978 | u64 hret, update_mask; |
979 | 979 | ||
980 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) | ||
981 | return ERR_PTR(-ENOSYS); | ||
982 | |||
980 | /* For common attributes, internal_create_qp() takes its info | 983 | /* For common attributes, internal_create_qp() takes its info |
981 | * out of qp_init_attr, so copy all common attrs there. | 984 | * out of qp_init_attr, so copy all common attrs there. |
982 | */ | 985 | */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 7c1eebe8c7c9..824a4d508836 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | #include <linux/netdevice.h> | 35 | #include <linux/netdevice.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <linux/stat.h> | ||
37 | #include <linux/vmalloc.h> | 38 | #include <linux/vmalloc.h> |
38 | 39 | ||
39 | #include "ipath_kernel.h" | 40 | #include "ipath_kernel.h" |
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index 386e2c717c53..26271984b717 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c | |||
@@ -107,6 +107,11 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | |||
107 | u32 sz; | 107 | u32 sz; |
108 | struct ib_srq *ret; | 108 | struct ib_srq *ret; |
109 | 109 | ||
110 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { | ||
111 | ret = ERR_PTR(-ENOSYS); | ||
112 | goto done; | ||
113 | } | ||
114 | |||
110 | if (srq_init_attr->attr.max_wr == 0) { | 115 | if (srq_init_attr->attr.max_wr == 0) { |
111 | ret = ERR_PTR(-EINVAL); | 116 | ret = ERR_PTR(-EINVAL); |
112 | goto done; | 117 | goto done; |
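
The ehca and ipath hunks above add the same guard: neither driver implements the SRQ types introduced for XRC elsewhere in this series, so anything but IB_SRQT_BASIC is refused with -ENOSYS before any other attribute is examined. The shape of the guard (stand-in enum; illustrative):

#include <errno.h>

enum srq_type_stub { SRQT_BASIC, SRQT_XRC };	/* ib_srq_type stand-in */

/* Drivers that predate XRC reject non-basic SRQ types before looking
 * at any other attribute, exactly as the two hunks above do. */
static int check_srq_type(enum srq_type_stub type)
{
	return type == SRQT_BASIC ? 0 : -ENOSYS;
}
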
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index fa643f4f4e28..77f3dbc0aaa1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -128,6 +128,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
128 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && | 128 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && |
129 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) | 129 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) |
130 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | 130 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
131 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) | ||
132 | props->device_cap_flags |= IB_DEVICE_XRC; | ||
131 | 133 | ||
132 | props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & | 134 | props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & |
133 | 0xffffff; | 135 | 0xffffff; |
@@ -181,8 +183,12 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) | |||
181 | 183 | ||
182 | static int ib_link_query_port(struct ib_device *ibdev, u8 port, | 184 | static int ib_link_query_port(struct ib_device *ibdev, u8 port, |
183 | struct ib_port_attr *props, | 185 | struct ib_port_attr *props, |
186 | struct ib_smp *in_mad, | ||
184 | struct ib_smp *out_mad) | 187 | struct ib_smp *out_mad) |
185 | { | 188 | { |
189 | int ext_active_speed; | ||
190 | int err; | ||
191 | |||
186 | props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); | 192 | props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); |
187 | props->lmc = out_mad->data[34] & 0x7; | 193 | props->lmc = out_mad->data[34] & 0x7; |
188 | props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); | 194 | props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); |
@@ -203,6 +209,39 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, | |||
203 | props->max_vl_num = out_mad->data[37] >> 4; | 209 | props->max_vl_num = out_mad->data[37] >> 4; |
204 | props->init_type_reply = out_mad->data[41] >> 4; | 210 | props->init_type_reply = out_mad->data[41] >> 4; |
205 | 211 | ||
212 | /* Check if extended speeds (EDR/FDR/...) are supported */ | ||
213 | if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { | ||
214 | ext_active_speed = out_mad->data[62] >> 4; | ||
215 | |||
216 | switch (ext_active_speed) { | ||
217 | case 1: | ||
218 | props->active_speed = 16; /* FDR */ | ||
219 | break; | ||
220 | case 2: | ||
221 | props->active_speed = 32; /* EDR */ | ||
222 | break; | ||
223 | } | ||
224 | } | ||
225 | |||
226 | /* If the reported active speed is QDR, check whether it is FDR-10 */ | ||
227 | if (props->active_speed == 4) { | ||
228 | if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] & | ||
229 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { | ||
230 | init_query_mad(in_mad); | ||
231 | in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; | ||
232 | in_mad->attr_mod = cpu_to_be32(port); | ||
233 | |||
234 | err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, | ||
235 | NULL, NULL, in_mad, out_mad); | ||
236 | if (err) | ||
237 | return err; | ||
238 | |||
239 | /* Checking LinkSpeedActive for FDR-10 */ | ||
240 | if (out_mad->data[15] & 0x1) | ||
241 | props->active_speed = 8; | ||
242 | } | ||
243 | } | ||
244 | |||
206 | return 0; | 245 | return 0; |
207 | } | 246 | } |
208 | 247 | ||
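
The extended-speed logic above depends on active_speed being a per-lane signalling code rather than a bit rate; the hunk writes 16 for FDR, 32 for EDR, and downgrades a reported QDR (4) to FDR-10 (8) when the extended port info says so. The codes used, for reference (the enum name is illustrative; the rates are the standard per-lane values):

/* active_speed codes as used in the hunk above (per-lane signalling). */
enum ib_speed_code_sketch {
	SPEED_SDR   = 1,	/* 2.5 Gb/s per lane */
	SPEED_DDR   = 2,	/* 5 Gb/s */
	SPEED_QDR   = 4,	/* 10 Gb/s */
	SPEED_FDR10 = 8,	/* 10.3125 Gb/s, 64/66b encoding */
	SPEED_FDR   = 16,	/* 14.0625 Gb/s */
	SPEED_EDR   = 32,	/* 25.78125 Gb/s */
};
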
@@ -227,7 +266,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
227 | props->pkey_tbl_len = 1; | 266 | props->pkey_tbl_len = 1; |
228 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); | 267 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); |
229 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); | 268 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); |
230 | props->max_mtu = IB_MTU_2048; | 269 | props->max_mtu = IB_MTU_4096; |
231 | props->subnet_timeout = 0; | 270 | props->subnet_timeout = 0; |
232 | props->max_vl_num = out_mad->data[37] >> 4; | 271 | props->max_vl_num = out_mad->data[37] >> 4; |
233 | props->init_type_reply = 0; | 272 | props->init_type_reply = 0; |
@@ -274,7 +313,7 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | |||
274 | goto out; | 313 | goto out; |
275 | 314 | ||
276 | err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? | 315 | err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? |
277 | ib_link_query_port(ibdev, port, props, out_mad) : | 316 | ib_link_query_port(ibdev, port, props, in_mad, out_mad) : |
278 | eth_link_query_port(ibdev, port, props, out_mad); | 317 | eth_link_query_port(ibdev, port, props, out_mad); |
279 | 318 | ||
280 | out: | 319 | out: |
@@ -566,6 +605,57 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd) | |||
566 | return 0; | 605 | return 0; |
567 | } | 606 | } |
568 | 607 | ||
608 | static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, | ||
609 | struct ib_ucontext *context, | ||
610 | struct ib_udata *udata) | ||
611 | { | ||
612 | struct mlx4_ib_xrcd *xrcd; | ||
613 | int err; | ||
614 | |||
615 | if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) | ||
616 | return ERR_PTR(-ENOSYS); | ||
617 | |||
618 | xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL); | ||
619 | if (!xrcd) | ||
620 | return ERR_PTR(-ENOMEM); | ||
621 | |||
622 | err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn); | ||
623 | if (err) | ||
624 | goto err1; | ||
625 | |||
626 | xrcd->pd = ib_alloc_pd(ibdev); | ||
627 | if (IS_ERR(xrcd->pd)) { | ||
628 | err = PTR_ERR(xrcd->pd); | ||
629 | goto err2; | ||
630 | } | ||
631 | |||
632 | xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0); | ||
633 | if (IS_ERR(xrcd->cq)) { | ||
634 | err = PTR_ERR(xrcd->cq); | ||
635 | goto err3; | ||
636 | } | ||
637 | |||
638 | return &xrcd->ibxrcd; | ||
639 | |||
640 | err3: | ||
641 | ib_dealloc_pd(xrcd->pd); | ||
642 | err2: | ||
643 | mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn); | ||
644 | err1: | ||
645 | kfree(xrcd); | ||
646 | return ERR_PTR(err); | ||
647 | } | ||
648 | |||
649 | static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd) | ||
650 | { | ||
651 | ib_destroy_cq(to_mxrcd(xrcd)->cq); | ||
652 | ib_dealloc_pd(to_mxrcd(xrcd)->pd); | ||
653 | mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn); | ||
654 | kfree(xrcd); | ||
655 | |||
656 | return 0; | ||
657 | } | ||
658 | |||
569 | static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) | 659 | static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) |
570 | { | 660 | { |
571 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); | 661 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); |
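
mlx4_ib_alloc_xrcd() allocates more than the hardware XRC domain number: it also creates a private PD and a one-entry CQ, which XRC target QPs opened against this domain later borrow (see get_pd()/get_cqs() in qp.c further down). A minimal consumer sketch, assuming the ib_alloc_xrcd()/ib_dealloc_xrcd() core wrappers added elsewhere in this series:

    /* Sketch only; a real caller would create XRC SRQs/QPs against the
     * domain before tearing it down. */
    static int xrcd_smoke_test(struct ib_device *ibdev)
    {
            struct ib_xrcd *xrcd = ib_alloc_xrcd(ibdev);

            if (IS_ERR(xrcd))
                    return PTR_ERR(xrcd);  /* -ENOSYS without MLX4_DEV_CAP_FLAG_XRC */

            return ib_dealloc_xrcd(xrcd);  /* also frees the hidden CQ and PD */
    }
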
@@ -1044,7 +1134,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1044 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | | 1134 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | |
1045 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | | 1135 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | |
1046 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | 1136 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | |
1047 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); | 1137 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | |
1138 | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | | ||
1139 | (1ull << IB_USER_VERBS_CMD_OPEN_QP); | ||
1048 | 1140 | ||
1049 | ibdev->ib_dev.query_device = mlx4_ib_query_device; | 1141 | ibdev->ib_dev.query_device = mlx4_ib_query_device; |
1050 | ibdev->ib_dev.query_port = mlx4_ib_query_port; | 1142 | ibdev->ib_dev.query_port = mlx4_ib_query_port; |
@@ -1093,6 +1185,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1093 | ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; | 1185 | ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; |
1094 | ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; | 1186 | ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; |
1095 | 1187 | ||
1188 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) { | ||
1189 | ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd; | ||
1190 | ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd; | ||
1191 | ibdev->ib_dev.uverbs_cmd_mask |= | ||
1192 | (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | | ||
1193 | (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); | ||
1194 | } | ||
1195 | |||
1096 | spin_lock_init(&iboe->lock); | 1196 | spin_lock_init(&iboe->lock); |
1097 | 1197 | ||
1098 | if (init_node_data(ibdev)) | 1198 | if (init_node_data(ibdev)) |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e4bf2cff8662..ed80345c99ae 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -56,6 +56,13 @@ struct mlx4_ib_pd { | |||
56 | u32 pdn; | 56 | u32 pdn; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct mlx4_ib_xrcd { | ||
60 | struct ib_xrcd ibxrcd; | ||
61 | u32 xrcdn; | ||
62 | struct ib_pd *pd; | ||
63 | struct ib_cq *cq; | ||
64 | }; | ||
65 | |||
59 | struct mlx4_ib_cq_buf { | 66 | struct mlx4_ib_cq_buf { |
60 | struct mlx4_buf buf; | 67 | struct mlx4_buf buf; |
61 | struct mlx4_mtt mtt; | 68 | struct mlx4_mtt mtt; |
@@ -138,6 +145,7 @@ struct mlx4_ib_qp { | |||
138 | struct mlx4_mtt mtt; | 145 | struct mlx4_mtt mtt; |
139 | int buf_size; | 146 | int buf_size; |
140 | struct mutex mutex; | 147 | struct mutex mutex; |
148 | u16 xrcdn; | ||
141 | u32 flags; | 149 | u32 flags; |
142 | u8 port; | 150 | u8 port; |
143 | u8 alt_port; | 151 | u8 alt_port; |
@@ -211,6 +219,11 @@ static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd) | |||
211 | return container_of(ibpd, struct mlx4_ib_pd, ibpd); | 219 | return container_of(ibpd, struct mlx4_ib_pd, ibpd); |
212 | } | 220 | } |
213 | 221 | ||
222 | static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) | ||
223 | { | ||
224 | return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd); | ||
225 | } | ||
226 | |||
214 | static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) | 227 | static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) |
215 | { | 228 | { |
216 | return container_of(ibcq, struct mlx4_ib_cq, ibcq); | 229 | return container_of(ibcq, struct mlx4_ib_cq, ibcq); |
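
to_mxrcd() follows the driver's usual to_m*() accessor pattern: the generic ib_xrcd is embedded inside the driver-private struct, and container_of() walks back from a pointer to the embedded member to the enclosing object. The idiom in isolation (simplified; the kernel's container_of() adds type checking):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct generic { int id; };

    struct wrapper {
            struct generic base;    /* embedded, like ibxrcd in mlx4_ib_xrcd */
            int private_state;
    };

    static struct wrapper *to_wrapper(struct generic *g)
    {
            return container_of(g, struct wrapper, base);
    }
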
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 3a91d9d8dc51..a16f0c8e6f3f 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -302,15 +302,14 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags) | |||
302 | } | 302 | } |
303 | 303 | ||
304 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | 304 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, |
305 | int is_user, int has_srq, struct mlx4_ib_qp *qp) | 305 | int is_user, int has_rq, struct mlx4_ib_qp *qp) |
306 | { | 306 | { |
307 | /* Sanity check RQ size before proceeding */ | 307 | /* Sanity check RQ size before proceeding */ |
308 | if (cap->max_recv_wr > dev->dev->caps.max_wqes || | 308 | if (cap->max_recv_wr > dev->dev->caps.max_wqes || |
309 | cap->max_recv_sge > dev->dev->caps.max_rq_sg) | 309 | cap->max_recv_sge > dev->dev->caps.max_rq_sg) |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | 311 | ||
312 | if (has_srq) { | 312 | if (!has_rq) { |
313 | /* QPs attached to an SRQ should have no RQ */ | ||
314 | if (cap->max_recv_wr) | 313 | if (cap->max_recv_wr) |
315 | return -EINVAL; | 314 | return -EINVAL; |
316 | 315 | ||
@@ -463,6 +462,14 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev, | |||
463 | return 0; | 462 | return 0; |
464 | } | 463 | } |
465 | 464 | ||
465 | static int qp_has_rq(struct ib_qp_init_attr *attr) | ||
466 | { | ||
467 | if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) | ||
468 | return 0; | ||
469 | |||
470 | return !attr->srq; | ||
471 | } | ||
472 | |||
466 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | 473 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, |
467 | struct ib_qp_init_attr *init_attr, | 474 | struct ib_qp_init_attr *init_attr, |
468 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) | 475 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) |
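
qp_has_rq() generalizes the old !init_attr->srq test: an XRC initiator QP is send-only, and an XRC target QP receives through a shared XRC SRQ, so neither owns a receive queue whatever the srq pointer says. One practical consequence for callers, sketched below: a QP without an RQ must request zero receive resources, or set_rq_size() returns -EINVAL.

    /* Illustrative init attributes for an RQ-less QP (fragment). */
    struct ib_qp_init_attr attr = {
            .qp_type = IB_QPT_XRC_INI,
            .cap = {
                    .max_send_wr  = 64,     /* send side sized normally */
                    .max_send_sge = 1,
                    .max_recv_wr  = 0,      /* anything else is rejected */
                    .max_recv_sge = 0,
            },
    };
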
@@ -479,7 +486,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
479 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | 486 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
480 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | 487 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); |
481 | 488 | ||
482 | err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp); | 489 | err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); |
483 | if (err) | 490 | if (err) |
484 | goto err; | 491 | goto err; |
485 | 492 | ||
@@ -513,7 +520,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
513 | if (err) | 520 | if (err) |
514 | goto err_mtt; | 521 | goto err_mtt; |
515 | 522 | ||
516 | if (!init_attr->srq) { | 523 | if (qp_has_rq(init_attr)) { |
517 | err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), | 524 | err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), |
518 | ucmd.db_addr, &qp->db); | 525 | ucmd.db_addr, &qp->db); |
519 | if (err) | 526 | if (err) |
@@ -532,7 +539,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
532 | if (err) | 539 | if (err) |
533 | goto err; | 540 | goto err; |
534 | 541 | ||
535 | if (!init_attr->srq) { | 542 | if (qp_has_rq(init_attr)) { |
536 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); | 543 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); |
537 | if (err) | 544 | if (err) |
538 | goto err; | 545 | goto err; |
@@ -575,6 +582,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
575 | if (err) | 582 | if (err) |
576 | goto err_qpn; | 583 | goto err_qpn; |
577 | 584 | ||
585 | if (init_attr->qp_type == IB_QPT_XRC_TGT) | ||
586 | qp->mqp.qpn |= (1 << 23); | ||
587 | |||
578 | /* | 588 | /* |
579 | * Hardware wants QPN written in big-endian order (after | 589 | * Hardware wants QPN written in big-endian order (after |
580 | * shifting) for send doorbell. Precompute this value to save | 590 | * shifting) for send doorbell. Precompute this value to save |
@@ -592,9 +602,8 @@ err_qpn: | |||
592 | 602 | ||
593 | err_wrid: | 603 | err_wrid: |
594 | if (pd->uobject) { | 604 | if (pd->uobject) { |
595 | if (!init_attr->srq) | 605 | if (qp_has_rq(init_attr)) |
596 | mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), | 606 | mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); |
597 | &qp->db); | ||
598 | } else { | 607 | } else { |
599 | kfree(qp->sq.wrid); | 608 | kfree(qp->sq.wrid); |
600 | kfree(qp->rq.wrid); | 609 | kfree(qp->rq.wrid); |
@@ -610,7 +619,7 @@ err_buf: | |||
610 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); | 619 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
611 | 620 | ||
612 | err_db: | 621 | err_db: |
613 | if (!pd->uobject && !init_attr->srq) | 622 | if (!pd->uobject && qp_has_rq(init_attr)) |
614 | mlx4_db_free(dev->dev, &qp->db); | 623 | mlx4_db_free(dev->dev, &qp->db); |
615 | 624 | ||
616 | err: | 625 | err: |
@@ -671,6 +680,33 @@ static void del_gid_entries(struct mlx4_ib_qp *qp) | |||
671 | } | 680 | } |
672 | } | 681 | } |
673 | 682 | ||
683 | static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) | ||
684 | { | ||
685 | if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) | ||
686 | return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); | ||
687 | else | ||
688 | return to_mpd(qp->ibqp.pd); | ||
689 | } | ||
690 | |||
691 | static void get_cqs(struct mlx4_ib_qp *qp, | ||
692 | struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) | ||
693 | { | ||
694 | switch (qp->ibqp.qp_type) { | ||
695 | case IB_QPT_XRC_TGT: | ||
696 | *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); | ||
697 | *recv_cq = *send_cq; | ||
698 | break; | ||
699 | case IB_QPT_XRC_INI: | ||
700 | *send_cq = to_mcq(qp->ibqp.send_cq); | ||
701 | *recv_cq = *send_cq; | ||
702 | break; | ||
703 | default: | ||
704 | *send_cq = to_mcq(qp->ibqp.send_cq); | ||
705 | *recv_cq = to_mcq(qp->ibqp.recv_cq); | ||
706 | break; | ||
707 | } | ||
708 | } | ||
709 | |||
674 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | 710 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, |
675 | int is_user) | 711 | int is_user) |
676 | { | 712 | { |
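
get_pd() and get_cqs() exist because an XRC target QP carries no PD or CQs of its own -- they are borrowed from the XRCD allocated in main.c above -- and because both XRC types use a single CQ for send and receive. A side effect is that the send/receive CQ double-locking in destroy_qp_common() can degenerate to a single lock. A sketch of the usual deadlock-avoidance idiom, assuming lock ordering by CQ number as mlx4_ib_lock_cqs() does:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static void lock_cq_pair(spinlock_t *a, u32 a_cqn, spinlock_t *b, u32 b_cqn)
    {
            if (a == b) {
                    spin_lock_irq(a);       /* XRC case: one CQ, one lock */
            } else if (a_cqn < b_cqn) {
                    spin_lock_irq(a);
                    spin_lock_nested(b, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock_irq(b);
                    spin_lock_nested(a, SINGLE_DEPTH_NESTING);
            }
    }
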
@@ -682,8 +718,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
682 | printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", | 718 | printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", |
683 | qp->mqp.qpn); | 719 | qp->mqp.qpn); |
684 | 720 | ||
685 | send_cq = to_mcq(qp->ibqp.send_cq); | 721 | get_cqs(qp, &send_cq, &recv_cq); |
686 | recv_cq = to_mcq(qp->ibqp.recv_cq); | ||
687 | 722 | ||
688 | mlx4_ib_lock_cqs(send_cq, recv_cq); | 723 | mlx4_ib_lock_cqs(send_cq, recv_cq); |
689 | 724 | ||
@@ -706,7 +741,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
706 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); | 741 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); |
707 | 742 | ||
708 | if (is_user) { | 743 | if (is_user) { |
709 | if (!qp->ibqp.srq) | 744 | if (qp->rq.wqe_cnt) |
710 | mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), | 745 | mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), |
711 | &qp->db); | 746 | &qp->db); |
712 | ib_umem_release(qp->umem); | 747 | ib_umem_release(qp->umem); |
@@ -714,7 +749,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
714 | kfree(qp->sq.wrid); | 749 | kfree(qp->sq.wrid); |
715 | kfree(qp->rq.wrid); | 750 | kfree(qp->rq.wrid); |
716 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); | 751 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
717 | if (!qp->ibqp.srq) | 752 | if (qp->rq.wqe_cnt) |
718 | mlx4_db_free(dev->dev, &qp->db); | 753 | mlx4_db_free(dev->dev, &qp->db); |
719 | } | 754 | } |
720 | 755 | ||
@@ -725,10 +760,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
725 | struct ib_qp_init_attr *init_attr, | 760 | struct ib_qp_init_attr *init_attr, |
726 | struct ib_udata *udata) | 761 | struct ib_udata *udata) |
727 | { | 762 | { |
728 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | ||
729 | struct mlx4_ib_sqp *sqp; | 763 | struct mlx4_ib_sqp *sqp; |
730 | struct mlx4_ib_qp *qp; | 764 | struct mlx4_ib_qp *qp; |
731 | int err; | 765 | int err; |
766 | u16 xrcdn = 0; | ||
732 | 767 | ||
733 | /* | 768 | /* |
734 | * We only support LSO and multicast loopback blocking, and | 769 | * We only support LSO and multicast loopback blocking, and |
@@ -739,10 +774,20 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
739 | return ERR_PTR(-EINVAL); | 774 | return ERR_PTR(-EINVAL); |
740 | 775 | ||
741 | if (init_attr->create_flags && | 776 | if (init_attr->create_flags && |
742 | (pd->uobject || init_attr->qp_type != IB_QPT_UD)) | 777 | (udata || init_attr->qp_type != IB_QPT_UD)) |
743 | return ERR_PTR(-EINVAL); | 778 | return ERR_PTR(-EINVAL); |
744 | 779 | ||
745 | switch (init_attr->qp_type) { | 780 | switch (init_attr->qp_type) { |
781 | case IB_QPT_XRC_TGT: | ||
782 | pd = to_mxrcd(init_attr->xrcd)->pd; | ||
783 | xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; | ||
784 | init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; | ||
785 | /* fall through */ | ||
786 | case IB_QPT_XRC_INI: | ||
787 | if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) | ||
788 | return ERR_PTR(-ENOSYS); | ||
789 | init_attr->recv_cq = init_attr->send_cq; | ||
790 | /* fall through */ | ||
746 | case IB_QPT_RC: | 791 | case IB_QPT_RC: |
747 | case IB_QPT_UC: | 792 | case IB_QPT_UC: |
748 | case IB_QPT_UD: | 793 | case IB_QPT_UD: |
@@ -751,13 +796,14 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
751 | if (!qp) | 796 | if (!qp) |
752 | return ERR_PTR(-ENOMEM); | 797 | return ERR_PTR(-ENOMEM); |
753 | 798 | ||
754 | err = create_qp_common(dev, pd, init_attr, udata, 0, qp); | 799 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, qp); |
755 | if (err) { | 800 | if (err) { |
756 | kfree(qp); | 801 | kfree(qp); |
757 | return ERR_PTR(err); | 802 | return ERR_PTR(err); |
758 | } | 803 | } |
759 | 804 | ||
760 | qp->ibqp.qp_num = qp->mqp.qpn; | 805 | qp->ibqp.qp_num = qp->mqp.qpn; |
806 | qp->xrcdn = xrcdn; | ||
761 | 807 | ||
762 | break; | 808 | break; |
763 | } | 809 | } |
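
Note that the XRC_TGT arm rewrites pd, send_cq and xrcdn from the XRCD before falling through, so whatever PD the caller hands in is ignored for target QPs. A hedged sketch of the caller's side (the .xrcd field is confirmed by this hunk; passing a NULL PD assumes the core ib_create_qp() of this series resolves the device from attr.xrcd, and my_event_handler/xrcd are illustrative names):

    struct ib_qp_init_attr attr = {
            .event_handler = my_event_handler,
            .qp_type       = IB_QPT_XRC_TGT,
            .xrcd          = xrcd,          /* supplies the PD, CQ and xrcdn */
            .sq_sig_type   = IB_SIGNAL_ALL_WR,
    };
    struct ib_qp *qp = ib_create_qp(NULL, &attr);

    if (IS_ERR(qp))
            return PTR_ERR(qp);
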
@@ -765,7 +811,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
765 | case IB_QPT_GSI: | 811 | case IB_QPT_GSI: |
766 | { | 812 | { |
767 | /* Userspace is not allowed to create special QPs: */ | 813 | /* Userspace is not allowed to create special QPs: */ |
768 | if (pd->uobject) | 814 | if (udata) |
769 | return ERR_PTR(-EINVAL); | 815 | return ERR_PTR(-EINVAL); |
770 | 816 | ||
771 | sqp = kzalloc(sizeof *sqp, GFP_KERNEL); | 817 | sqp = kzalloc(sizeof *sqp, GFP_KERNEL); |
@@ -774,8 +820,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
774 | 820 | ||
775 | qp = &sqp->qp; | 821 | qp = &sqp->qp; |
776 | 822 | ||
777 | err = create_qp_common(dev, pd, init_attr, udata, | 823 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, |
778 | dev->dev->caps.sqp_start + | 824 | to_mdev(pd->device)->dev->caps.sqp_start + |
779 | (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) + | 825 | (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) + |
780 | init_attr->port_num - 1, | 826 | init_attr->port_num - 1, |
781 | qp); | 827 | qp); |
@@ -801,11 +847,13 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) | |||
801 | { | 847 | { |
802 | struct mlx4_ib_dev *dev = to_mdev(qp->device); | 848 | struct mlx4_ib_dev *dev = to_mdev(qp->device); |
803 | struct mlx4_ib_qp *mqp = to_mqp(qp); | 849 | struct mlx4_ib_qp *mqp = to_mqp(qp); |
850 | struct mlx4_ib_pd *pd; | ||
804 | 851 | ||
805 | if (is_qp0(dev, mqp)) | 852 | if (is_qp0(dev, mqp)) |
806 | mlx4_CLOSE_PORT(dev->dev, mqp->port); | 853 | mlx4_CLOSE_PORT(dev->dev, mqp->port); |
807 | 854 | ||
808 | destroy_qp_common(dev, mqp, !!qp->pd->uobject); | 855 | pd = get_pd(mqp); |
856 | destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); | ||
809 | 857 | ||
810 | if (is_sqp(dev, mqp)) | 858 | if (is_sqp(dev, mqp)) |
811 | kfree(to_msqp(mqp)); | 859 | kfree(to_msqp(mqp)); |
@@ -821,6 +869,8 @@ static int to_mlx4_st(enum ib_qp_type type) | |||
821 | case IB_QPT_RC: return MLX4_QP_ST_RC; | 869 | case IB_QPT_RC: return MLX4_QP_ST_RC; |
822 | case IB_QPT_UC: return MLX4_QP_ST_UC; | 870 | case IB_QPT_UC: return MLX4_QP_ST_UC; |
823 | case IB_QPT_UD: return MLX4_QP_ST_UD; | 871 | case IB_QPT_UD: return MLX4_QP_ST_UD; |
872 | case IB_QPT_XRC_INI: | ||
873 | case IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; | ||
824 | case IB_QPT_SMI: | 874 | case IB_QPT_SMI: |
825 | case IB_QPT_GSI: return MLX4_QP_ST_MLX; | 875 | case IB_QPT_GSI: return MLX4_QP_ST_MLX; |
826 | default: return -1; | 876 | default: return -1; |
@@ -959,6 +1009,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
959 | { | 1009 | { |
960 | struct mlx4_ib_dev *dev = to_mdev(ibqp->device); | 1010 | struct mlx4_ib_dev *dev = to_mdev(ibqp->device); |
961 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | 1011 | struct mlx4_ib_qp *qp = to_mqp(ibqp); |
1012 | struct mlx4_ib_pd *pd; | ||
1013 | struct mlx4_ib_cq *send_cq, *recv_cq; | ||
962 | struct mlx4_qp_context *context; | 1014 | struct mlx4_qp_context *context; |
963 | enum mlx4_qp_optpar optpar = 0; | 1015 | enum mlx4_qp_optpar optpar = 0; |
964 | int sqd_event; | 1016 | int sqd_event; |
@@ -1014,8 +1066,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1014 | context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; | 1066 | context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; |
1015 | context->sq_size_stride |= qp->sq.wqe_shift - 4; | 1067 | context->sq_size_stride |= qp->sq.wqe_shift - 4; |
1016 | 1068 | ||
1017 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) | 1069 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
1018 | context->sq_size_stride |= !!qp->sq_no_prefetch << 7; | 1070 | context->sq_size_stride |= !!qp->sq_no_prefetch << 7; |
1071 | context->xrcd = cpu_to_be32((u32) qp->xrcdn); | ||
1072 | } | ||
1019 | 1073 | ||
1020 | if (qp->ibqp.uobject) | 1074 | if (qp->ibqp.uobject) |
1021 | context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); | 1075 | context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); |
@@ -1079,8 +1133,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1079 | optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; | 1133 | optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; |
1080 | } | 1134 | } |
1081 | 1135 | ||
1082 | context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn); | 1136 | pd = get_pd(qp); |
1083 | context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); | 1137 | get_cqs(qp, &send_cq, &recv_cq); |
1138 | context->pd = cpu_to_be32(pd->pdn); | ||
1139 | context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); | ||
1140 | context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); | ||
1141 | context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); | ||
1084 | 1142 | ||
1085 | /* Set "fast registration enabled" for all kernel QPs */ | 1143 | /* Set "fast registration enabled" for all kernel QPs */ |
1086 | if (!qp->ibqp.uobject) | 1144 | if (!qp->ibqp.uobject) |
@@ -1106,8 +1164,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1106 | if (attr_mask & IB_QP_SQ_PSN) | 1164 | if (attr_mask & IB_QP_SQ_PSN) |
1107 | context->next_send_psn = cpu_to_be32(attr->sq_psn); | 1165 | context->next_send_psn = cpu_to_be32(attr->sq_psn); |
1108 | 1166 | ||
1109 | context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn); | ||
1110 | |||
1111 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { | 1167 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
1112 | if (attr->max_dest_rd_atomic) | 1168 | if (attr->max_dest_rd_atomic) |
1113 | context->params2 |= | 1169 | context->params2 |= |
@@ -1130,8 +1186,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1130 | if (attr_mask & IB_QP_RQ_PSN) | 1186 | if (attr_mask & IB_QP_RQ_PSN) |
1131 | context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); | 1187 | context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); |
1132 | 1188 | ||
1133 | context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn); | ||
1134 | |||
1135 | if (attr_mask & IB_QP_QKEY) { | 1189 | if (attr_mask & IB_QP_QKEY) { |
1136 | context->qkey = cpu_to_be32(attr->qkey); | 1190 | context->qkey = cpu_to_be32(attr->qkey); |
1137 | optpar |= MLX4_QP_OPTPAR_Q_KEY; | 1191 | optpar |= MLX4_QP_OPTPAR_Q_KEY; |
@@ -1140,7 +1194,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1140 | if (ibqp->srq) | 1194 | if (ibqp->srq) |
1141 | context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); | 1195 | context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); |
1142 | 1196 | ||
1143 | if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) | 1197 | if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) |
1144 | context->db_rec_addr = cpu_to_be64(qp->db.dma); | 1198 | context->db_rec_addr = cpu_to_be64(qp->db.dma); |
1145 | 1199 | ||
1146 | if (cur_state == IB_QPS_INIT && | 1200 | if (cur_state == IB_QPS_INIT && |
@@ -1225,17 +1279,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1225 | * entries and reinitialize the QP. | 1279 | * entries and reinitialize the QP. |
1226 | */ | 1280 | */ |
1227 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { | 1281 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { |
1228 | mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn, | 1282 | mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, |
1229 | ibqp->srq ? to_msrq(ibqp->srq): NULL); | 1283 | ibqp->srq ? to_msrq(ibqp->srq): NULL); |
1230 | if (ibqp->send_cq != ibqp->recv_cq) | 1284 | if (send_cq != recv_cq) |
1231 | mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL); | 1285 | mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); |
1232 | 1286 | ||
1233 | qp->rq.head = 0; | 1287 | qp->rq.head = 0; |
1234 | qp->rq.tail = 0; | 1288 | qp->rq.tail = 0; |
1235 | qp->sq.head = 0; | 1289 | qp->sq.head = 0; |
1236 | qp->sq.tail = 0; | 1290 | qp->sq.tail = 0; |
1237 | qp->sq_next_wqe = 0; | 1291 | qp->sq_next_wqe = 0; |
1238 | if (!ibqp->srq) | 1292 | if (qp->rq.wqe_cnt) |
1239 | *qp->db.db = 0; | 1293 | *qp->db.db = 0; |
1240 | } | 1294 | } |
1241 | 1295 | ||
@@ -1547,14 +1601,13 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, | |||
1547 | } | 1601 | } |
1548 | 1602 | ||
1549 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | 1603 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, |
1550 | struct ib_send_wr *wr, __be16 *vlan) | 1604 | struct ib_send_wr *wr) |
1551 | { | 1605 | { |
1552 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); | 1606 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); |
1553 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 1607 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); |
1554 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1608 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); |
1555 | dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; | 1609 | dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; |
1556 | memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); | 1610 | memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); |
1557 | *vlan = dseg->vlan; | ||
1558 | } | 1611 | } |
1559 | 1612 | ||
1560 | static void set_mlx_icrc_seg(void *dseg) | 1613 | static void set_mlx_icrc_seg(void *dseg) |
@@ -1657,7 +1710,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1657 | __be32 uninitialized_var(lso_hdr_sz); | 1710 | __be32 uninitialized_var(lso_hdr_sz); |
1658 | __be32 blh; | 1711 | __be32 blh; |
1659 | int i; | 1712 | int i; |
1660 | __be16 vlan = cpu_to_be16(0xffff); | ||
1661 | 1713 | ||
1662 | spin_lock_irqsave(&qp->sq.lock, flags); | 1714 | spin_lock_irqsave(&qp->sq.lock, flags); |
1663 | 1715 | ||
@@ -1761,7 +1813,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1761 | break; | 1813 | break; |
1762 | 1814 | ||
1763 | case IB_QPT_UD: | 1815 | case IB_QPT_UD: |
1764 | set_datagram_seg(wqe, wr, &vlan); | 1816 | set_datagram_seg(wqe, wr); |
1765 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 1817 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
1766 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 1818 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
1767 | 1819 | ||
@@ -1824,11 +1876,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1824 | ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? | 1876 | ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? |
1825 | MLX4_WQE_CTRL_FENCE : 0) | size; | 1877 | MLX4_WQE_CTRL_FENCE : 0) | size; |
1826 | 1878 | ||
1827 | if (be16_to_cpu(vlan) < 0x1000) { | ||
1828 | ctrl->ins_vlan = 1 << 6; | ||
1829 | ctrl->vlan_tag = vlan; | ||
1830 | } | ||
1831 | |||
1832 | /* | 1879 | /* |
1833 | * Make sure descriptor is fully written before | 1880 | * Make sure descriptor is fully written before |
1834 | * setting ownership bit (because HW can start | 1881 | * setting ownership bit (because HW can start |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 818b7ecace5e..39542f3703b8 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -76,6 +76,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
76 | struct mlx4_ib_srq *srq; | 76 | struct mlx4_ib_srq *srq; |
77 | struct mlx4_wqe_srq_next_seg *next; | 77 | struct mlx4_wqe_srq_next_seg *next; |
78 | struct mlx4_wqe_data_seg *scatter; | 78 | struct mlx4_wqe_data_seg *scatter; |
79 | u32 cqn; | ||
80 | u16 xrcdn; | ||
79 | int desc_size; | 81 | int desc_size; |
80 | int buf_size; | 82 | int buf_size; |
81 | int err; | 83 | int err; |
@@ -174,12 +176,18 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
174 | } | 176 | } |
175 | } | 177 | } |
176 | 178 | ||
177 | err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt, | 179 | cqn = (init_attr->srq_type == IB_SRQT_XRC) ? |
180 | to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0; | ||
181 | xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ? | ||
182 | to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn : | ||
183 | (u16) dev->dev->caps.reserved_xrcds; | ||
184 | err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt, | ||
178 | srq->db.dma, &srq->msrq); | 185 | srq->db.dma, &srq->msrq); |
179 | if (err) | 186 | if (err) |
180 | goto err_wrid; | 187 | goto err_wrid; |
181 | 188 | ||
182 | srq->msrq.event = mlx4_ib_srq_event; | 189 | srq->msrq.event = mlx4_ib_srq_event; |
190 | srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; | ||
183 | 191 | ||
184 | if (pd->uobject) | 192 | if (pd->uobject) |
185 | if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { | 193 | if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { |
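
mlx4_srq_alloc() now takes the completion CQ and XRC domain at creation time; basic SRQs pass cqn 0 and one of the reserved xrcdns as placeholders. From the verbs consumer's side, an XRC SRQ is requested through the extended init attributes, roughly as below (xrcd, xrc_cq and pd are assumed to already exist; the ext.xrc field names are confirmed by this hunk):

    struct ib_srq_init_attr attr = {
            .srq_type = IB_SRQT_XRC,
            .attr     = { .max_wr = 128, .max_sge = 1 },
            .ext.xrc  = {
                    .xrcd = xrcd,           /* shared receive domain */
                    .cq   = xrc_cq,         /* completions for all attached QPs */
            },
    };
    struct ib_srq *srq = ib_create_srq(pd, &attr);

    if (IS_ERR(srq))
            return PTR_ERR(srq);
    /* srq->ext.xrc.srq_num is the number remote senders target. */
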
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 365fe0e14192..cb9a0b976804 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -438,6 +438,9 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd, | |||
438 | struct mthca_srq *srq; | 438 | struct mthca_srq *srq; |
439 | int err; | 439 | int err; |
440 | 440 | ||
441 | if (init_attr->srq_type != IB_SRQT_BASIC) | ||
442 | return ERR_PTR(-ENOSYS); | ||
443 | |||
441 | srq = kmalloc(sizeof *srq, GFP_KERNEL); | 444 | srq = kmalloc(sizeof *srq, GFP_KERNEL); |
442 | if (!srq) | 445 | if (!srq) |
443 | return ERR_PTR(-ENOMEM); | 446 | return ERR_PTR(-ENOMEM); |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 401b7bb828d0..dfce9ea98a39 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -77,26 +77,19 @@ atomic_t cm_nodes_destroyed; | |||
77 | atomic_t cm_accel_dropped_pkts; | 77 | atomic_t cm_accel_dropped_pkts; |
78 | atomic_t cm_resets_recvd; | 78 | atomic_t cm_resets_recvd; |
79 | 79 | ||
80 | static inline int mini_cm_accelerated(struct nes_cm_core *, | 80 | static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); |
81 | struct nes_cm_node *); | 81 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *); |
82 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, | ||
83 | struct nes_vnic *, struct nes_cm_info *); | ||
84 | static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); | 82 | static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); |
85 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, | 83 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, struct nes_vnic *, u16, void *, struct nes_cm_info *); |
86 | struct nes_vnic *, u16, void *, struct nes_cm_info *); | ||
87 | static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *); | 84 | static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *); |
88 | static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, | 85 | static int mini_cm_accept(struct nes_cm_core *, struct nes_cm_node *); |
89 | struct nes_cm_node *); | 86 | static int mini_cm_reject(struct nes_cm_core *, struct nes_cm_node *); |
90 | static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, | 87 | static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *); |
91 | struct nes_cm_node *); | ||
92 | static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, | ||
93 | struct sk_buff *); | ||
94 | static int mini_cm_dealloc_core(struct nes_cm_core *); | 88 | static int mini_cm_dealloc_core(struct nes_cm_core *); |
95 | static int mini_cm_get(struct nes_cm_core *); | 89 | static int mini_cm_get(struct nes_cm_core *); |
96 | static int mini_cm_set(struct nes_cm_core *, u32, u32); | 90 | static int mini_cm_set(struct nes_cm_core *, u32, u32); |
97 | 91 | ||
98 | static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, | 92 | static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, void *, u32, void *, u32, u8); |
99 | void *, u32, void *, u32, u8); | ||
100 | static int add_ref_cm_node(struct nes_cm_node *); | 93 | static int add_ref_cm_node(struct nes_cm_node *); |
101 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); | 94 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); |
102 | 95 | ||
@@ -111,16 +104,14 @@ static int send_syn(struct nes_cm_node *, u32, struct sk_buff *); | |||
111 | static int send_reset(struct nes_cm_node *, struct sk_buff *); | 104 | static int send_reset(struct nes_cm_node *, struct sk_buff *); |
112 | static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); | 105 | static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); |
113 | static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb); | 106 | static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb); |
114 | static void process_packet(struct nes_cm_node *, struct sk_buff *, | 107 | static void process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *); |
115 | struct nes_cm_core *); | ||
116 | 108 | ||
117 | static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); | 109 | static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); |
118 | static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); | 110 | static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); |
119 | static void cleanup_retrans_entry(struct nes_cm_node *); | 111 | static void cleanup_retrans_entry(struct nes_cm_node *); |
120 | static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *); | 112 | static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *); |
121 | static void free_retrans_entry(struct nes_cm_node *cm_node); | 113 | static void free_retrans_entry(struct nes_cm_node *cm_node); |
122 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 114 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, struct sk_buff *skb, int optionsize, int passive); |
123 | struct sk_buff *skb, int optionsize, int passive); | ||
124 | 115 | ||
125 | /* CM event handler functions */ | 116 | /* CM event handler functions */ |
126 | static void cm_event_connected(struct nes_cm_event *); | 117 | static void cm_event_connected(struct nes_cm_event *); |
@@ -130,6 +121,12 @@ static void cm_event_mpa_req(struct nes_cm_event *); | |||
130 | static void cm_event_mpa_reject(struct nes_cm_event *); | 121 | static void cm_event_mpa_reject(struct nes_cm_event *); |
131 | static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node); | 122 | static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node); |
132 | 123 | ||
124 | /* MPA build functions */ | ||
125 | static int cm_build_mpa_frame(struct nes_cm_node *, u8 **, u16 *, u8 *, u8); | ||
126 | static void build_mpa_v2(struct nes_cm_node *, void *, u8); | ||
127 | static void build_mpa_v1(struct nes_cm_node *, void *, u8); | ||
128 | static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **); | ||
129 | |||
133 | static void print_core(struct nes_cm_core *core); | 130 | static void print_core(struct nes_cm_core *core); |
134 | 131 | ||
135 | /* External CM API Interface */ | 132 | /* External CM API Interface */ |
@@ -172,8 +169,8 @@ int nes_rem_ref_cm_node(struct nes_cm_node *cm_node) | |||
172 | /** | 169 | /** |
173 | * create_event | 170 | * create_event |
174 | */ | 171 | */ |
175 | static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | 172 | static struct nes_cm_event *create_event(struct nes_cm_node * cm_node, |
176 | enum nes_cm_event_type type) | 173 | enum nes_cm_event_type type) |
177 | { | 174 | { |
178 | struct nes_cm_event *event; | 175 | struct nes_cm_event *event; |
179 | 176 | ||
@@ -195,10 +192,10 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | |||
195 | event->cm_info.cm_id = cm_node->cm_id; | 192 | event->cm_info.cm_id = cm_node->cm_id; |
196 | 193 | ||
197 | nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, " | 194 | nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, " |
198 | "dst_addr=%08x[%x], src_addr=%08x[%x]\n", | 195 | "dst_addr=%08x[%x], src_addr=%08x[%x]\n", |
199 | cm_node, event, type, event->cm_info.loc_addr, | 196 | cm_node, event, type, event->cm_info.loc_addr, |
200 | event->cm_info.loc_port, event->cm_info.rem_addr, | 197 | event->cm_info.loc_port, event->cm_info.rem_addr, |
201 | event->cm_info.rem_port); | 198 | event->cm_info.rem_port); |
202 | 199 | ||
203 | nes_cm_post_event(event); | 200 | nes_cm_post_event(event); |
204 | return event; | 201 | return event; |
@@ -210,14 +207,19 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | |||
210 | */ | 207 | */ |
211 | static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) | 208 | static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) |
212 | { | 209 | { |
210 | u8 start_addr = 0; | ||
211 | u8 *start_ptr = &start_addr; | ||
212 | u8 **start_buff = &start_ptr; | ||
213 | u16 buff_len = 0; | ||
214 | |||
213 | if (!skb) { | 215 | if (!skb) { |
214 | nes_debug(NES_DBG_CM, "skb set to NULL\n"); | 216 | nes_debug(NES_DBG_CM, "skb set to NULL\n"); |
215 | return -1; | 217 | return -1; |
216 | } | 218 | } |
217 | 219 | ||
218 | /* send an MPA Request frame */ | 220 | /* send an MPA Request frame */ |
219 | form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, | 221 | cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REQUEST); |
220 | cm_node->mpa_frame_size, SET_ACK); | 222 | form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK); |
221 | 223 | ||
222 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | 224 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); |
223 | } | 225 | } |
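
The start_addr/start_ptr/start_buff chain above is just an out-parameter: cm_build_mpa_frame() may relocate the frame start (for V1 it skips the unused ietf_rtr_msg slot at the head of mpa_frame_buf) and reports the final pointer back through the double pointer. An equivalent two-variable form, for clarity:

    u8 *start = NULL;       /* set by cm_build_mpa_frame() */
    u16 len = 0;

    cm_build_mpa_frame(cm_node, &start, &len, NULL, MPA_KEY_REQUEST);
    /* [start, start + len) is now the frame: past the RTR slot for V1,
     * at the buffer head for V2. */
    form_cm_frame(skb, cm_node, NULL, 0, start, len, SET_ACK);
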
@@ -226,7 +228,11 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
226 | 228 | ||
227 | static int send_mpa_reject(struct nes_cm_node *cm_node) | 229 | static int send_mpa_reject(struct nes_cm_node *cm_node) |
228 | { | 230 | { |
229 | struct sk_buff *skb = NULL; | 231 | struct sk_buff *skb = NULL; |
232 | u8 start_addr = 0; | ||
233 | u8 *start_ptr = &start_addr; | ||
234 | u8 **start_buff = &start_ptr; | ||
235 | u16 buff_len = 0; | ||
230 | 236 | ||
231 | skb = dev_alloc_skb(MAX_CM_BUFFER); | 237 | skb = dev_alloc_skb(MAX_CM_BUFFER); |
232 | if (!skb) { | 238 | if (!skb) { |
@@ -235,8 +241,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node) | |||
235 | } | 241 | } |
236 | 242 | ||
237 | /* send an MPA reject frame */ | 243 | /* send an MPA reject frame */ |
238 | form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, | 244 | cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY); |
239 | cm_node->mpa_frame_size, SET_ACK | SET_FIN); | 245 | form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN); |
240 | 246 | ||
241 | cm_node->state = NES_CM_STATE_FIN_WAIT1; | 247 | cm_node->state = NES_CM_STATE_FIN_WAIT1; |
242 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | 248 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); |
@@ -248,24 +254,31 @@ static int send_mpa_reject(struct nes_cm_node *cm_node) | |||
248 | * IETF MPA frame | 254 | * IETF MPA frame |
249 | */ | 255 | */ |
250 | static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | 256 | static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, |
251 | u32 len) | 257 | u32 len) |
252 | { | 258 | { |
253 | struct ietf_mpa_frame *mpa_frame; | 259 | struct ietf_mpa_v1 *mpa_frame; |
260 | struct ietf_mpa_v2 *mpa_v2_frame; | ||
261 | struct ietf_rtr_msg *rtr_msg; | ||
262 | int mpa_hdr_len; | ||
263 | int priv_data_len; | ||
254 | 264 | ||
255 | *type = NES_MPA_REQUEST_ACCEPT; | 265 | *type = NES_MPA_REQUEST_ACCEPT; |
256 | 266 | ||
257 | /* assume req frame is in tcp data payload */ | 267 | /* assume req frame is in tcp data payload */ |
258 | if (len < sizeof(struct ietf_mpa_frame)) { | 268 | if (len < sizeof(struct ietf_mpa_v1)) { |
259 | nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); | 269 | nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); |
260 | return -EINVAL; | 270 | return -EINVAL; |
261 | } | 271 | } |
262 | 272 | ||
263 | mpa_frame = (struct ietf_mpa_frame *)buffer; | 273 | /* points to the beginning of the frame, which could be MPA V1 or V2 */ |
264 | cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len); | 274 | mpa_frame = (struct ietf_mpa_v1 *)buffer; |
275 | mpa_hdr_len = sizeof(struct ietf_mpa_v1); | ||
276 | priv_data_len = ntohs(mpa_frame->priv_data_len); | ||
277 | |||
265 | /* make sure mpa private data len is less than 512 bytes */ | 278 | /* make sure mpa private data len is less than 512 bytes */ |
266 | if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) { | 279 | if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) { |
267 | nes_debug(NES_DBG_CM, "The received Length of Private" | 280 | nes_debug(NES_DBG_CM, "The received Length of Private" |
268 | " Data field exceeds 512 octets\n"); | 281 | " Data field exceeds 512 octets\n"); |
269 | return -EINVAL; | 282 | return -EINVAL; |
270 | } | 283 | } |
271 | /* | 284 | /* |
@@ -273,11 +286,22 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
273 | * received MPA version and MPA key information | 286 | * received MPA version and MPA key information |
274 | * | 287 | * |
275 | */ | 288 | */ |
276 | if (mpa_frame->rev != mpa_version) { | 289 | if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) { |
290 | nes_debug(NES_DBG_CM, "The received mpa version" | ||
291 | " is not supported\n"); | ||
292 | return -EINVAL; | ||
293 | } | ||
294 | /* | ||
295 | * backwards compatibility only | ||
296 | */ | ||
297 | if (mpa_frame->rev > cm_node->mpa_frame_rev) { | ||
277 | nes_debug(NES_DBG_CM, "The received mpa version" | 298 | nes_debug(NES_DBG_CM, "The received mpa version" |
278 | " is not interoperable\n"); | 299 | " is not interoperable\n"); |
279 | return -EINVAL; | 300 | return -EINVAL; |
301 | } else { | ||
302 | cm_node->mpa_frame_rev = mpa_frame->rev; | ||
280 | } | 303 | } |
304 | |||
281 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { | 305 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { |
282 | if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { | 306 | if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { |
283 | nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n"); | 307 | nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n"); |
@@ -290,25 +314,75 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
290 | } | 314 | } |
291 | } | 315 | } |
292 | 316 | ||
293 | if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) { | 317 | |
318 | if (priv_data_len + mpa_hdr_len != len) { | ||
294 | nes_debug(NES_DBG_CM, "The received ietf buffer was not the" | 319 | nes_debug(NES_DBG_CM, "The received ietf buffer was not the" |
295 | " right length (%x + %x != %x)\n", | 320 | " right length (%x + %x != %x)\n", |
296 | cm_node->mpa_frame_size, | 321 | priv_data_len, mpa_hdr_len, len); |
297 | (u32)sizeof(struct ietf_mpa_frame), len); | ||
298 | return -EINVAL; | 322 | return -EINVAL; |
299 | } | 323 | } |
300 | /* make sure it does not exceed the max size */ | 324 | /* make sure it does not exceed the max size */ |
301 | if (len > MAX_CM_BUFFER) { | 325 | if (len > MAX_CM_BUFFER) { |
302 | nes_debug(NES_DBG_CM, "The received ietf buffer was too large" | 326 | nes_debug(NES_DBG_CM, "The received ietf buffer was too large" |
303 | " (%x + %x != %x)\n", | 327 | " (%x + %x != %x)\n", |
304 | cm_node->mpa_frame_size, | 328 | priv_data_len, mpa_hdr_len, len); |
305 | (u32)sizeof(struct ietf_mpa_frame), len); | ||
306 | return -EINVAL; | 329 | return -EINVAL; |
307 | } | 330 | } |
308 | 331 | ||
332 | cm_node->mpa_frame_size = priv_data_len; | ||
333 | |||
334 | switch (mpa_frame->rev) { | ||
335 | case IETF_MPA_V2: { | ||
336 | u16 ird_size; | ||
337 | u16 ord_size; | ||
338 | mpa_v2_frame = (struct ietf_mpa_v2 *)buffer; | ||
339 | mpa_hdr_len += IETF_RTR_MSG_SIZE; | ||
340 | cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE; | ||
341 | rtr_msg = &mpa_v2_frame->rtr_msg; | ||
342 | |||
343 | /* parse rtr message */ | ||
344 | rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird); | ||
345 | rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord); | ||
346 | ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD; | ||
347 | ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD; | ||
348 | |||
349 | if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) { | ||
350 | /* send reset */ | ||
351 | return -EINVAL; | ||
352 | } | ||
353 | |||
354 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { | ||
355 | /* responder */ | ||
356 | if (cm_node->ord_size > ird_size) | ||
357 | cm_node->ord_size = ird_size; | ||
358 | } else { | ||
359 | /* initiator */ | ||
360 | if (cm_node->ord_size > ird_size) | ||
361 | cm_node->ord_size = ird_size; | ||
362 | |||
363 | if (cm_node->ird_size < ord_size) { | ||
364 | /* no resources available */ | ||
365 | /* send terminate message */ | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) { | ||
371 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | ||
372 | } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) { | ||
373 | cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; | ||
374 | } else { /* unsupported RDMA0 operation */ | ||
375 | return -EINVAL; | ||
376 | } | ||
377 | break; | ||
378 | } | ||
379 | case IETF_MPA_V1: | ||
380 | default: | ||
381 | break; | ||
382 | } | ||
383 | |||
309 | /* copy entire MPA frame to our cm_node's frame */ | 384 | /* copy entire MPA frame to our cm_node's frame */ |
310 | memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame), | 385 | memcpy(cm_node->mpa_frame_buf, buffer + mpa_hdr_len, cm_node->mpa_frame_size); |
311 | cm_node->mpa_frame_size); | ||
312 | 386 | ||
313 | if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) | 387 | if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) |
314 | *type = NES_MPA_REQUEST_REJECT; | 388 | *type = NES_MPA_REQUEST_REJECT; |
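
The V2 arm is where the new IRD/ORD negotiation happens: each side clamps its outbound read depth (ORD) to the peer's advertised inbound depth (IRD), and the initiator additionally aborts if its own IRD cannot absorb the peer's ORD. The arithmetic, isolated from the parsing:

    /* Sketch of the MPA V2 IRD/ORD clamp performed above. */
    static int negotiate_ird_ord(u16 *my_ord, u16 my_ird,
                                 u16 peer_ird, u16 peer_ord, int initiator)
    {
            if (*my_ord > peer_ird)
                    *my_ord = peer_ird;     /* never read deeper than the peer accepts */
            if (initiator && my_ird < peer_ord)
                    return -EINVAL;         /* peer would overrun us: terminate */
            return 0;
    }
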
@@ -321,8 +395,8 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
321 | * node info to build. | 395 | * node info to build. |
322 | */ | 396 | */ |
323 | static void form_cm_frame(struct sk_buff *skb, | 397 | static void form_cm_frame(struct sk_buff *skb, |
324 | struct nes_cm_node *cm_node, void *options, u32 optionsize, | 398 | struct nes_cm_node *cm_node, void *options, u32 optionsize, |
325 | void *data, u32 datasize, u8 flags) | 399 | void *data, u32 datasize, u8 flags) |
326 | { | 400 | { |
327 | struct tcphdr *tcph; | 401 | struct tcphdr *tcph; |
328 | struct iphdr *iph; | 402 | struct iphdr *iph; |
@@ -331,14 +405,14 @@ static void form_cm_frame(struct sk_buff *skb, | |||
331 | u16 packetsize = sizeof(*iph); | 405 | u16 packetsize = sizeof(*iph); |
332 | 406 | ||
333 | packetsize += sizeof(*tcph); | 407 | packetsize += sizeof(*tcph); |
334 | packetsize += optionsize + datasize; | 408 | packetsize += optionsize + datasize; |
335 | 409 | ||
410 | skb_trim(skb, 0); | ||
336 | memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph)); | 411 | memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph)); |
337 | 412 | ||
338 | skb->len = 0; | ||
339 | buf = skb_put(skb, packetsize + ETH_HLEN); | 413 | buf = skb_put(skb, packetsize + ETH_HLEN); |
340 | 414 | ||
341 | ethh = (struct ethhdr *) buf; | 415 | ethh = (struct ethhdr *)buf; |
342 | buf += ETH_HLEN; | 416 | buf += ETH_HLEN; |
343 | 417 | ||
344 | iph = (struct iphdr *)buf; | 418 | iph = (struct iphdr *)buf; |
@@ -346,7 +420,7 @@ static void form_cm_frame(struct sk_buff *skb, | |||
346 | tcph = (struct tcphdr *)buf; | 420 | tcph = (struct tcphdr *)buf; |
347 | skb_reset_mac_header(skb); | 421 | skb_reset_mac_header(skb); |
348 | skb_set_network_header(skb, ETH_HLEN); | 422 | skb_set_network_header(skb, ETH_HLEN); |
349 | skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph)); | 423 | skb_set_transport_header(skb, ETH_HLEN + sizeof(*iph)); |
350 | buf += sizeof(*tcph); | 424 | buf += sizeof(*tcph); |
351 | 425 | ||
352 | skb->ip_summed = CHECKSUM_PARTIAL; | 426 | skb->ip_summed = CHECKSUM_PARTIAL; |
@@ -359,14 +433,14 @@ static void form_cm_frame(struct sk_buff *skb, | |||
359 | ethh->h_proto = htons(0x0800); | 433 | ethh->h_proto = htons(0x0800); |
360 | 434 | ||
361 | iph->version = IPVERSION; | 435 | iph->version = IPVERSION; |
362 | iph->ihl = 5; /* 5 * 4-byte words, IP header len */ | 436 | iph->ihl = 5; /* 5 * 4-byte words, IP header len */ |
363 | iph->tos = 0; | 437 | iph->tos = 0; |
364 | iph->tot_len = htons(packetsize); | 438 | iph->tot_len = htons(packetsize); |
365 | iph->id = htons(++cm_node->tcp_cntxt.loc_id); | 439 | iph->id = htons(++cm_node->tcp_cntxt.loc_id); |
366 | 440 | ||
367 | iph->frag_off = htons(0x4000); | 441 | iph->frag_off = htons(0x4000); |
368 | iph->ttl = 0x40; | 442 | iph->ttl = 0x40; |
369 | iph->protocol = 0x06; /* IPPROTO_TCP */ | 443 | iph->protocol = 0x06; /* IPPROTO_TCP */ |
370 | 444 | ||
371 | iph->saddr = htonl(cm_node->loc_addr); | 445 | iph->saddr = htonl(cm_node->loc_addr); |
372 | iph->daddr = htonl(cm_node->rem_addr); | 446 | iph->daddr = htonl(cm_node->rem_addr); |
@@ -379,14 +453,16 @@ static void form_cm_frame(struct sk_buff *skb, | |||
379 | cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; | 453 | cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; |
380 | tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); | 454 | tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); |
381 | tcph->ack = 1; | 455 | tcph->ack = 1; |
382 | } else | 456 | } else { |
383 | tcph->ack_seq = 0; | 457 | tcph->ack_seq = 0; |
458 | } | ||
384 | 459 | ||
385 | if (flags & SET_SYN) { | 460 | if (flags & SET_SYN) { |
386 | cm_node->tcp_cntxt.loc_seq_num++; | 461 | cm_node->tcp_cntxt.loc_seq_num++; |
387 | tcph->syn = 1; | 462 | tcph->syn = 1; |
388 | } else | 463 | } else { |
389 | cm_node->tcp_cntxt.loc_seq_num += datasize; | 464 | cm_node->tcp_cntxt.loc_seq_num += datasize; |
465 | } | ||
390 | 466 | ||
391 | if (flags & SET_FIN) { | 467 | if (flags & SET_FIN) { |
392 | cm_node->tcp_cntxt.loc_seq_num++; | 468 | cm_node->tcp_cntxt.loc_seq_num++; |
@@ -407,10 +483,8 @@ static void form_cm_frame(struct sk_buff *skb, | |||
407 | 483 | ||
408 | skb_shinfo(skb)->nr_frags = 0; | 484 | skb_shinfo(skb)->nr_frags = 0; |
409 | cm_packets_created++; | 485 | cm_packets_created++; |
410 | |||
411 | } | 486 | } |
412 | 487 | ||
413 | |||
414 | /** | 488 | /** |
415 | * print_core - dump a cm core | 489 | * print_core - dump a cm core |
416 | */ | 490 | */ |
@@ -422,7 +496,7 @@ static void print_core(struct nes_cm_core *core) | |||
422 | return; | 496 | return; |
423 | nes_debug(NES_DBG_CM, "---------------------------------------------\n"); | 497 | nes_debug(NES_DBG_CM, "---------------------------------------------\n"); |
424 | 498 | ||
425 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); | 499 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); |
426 | 500 | ||
427 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); | 501 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); |
428 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); | 502 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); |
@@ -432,6 +506,147 @@ static void print_core(struct nes_cm_core *core) | |||
432 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); | 506 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); |
433 | } | 507 | } |
434 | 508 | ||
509 | /** | ||
510 | * cm_build_mpa_frame - build an MPA V1 or MPA V2 frame | ||
511 | */ | ||
512 | static int cm_build_mpa_frame(struct nes_cm_node *cm_node, u8 **start_buff, | ||
513 | u16 *buff_len, u8 *pci_mem, u8 mpa_key) | ||
514 | { | ||
515 | int ret = 0; | ||
516 | |||
517 | *start_buff = (pci_mem) ? pci_mem : &cm_node->mpa_frame_buf[0]; | ||
518 | |||
519 | switch (cm_node->mpa_frame_rev) { | ||
520 | case IETF_MPA_V1: | ||
521 | *start_buff = (u8 *)*start_buff + sizeof(struct ietf_rtr_msg); | ||
522 | *buff_len = sizeof(struct ietf_mpa_v1) + cm_node->mpa_frame_size; | ||
523 | build_mpa_v1(cm_node, *start_buff, mpa_key); | ||
524 | break; | ||
525 | case IETF_MPA_V2: | ||
526 | *buff_len = sizeof(struct ietf_mpa_v2) + cm_node->mpa_frame_size; | ||
527 | build_mpa_v2(cm_node, *start_buff, mpa_key); | ||
528 | break; | ||
529 | default: | ||
530 | ret = -EINVAL; | ||
531 | } | ||
532 | return ret; | ||
533 | } | ||
534 | |||
535 | /** | ||
536 | * build_mpa_v2 - build an MPA V2 frame | ||
537 | */ | ||
538 | static void build_mpa_v2(struct nes_cm_node *cm_node, | ||
539 | void *start_addr, u8 mpa_key) | ||
540 | { | ||
541 | struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr; | ||
542 | struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg; | ||
543 | |||
544 | /* initialize the fixed MPA V1 header fields of the frame */ | ||
545 | build_mpa_v1(cm_node, start_addr, mpa_key); | ||
546 | mpa_frame->flags |= IETF_MPA_V2_FLAG; /* set a bit to indicate MPA V2 */ | ||
547 | mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); | ||
548 | |||
549 | /* initialize RTR msg */ | ||
550 | rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? | ||
551 | IETF_NO_IRD_ORD : cm_node->ird_size; | ||
552 | rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? | ||
553 | IETF_NO_IRD_ORD : cm_node->ord_size; | ||
554 | |||
555 | rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER; | ||
556 | rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN; | ||
557 | |||
558 | switch (mpa_key) { | ||
559 | case MPA_KEY_REQUEST: | ||
560 | rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; | ||
561 | rtr_msg->ctrl_ord |= IETF_RDMA0_READ; | ||
562 | break; | ||
563 | case MPA_KEY_REPLY: | ||
564 | switch (cm_node->send_rdma0_op) { | ||
565 | case SEND_RDMA_WRITE_ZERO: | ||
566 | rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; | ||
567 | break; | ||
568 | case SEND_RDMA_READ_ZERO: | ||
569 | rtr_msg->ctrl_ord |= IETF_RDMA0_READ; | ||
570 | break; | ||
571 | } | ||
572 | } | ||
573 | rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird); | ||
574 | rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord); | ||
575 | } | ||
576 | |||
577 | /** | ||
578 | * build_mpa_v1 - build an MPA V1 frame | ||
579 | */ | ||
580 | static void build_mpa_v1(struct nes_cm_node *cm_node, void *start_addr, u8 mpa_key) | ||
581 | { | ||
582 | struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr; | ||
583 | |||
584 | switch (mpa_key) { | ||
585 | case MPA_KEY_REQUEST: | ||
586 | memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); | ||
587 | break; | ||
588 | case MPA_KEY_REPLY: | ||
589 | memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
590 | break; | ||
591 | } | ||
592 | mpa_frame->flags = IETF_MPA_FLAGS_CRC; | ||
593 | mpa_frame->rev = cm_node->mpa_frame_rev; | ||
594 | mpa_frame->priv_data_len = htons(cm_node->mpa_frame_size); | ||
595 | } | ||
596 | |||
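
Taken together, the two builders produce a fixed 20-byte MPA header, a 4-byte RTR message for V2 only, and then the private data. The offsets are implied by how the builders and parse_mpa() index the frame (a sketch for orientation, not a copy of the nes_cm.h definitions):

    /*
     * Frame layout implied by the builders (offsets in bytes):
     *
     *   0            16      17    18              20             20 or 24
     *   | key (16 B) | flags | rev | priv_data_len | rtr_msg (4B) | priv data...
     *                                                ^-- V2 only
     *
     * build_mpa_v2() counts the RTR message inside priv_data_len (note
     * the htons(IETF_RTR_MSG_SIZE) added above), and parse_mpa()
     * subtracts it back out on receive.
     */
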
597 | static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_addr) | ||
598 | { | ||
599 | u64 u64temp; | ||
600 | struct nes_qp *nesqp = *nesqp_addr; | ||
601 | struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0]; | ||
602 | |||
603 | u64temp = (unsigned long)nesqp; | ||
604 | u64temp |= NES_SW_CONTEXT_ALIGN >> 1; | ||
605 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); | ||
606 | |||
607 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; | ||
608 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; | ||
609 | |||
610 | switch (cm_node->send_rdma0_op) { | ||
611 | case SEND_RDMA_WRITE_ZERO: | ||
612 | nes_debug(NES_DBG_CM, "Sending first write.\n"); | ||
613 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
614 | cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); | ||
615 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; | ||
616 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; | ||
617 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; | ||
618 | break; | ||
619 | |||
620 | case SEND_RDMA_READ_ZERO: | ||
621 | default: | ||
622 | if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) { | ||
623 | printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n", | ||
624 | __func__, __LINE__, cm_node->send_rdma0_op); | ||
625 | WARN_ON(1); | ||
626 | } | ||
627 | nes_debug(NES_DBG_CM, "Sending first rdma operation.\n"); | ||
628 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
629 | cpu_to_le32(NES_IWARP_SQ_OP_RDMAR); | ||
630 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1; | ||
631 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = 0; | ||
632 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = 0; | ||
633 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = 1; | ||
634 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 1; | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | if (nesqp->sq_kmapped) { | ||
639 | nesqp->sq_kmapped = 0; | ||
640 | kunmap(nesqp->page); | ||
641 | } | ||
642 | |||
643 | /* use the reserved spot on the WQ for the extra first WQE */ | ||
644 | nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | ||
645 | NES_QPCONTEXT_ORDIRD_WRPDU | | ||
646 | NES_QPCONTEXT_ORDIRD_ALSMM)); | ||
647 | nesqp->skip_lsmm = 1; | ||
648 | nesqp->hwqp.sq_tail = 0; | ||
649 | } | ||
435 | 650 | ||
436 | /** | 651 | /** |
437 | * schedule_nes_timer | 652 | * schedule_nes_timer |
@@ -439,10 +654,10 @@ static void print_core(struct nes_cm_core *core) | |||
439 | * rem_ref_cm_node(cm_core, cm_node);add_ref_cm_node(cm_node); | 654 | * rem_ref_cm_node(cm_core, cm_node);add_ref_cm_node(cm_node); |
440 | */ | 655 | */ |
441 | int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | 656 | int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, |
442 | enum nes_timer_type type, int send_retrans, | 657 | enum nes_timer_type type, int send_retrans, |
443 | int close_when_complete) | 658 | int close_when_complete) |
444 | { | 659 | { |
445 | unsigned long flags; | 660 | unsigned long flags; |
446 | struct nes_cm_core *cm_core = cm_node->cm_core; | 661 | struct nes_cm_core *cm_core = cm_node->cm_core; |
447 | struct nes_timer_entry *new_send; | 662 | struct nes_timer_entry *new_send; |
448 | int ret = 0; | 663 | int ret = 0; |
@@ -463,7 +678,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
463 | new_send->close_when_complete = close_when_complete; | 678 | new_send->close_when_complete = close_when_complete; |
464 | 679 | ||
465 | if (type == NES_TIMER_TYPE_CLOSE) { | 680 | if (type == NES_TIMER_TYPE_CLOSE) { |
466 | new_send->timetosend += (HZ/10); | 681 | new_send->timetosend += (HZ / 10); |
467 | if (cm_node->recv_entry) { | 682 | if (cm_node->recv_entry) { |
468 | kfree(new_send); | 683 | kfree(new_send); |
469 | WARN_ON(1); | 684 | WARN_ON(1); |
@@ -484,7 +699,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
484 | ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); | 699 | ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); |
485 | if (ret != NETDEV_TX_OK) { | 700 | if (ret != NETDEV_TX_OK) { |
486 | nes_debug(NES_DBG_CM, "Error sending packet %p " | 701 | nes_debug(NES_DBG_CM, "Error sending packet %p " |
487 | "(jiffies = %lu)\n", new_send, jiffies); | 702 | "(jiffies = %lu)\n", new_send, jiffies); |
488 | new_send->timetosend = jiffies; | 703 | new_send->timetosend = jiffies; |
489 | ret = NETDEV_TX_OK; | 704 | ret = NETDEV_TX_OK; |
490 | } else { | 705 | } else { |
@@ -513,6 +728,7 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node) | |||
513 | struct iw_cm_id *cm_id = cm_node->cm_id; | 728 | struct iw_cm_id *cm_id = cm_node->cm_id; |
514 | enum nes_cm_node_state state = cm_node->state; | 729 | enum nes_cm_node_state state = cm_node->state; |
515 | cm_node->state = NES_CM_STATE_CLOSED; | 730 | cm_node->state = NES_CM_STATE_CLOSED; |
731 | |||
516 | switch (state) { | 732 | switch (state) { |
517 | case NES_CM_STATE_SYN_RCVD: | 733 | case NES_CM_STATE_SYN_RCVD: |
518 | case NES_CM_STATE_CLOSING: | 734 | case NES_CM_STATE_CLOSING: |
@@ -545,10 +761,10 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node) | |||
545 | spin_lock_irqsave(&nesqp->lock, qplockflags); | 761 | spin_lock_irqsave(&nesqp->lock, qplockflags); |
546 | if (nesqp->cm_id) { | 762 | if (nesqp->cm_id) { |
547 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " | 763 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " |
548 | "refcount = %d: HIT A " | 764 | "refcount = %d: HIT A " |
549 | "NES_TIMER_TYPE_CLOSE with something " | 765 | "NES_TIMER_TYPE_CLOSE with something " |
550 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, | 766 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, |
551 | atomic_read(&nesqp->refcount)); | 767 | atomic_read(&nesqp->refcount)); |
552 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | 768 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; |
553 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; | 769 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; |
554 | nesqp->ibqp_state = IB_QPS_ERR; | 770 | nesqp->ibqp_state = IB_QPS_ERR; |
@@ -557,10 +773,10 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node) | |||
557 | } else { | 773 | } else { |
558 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | 774 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); |
559 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " | 775 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " |
560 | "refcount = %d: HIT A " | 776 | "refcount = %d: HIT A " |
561 | "NES_TIMER_TYPE_CLOSE with nothing " | 777 | "NES_TIMER_TYPE_CLOSE with nothing " |
562 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, | 778 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, |
563 | atomic_read(&nesqp->refcount)); | 779 | atomic_read(&nesqp->refcount)); |
564 | } | 780 | } |
565 | } else if (rem_node) { | 781 | } else if (rem_node) { |
566 | /* TIME_WAIT state */ | 782 | /* TIME_WAIT state */ |
@@ -589,11 +805,12 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
589 | int ret = NETDEV_TX_OK; | 805 | int ret = NETDEV_TX_OK; |
590 | 806 | ||
591 | struct list_head timer_list; | 807 | struct list_head timer_list; |
808 | |||
592 | INIT_LIST_HEAD(&timer_list); | 809 | INIT_LIST_HEAD(&timer_list); |
593 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 810 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
594 | 811 | ||
595 | list_for_each_safe(list_node, list_core_temp, | 812 | list_for_each_safe(list_node, list_core_temp, |
596 | &cm_core->connected_nodes) { | 813 | &cm_core->connected_nodes) { |
597 | cm_node = container_of(list_node, struct nes_cm_node, list); | 814 | cm_node = container_of(list_node, struct nes_cm_node, list); |
598 | if ((cm_node->recv_entry) || (cm_node->send_entry)) { | 815 | if ((cm_node->recv_entry) || (cm_node->send_entry)) { |
599 | add_ref_cm_node(cm_node); | 816 | add_ref_cm_node(cm_node); |
@@ -604,18 +821,19 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
604 | 821 | ||
605 | list_for_each_safe(list_node, list_core_temp, &timer_list) { | 822 | list_for_each_safe(list_node, list_core_temp, &timer_list) { |
606 | cm_node = container_of(list_node, struct nes_cm_node, | 823 | cm_node = container_of(list_node, struct nes_cm_node, |
607 | timer_entry); | 824 | timer_entry); |
608 | recv_entry = cm_node->recv_entry; | 825 | recv_entry = cm_node->recv_entry; |
609 | 826 | ||
610 | if (recv_entry) { | 827 | if (recv_entry) { |
611 | if (time_after(recv_entry->timetosend, jiffies)) { | 828 | if (time_after(recv_entry->timetosend, jiffies)) { |
612 | if (nexttimeout > recv_entry->timetosend || | 829 | if (nexttimeout > recv_entry->timetosend || |
613 | !settimer) { | 830 | !settimer) { |
614 | nexttimeout = recv_entry->timetosend; | 831 | nexttimeout = recv_entry->timetosend; |
615 | settimer = 1; | 832 | settimer = 1; |
616 | } | 833 | } |
617 | } else | 834 | } else { |
618 | handle_recv_entry(cm_node, 1); | 835 | handle_recv_entry(cm_node, 1); |
836 | } | ||
619 | } | 837 | } |
620 | 838 | ||
621 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | 839 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); |
@@ -626,8 +844,8 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
626 | if (time_after(send_entry->timetosend, jiffies)) { | 844 | if (time_after(send_entry->timetosend, jiffies)) { |
627 | if (cm_node->state != NES_CM_STATE_TSA) { | 845 | if (cm_node->state != NES_CM_STATE_TSA) { |
628 | if ((nexttimeout > | 846 | if ((nexttimeout > |
629 | send_entry->timetosend) || | 847 | send_entry->timetosend) || |
630 | !settimer) { | 848 | !settimer) { |
631 | nexttimeout = | 849 | nexttimeout = |
632 | send_entry->timetosend; | 850 | send_entry->timetosend; |
633 | settimer = 1; | 851 | settimer = 1; |
@@ -639,13 +857,13 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
639 | } | 857 | } |
640 | 858 | ||
641 | if ((cm_node->state == NES_CM_STATE_TSA) || | 859 | if ((cm_node->state == NES_CM_STATE_TSA) || |
642 | (cm_node->state == NES_CM_STATE_CLOSED)) { | 860 | (cm_node->state == NES_CM_STATE_CLOSED)) { |
643 | free_retrans_entry(cm_node); | 861 | free_retrans_entry(cm_node); |
644 | break; | 862 | break; |
645 | } | 863 | } |
646 | 864 | ||
647 | if (!send_entry->retranscount || | 865 | if (!send_entry->retranscount || |
648 | !send_entry->retrycount) { | 866 | !send_entry->retrycount) { |
649 | cm_packets_dropped++; | 867 | cm_packets_dropped++; |
650 | free_retrans_entry(cm_node); | 868 | free_retrans_entry(cm_node); |
651 | 869 | ||
@@ -654,28 +872,28 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
654 | nes_retrans_expired(cm_node); | 872 | nes_retrans_expired(cm_node); |
655 | cm_node->state = NES_CM_STATE_CLOSED; | 873 | cm_node->state = NES_CM_STATE_CLOSED; |
656 | spin_lock_irqsave(&cm_node->retrans_list_lock, | 874 | spin_lock_irqsave(&cm_node->retrans_list_lock, |
657 | flags); | 875 | flags); |
658 | break; | 876 | break; |
659 | } | 877 | } |
660 | atomic_inc(&send_entry->skb->users); | 878 | atomic_inc(&send_entry->skb->users); |
661 | cm_packets_retrans++; | 879 | cm_packets_retrans++; |
662 | nes_debug(NES_DBG_CM, "Retransmitting send_entry %p " | 880 | nes_debug(NES_DBG_CM, "Retransmitting send_entry %p " |
663 | "for node %p, jiffies = %lu, time to send = " | 881 | "for node %p, jiffies = %lu, time to send = " |
664 | "%lu, retranscount = %u, send_entry->seq_num = " | 882 | "%lu, retranscount = %u, send_entry->seq_num = " |
665 | "0x%08X, cm_node->tcp_cntxt.rem_ack_num = " | 883 | "0x%08X, cm_node->tcp_cntxt.rem_ack_num = " |
666 | "0x%08X\n", send_entry, cm_node, jiffies, | 884 | "0x%08X\n", send_entry, cm_node, jiffies, |
667 | send_entry->timetosend, | 885 | send_entry->timetosend, |
668 | send_entry->retranscount, | 886 | send_entry->retranscount, |
669 | send_entry->seq_num, | 887 | send_entry->seq_num, |
670 | cm_node->tcp_cntxt.rem_ack_num); | 888 | cm_node->tcp_cntxt.rem_ack_num); |
671 | 889 | ||
672 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, | 890 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, |
673 | flags); | 891 | flags); |
674 | ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev); | 892 | ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev); |
675 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | 893 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); |
676 | if (ret != NETDEV_TX_OK) { | 894 | if (ret != NETDEV_TX_OK) { |
677 | nes_debug(NES_DBG_CM, "rexmit failed for " | 895 | nes_debug(NES_DBG_CM, "rexmit failed for " |
678 | "node=%p\n", cm_node); | 896 | "node=%p\n", cm_node); |
679 | cm_packets_bounced++; | 897 | cm_packets_bounced++; |
680 | send_entry->retrycount--; | 898 | send_entry->retrycount--; |
681 | nexttimeout = jiffies + NES_SHORT_TIME; | 899 | nexttimeout = jiffies + NES_SHORT_TIME; |
@@ -685,18 +903,18 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
685 | cm_packets_sent++; | 903 | cm_packets_sent++; |
686 | } | 904 | } |
687 | nes_debug(NES_DBG_CM, "Packet Sent: retrans count = " | 905 | nes_debug(NES_DBG_CM, "Packet Sent: retrans count = " |
688 | "%u, retry count = %u.\n", | 906 | "%u, retry count = %u.\n", |
689 | send_entry->retranscount, | 907 | send_entry->retranscount, |
690 | send_entry->retrycount); | 908 | send_entry->retrycount); |
691 | if (send_entry->send_retrans) { | 909 | if (send_entry->send_retrans) { |
692 | send_entry->retranscount--; | 910 | send_entry->retranscount--; |
693 | timetosend = (NES_RETRY_TIMEOUT << | 911 | timetosend = (NES_RETRY_TIMEOUT << |
694 | (NES_DEFAULT_RETRANS - send_entry->retranscount)); | 912 | (NES_DEFAULT_RETRANS - send_entry->retranscount)); |
695 | 913 | ||
696 | send_entry->timetosend = jiffies + | 914 | send_entry->timetosend = jiffies + |
697 | min(timetosend, NES_MAX_TIMEOUT); | 915 | min(timetosend, NES_MAX_TIMEOUT); |
698 | if (nexttimeout > send_entry->timetosend || | 916 | if (nexttimeout > send_entry->timetosend || |
699 | !settimer) { | 917 | !settimer) { |
700 | nexttimeout = send_entry->timetosend; | 918 | nexttimeout = send_entry->timetosend; |
701 | settimer = 1; | 919 | settimer = 1; |
702 | } | 920 | } |
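
A note on the arithmetic in the retransmit arm above: retranscount is decremented on every retransmission, so the shift (NES_DEFAULT_RETRANS - retranscount) grows by one each pass and the delay doubles until it is clamped by min(timetosend, NES_MAX_TIMEOUT). A stand-alone illustration of that capped exponential backoff, using made-up constants rather than the driver's:

    #include <stdio.h>

    #define RETRY_TIMEOUT     64UL  /* base delay in jiffies (illustrative) */
    #define DEFAULT_RETRANS    8U   /* starting retranscount (illustrative) */
    #define MAX_TIMEOUT     4096UL  /* delay cap (illustrative) */

    int main(void)
    {
        unsigned int retranscount = DEFAULT_RETRANS;

        while (retranscount) {
            unsigned long delay;

            retranscount--;  /* mirrors send_entry->retranscount-- */
            delay = RETRY_TIMEOUT << (DEFAULT_RETRANS - retranscount);
            if (delay > MAX_TIMEOUT)
                delay = MAX_TIMEOUT;  /* min(timetosend, NES_MAX_TIMEOUT) */
            printf("retranscount=%u delay=%lu\n", retranscount, delay);
        }
        return 0;
    }
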
@@ -705,11 +923,11 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
705 | close_when_complete = | 923 | close_when_complete = |
706 | send_entry->close_when_complete; | 924 | send_entry->close_when_complete; |
707 | nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n", | 925 | nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n", |
708 | cm_node, cm_node->state); | 926 | cm_node, cm_node->state); |
709 | free_retrans_entry(cm_node); | 927 | free_retrans_entry(cm_node); |
710 | if (close_when_complete) | 928 | if (close_when_complete) |
711 | rem_ref_cm_node(cm_node->cm_core, | 929 | rem_ref_cm_node(cm_node->cm_core, |
712 | cm_node); | 930 | cm_node); |
713 | } | 931 | } |
714 | } while (0); | 932 | } while (0); |
715 | 933 | ||
@@ -719,7 +937,7 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
719 | 937 | ||
720 | if (settimer) { | 938 | if (settimer) { |
721 | if (!timer_pending(&cm_core->tcp_timer)) { | 939 | if (!timer_pending(&cm_core->tcp_timer)) { |
722 | cm_core->tcp_timer.expires = nexttimeout; | 940 | cm_core->tcp_timer.expires = nexttimeout; |
723 | add_timer(&cm_core->tcp_timer); | 941 | add_timer(&cm_core->tcp_timer); |
724 | } | 942 | } |
725 | } | 943 | } |
@@ -730,13 +948,13 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
730 | * send_syn | 948 | * send_syn |
731 | */ | 949 | */ |
732 | static int send_syn(struct nes_cm_node *cm_node, u32 sendack, | 950 | static int send_syn(struct nes_cm_node *cm_node, u32 sendack, |
733 | struct sk_buff *skb) | 951 | struct sk_buff *skb) |
734 | { | 952 | { |
735 | int ret; | 953 | int ret; |
736 | int flags = SET_SYN; | 954 | int flags = SET_SYN; |
737 | char optionsbuffer[sizeof(struct option_mss) + | 955 | char optionsbuffer[sizeof(struct option_mss) + |
738 | sizeof(struct option_windowscale) + sizeof(struct option_base) + | 956 | sizeof(struct option_windowscale) + sizeof(struct option_base) + |
739 | TCP_OPTIONS_PADDING]; | 957 | TCP_OPTIONS_PADDING]; |
740 | 958 | ||
741 | int optionssize = 0; | 959 | int optionssize = 0; |
742 | /* Sending MSS option */ | 960 | /* Sending MSS option */ |
@@ -863,7 +1081,7 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
863 | * find_node - find a cm node that matches the reference cm node | 1081 | * find_node - find a cm node that matches the reference cm node |
864 | */ | 1082 | */ |
865 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | 1083 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, |
866 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) | 1084 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) |
867 | { | 1085 | { |
868 | unsigned long flags; | 1086 | unsigned long flags; |
869 | struct list_head *hte; | 1087 | struct list_head *hte; |
@@ -877,12 +1095,12 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
877 | list_for_each_entry(cm_node, hte, list) { | 1095 | list_for_each_entry(cm_node, hte, list) { |
878 | /* compare quad, return node handle if a match */ | 1096 | /* compare quad, return node handle if a match */ |
879 | nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", | 1097 | nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", |
880 | cm_node->loc_addr, cm_node->loc_port, | 1098 | cm_node->loc_addr, cm_node->loc_port, |
881 | loc_addr, loc_port, | 1099 | loc_addr, loc_port, |
882 | cm_node->rem_addr, cm_node->rem_port, | 1100 | cm_node->rem_addr, cm_node->rem_port, |
883 | rem_addr, rem_port); | 1101 | rem_addr, rem_port); |
884 | if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) && | 1102 | if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) && |
885 | (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) { | 1103 | (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) { |
886 | add_ref_cm_node(cm_node); | 1104 | add_ref_cm_node(cm_node); |
887 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | 1105 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); |
888 | return cm_node; | 1106 | return cm_node; |
@@ -899,7 +1117,7 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
899 | * find_listener - find a cm node listening on this addr-port pair | 1117 | * find_listener - find a cm node listening on this addr-port pair |
900 | */ | 1118 | */ |
901 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | 1119 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, |
902 | nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) | 1120 | nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) |
903 | { | 1121 | { |
904 | unsigned long flags; | 1122 | unsigned long flags; |
905 | struct nes_cm_listener *listen_node; | 1123 | struct nes_cm_listener *listen_node; |
@@ -909,9 +1127,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | |||
909 | list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { | 1127 | list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { |
910 | /* compare node pair, return node handle if a match */ | 1128 | /* compare node pair, return node handle if a match */ |
911 | if (((listen_node->loc_addr == dst_addr) || | 1129 | if (((listen_node->loc_addr == dst_addr) || |
912 | listen_node->loc_addr == 0x00000000) && | 1130 | listen_node->loc_addr == 0x00000000) && |
913 | (listen_node->loc_port == dst_port) && | 1131 | (listen_node->loc_port == dst_port) && |
914 | (listener_state & listen_node->listener_state)) { | 1132 | (listener_state & listen_node->listener_state)) { |
915 | atomic_inc(&listen_node->ref_count); | 1133 | atomic_inc(&listen_node->ref_count); |
916 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1134 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
917 | return listen_node; | 1135 | return listen_node; |
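
The match rule in find_listener() above is worth spelling out: the port must match exactly, the bound address matches either exactly or via the 0.0.0.0 wildcard, and listener_state is tested as a bit mask, which is how NES_CM_LISTENER_EITHER_STATE can match both active and passive listeners. A minimal sketch of that predicate with invented types:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented, simplified stand-in for the driver's listener type. */
    struct listener { uint32_t loc_addr; uint16_t loc_port; unsigned int state; };

    static int listener_matches(const struct listener *l, uint32_t dst_addr,
                                uint16_t dst_port, unsigned int wanted_states)
    {
        return (l->loc_addr == dst_addr || l->loc_addr == 0x00000000) &&
               l->loc_port == dst_port &&
               (l->state & wanted_states);  /* states act as bit flags */
    }

    int main(void)
    {
        struct listener wildcard = { 0x00000000, 80, 0x1 };

        /* a wildcard-bound listener matches any destination address */
        printf("%d\n", listener_matches(&wildcard, 0x0a000001, 80, 0x1));
        return 0;
    }
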
@@ -936,7 +1154,7 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node | |||
936 | return -EINVAL; | 1154 | return -EINVAL; |
937 | 1155 | ||
938 | nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", | 1156 | nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", |
939 | cm_node); | 1157 | cm_node); |
940 | 1158 | ||
941 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 1159 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
942 | 1160 | ||
@@ -955,7 +1173,7 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node | |||
955 | * mini_cm_dec_refcnt_listen | 1173 | * mini_cm_dec_refcnt_listen |
956 | */ | 1174 | */ |
957 | static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | 1175 | static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, |
958 | struct nes_cm_listener *listener, int free_hanging_nodes) | 1176 | struct nes_cm_listener *listener, int free_hanging_nodes) |
959 | { | 1177 | { |
960 | int ret = -EINVAL; | 1178 | int ret = -EINVAL; |
961 | int err = 0; | 1179 | int err = 0; |
@@ -966,8 +1184,8 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
966 | struct list_head reset_list; | 1184 | struct list_head reset_list; |
967 | 1185 | ||
968 | nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, " | 1186 | nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, " |
969 | "refcnt=%d\n", listener, free_hanging_nodes, | 1187 | "refcnt=%d\n", listener, free_hanging_nodes, |
970 | atomic_read(&listener->ref_count)); | 1188 | atomic_read(&listener->ref_count)); |
971 | /* free non-accelerated child nodes for this listener */ | 1189 | /* free non-accelerated child nodes for this listener */ |
972 | INIT_LIST_HEAD(&reset_list); | 1190 | INIT_LIST_HEAD(&reset_list); |
973 | if (free_hanging_nodes) { | 1191 | if (free_hanging_nodes) { |
@@ -975,7 +1193,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
975 | list_for_each_safe(list_pos, list_temp, | 1193 | list_for_each_safe(list_pos, list_temp, |
976 | &g_cm_core->connected_nodes) { | 1194 | &g_cm_core->connected_nodes) { |
977 | cm_node = container_of(list_pos, struct nes_cm_node, | 1195 | cm_node = container_of(list_pos, struct nes_cm_node, |
978 | list); | 1196 | list); |
979 | if ((cm_node->listener == listener) && | 1197 | if ((cm_node->listener == listener) && |
980 | (!cm_node->accelerated)) { | 1198 | (!cm_node->accelerated)) { |
981 | add_ref_cm_node(cm_node); | 1199 | add_ref_cm_node(cm_node); |
@@ -987,7 +1205,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
987 | 1205 | ||
988 | list_for_each_safe(list_pos, list_temp, &reset_list) { | 1206 | list_for_each_safe(list_pos, list_temp, &reset_list) { |
989 | cm_node = container_of(list_pos, struct nes_cm_node, | 1207 | cm_node = container_of(list_pos, struct nes_cm_node, |
990 | reset_entry); | 1208 | reset_entry); |
991 | { | 1209 | { |
992 | struct nes_cm_node *loopback = cm_node->loopbackpartner; | 1210 | struct nes_cm_node *loopback = cm_node->loopbackpartner; |
993 | enum nes_cm_node_state old_state; | 1211 | enum nes_cm_node_state old_state; |
@@ -999,7 +1217,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
999 | err = send_reset(cm_node, NULL); | 1217 | err = send_reset(cm_node, NULL); |
1000 | if (err) { | 1218 | if (err) { |
1001 | cm_node->state = | 1219 | cm_node->state = |
1002 | NES_CM_STATE_CLOSED; | 1220 | NES_CM_STATE_CLOSED; |
1003 | WARN_ON(1); | 1221 | WARN_ON(1); |
1004 | } else { | 1222 | } else { |
1005 | old_state = cm_node->state; | 1223 | old_state = cm_node->state; |
@@ -1044,10 +1262,9 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1044 | 1262 | ||
1045 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1263 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
1046 | 1264 | ||
1047 | if (listener->nesvnic) { | 1265 | if (listener->nesvnic) |
1048 | nes_manage_apbvt(listener->nesvnic, listener->loc_port, | 1266 | nes_manage_apbvt(listener->nesvnic, listener->loc_port, |
1049 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); | 1267 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); |
1050 | } | ||
1051 | 1268 | ||
1052 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 1269 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
1053 | 1270 | ||
@@ -1061,8 +1278,8 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1061 | if (listener) { | 1278 | if (listener) { |
1062 | if (atomic_read(&listener->pend_accepts_cnt) > 0) | 1279 | if (atomic_read(&listener->pend_accepts_cnt) > 0) |
1063 | nes_debug(NES_DBG_CM, "destroying listener (%p)" | 1280 | nes_debug(NES_DBG_CM, "destroying listener (%p)" |
1064 | " with non-zero pending accepts=%u\n", | 1281 | " with non-zero pending accepts=%u\n", |
1065 | listener, atomic_read(&listener->pend_accepts_cnt)); | 1282 | listener, atomic_read(&listener->pend_accepts_cnt)); |
1066 | } | 1283 | } |
1067 | 1284 | ||
1068 | return ret; | 1285 | return ret; |
@@ -1073,7 +1290,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1073 | * mini_cm_del_listen | 1290 | * mini_cm_del_listen |
1074 | */ | 1291 | */ |
1075 | static int mini_cm_del_listen(struct nes_cm_core *cm_core, | 1292 | static int mini_cm_del_listen(struct nes_cm_core *cm_core, |
1076 | struct nes_cm_listener *listener) | 1293 | struct nes_cm_listener *listener) |
1077 | { | 1294 | { |
1078 | listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE; | 1295 | listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE; |
1079 | listener->cm_id = NULL; /* going to be destroyed pretty soon */ | 1296 | listener->cm_id = NULL; /* going to be destroyed pretty soon */ |
@@ -1085,9 +1302,10 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core, | |||
1085 | * mini_cm_accelerated | 1302 | * mini_cm_accelerated |
1086 | */ | 1303 | */ |
1087 | static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, | 1304 | static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, |
1088 | struct nes_cm_node *cm_node) | 1305 | struct nes_cm_node *cm_node) |
1089 | { | 1306 | { |
1090 | u32 was_timer_set; | 1307 | u32 was_timer_set; |
1308 | |||
1091 | cm_node->accelerated = 1; | 1309 | cm_node->accelerated = 1; |
1092 | 1310 | ||
1093 | if (cm_node->accept_pend) { | 1311 | if (cm_node->accept_pend) { |
@@ -1121,7 +1339,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1121 | rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); | 1339 | rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); |
1122 | if (IS_ERR(rt)) { | 1340 | if (IS_ERR(rt)) { |
1123 | printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", | 1341 | printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", |
1124 | __func__, dst_ip); | 1342 | __func__, dst_ip); |
1125 | return rc; | 1343 | return rc; |
1126 | } | 1344 | } |
1127 | 1345 | ||
@@ -1139,7 +1357,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1139 | 1357 | ||
1140 | if (arpindex >= 0) { | 1358 | if (arpindex >= 0) { |
1141 | if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, | 1359 | if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, |
1142 | neigh->ha, ETH_ALEN)){ | 1360 | neigh->ha, ETH_ALEN)) { |
1143 | /* Mac address same as in nes_arp_table */ | 1361 | /* Mac address same as in nes_arp_table */ |
1144 | neigh_release(neigh); | 1362 | neigh_release(neigh); |
1145 | ip_rt_put(rt); | 1363 | ip_rt_put(rt); |
@@ -1147,8 +1365,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1147 | } | 1365 | } |
1148 | 1366 | ||
1149 | nes_manage_arp_cache(nesvnic->netdev, | 1367 | nes_manage_arp_cache(nesvnic->netdev, |
1150 | nesadapter->arp_table[arpindex].mac_addr, | 1368 | nesadapter->arp_table[arpindex].mac_addr, |
1151 | dst_ip, NES_ARP_DELETE); | 1369 | dst_ip, NES_ARP_DELETE); |
1152 | } | 1370 | } |
1153 | 1371 | ||
1154 | nes_manage_arp_cache(nesvnic->netdev, neigh->ha, | 1372 | nes_manage_arp_cache(nesvnic->netdev, neigh->ha, |
@@ -1170,8 +1388,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1170 | * make_cm_node - create a new instance of a cm node | 1388 | * make_cm_node - create a new instance of a cm node |
1171 | */ | 1389 | */ |
1172 | static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | 1390 | static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, |
1173 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info, | 1391 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info, |
1174 | struct nes_cm_listener *listener) | 1392 | struct nes_cm_listener *listener) |
1175 | { | 1393 | { |
1176 | struct nes_cm_node *cm_node; | 1394 | struct nes_cm_node *cm_node; |
1177 | struct timespec ts; | 1395 | struct timespec ts; |
@@ -1190,7 +1408,12 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1190 | cm_node->rem_addr = cm_info->rem_addr; | 1408 | cm_node->rem_addr = cm_info->rem_addr; |
1191 | cm_node->loc_port = cm_info->loc_port; | 1409 | cm_node->loc_port = cm_info->loc_port; |
1192 | cm_node->rem_port = cm_info->rem_port; | 1410 | cm_node->rem_port = cm_info->rem_port; |
1193 | cm_node->send_write0 = send_first; | 1411 | |
1412 | cm_node->mpa_frame_rev = mpa_version; | ||
1413 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | ||
1414 | cm_node->ird_size = IETF_NO_IRD_ORD; | ||
1415 | cm_node->ord_size = IETF_NO_IRD_ORD; | ||
1416 | |||
1194 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n", | 1417 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n", |
1195 | &cm_node->loc_addr, cm_node->loc_port, | 1418 | &cm_node->loc_addr, cm_node->loc_port, |
1196 | &cm_node->rem_addr, cm_node->rem_port); | 1419 | &cm_node->rem_addr, cm_node->rem_port); |
@@ -1200,7 +1423,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1200 | memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN); | 1423 | memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN); |
1201 | 1424 | ||
1202 | nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener, | 1425 | nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener, |
1203 | cm_node->cm_id); | 1426 | cm_node->cm_id); |
1204 | 1427 | ||
1205 | spin_lock_init(&cm_node->retrans_list_lock); | 1428 | spin_lock_init(&cm_node->retrans_list_lock); |
1206 | 1429 | ||
@@ -1211,11 +1434,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1211 | cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID; | 1434 | cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID; |
1212 | cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; | 1435 | cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; |
1213 | cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >> | 1436 | cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >> |
1214 | NES_CM_DEFAULT_RCV_WND_SCALE; | 1437 | NES_CM_DEFAULT_RCV_WND_SCALE; |
1215 | ts = current_kernel_time(); | 1438 | ts = current_kernel_time(); |
1216 | cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec); | 1439 | cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec); |
1217 | cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) - | 1440 | cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) - |
1218 | sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN; | 1441 | sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN; |
1219 | cm_node->tcp_cntxt.rcv_nxt = 0; | 1442 | cm_node->tcp_cntxt.rcv_nxt = 0; |
1220 | /* get a unique session ID, add thread_id to an upcounter to handle race */ | 1443 | /* get a unique session ID, add thread_id to an upcounter to handle race */ |
1221 | atomic_inc(&cm_core->node_cnt); | 1444 | atomic_inc(&cm_core->node_cnt); |
@@ -1231,12 +1454,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1231 | cm_node->loopbackpartner = NULL; | 1454 | cm_node->loopbackpartner = NULL; |
1232 | 1455 | ||
1233 | /* get the mac addr for the remote node */ | 1456 | /* get the mac addr for the remote node */ |
1234 | if (ipv4_is_loopback(htonl(cm_node->rem_addr))) | 1457 | if (ipv4_is_loopback(htonl(cm_node->rem_addr))) { |
1235 | arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); | 1458 | arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); |
1236 | else { | 1459 | } else { |
1237 | oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); | 1460 | oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); |
1238 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex); | 1461 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex); |
1239 | |||
1240 | } | 1462 | } |
1241 | if (arpindex < 0) { | 1463 | if (arpindex < 0) { |
1242 | kfree(cm_node); | 1464 | kfree(cm_node); |
@@ -1269,7 +1491,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node) | |||
1269 | * rem_ref_cm_node - destroy an instance of a cm node | 1491 | * rem_ref_cm_node - destroy an instance of a cm node |
1270 | */ | 1492 | */ |
1271 | static int rem_ref_cm_node(struct nes_cm_core *cm_core, | 1493 | static int rem_ref_cm_node(struct nes_cm_core *cm_core, |
1272 | struct nes_cm_node *cm_node) | 1494 | struct nes_cm_node *cm_node) |
1273 | { | 1495 | { |
1274 | unsigned long flags; | 1496 | unsigned long flags; |
1275 | struct nes_qp *nesqp; | 1497 | struct nes_qp *nesqp; |
@@ -1300,9 +1522,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, | |||
1300 | } else { | 1522 | } else { |
1301 | if (cm_node->apbvt_set && cm_node->nesvnic) { | 1523 | if (cm_node->apbvt_set && cm_node->nesvnic) { |
1302 | nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, | 1524 | nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, |
1303 | PCI_FUNC( | 1525 | PCI_FUNC( |
1304 | cm_node->nesvnic->nesdev->pcidev->devfn), | 1526 | cm_node->nesvnic->nesdev->pcidev->devfn), |
1305 | NES_MANAGE_APBVT_DEL); | 1527 | NES_MANAGE_APBVT_DEL); |
1306 | } | 1528 | } |
1307 | } | 1529 | } |
1308 | 1530 | ||
@@ -1323,7 +1545,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, | |||
1323 | * process_options | 1545 | * process_options |
1324 | */ | 1546 | */ |
1325 | static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, | 1547 | static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, |
1326 | u32 optionsize, u32 syn_packet) | 1548 | u32 optionsize, u32 syn_packet) |
1327 | { | 1549 | { |
1328 | u32 tmp; | 1550 | u32 tmp; |
1329 | u32 offset = 0; | 1551 | u32 offset = 0; |
@@ -1341,15 +1563,15 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, | |||
1341 | continue; | 1563 | continue; |
1342 | case OPTION_NUMBER_MSS: | 1564 | case OPTION_NUMBER_MSS: |
1343 | nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d " | 1565 | nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d " |
1344 | "Size: %d\n", __func__, | 1566 | "Size: %d\n", __func__, |
1345 | all_options->as_mss.length, offset, optionsize); | 1567 | all_options->as_mss.length, offset, optionsize); |
1346 | got_mss_option = 1; | 1568 | got_mss_option = 1; |
1347 | if (all_options->as_mss.length != 4) { | 1569 | if (all_options->as_mss.length != 4) { |
1348 | return 1; | 1570 | return 1; |
1349 | } else { | 1571 | } else { |
1350 | tmp = ntohs(all_options->as_mss.mss); | 1572 | tmp = ntohs(all_options->as_mss.mss); |
1351 | if (tmp > 0 && tmp < | 1573 | if (tmp > 0 && tmp < |
1352 | cm_node->tcp_cntxt.mss) | 1574 | cm_node->tcp_cntxt.mss) |
1353 | cm_node->tcp_cntxt.mss = tmp; | 1575 | cm_node->tcp_cntxt.mss = tmp; |
1354 | } | 1576 | } |
1355 | break; | 1577 | break; |
@@ -1357,12 +1579,9 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, | |||
1357 | cm_node->tcp_cntxt.snd_wscale = | 1579 | cm_node->tcp_cntxt.snd_wscale = |
1358 | all_options->as_windowscale.shiftcount; | 1580 | all_options->as_windowscale.shiftcount; |
1359 | break; | 1581 | break; |
1360 | case OPTION_NUMBER_WRITE0: | ||
1361 | cm_node->send_write0 = 1; | ||
1362 | break; | ||
1363 | default: | 1582 | default: |
1364 | nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n", | 1583 | nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n", |
1365 | all_options->as_base.optionnum); | 1584 | all_options->as_base.optionnum); |
1366 | break; | 1585 | break; |
1367 | } | 1586 | } |
1368 | offset += all_options->as_base.length; | 1587 | offset += all_options->as_base.length; |
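
Two behaviours of process_options() deserve a call-out: an MSS option whose length is not 4 makes the function return non-zero, which the caller turns into a RESET, and a well-formed peer MSS can only lower, never raise, the MSS the driver computed from its own MTU. A hedged stand-alone sketch of just the MSS branch (the helper name is invented):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 on success, 1 to signal "send RESET", as process_options does. */
    static int apply_mss_option(uint32_t *mss, uint8_t opt_len, uint16_t mss_be)
    {
        uint32_t tmp;

        if (opt_len != 4)
            return 1;           /* malformed MSS option */
        tmp = ntohs(mss_be);
        if (tmp > 0 && tmp < *mss)
            *mss = tmp;         /* peer advertises a smaller MSS: clamp down */
        return 0;
    }

    int main(void)
    {
        uint32_t mss = 1460;

        apply_mss_option(&mss, 4, htons(536));
        printf("mss=%u\n", mss);  /* prints 536 */
        return 0;
    }
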
@@ -1381,8 +1600,8 @@ static void drop_packet(struct sk_buff *skb) | |||
1381 | static void handle_fin_pkt(struct nes_cm_node *cm_node) | 1600 | static void handle_fin_pkt(struct nes_cm_node *cm_node) |
1382 | { | 1601 | { |
1383 | nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " | 1602 | nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " |
1384 | "refcnt=%d\n", cm_node, cm_node->state, | 1603 | "refcnt=%d\n", cm_node, cm_node->state, |
1385 | atomic_read(&cm_node->ref_count)); | 1604 | atomic_read(&cm_node->ref_count)); |
1386 | switch (cm_node->state) { | 1605 | switch (cm_node->state) { |
1387 | case NES_CM_STATE_SYN_RCVD: | 1606 | case NES_CM_STATE_SYN_RCVD: |
1388 | case NES_CM_STATE_SYN_SENT: | 1607 | case NES_CM_STATE_SYN_SENT: |
@@ -1448,7 +1667,20 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1448 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " | 1667 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " |
1449 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, | 1668 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, |
1450 | cm_node->listener, cm_node->state); | 1669 | cm_node->listener, cm_node->state); |
1451 | active_open_err(cm_node, skb, reset); | 1670 | switch (cm_node->mpa_frame_rev) { |
1671 | case IETF_MPA_V2: | ||
1672 | cm_node->mpa_frame_rev = IETF_MPA_V1; | ||
1673 | /* send a SYN and go to the SYN_SENT state */ | ||
1674 | cm_node->state = NES_CM_STATE_SYN_SENT; | ||
1675 | if (send_syn(cm_node, 0, NULL)) { | ||
1676 | active_open_err(cm_node, skb, reset); | ||
1677 | } | ||
1678 | break; | ||
1679 | case IETF_MPA_V1: | ||
1680 | default: | ||
1681 | active_open_err(cm_node, skb, reset); | ||
1682 | break; | ||
1683 | } | ||
1452 | break; | 1684 | break; |
1453 | case NES_CM_STATE_MPAREQ_RCVD: | 1685 | case NES_CM_STATE_MPAREQ_RCVD: |
1454 | atomic_inc(&cm_node->passive_state); | 1686 | atomic_inc(&cm_node->passive_state); |
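
The new switch in handle_rst_pkt() above is the heart of the MPA v2 negotiation fallback: a peer that resets the TCP connection while our MPA v2 request is outstanding is assumed not to speak v2, so the node downgrades to IETF_MPA_V1 and restarts the connection from SYN_SENT rather than aborting. A minimal state-machine sketch of that rule (the types and the send_syn() stub are illustrative, not the driver's API):

    #include <stdio.h>

    enum mpa_rev { IETF_MPA_V1 = 1, IETF_MPA_V2 = 2 };
    enum cm_state { SYN_SENT, MPAREQ_SENT, CLOSED };

    struct node { enum mpa_rev rev; enum cm_state state; };

    static int send_syn(struct node *n)
    {
        (void)n;
        return 0;                   /* pretend the SYN went out successfully */
    }

    /* RST received while our MPA request is outstanding. */
    static void on_rst_in_mpareq_sent(struct node *n)
    {
        if (n->rev == IETF_MPA_V2) {
            n->rev = IETF_MPA_V1;   /* assume the peer cannot speak v2 */
            n->state = SYN_SENT;    /* redo the connection from scratch */
            if (send_syn(n))
                n->state = CLOSED;  /* the active_open_err() path */
        } else {
            n->state = CLOSED;      /* already at v1: give up */
        }
    }

    int main(void)
    {
        struct node n = { IETF_MPA_V2, MPAREQ_SENT };

        on_rst_in_mpareq_sent(&n);
        printf("rev=%d state=%d\n", n.rev, n.state);  /* rev=1 state=0 */
        return 0;
    }
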
@@ -1484,21 +1716,21 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1484 | 1716 | ||
1485 | static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) | 1717 | static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) |
1486 | { | 1718 | { |
1487 | 1719 | int ret = 0; | |
1488 | int ret = 0; | ||
1489 | int datasize = skb->len; | 1720 | int datasize = skb->len; |
1490 | u8 *dataloc = skb->data; | 1721 | u8 *dataloc = skb->data; |
1491 | 1722 | ||
1492 | enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN; | 1723 | enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN; |
1493 | u32 res_type; | 1724 | u32 res_type; |
1725 | |||
1494 | ret = parse_mpa(cm_node, dataloc, &res_type, datasize); | 1726 | ret = parse_mpa(cm_node, dataloc, &res_type, datasize); |
1495 | if (ret) { | 1727 | if (ret) { |
1496 | nes_debug(NES_DBG_CM, "didn't like MPA Request\n"); | 1728 | nes_debug(NES_DBG_CM, "didn't like MPA Request\n"); |
1497 | if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) { | 1729 | if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) { |
1498 | nes_debug(NES_DBG_CM, "%s[%u] create abort for " | 1730 | nes_debug(NES_DBG_CM, "%s[%u] create abort for " |
1499 | "cm_node=%p listener=%p state=%d\n", __func__, | 1731 | "cm_node=%p listener=%p state=%d\n", __func__, |
1500 | __LINE__, cm_node, cm_node->listener, | 1732 | __LINE__, cm_node, cm_node->listener, |
1501 | cm_node->state); | 1733 | cm_node->state); |
1502 | active_open_err(cm_node, skb, 1); | 1734 | active_open_err(cm_node, skb, 1); |
1503 | } else { | 1735 | } else { |
1504 | passive_open_err(cm_node, skb, 1); | 1736 | passive_open_err(cm_node, skb, 1); |
@@ -1508,16 +1740,15 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1508 | 1740 | ||
1509 | switch (cm_node->state) { | 1741 | switch (cm_node->state) { |
1510 | case NES_CM_STATE_ESTABLISHED: | 1742 | case NES_CM_STATE_ESTABLISHED: |
1511 | if (res_type == NES_MPA_REQUEST_REJECT) { | 1743 | if (res_type == NES_MPA_REQUEST_REJECT) |
1512 | /* BIG problem: we are receiving the MPA request on | 1744 | /* BIG problem: we are receiving the MPA request on |
1513 | * a passive open, so it should never be a REJECT; | 1745 | * a passive open, so it should never be a REJECT; |
1514 | * a reject is only valid on an active open. */ | 1746 | * a reject is only valid on an active open. */ |
1515 | WARN_ON(1); | 1747 | WARN_ON(1); |
1516 | } | ||
1517 | cm_node->state = NES_CM_STATE_MPAREQ_RCVD; | 1748 | cm_node->state = NES_CM_STATE_MPAREQ_RCVD; |
1518 | type = NES_CM_EVENT_MPA_REQ; | 1749 | type = NES_CM_EVENT_MPA_REQ; |
1519 | atomic_set(&cm_node->passive_state, | 1750 | atomic_set(&cm_node->passive_state, |
1520 | NES_PASSIVE_STATE_INDICATED); | 1751 | NES_PASSIVE_STATE_INDICATED); |
1521 | break; | 1752 | break; |
1522 | case NES_CM_STATE_MPAREQ_SENT: | 1753 | case NES_CM_STATE_MPAREQ_SENT: |
1523 | cleanup_retrans_entry(cm_node); | 1754 | cleanup_retrans_entry(cm_node); |
@@ -1544,8 +1775,8 @@ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1544 | case NES_CM_STATE_SYN_SENT: | 1775 | case NES_CM_STATE_SYN_SENT: |
1545 | case NES_CM_STATE_MPAREQ_SENT: | 1776 | case NES_CM_STATE_MPAREQ_SENT: |
1546 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " | 1777 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " |
1547 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, | 1778 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, |
1548 | cm_node->listener, cm_node->state); | 1779 | cm_node->listener, cm_node->state); |
1549 | active_open_err(cm_node, skb, 1); | 1780 | active_open_err(cm_node, skb, 1); |
1550 | break; | 1781 | break; |
1551 | case NES_CM_STATE_ESTABLISHED: | 1782 | case NES_CM_STATE_ESTABLISHED: |
@@ -1559,11 +1790,11 @@ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1559 | } | 1790 | } |
1560 | 1791 | ||
1561 | static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 1792 | static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph, |
1562 | struct sk_buff *skb) | 1793 | struct sk_buff *skb) |
1563 | { | 1794 | { |
1564 | int err; | 1795 | int err; |
1565 | 1796 | ||
1566 | err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num))? 0 : 1; | 1797 | err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num)) ? 0 : 1; |
1567 | if (err) | 1798 | if (err) |
1568 | active_open_err(cm_node, skb, 1); | 1799 | active_open_err(cm_node, skb, 1); |
1569 | 1800 | ||
@@ -1571,7 +1802,7 @@ static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1571 | } | 1802 | } |
1572 | 1803 | ||
1573 | static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 1804 | static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, |
1574 | struct sk_buff *skb) | 1805 | struct sk_buff *skb) |
1575 | { | 1806 | { |
1576 | int err = 0; | 1807 | int err = 0; |
1577 | u32 seq; | 1808 | u32 seq; |
@@ -1579,21 +1810,22 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1579 | u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; | 1810 | u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; |
1580 | u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; | 1811 | u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; |
1581 | u32 rcv_wnd; | 1812 | u32 rcv_wnd; |
1813 | |||
1582 | seq = ntohl(tcph->seq); | 1814 | seq = ntohl(tcph->seq); |
1583 | ack_seq = ntohl(tcph->ack_seq); | 1815 | ack_seq = ntohl(tcph->ack_seq); |
1584 | rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; | 1816 | rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; |
1585 | if (ack_seq != loc_seq_num) | 1817 | if (ack_seq != loc_seq_num) |
1586 | err = 1; | 1818 | err = 1; |
1587 | else if (!between(seq, rcv_nxt, (rcv_nxt+rcv_wnd))) | 1819 | else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd))) |
1588 | err = 1; | 1820 | err = 1; |
1589 | if (err) { | 1821 | if (err) { |
1590 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " | 1822 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " |
1591 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, | 1823 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, |
1592 | cm_node->listener, cm_node->state); | 1824 | cm_node->listener, cm_node->state); |
1593 | indicate_pkt_err(cm_node, skb); | 1825 | indicate_pkt_err(cm_node, skb); |
1594 | nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X " | 1826 | nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X " |
1595 | "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt, | 1827 | "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt, |
1596 | rcv_wnd); | 1828 | rcv_wnd); |
1597 | } | 1829 | } |
1598 | return err; | 1830 | return err; |
1599 | } | 1831 | } |
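
check_seq() leans on the kernel's between(seq, rcv_nxt, rcv_nxt + rcv_wnd) to test whether the incoming sequence number falls inside the receive window; the comparison is wraparound-safe because it works on unsigned 32-bit distances from the window start rather than on absolute values. A worked example across the 2^32 wrap:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the kernel's between(): seq is in [a, b] iff the
     * distance (seq - a) does not exceed (b - a), both taken mod 2^32. */
    static int between(uint32_t seq, uint32_t a, uint32_t b)
    {
        return (uint32_t)(b - a) >= (uint32_t)(seq - a);
    }

    int main(void)
    {
        uint32_t rcv_nxt = 0xFFFFFFF0u;  /* window straddles the wrap point */
        uint32_t rcv_wnd = 0x100;

        printf("%d\n", between(0x00000010u, rcv_nxt, rcv_nxt + rcv_wnd)); /* 1 */
        printf("%d\n", between(0x7FFFFFFFu, rcv_nxt, rcv_nxt + rcv_wnd)); /* 0 */
        return 0;
    }
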
@@ -1603,9 +1835,8 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1603 | * is created with a listener or it may come in as a retransmitted packet, in | 1835 | * is created with a listener or it may come in as a retransmitted packet, in |
1604 | * which case it will simply be dropped. | 1836 | * which case it will simply be dropped. |
1605 | */ | 1837 | */ |
1606 | |||
1607 | static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1838 | static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1608 | struct tcphdr *tcph) | 1839 | struct tcphdr *tcph) |
1609 | { | 1840 | { |
1610 | int ret; | 1841 | int ret; |
1611 | u32 inc_sequence; | 1842 | u32 inc_sequence; |
@@ -1624,15 +1855,15 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1624 | case NES_CM_STATE_LISTENING: | 1855 | case NES_CM_STATE_LISTENING: |
1625 | /* Passive OPEN */ | 1856 | /* Passive OPEN */ |
1626 | if (atomic_read(&cm_node->listener->pend_accepts_cnt) > | 1857 | if (atomic_read(&cm_node->listener->pend_accepts_cnt) > |
1627 | cm_node->listener->backlog) { | 1858 | cm_node->listener->backlog) { |
1628 | nes_debug(NES_DBG_CM, "drop syn due to backlog " | 1859 | nes_debug(NES_DBG_CM, "drop syn due to backlog " |
1629 | "pressure \n"); | 1860 | "pressure \n"); |
1630 | cm_backlog_drops++; | 1861 | cm_backlog_drops++; |
1631 | passive_open_err(cm_node, skb, 0); | 1862 | passive_open_err(cm_node, skb, 0); |
1632 | break; | 1863 | break; |
1633 | } | 1864 | } |
1634 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, | 1865 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, |
1635 | 1); | 1866 | 1); |
1636 | if (ret) { | 1867 | if (ret) { |
1637 | passive_open_err(cm_node, skb, 0); | 1868 | passive_open_err(cm_node, skb, 0); |
1638 | /* drop pkt */ | 1869 | /* drop pkt */ |
@@ -1666,9 +1897,8 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1666 | } | 1897 | } |
1667 | 1898 | ||
1668 | static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1899 | static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1669 | struct tcphdr *tcph) | 1900 | struct tcphdr *tcph) |
1670 | { | 1901 | { |
1671 | |||
1672 | int ret; | 1902 | int ret; |
1673 | u32 inc_sequence; | 1903 | u32 inc_sequence; |
1674 | int optionsize; | 1904 | int optionsize; |
@@ -1687,7 +1917,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1687 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0); | 1917 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0); |
1688 | if (ret) { | 1918 | if (ret) { |
1689 | nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n", | 1919 | nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n", |
1690 | cm_node); | 1920 | cm_node); |
1691 | break; | 1921 | break; |
1692 | } | 1922 | } |
1693 | cleanup_retrans_entry(cm_node); | 1923 | cleanup_retrans_entry(cm_node); |
@@ -1726,12 +1956,13 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1726 | } | 1956 | } |
1727 | 1957 | ||
1728 | static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1958 | static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1729 | struct tcphdr *tcph) | 1959 | struct tcphdr *tcph) |
1730 | { | 1960 | { |
1731 | int datasize = 0; | 1961 | int datasize = 0; |
1732 | u32 inc_sequence; | 1962 | u32 inc_sequence; |
1733 | int ret = 0; | 1963 | int ret = 0; |
1734 | int optionsize; | 1964 | int optionsize; |
1965 | |||
1735 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | 1966 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); |
1736 | 1967 | ||
1737 | if (check_seq(cm_node, tcph, skb)) | 1968 | if (check_seq(cm_node, tcph, skb)) |
@@ -1752,8 +1983,9 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1752 | if (datasize) { | 1983 | if (datasize) { |
1753 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 1984 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
1754 | handle_rcv_mpa(cm_node, skb); | 1985 | handle_rcv_mpa(cm_node, skb); |
1755 | } else /* rcvd ACK only */ | 1986 | } else { /* rcvd ACK only */ |
1756 | dev_kfree_skb_any(skb); | 1987 | dev_kfree_skb_any(skb); |
1988 | } | ||
1757 | break; | 1989 | break; |
1758 | case NES_CM_STATE_ESTABLISHED: | 1990 | case NES_CM_STATE_ESTABLISHED: |
1759 | /* Passive OPEN */ | 1991 | /* Passive OPEN */ |
@@ -1761,16 +1993,18 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1761 | if (datasize) { | 1993 | if (datasize) { |
1762 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 1994 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
1763 | handle_rcv_mpa(cm_node, skb); | 1995 | handle_rcv_mpa(cm_node, skb); |
1764 | } else | 1996 | } else { |
1765 | drop_packet(skb); | 1997 | drop_packet(skb); |
1998 | } | ||
1766 | break; | 1999 | break; |
1767 | case NES_CM_STATE_MPAREQ_SENT: | 2000 | case NES_CM_STATE_MPAREQ_SENT: |
1768 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); | 2001 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); |
1769 | if (datasize) { | 2002 | if (datasize) { |
1770 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 2003 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
1771 | handle_rcv_mpa(cm_node, skb); | 2004 | handle_rcv_mpa(cm_node, skb); |
1772 | } else /* Could be just an ack pkt.. */ | 2005 | } else { /* Could be just an ack pkt.. */ |
1773 | dev_kfree_skb_any(skb); | 2006 | dev_kfree_skb_any(skb); |
2007 | } | ||
1774 | break; | 2008 | break; |
1775 | case NES_CM_STATE_LISTENING: | 2009 | case NES_CM_STATE_LISTENING: |
1776 | cleanup_retrans_entry(cm_node); | 2010 | cleanup_retrans_entry(cm_node); |
@@ -1811,14 +2045,15 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1811 | 2045 | ||
1812 | 2046 | ||
1813 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 2047 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, |
1814 | struct sk_buff *skb, int optionsize, int passive) | 2048 | struct sk_buff *skb, int optionsize, int passive) |
1815 | { | 2049 | { |
1816 | u8 *optionsloc = (u8 *)&tcph[1]; | 2050 | u8 *optionsloc = (u8 *)&tcph[1]; |
2051 | |||
1817 | if (optionsize) { | 2052 | if (optionsize) { |
1818 | if (process_options(cm_node, optionsloc, optionsize, | 2053 | if (process_options(cm_node, optionsloc, optionsize, |
1819 | (u32)tcph->syn)) { | 2054 | (u32)tcph->syn)) { |
1820 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", | 2055 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", |
1821 | __func__, cm_node); | 2056 | __func__, cm_node); |
1822 | if (passive) | 2057 | if (passive) |
1823 | passive_open_err(cm_node, skb, 1); | 2058 | passive_open_err(cm_node, skb, 1); |
1824 | else | 2059 | else |
@@ -1828,7 +2063,7 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1828 | } | 2063 | } |
1829 | 2064 | ||
1830 | cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << | 2065 | cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << |
1831 | cm_node->tcp_cntxt.snd_wscale; | 2066 | cm_node->tcp_cntxt.snd_wscale; |
1832 | 2067 | ||
1833 | if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) | 2068 | if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) |
1834 | cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; | 2069 | cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; |
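
The tail of handle_tcp_options() above derives the usable send window: the 16-bit window field from the TCP header is shifted left by the peer's advertised window-scale count, and the largest result seen so far is remembered in max_snd_wnd. A quick numeric illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t max_snd_wnd = 0;
        uint8_t  snd_wscale = 7;          /* peer's window-scale shift count */
        uint16_t windows[] = { 0x2000, 0xFFFF, 0x1000 };

        for (size_t i = 0; i < sizeof(windows) / sizeof(windows[0]); i++) {
            uint32_t snd_wnd = (uint32_t)windows[i] << snd_wscale;

            if (snd_wnd > max_snd_wnd)
                max_snd_wnd = snd_wnd;    /* remember the peak window */
            printf("snd_wnd=%u max_snd_wnd=%u\n", snd_wnd, max_snd_wnd);
        }
        return 0;
    }
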
@@ -1839,18 +2074,18 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1839 | * active_open_err() will send reset() if flag set.. | 2074 | * active_open_err() will send reset() if flag set.. |
1840 | * It will also send ABORT event. | 2075 | * It will also send ABORT event. |
1841 | */ | 2076 | */ |
1842 | |||
1843 | static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | 2077 | static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1844 | int reset) | 2078 | int reset) |
1845 | { | 2079 | { |
1846 | cleanup_retrans_entry(cm_node); | 2080 | cleanup_retrans_entry(cm_node); |
1847 | if (reset) { | 2081 | if (reset) { |
1848 | nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, " | 2082 | nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, " |
1849 | "state=%d\n", cm_node, cm_node->state); | 2083 | "state=%d\n", cm_node, cm_node->state); |
1850 | add_ref_cm_node(cm_node); | 2084 | add_ref_cm_node(cm_node); |
1851 | send_reset(cm_node, skb); | 2085 | send_reset(cm_node, skb); |
1852 | } else | 2086 | } else { |
1853 | dev_kfree_skb_any(skb); | 2087 | dev_kfree_skb_any(skb); |
2088 | } | ||
1854 | 2089 | ||
1855 | cm_node->state = NES_CM_STATE_CLOSED; | 2090 | cm_node->state = NES_CM_STATE_CLOSED; |
1856 | create_event(cm_node, NES_CM_EVENT_ABORTED); | 2091 | create_event(cm_node, NES_CM_EVENT_ABORTED); |
@@ -1860,15 +2095,14 @@ static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1860 | * passive_open_err() will either do a reset() or will free up the skb and | 2095 | * passive_open_err() will either do a reset() or will free up the skb and |
1861 | * remove the cm_node. | 2096 | * remove the cm_node. |
1862 | */ | 2097 | */ |
1863 | |||
1864 | static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | 2098 | static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1865 | int reset) | 2099 | int reset) |
1866 | { | 2100 | { |
1867 | cleanup_retrans_entry(cm_node); | 2101 | cleanup_retrans_entry(cm_node); |
1868 | cm_node->state = NES_CM_STATE_CLOSED; | 2102 | cm_node->state = NES_CM_STATE_CLOSED; |
1869 | if (reset) { | 2103 | if (reset) { |
1870 | nes_debug(NES_DBG_CM, "passive_open_err sending RST for " | 2104 | nes_debug(NES_DBG_CM, "passive_open_err sending RST for " |
1871 | "cm_node=%p state =%d\n", cm_node, cm_node->state); | 2105 | "cm_node=%p state =%d\n", cm_node, cm_node->state); |
1872 | send_reset(cm_node, skb); | 2106 | send_reset(cm_node, skb); |
1873 | } else { | 2107 | } else { |
1874 | dev_kfree_skb_any(skb); | 2108 | dev_kfree_skb_any(skb); |
@@ -1883,6 +2117,7 @@ static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1883 | static void free_retrans_entry(struct nes_cm_node *cm_node) | 2117 | static void free_retrans_entry(struct nes_cm_node *cm_node) |
1884 | { | 2118 | { |
1885 | struct nes_timer_entry *send_entry; | 2119 | struct nes_timer_entry *send_entry; |
2120 | |||
1886 | send_entry = cm_node->send_entry; | 2121 | send_entry = cm_node->send_entry; |
1887 | if (send_entry) { | 2122 | if (send_entry) { |
1888 | cm_node->send_entry = NULL; | 2123 | cm_node->send_entry = NULL; |
@@ -1906,26 +2141,28 @@ static void cleanup_retrans_entry(struct nes_cm_node *cm_node) | |||
1906 | * Returns the skb if it is to be freed, else NULL if it has already been used. | 2141 | * Returns the skb if it is to be freed, else NULL if it has already been used. |
1907 | */ | 2142 | */ |
1908 | static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, | 2143 | static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1909 | struct nes_cm_core *cm_core) | 2144 | struct nes_cm_core *cm_core) |
1910 | { | 2145 | { |
1911 | enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN; | 2146 | enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN; |
1912 | struct tcphdr *tcph = tcp_hdr(skb); | 2147 | struct tcphdr *tcph = tcp_hdr(skb); |
1913 | u32 fin_set = 0; | 2148 | u32 fin_set = 0; |
1914 | int ret = 0; | 2149 | int ret = 0; |
2150 | |||
1915 | skb_pull(skb, ip_hdr(skb)->ihl << 2); | 2151 | skb_pull(skb, ip_hdr(skb)->ihl << 2); |
1916 | 2152 | ||
1917 | nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d " | 2153 | nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d " |
1918 | "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn, | 2154 | "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn, |
1919 | tcph->ack, tcph->rst, tcph->fin); | 2155 | tcph->ack, tcph->rst, tcph->fin); |
1920 | 2156 | ||
1921 | if (tcph->rst) | 2157 | if (tcph->rst) { |
1922 | pkt_type = NES_PKT_TYPE_RST; | 2158 | pkt_type = NES_PKT_TYPE_RST; |
1923 | else if (tcph->syn) { | 2159 | } else if (tcph->syn) { |
1924 | pkt_type = NES_PKT_TYPE_SYN; | 2160 | pkt_type = NES_PKT_TYPE_SYN; |
1925 | if (tcph->ack) | 2161 | if (tcph->ack) |
1926 | pkt_type = NES_PKT_TYPE_SYNACK; | 2162 | pkt_type = NES_PKT_TYPE_SYNACK; |
1927 | } else if (tcph->ack) | 2163 | } else if (tcph->ack) { |
1928 | pkt_type = NES_PKT_TYPE_ACK; | 2164 | pkt_type = NES_PKT_TYPE_ACK; |
2165 | } | ||
1929 | if (tcph->fin) | 2166 | if (tcph->fin) |
1930 | fin_set = 1; | 2167 | fin_set = 1; |
1931 | 2168 | ||
@@ -1956,17 +2193,17 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1956 | * mini_cm_listen - create a listen node with params | 2193 | * mini_cm_listen - create a listen node with params |
1957 | */ | 2194 | */ |
1958 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | 2195 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, |
1959 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) | 2196 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) |
1960 | { | 2197 | { |
1961 | struct nes_cm_listener *listener; | 2198 | struct nes_cm_listener *listener; |
1962 | unsigned long flags; | 2199 | unsigned long flags; |
1963 | 2200 | ||
1964 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", | 2201 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", |
1965 | cm_info->loc_addr, cm_info->loc_port); | 2202 | cm_info->loc_addr, cm_info->loc_port); |
1966 | 2203 | ||
1967 | /* cannot have multiple matching listeners */ | 2204 | /* cannot have multiple matching listeners */ |
1968 | listener = find_listener(cm_core, htonl(cm_info->loc_addr), | 2205 | listener = find_listener(cm_core, htonl(cm_info->loc_addr), |
1969 | htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE); | 2206 | htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE); |
1970 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { | 2207 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { |
1971 | /* find_listener() automatically increments the ref count */ | 2208 | /* find_listener() automatically increments the ref count */ |
1972 | atomic_dec(&listener->ref_count); | 2209 | atomic_dec(&listener->ref_count); |
@@ -2012,9 +2249,9 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | |||
2012 | } | 2249 | } |
2013 | 2250 | ||
2014 | nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x," | 2251 | nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x," |
2015 | " listener = %p, backlog = %d, cm_id = %p.\n", | 2252 | " listener = %p, backlog = %d, cm_id = %p.\n", |
2016 | cm_info->loc_addr, cm_info->loc_port, | 2253 | cm_info->loc_addr, cm_info->loc_port, |
2017 | listener, listener->backlog, listener->cm_id); | 2254 | listener, listener->backlog, listener->cm_id); |
2018 | 2255 | ||
2019 | return listener; | 2256 | return listener; |
2020 | } | 2257 | } |
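A listen request for an address/port pair that already has an active listener is refused, and since find_listener() takes a reference even on this failure path, that reference must be dropped before bailing out. A minimal user-space sketch of the reference discipline, using simplified hypothetical types:

#include <stdio.h>

/* hypothetical, much-reduced stand-in for the driver's listener entry */
struct listener {
	unsigned int addr;
	unsigned short port;
	int active;
	int refcnt;
};

/* a successful lookup takes a reference, as find_listener() does */
static struct listener *lookup(struct listener *l, unsigned int addr,
			       unsigned short port)
{
	if (l->refcnt && l->addr == addr && l->port == port) {
		l->refcnt++;
		return l;
	}
	return NULL;
}

int main(void)
{
	struct listener l = { 0x0a000001, 4711, 1, 1 };
	struct listener *hit = lookup(&l, 0x0a000001, 4711);

	if (hit && hit->active) {
		hit->refcnt--;	/* drop the lookup ref before refusing */
		printf("duplicate active listener, refusing\n");
	}
	return 0;
}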
@@ -2024,26 +2261,20 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | |||
2024 | * mini_cm_connect - make a connection node with params | 2261 | * mini_cm_connect - make a connection node with params |
2025 | */ | 2262 | */ |
2026 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | 2263 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, |
2027 | struct nes_vnic *nesvnic, u16 private_data_len, | 2264 | struct nes_vnic *nesvnic, u16 private_data_len, |
2028 | void *private_data, struct nes_cm_info *cm_info) | 2265 | void *private_data, struct nes_cm_info *cm_info) |
2029 | { | 2266 | { |
2030 | int ret = 0; | 2267 | int ret = 0; |
2031 | struct nes_cm_node *cm_node; | 2268 | struct nes_cm_node *cm_node; |
2032 | struct nes_cm_listener *loopbackremotelistener; | 2269 | struct nes_cm_listener *loopbackremotelistener; |
2033 | struct nes_cm_node *loopbackremotenode; | 2270 | struct nes_cm_node *loopbackremotenode; |
2034 | struct nes_cm_info loopback_cm_info; | 2271 | struct nes_cm_info loopback_cm_info; |
2035 | u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + private_data_len; | 2272 | u8 *start_buff; |
2036 | struct ietf_mpa_frame *mpa_frame = NULL; | ||
2037 | 2273 | ||
2038 | /* create a CM connection node */ | 2274 | /* create a CM connection node */ |
2039 | cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL); | 2275 | cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL); |
2040 | if (!cm_node) | 2276 | if (!cm_node) |
2041 | return NULL; | 2277 | return NULL; |
2042 | mpa_frame = &cm_node->mpa_frame; | ||
2043 | memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); | ||
2044 | mpa_frame->flags = IETF_MPA_FLAGS_CRC; | ||
2045 | mpa_frame->rev = IETF_MPA_VERSION; | ||
2046 | mpa_frame->priv_data_len = htons(private_data_len); | ||
2047 | 2278 | ||
2048 | /* set our node side to client (active) side */ | 2279 | /* set our node side to client (active) side */ |
2049 | cm_node->tcp_cntxt.client = 1; | 2280 | cm_node->tcp_cntxt.client = 1; |
@@ -2051,8 +2282,8 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2051 | 2282 | ||
2052 | if (cm_info->loc_addr == cm_info->rem_addr) { | 2283 | if (cm_info->loc_addr == cm_info->rem_addr) { |
2053 | loopbackremotelistener = find_listener(cm_core, | 2284 | loopbackremotelistener = find_listener(cm_core, |
2054 | ntohl(nesvnic->local_ipaddr), cm_node->rem_port, | 2285 | ntohl(nesvnic->local_ipaddr), cm_node->rem_port, |
2055 | NES_CM_LISTENER_ACTIVE_STATE); | 2286 | NES_CM_LISTENER_ACTIVE_STATE); |
2056 | if (loopbackremotelistener == NULL) { | 2287 | if (loopbackremotelistener == NULL) { |
2057 | create_event(cm_node, NES_CM_EVENT_ABORTED); | 2288 | create_event(cm_node, NES_CM_EVENT_ABORTED); |
2058 | } else { | 2289 | } else { |
@@ -2061,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2061 | loopback_cm_info.rem_port = cm_info->loc_port; | 2292 | loopback_cm_info.rem_port = cm_info->loc_port; |
2062 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; | 2293 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; |
2063 | loopbackremotenode = make_cm_node(cm_core, nesvnic, | 2294 | loopbackremotenode = make_cm_node(cm_core, nesvnic, |
2064 | &loopback_cm_info, loopbackremotelistener); | 2295 | &loopback_cm_info, loopbackremotelistener); |
2065 | if (!loopbackremotenode) { | 2296 | if (!loopbackremotenode) { |
2066 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 2297 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
2067 | return NULL; | 2298 | return NULL; |
@@ -2072,7 +2303,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2072 | NES_CM_DEFAULT_RCV_WND_SCALE; | 2303 | NES_CM_DEFAULT_RCV_WND_SCALE; |
2073 | cm_node->loopbackpartner = loopbackremotenode; | 2304 | cm_node->loopbackpartner = loopbackremotenode; |
2074 | memcpy(loopbackremotenode->mpa_frame_buf, private_data, | 2305 | memcpy(loopbackremotenode->mpa_frame_buf, private_data, |
2075 | private_data_len); | 2306 | private_data_len); |
2076 | loopbackremotenode->mpa_frame_size = private_data_len; | 2307 | loopbackremotenode->mpa_frame_size = private_data_len; |
2077 | 2308 | ||
2078 | /* we are done handling this state. */ | 2309 | /* we are done handling this state. */ |
@@ -2100,12 +2331,10 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2100 | return cm_node; | 2331 | return cm_node; |
2101 | } | 2332 | } |
2102 | 2333 | ||
2103 | /* set our node side to client (active) side */ | 2334 | start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2); |
2104 | cm_node->tcp_cntxt.client = 1; | 2335 | cm_node->mpa_frame_size = private_data_len; |
2105 | /* init our MPA frame ptr */ | ||
2106 | memcpy(mpa_frame->priv_data, private_data, private_data_len); | ||
2107 | 2336 | ||
2108 | cm_node->mpa_frame_size = mpa_frame_size; | 2337 | memcpy(start_buff, private_data, private_data_len); |
2109 | 2338 | ||
2110 | /* send a syn and goto syn sent state */ | 2339 | /* send a syn and goto syn sent state */ |
2111 | cm_node->state = NES_CM_STATE_SYN_SENT; | 2340 | cm_node->state = NES_CM_STATE_SYN_SENT; |
@@ -2114,18 +2343,19 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2114 | if (ret) { | 2343 | if (ret) { |
2115 | /* error in sending the syn free up the cm_node struct */ | 2344 | /* error in sending the syn free up the cm_node struct */ |
2116 | nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest " | 2345 | nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest " |
2117 | "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n", | 2346 | "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n", |
2118 | cm_node->rem_addr, cm_node->rem_port, cm_node, | 2347 | cm_node->rem_addr, cm_node->rem_port, cm_node, |
2119 | cm_node->cm_id); | 2348 | cm_node->cm_id); |
2120 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 2349 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
2121 | cm_node = NULL; | 2350 | cm_node = NULL; |
2122 | } | 2351 | } |
2123 | 2352 | ||
2124 | if (cm_node) | 2353 | if (cm_node) { |
2125 | nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X," | 2354 | nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X," |
2126 | "port=0x%04x, cm_node=%p, cm_id = %p.\n", | 2355 | "port=0x%04x, cm_node=%p, cm_id = %p.\n", |
2127 | cm_node->rem_addr, cm_node->rem_port, cm_node, | 2356 | cm_node->rem_addr, cm_node->rem_port, cm_node, |
2128 | cm_node->cm_id); | 2357 | cm_node->cm_id); |
2358 | } | ||
2129 | 2359 | ||
2130 | return cm_node; | 2360 | return cm_node; |
2131 | } | 2361 | } |
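mini_cm_connect() no longer assembles an MPA header at all; it just stages the caller's private data at a fixed offset into mpa_frame_buf, leaving room for the larger v2 header so the frame can be finalized later for whichever MPA revision is negotiated. A user-space sketch of that layout, with hypothetical structs mirroring the ietf_mpa_v1/ietf_mpa_v2 shapes this patch implies:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* hypothetical mirrors of the driver's frame layouts */
struct mpa_v1 {
	uint8_t  key[16];
	uint8_t  flags;
	uint8_t  rev;
	uint16_t priv_data_len;		/* big-endian on the wire */
};

struct mpa_v2 {
	struct mpa_v1 hdr;
	uint16_t ctrl_ird;		/* the extra v2 IRD/ORD word */
	uint16_t ctrl_ord;
};

int main(void)
{
	uint8_t frame_buf[512];
	const char priv[] = "hello";

	/* private data always lands after the larger v2 header */
	uint8_t *start = frame_buf + sizeof(struct mpa_v2);

	memcpy(start, priv, sizeof(priv));
	printf("header room reserved = %zu bytes\n", sizeof(struct mpa_v2));
	return 0;
}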
@@ -2135,8 +2365,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2135 | * mini_cm_accept - accept a connection | 2365 | * mini_cm_accept - accept a connection |
2136 | * This function is never called | 2366 | * This function is never called |
2137 | */ | 2367 | */ |
2138 | static int mini_cm_accept(struct nes_cm_core *cm_core, | 2368 | static int mini_cm_accept(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) |
2139 | struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) | ||
2140 | { | 2369 | { |
2141 | return 0; | 2370 | return 0; |
2142 | } | 2371 | } |
@@ -2145,8 +2374,7 @@ static int mini_cm_accept(struct nes_cm_core *cm_core, | |||
2145 | /** | 2374 | /** |
2146 | * mini_cm_reject - reject and teardown a connection | 2375 | * mini_cm_reject - reject and teardown a connection |
2147 | */ | 2376 | */ |
2148 | static int mini_cm_reject(struct nes_cm_core *cm_core, | 2377 | static int mini_cm_reject(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) |
2149 | struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) | ||
2150 | { | 2378 | { |
2151 | int ret = 0; | 2379 | int ret = 0; |
2152 | int err = 0; | 2380 | int err = 0; |
@@ -2156,7 +2384,7 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, | |||
2156 | struct nes_cm_node *loopback = cm_node->loopbackpartner; | 2384 | struct nes_cm_node *loopback = cm_node->loopbackpartner; |
2157 | 2385 | ||
2158 | nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", | 2386 | nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", |
2159 | __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); | 2387 | __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); |
2160 | 2388 | ||
2161 | if (cm_node->tcp_cntxt.client) | 2389 | if (cm_node->tcp_cntxt.client) |
2162 | return ret; | 2390 | return ret; |
@@ -2177,8 +2405,9 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, | |||
2177 | err = send_reset(cm_node, NULL); | 2405 | err = send_reset(cm_node, NULL); |
2178 | if (err) | 2406 | if (err) |
2179 | WARN_ON(1); | 2407 | WARN_ON(1); |
2180 | } else | 2408 | } else { |
2181 | cm_id->add_ref(cm_id); | 2409 | cm_id->add_ref(cm_id); |
2410 | } | ||
2182 | } | 2411 | } |
2183 | } | 2412 | } |
2184 | } else { | 2413 | } else { |
@@ -2253,7 +2482,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod | |||
2253 | case NES_CM_STATE_TSA: | 2482 | case NES_CM_STATE_TSA: |
2254 | if (cm_node->send_entry) | 2483 | if (cm_node->send_entry) |
2255 | printk(KERN_ERR "ERROR Close got called from STATE_TSA " | 2484 | printk(KERN_ERR "ERROR Close got called from STATE_TSA " |
2256 | "send_entry=%p\n", cm_node->send_entry); | 2485 | "send_entry=%p\n", cm_node->send_entry); |
2257 | ret = rem_ref_cm_node(cm_core, cm_node); | 2486 | ret = rem_ref_cm_node(cm_core, cm_node); |
2258 | break; | 2487 | break; |
2259 | } | 2488 | } |
@@ -2266,7 +2495,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod | |||
2266 | * node state machine | 2495 | * node state machine |
2267 | */ | 2496 | */ |
2268 | static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | 2497 | static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, |
2269 | struct nes_vnic *nesvnic, struct sk_buff *skb) | 2498 | struct nes_vnic *nesvnic, struct sk_buff *skb) |
2270 | { | 2499 | { |
2271 | struct nes_cm_node *cm_node = NULL; | 2500 | struct nes_cm_node *cm_node = NULL; |
2272 | struct nes_cm_listener *listener = NULL; | 2501 | struct nes_cm_listener *listener = NULL; |
@@ -2278,9 +2507,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2278 | 2507 | ||
2279 | if (!skb) | 2508 | if (!skb) |
2280 | return 0; | 2509 | return 0; |
2281 | if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { | 2510 | if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) |
2282 | return 0; | 2511 | return 0; |
2283 | } | ||
2284 | 2512 | ||
2285 | iph = (struct iphdr *)skb->data; | 2513 | iph = (struct iphdr *)skb->data; |
2286 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); | 2514 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); |
@@ -2298,8 +2526,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2298 | 2526 | ||
2299 | do { | 2527 | do { |
2300 | cm_node = find_node(cm_core, | 2528 | cm_node = find_node(cm_core, |
2301 | nfo.rem_port, nfo.rem_addr, | 2529 | nfo.rem_port, nfo.rem_addr, |
2302 | nfo.loc_port, nfo.loc_addr); | 2530 | nfo.loc_port, nfo.loc_addr); |
2303 | 2531 | ||
2304 | if (!cm_node) { | 2532 | if (!cm_node) { |
2305 | /* The only type of packet accepted here is for */ | 2533 | /* The only type of packet accepted here is for */ |
@@ -2309,8 +2537,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2309 | break; | 2537 | break; |
2310 | } | 2538 | } |
2311 | listener = find_listener(cm_core, nfo.loc_addr, | 2539 | listener = find_listener(cm_core, nfo.loc_addr, |
2312 | nfo.loc_port, | 2540 | nfo.loc_port, |
2313 | NES_CM_LISTENER_ACTIVE_STATE); | 2541 | NES_CM_LISTENER_ACTIVE_STATE); |
2314 | if (!listener) { | 2542 | if (!listener) { |
2315 | nfo.cm_id = NULL; | 2543 | nfo.cm_id = NULL; |
2316 | nfo.conn_type = 0; | 2544 | nfo.conn_type = 0; |
@@ -2321,10 +2549,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2321 | nfo.cm_id = listener->cm_id; | 2549 | nfo.cm_id = listener->cm_id; |
2322 | nfo.conn_type = listener->conn_type; | 2550 | nfo.conn_type = listener->conn_type; |
2323 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, | 2551 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, |
2324 | listener); | 2552 | listener); |
2325 | if (!cm_node) { | 2553 | if (!cm_node) { |
2326 | nes_debug(NES_DBG_CM, "Unable to allocate " | 2554 | nes_debug(NES_DBG_CM, "Unable to allocate " |
2327 | "node\n"); | 2555 | "node\n"); |
2328 | cm_packets_dropped++; | 2556 | cm_packets_dropped++; |
2329 | atomic_dec(&listener->ref_count); | 2557 | atomic_dec(&listener->ref_count); |
2330 | dev_kfree_skb_any(skb); | 2558 | dev_kfree_skb_any(skb); |
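Before dereferencing either header, mini_cm_recv_pkt() drops any frame too short to hold a bare IPv4 header plus TCP header (the hunk above merely unbraces the single-statement check). The same guard as a stand-alone sketch with fixed-size stand-ins for struct iphdr/tcphdr:

#include <stdio.h>
#include <stddef.h>

/* simplified 20-byte stand-ins for struct iphdr and struct tcphdr */
struct ip_hdr  { unsigned char bytes[20]; };
struct tcp_hdr { unsigned char bytes[20]; };

static int long_enough(size_t skb_len)
{
	/* too short to hold the two base headers: drop it */
	return skb_len >= sizeof(struct ip_hdr) + sizeof(struct tcp_hdr);
}

int main(void)
{
	printf("%d %d\n", long_enough(39), long_enough(40));	/* 0 1 */
	return 0;
}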
@@ -2376,7 +2604,7 @@ static struct nes_cm_core *nes_cm_alloc_core(void) | |||
2376 | init_timer(&cm_core->tcp_timer); | 2604 | init_timer(&cm_core->tcp_timer); |
2377 | cm_core->tcp_timer.function = nes_cm_timer_tick; | 2605 | cm_core->tcp_timer.function = nes_cm_timer_tick; |
2378 | 2606 | ||
2379 | cm_core->mtu = NES_CM_DEFAULT_MTU; | 2607 | cm_core->mtu = NES_CM_DEFAULT_MTU; |
2380 | cm_core->state = NES_CM_STATE_INITED; | 2608 | cm_core->state = NES_CM_STATE_INITED; |
2381 | cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS; | 2609 | cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS; |
2382 | 2610 | ||
@@ -2414,9 +2642,8 @@ static int mini_cm_dealloc_core(struct nes_cm_core *cm_core) | |||
2414 | 2642 | ||
2415 | barrier(); | 2643 | barrier(); |
2416 | 2644 | ||
2417 | if (timer_pending(&cm_core->tcp_timer)) { | 2645 | if (timer_pending(&cm_core->tcp_timer)) |
2418 | del_timer(&cm_core->tcp_timer); | 2646 | del_timer(&cm_core->tcp_timer); |
2419 | } | ||
2420 | 2647 | ||
2421 | destroy_workqueue(cm_core->event_wq); | 2648 | destroy_workqueue(cm_core->event_wq); |
2422 | destroy_workqueue(cm_core->disconn_wq); | 2649 | destroy_workqueue(cm_core->disconn_wq); |
@@ -2471,8 +2698,8 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2471 | return -EINVAL; | 2698 | return -EINVAL; |
2472 | 2699 | ||
2473 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 | | 2700 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 | |
2474 | NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG | | 2701 | NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG | |
2475 | NES_QPCONTEXT_MISC_DROS); | 2702 | NES_QPCONTEXT_MISC_DROS); |
2476 | 2703 | ||
2477 | if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale) | 2704 | if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale) |
2478 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE); | 2705 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE); |
@@ -2482,15 +2709,15 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2482 | nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16); | 2709 | nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16); |
2483 | 2710 | ||
2484 | nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32( | 2711 | nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32( |
2485 | (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT); | 2712 | (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT); |
2486 | 2713 | ||
2487 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( | 2714 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( |
2488 | (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) & | 2715 | (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) & |
2489 | NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK); | 2716 | NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK); |
2490 | 2717 | ||
2491 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( | 2718 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( |
2492 | (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) & | 2719 | (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) & |
2493 | NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK); | 2720 | NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK); |
2494 | 2721 | ||
2495 | nesqp->nesqp_context->keepalive = cpu_to_le32(0x80); | 2722 | nesqp->nesqp_context->keepalive = cpu_to_le32(0x80); |
2496 | nesqp->nesqp_context->ts_recent = 0; | 2723 | nesqp->nesqp_context->ts_recent = 0; |
@@ -2499,24 +2726,24 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2499 | nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); | 2726 | nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); |
2500 | nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); | 2727 | nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); |
2501 | nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << | 2728 | nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << |
2502 | cm_node->tcp_cntxt.rcv_wscale); | 2729 | cm_node->tcp_cntxt.rcv_wscale); |
2503 | nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | 2730 | nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); |
2504 | nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | 2731 | nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); |
2505 | nesqp->nesqp_context->srtt = 0; | 2732 | nesqp->nesqp_context->srtt = 0; |
2506 | nesqp->nesqp_context->rttvar = cpu_to_le32(0x6); | 2733 | nesqp->nesqp_context->rttvar = cpu_to_le32(0x6); |
2507 | nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000); | 2734 | nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000); |
2508 | nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss); | 2735 | nesqp->nesqp_context->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss); |
2509 | nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); | 2736 | nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); |
2510 | nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | 2737 | nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); |
2511 | nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); | 2738 | nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); |
2512 | 2739 | ||
2513 | nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X," | 2740 | nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X," |
2514 | " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n", | 2741 | " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n", |
2515 | nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt), | 2742 | nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt), |
2516 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), | 2743 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), |
2517 | cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale), | 2744 | cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale), |
2518 | le32_to_cpu(nesqp->nesqp_context->rcv_wnd), | 2745 | le32_to_cpu(nesqp->nesqp_context->rcv_wnd), |
2519 | le32_to_cpu(nesqp->nesqp_context->misc)); | 2746 | le32_to_cpu(nesqp->nesqp_context->misc)); |
2520 | nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd)); | 2747 | nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd)); |
2521 | nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd)); | 2748 | nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd)); |
2522 | nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd)); | 2749 | nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd)); |
@@ -2537,7 +2764,7 @@ int nes_cm_disconn(struct nes_qp *nesqp) | |||
2537 | 2764 | ||
2538 | work = kzalloc(sizeof *work, GFP_ATOMIC); | 2765 | work = kzalloc(sizeof *work, GFP_ATOMIC); |
2539 | if (!work) | 2766 | if (!work) |
2540 | return -ENOMEM; /* Timer will clean up */ | 2767 | return -ENOMEM; /* Timer will clean up */ |
2541 | 2768 | ||
2542 | nes_add_ref(&nesqp->ibqp); | 2769 | nes_add_ref(&nesqp->ibqp); |
2543 | work->nesqp = nesqp; | 2770 | work->nesqp = nesqp; |
@@ -2557,7 +2784,7 @@ static void nes_disconnect_worker(struct work_struct *work) | |||
2557 | 2784 | ||
2558 | kfree(dwork); | 2785 | kfree(dwork); |
2559 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", | 2786 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", |
2560 | nesqp->last_aeq, nesqp->hwqp.qp_id); | 2787 | nesqp->last_aeq, nesqp->hwqp.qp_id); |
2561 | nes_cm_disconn_true(nesqp); | 2788 | nes_cm_disconn_true(nesqp); |
2562 | nes_rem_ref(&nesqp->ibqp); | 2789 | nes_rem_ref(&nesqp->ibqp); |
2563 | } | 2790 | } |
@@ -2593,7 +2820,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2593 | /* make sure we haven't already closed this connection */ | 2820 | /* make sure we haven't already closed this connection */ |
2594 | if (!cm_id) { | 2821 | if (!cm_id) { |
2595 | nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n", | 2822 | nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n", |
2596 | nesqp->hwqp.qp_id); | 2823 | nesqp->hwqp.qp_id); |
2597 | spin_unlock_irqrestore(&nesqp->lock, flags); | 2824 | spin_unlock_irqrestore(&nesqp->lock, flags); |
2598 | return -1; | 2825 | return -1; |
2599 | } | 2826 | } |
@@ -2602,7 +2829,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2602 | nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id); | 2829 | nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id); |
2603 | 2830 | ||
2604 | original_hw_tcp_state = nesqp->hw_tcp_state; | 2831 | original_hw_tcp_state = nesqp->hw_tcp_state; |
2605 | original_ibqp_state = nesqp->ibqp_state; | 2832 | original_ibqp_state = nesqp->ibqp_state; |
2606 | last_ae = nesqp->last_aeq; | 2833 | last_ae = nesqp->last_aeq; |
2607 | 2834 | ||
2608 | if (nesqp->term_flags) { | 2835 | if (nesqp->term_flags) { |
@@ -2660,16 +2887,16 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2660 | cm_event.private_data_len = 0; | 2887 | cm_event.private_data_len = 0; |
2661 | 2888 | ||
2662 | nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event" | 2889 | nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event" |
2663 | " for QP%u, SQ Head = %u, SQ Tail = %u. " | 2890 | " for QP%u, SQ Head = %u, SQ Tail = %u. " |
2664 | "cm_id = %p, refcount = %u.\n", | 2891 | "cm_id = %p, refcount = %u.\n", |
2665 | nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, | 2892 | nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, |
2666 | nesqp->hwqp.sq_tail, cm_id, | 2893 | nesqp->hwqp.sq_tail, cm_id, |
2667 | atomic_read(&nesqp->refcount)); | 2894 | atomic_read(&nesqp->refcount)); |
2668 | 2895 | ||
2669 | ret = cm_id->event_handler(cm_id, &cm_event); | 2896 | ret = cm_id->event_handler(cm_id, &cm_event); |
2670 | if (ret) | 2897 | if (ret) |
2671 | nes_debug(NES_DBG_CM, "OFA CM event_handler " | 2898 | nes_debug(NES_DBG_CM, "OFA CM event_handler " |
2672 | "returned, ret=%d\n", ret); | 2899 | "returned, ret=%d\n", ret); |
2673 | } | 2900 | } |
2674 | 2901 | ||
2675 | if (issue_close) { | 2902 | if (issue_close) { |
@@ -2687,9 +2914,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2687 | cm_event.private_data_len = 0; | 2914 | cm_event.private_data_len = 0; |
2688 | 2915 | ||
2689 | ret = cm_id->event_handler(cm_id, &cm_event); | 2916 | ret = cm_id->event_handler(cm_id, &cm_event); |
2690 | if (ret) { | 2917 | if (ret) |
2691 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | 2918 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); |
2692 | } | ||
2693 | 2919 | ||
2694 | cm_id->rem_ref(cm_id); | 2920 | cm_id->rem_ref(cm_id); |
2695 | } | 2921 | } |
@@ -2729,8 +2955,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) | |||
2729 | if (nesqp->lsmm_mr) | 2955 | if (nesqp->lsmm_mr) |
2730 | nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); | 2956 | nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); |
2731 | pci_free_consistent(nesdev->pcidev, | 2957 | pci_free_consistent(nesdev->pcidev, |
2732 | nesqp->private_data_len+sizeof(struct ietf_mpa_frame), | 2958 | nesqp->private_data_len + nesqp->ietf_frame_size, |
2733 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); | 2959 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); |
2734 | } | 2960 | } |
2735 | } | 2961 | } |
2736 | 2962 | ||
@@ -2769,6 +2995,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2769 | struct ib_phys_buf ibphysbuf; | 2995 | struct ib_phys_buf ibphysbuf; |
2770 | struct nes_pd *nespd; | 2996 | struct nes_pd *nespd; |
2771 | u64 tagged_offset; | 2997 | u64 tagged_offset; |
2998 | u8 mpa_frame_offset = 0; | ||
2999 | struct ietf_mpa_v2 *mpa_v2_frame; | ||
3000 | u8 start_addr = 0; | ||
3001 | u8 *start_ptr = &start_addr; | ||
3002 | u8 **start_buff = &start_ptr; | ||
3003 | u16 buff_len = 0; | ||
2772 | 3004 | ||
2773 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 3005 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
2774 | if (!ibqp) | 3006 | if (!ibqp) |
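Further down, nes_accept() hands cm_build_mpa_frame() a pointer-to-pointer plus a length out-parameter, so the builder can report both where the finished frame begins inside the DMA buffer and how many bytes it occupies; the start_addr/start_ptr/start_buff chain above exists only to manufacture that pointer-to-pointer. A minimal sketch of the calling convention, with a hypothetical builder function:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* hypothetical builder: writes a frame, reports its start and length */
static void build_frame(uint8_t *buf, uint8_t **start, uint16_t *len)
{
	const char hdr[] = "MPA";

	memcpy(buf, hdr, sizeof(hdr));
	*start = buf;			/* where the frame begins */
	*len = sizeof(hdr);		/* how many bytes to send */
}

int main(void)
{
	uint8_t buf[64];
	uint8_t start_addr = 0;
	uint8_t *start_ptr = &start_addr;	/* same dance as the driver */
	uint8_t **start_buff = &start_ptr;
	uint16_t buff_len = 0;

	build_frame(buf, start_buff, &buff_len);
	printf("frame at %p, %u bytes\n", (void *)*start_buff,
	       (unsigned)buff_len);
	return 0;
}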
@@ -2809,53 +3041,49 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2809 | nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", | 3041 | nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", |
2810 | netdev_refcnt_read(nesvnic->netdev)); | 3042 | netdev_refcnt_read(nesvnic->netdev)); |
2811 | 3043 | ||
3044 | nesqp->ietf_frame_size = sizeof(struct ietf_mpa_v2); | ||
2812 | /* allocate the ietf frame and space for private data */ | 3045 | /* allocate the ietf frame and space for private data */ |
2813 | nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, | 3046 | nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, |
2814 | sizeof(struct ietf_mpa_frame) + conn_param->private_data_len, | 3047 | nesqp->ietf_frame_size + conn_param->private_data_len, |
2815 | &nesqp->ietf_frame_pbase); | 3048 | &nesqp->ietf_frame_pbase); |
2816 | 3049 | ||
2817 | if (!nesqp->ietf_frame) { | 3050 | if (!nesqp->ietf_frame) { |
2818 | nes_debug(NES_DBG_CM, "Unable to allocate memory for private " | 3051 | nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n"); |
2819 | "data\n"); | ||
2820 | return -ENOMEM; | 3052 | return -ENOMEM; |
2821 | } | 3053 | } |
3054 | mpa_v2_frame = (struct ietf_mpa_v2 *)nesqp->ietf_frame; | ||
2822 | 3055 | ||
3056 | if (cm_node->mpa_frame_rev == IETF_MPA_V1) | ||
3057 | mpa_frame_offset = 4; | ||
2823 | 3058 | ||
2824 | /* setup the MPA frame */ | 3059 | memcpy(mpa_v2_frame->priv_data, conn_param->private_data, |
2825 | nesqp->private_data_len = conn_param->private_data_len; | 3060 | conn_param->private_data_len); |
2826 | memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
2827 | |||
2828 | memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data, | ||
2829 | conn_param->private_data_len); | ||
2830 | 3061 | ||
2831 | nesqp->ietf_frame->priv_data_len = | 3062 | cm_build_mpa_frame(cm_node, start_buff, &buff_len, nesqp->ietf_frame, MPA_KEY_REPLY); |
2832 | cpu_to_be16(conn_param->private_data_len); | 3063 | nesqp->private_data_len = conn_param->private_data_len; |
2833 | nesqp->ietf_frame->rev = mpa_version; | ||
2834 | nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC; | ||
2835 | 3064 | ||
2836 | /* setup our first outgoing iWarp send WQE (the IETF frame response) */ | 3065 | /* setup our first outgoing iWarp send WQE (the IETF frame response) */ |
2837 | wqe = &nesqp->hwqp.sq_vbase[0]; | 3066 | wqe = &nesqp->hwqp.sq_vbase[0]; |
2838 | 3067 | ||
2839 | if (cm_id->remote_addr.sin_addr.s_addr != | 3068 | if (cm_id->remote_addr.sin_addr.s_addr != |
2840 | cm_id->local_addr.sin_addr.s_addr) { | 3069 | cm_id->local_addr.sin_addr.s_addr) { |
2841 | u64temp = (unsigned long)nesqp; | 3070 | u64temp = (unsigned long)nesqp; |
2842 | nesibdev = nesvnic->nesibdev; | 3071 | nesibdev = nesvnic->nesibdev; |
2843 | nespd = nesqp->nespd; | 3072 | nespd = nesqp->nespd; |
2844 | ibphysbuf.addr = nesqp->ietf_frame_pbase; | 3073 | ibphysbuf.addr = nesqp->ietf_frame_pbase + mpa_frame_offset; |
2845 | ibphysbuf.size = conn_param->private_data_len + | 3074 | ibphysbuf.size = buff_len; |
2846 | sizeof(struct ietf_mpa_frame); | 3075 | tagged_offset = (u64)(unsigned long)*start_buff; |
2847 | tagged_offset = (u64)(unsigned long)nesqp->ietf_frame; | ||
2848 | ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, | 3076 | ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, |
2849 | &ibphysbuf, 1, | 3077 | &ibphysbuf, 1, |
2850 | IB_ACCESS_LOCAL_WRITE, | 3078 | IB_ACCESS_LOCAL_WRITE, |
2851 | &tagged_offset); | 3079 | &tagged_offset); |
2852 | if (!ibmr) { | 3080 | if (!ibmr) { |
2853 | nes_debug(NES_DBG_CM, "Unable to register memory region" | 3081 | nes_debug(NES_DBG_CM, "Unable to register memory region" |
2854 | "for lSMM for cm_node = %p \n", | 3082 | "for lSMM for cm_node = %p \n", |
2855 | cm_node); | 3083 | cm_node); |
2856 | pci_free_consistent(nesdev->pcidev, | 3084 | pci_free_consistent(nesdev->pcidev, |
2857 | nesqp->private_data_len+sizeof(struct ietf_mpa_frame), | 3085 | nesqp->private_data_len + nesqp->ietf_frame_size, |
2858 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); | 3086 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); |
2859 | return -ENOMEM; | 3087 | return -ENOMEM; |
2860 | } | 3088 | } |
2861 | 3089 | ||
@@ -2863,22 +3091,20 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2863 | ibmr->device = nespd->ibpd.device; | 3091 | ibmr->device = nespd->ibpd.device; |
2864 | nesqp->lsmm_mr = ibmr; | 3092 | nesqp->lsmm_mr = ibmr; |
2865 | 3093 | ||
2866 | u64temp |= NES_SW_CONTEXT_ALIGN>>1; | 3094 | u64temp |= NES_SW_CONTEXT_ALIGN >> 1; |
2867 | set_wqe_64bit_value(wqe->wqe_words, | 3095 | set_wqe_64bit_value(wqe->wqe_words, |
2868 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, | 3096 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, |
2869 | u64temp); | 3097 | u64temp); |
2870 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | 3098 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = |
2871 | cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | | 3099 | cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | |
2872 | NES_IWARP_SQ_WQE_WRPDU); | 3100 | NES_IWARP_SQ_WQE_WRPDU); |
2873 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = | 3101 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = |
2874 | cpu_to_le32(conn_param->private_data_len + | 3102 | cpu_to_le32(buff_len); |
2875 | sizeof(struct ietf_mpa_frame)); | ||
2876 | set_wqe_64bit_value(wqe->wqe_words, | 3103 | set_wqe_64bit_value(wqe->wqe_words, |
2877 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, | 3104 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, |
2878 | (u64)(unsigned long)nesqp->ietf_frame); | 3105 | (u64)(unsigned long)(*start_buff)); |
2879 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = | 3106 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = |
2880 | cpu_to_le32(conn_param->private_data_len + | 3107 | cpu_to_le32(buff_len); |
2881 | sizeof(struct ietf_mpa_frame)); | ||
2882 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; | 3108 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; |
2883 | if (nesqp->sq_kmapped) { | 3109 | if (nesqp->sq_kmapped) { |
2884 | nesqp->sq_kmapped = 0; | 3110 | nesqp->sq_kmapped = 0; |
@@ -2887,7 +3113,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2887 | 3113 | ||
2888 | nesqp->nesqp_context->ird_ord_sizes |= | 3114 | nesqp->nesqp_context->ird_ord_sizes |= |
2889 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | 3115 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | |
2890 | NES_QPCONTEXT_ORDIRD_WRPDU); | 3116 | NES_QPCONTEXT_ORDIRD_WRPDU); |
2891 | } else { | 3117 | } else { |
2892 | nesqp->nesqp_context->ird_ord_sizes |= | 3118 | nesqp->nesqp_context->ird_ord_sizes |= |
2893 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU); | 3119 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU); |
@@ -2901,11 +3127,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2901 | 3127 | ||
2902 | /* nesqp->cm_node = (void *)cm_id->provider_data; */ | 3128 | /* nesqp->cm_node = (void *)cm_id->provider_data; */ |
2903 | cm_id->provider_data = nesqp; | 3129 | cm_id->provider_data = nesqp; |
2904 | nesqp->active_conn = 0; | 3130 | nesqp->active_conn = 0; |
2905 | 3131 | ||
2906 | if (cm_node->state == NES_CM_STATE_TSA) | 3132 | if (cm_node->state == NES_CM_STATE_TSA) |
2907 | nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n", | 3133 | nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n", |
2908 | cm_node); | 3134 | cm_node); |
2909 | 3135 | ||
2910 | nes_cm_init_tsa_conn(nesqp, cm_node); | 3136 | nes_cm_init_tsa_conn(nesqp, cm_node); |
2911 | 3137 | ||
@@ -2922,13 +3148,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2922 | cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); | 3148 | cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); |
2923 | 3149 | ||
2924 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | 3150 | nesqp->nesqp_context->misc2 |= cpu_to_le32( |
2925 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << | 3151 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << |
2926 | NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); | 3152 | NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); |
2927 | 3153 | ||
2928 | nesqp->nesqp_context->arp_index_vlan |= | 3154 | nesqp->nesqp_context->arp_index_vlan |= |
2929 | cpu_to_le32(nes_arp_table(nesdev, | 3155 | cpu_to_le32(nes_arp_table(nesdev, |
2930 | le32_to_cpu(nesqp->nesqp_context->ip0), NULL, | 3156 | le32_to_cpu(nesqp->nesqp_context->ip0), NULL, |
2931 | NES_ARP_RESOLVE) << 16); | 3157 | NES_ARP_RESOLVE) << 16); |
2932 | 3158 | ||
2933 | nesqp->nesqp_context->ts_val_delta = cpu_to_le32( | 3159 | nesqp->nesqp_context->ts_val_delta = cpu_to_le32( |
2934 | jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); | 3160 | jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); |
@@ -2954,7 +3180,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2954 | crc_value = get_crc_value(&nes_quad); | 3180 | crc_value = get_crc_value(&nes_quad); |
2955 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); | 3181 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2956 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | 3182 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", |
2957 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | 3183 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); |
2958 | 3184 | ||
2959 | nesqp->hte_index &= adapter->hte_index_mask; | 3185 | nesqp->hte_index &= adapter->hte_index_mask; |
2960 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | 3186 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); |
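The connection 4-tuple is hashed by get_crc_value() over the nes_v4_quad, inverted, and masked with hte_index_mask to select a hardware hash-table slot. A user-space analogue using a toy bitwise CRC-32; the hardware's real polynomial and byte ordering are not visible in this patch:

#include <stdio.h>
#include <stdint.h>

/* toy reflected CRC-32, polynomial 0xEDB88320 */
static uint32_t crc32_sw(const void *p, size_t n)
{
	const uint8_t *b = p;
	uint32_t c = 0xffffffff;

	while (n--) {
		c ^= *b++;
		for (int i = 0; i < 8; i++)
			c = (c >> 1) ^ (0xedb88320 & -(c & 1));
	}
	return c ^ 0xffffffff;
}

/* hypothetical mirror of struct nes_v4_quad: addresses and ports */
struct quad { uint32_t saddr, daddr; uint16_t sport, dport; };

int main(void)
{
	struct quad q = { 0x0a000001, 0x0a000002, 1234, 5678 };
	uint32_t hte_index_mask = 0x3fff;	/* hypothetical table size - 1 */
	uint32_t idx = crc32_sw(&q, sizeof(q)) & hte_index_mask;

	printf("hash table index = 0x%04x\n", (unsigned int)idx);
	return 0;
}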
@@ -2962,17 +3188,15 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2962 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); | 3188 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); |
2963 | 3189 | ||
2964 | nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = " | 3190 | nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = " |
2965 | "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + " | 3191 | "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + " |
2966 | "private data length=%zu.\n", nesqp->hwqp.qp_id, | 3192 | "private data length=%u.\n", nesqp->hwqp.qp_id, |
2967 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | 3193 | ntohl(cm_id->remote_addr.sin_addr.s_addr), |
2968 | ntohs(cm_id->remote_addr.sin_port), | 3194 | ntohs(cm_id->remote_addr.sin_port), |
2969 | ntohl(cm_id->local_addr.sin_addr.s_addr), | 3195 | ntohl(cm_id->local_addr.sin_addr.s_addr), |
2970 | ntohs(cm_id->local_addr.sin_port), | 3196 | ntohs(cm_id->local_addr.sin_port), |
2971 | le32_to_cpu(nesqp->nesqp_context->rcv_nxt), | 3197 | le32_to_cpu(nesqp->nesqp_context->rcv_nxt), |
2972 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), | 3198 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), |
2973 | conn_param->private_data_len + | 3199 | buff_len); |
2974 | sizeof(struct ietf_mpa_frame)); | ||
2975 | |||
2976 | 3200 | ||
2977 | /* notify OF layer that accept event was successful */ | 3201 | /* notify OF layer that accept event was successful */ |
2978 | cm_id->add_ref(cm_id); | 3202 | cm_id->add_ref(cm_id); |
@@ -2993,12 +3217,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2993 | nesqp->private_data_len; | 3217 | nesqp->private_data_len; |
2994 | /* copy entire MPA frame to our cm_node's frame */ | 3218 | /* copy entire MPA frame to our cm_node's frame */ |
2995 | memcpy(cm_node->loopbackpartner->mpa_frame_buf, | 3219 | memcpy(cm_node->loopbackpartner->mpa_frame_buf, |
2996 | nesqp->ietf_frame->priv_data, nesqp->private_data_len); | 3220 | conn_param->private_data, conn_param->private_data_len); |
2997 | create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED); | 3221 | create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED); |
2998 | } | 3222 | } |
2999 | if (ret) | 3223 | if (ret) |
3000 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 3224 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
3001 | "ret=%d\n", __func__, __LINE__, ret); | 3225 | "ret=%d\n", __func__, __LINE__, ret); |
3002 | 3226 | ||
3003 | return 0; | 3227 | return 0; |
3004 | } | 3228 | } |
@@ -3011,34 +3235,28 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | |||
3011 | { | 3235 | { |
3012 | struct nes_cm_node *cm_node; | 3236 | struct nes_cm_node *cm_node; |
3013 | struct nes_cm_node *loopback; | 3237 | struct nes_cm_node *loopback; |
3014 | |||
3015 | struct nes_cm_core *cm_core; | 3238 | struct nes_cm_core *cm_core; |
3239 | u8 *start_buff; | ||
3016 | 3240 | ||
3017 | atomic_inc(&cm_rejects); | 3241 | atomic_inc(&cm_rejects); |
3018 | cm_node = (struct nes_cm_node *) cm_id->provider_data; | 3242 | cm_node = (struct nes_cm_node *)cm_id->provider_data; |
3019 | loopback = cm_node->loopbackpartner; | 3243 | loopback = cm_node->loopbackpartner; |
3020 | cm_core = cm_node->cm_core; | 3244 | cm_core = cm_node->cm_core; |
3021 | cm_node->cm_id = cm_id; | 3245 | cm_node->cm_id = cm_id; |
3022 | cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len; | ||
3023 | 3246 | ||
3024 | if (cm_node->mpa_frame_size > MAX_CM_BUFFER) | 3247 | if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER) |
3025 | return -EINVAL; | 3248 | return -EINVAL; |
3026 | 3249 | ||
3027 | memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
3028 | if (loopback) { | 3250 | if (loopback) { |
3029 | memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len); | 3251 | memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len); |
3030 | loopback->mpa_frame.priv_data_len = pdata_len; | 3252 | loopback->mpa_frame.priv_data_len = pdata_len; |
3031 | loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) + | 3253 | loopback->mpa_frame_size = pdata_len; |
3032 | pdata_len; | ||
3033 | } else { | 3254 | } else { |
3034 | memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len); | 3255 | start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2); |
3035 | cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len); | 3256 | cm_node->mpa_frame_size = pdata_len; |
3257 | memcpy(start_buff, pdata, pdata_len); | ||
3036 | } | 3258 | } |
3037 | 3259 | return cm_core->api->reject(cm_core, cm_node); | |
3038 | cm_node->mpa_frame.rev = mpa_version; | ||
3039 | cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT; | ||
3040 | |||
3041 | return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node); | ||
3042 | } | 3260 | } |
3043 | 3261 | ||
3044 | 3262 | ||
@@ -3065,7 +3283,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3065 | nesvnic = to_nesvnic(nesqp->ibqp.device); | 3283 | nesvnic = to_nesvnic(nesqp->ibqp.device); |
3066 | if (!nesvnic) | 3284 | if (!nesvnic) |
3067 | return -EINVAL; | 3285 | return -EINVAL; |
3068 | nesdev = nesvnic->nesdev; | 3286 | nesdev = nesvnic->nesdev; |
3069 | if (!nesdev) | 3287 | if (!nesdev) |
3070 | return -EINVAL; | 3288 | return -EINVAL; |
3071 | 3289 | ||
@@ -3073,12 +3291,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3073 | return -EINVAL; | 3291 | return -EINVAL; |
3074 | 3292 | ||
3075 | nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = " | 3293 | nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = " |
3076 | "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id, | 3294 | "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id, |
3077 | ntohl(nesvnic->local_ipaddr), | 3295 | ntohl(nesvnic->local_ipaddr), |
3078 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | 3296 | ntohl(cm_id->remote_addr.sin_addr.s_addr), |
3079 | ntohs(cm_id->remote_addr.sin_port), | 3297 | ntohs(cm_id->remote_addr.sin_port), |
3080 | ntohl(cm_id->local_addr.sin_addr.s_addr), | 3298 | ntohl(cm_id->local_addr.sin_addr.s_addr), |
3081 | ntohs(cm_id->local_addr.sin_port)); | 3299 | ntohs(cm_id->local_addr.sin_port)); |
3082 | 3300 | ||
3083 | atomic_inc(&cm_connects); | 3301 | atomic_inc(&cm_connects); |
3084 | nesqp->active_conn = 1; | 3302 | nesqp->active_conn = 1; |
@@ -3092,12 +3310,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3092 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); | 3310 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); |
3093 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); | 3311 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); |
3094 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", | 3312 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", |
3095 | conn_param->private_data_len); | 3313 | conn_param->private_data_len); |
3096 | 3314 | ||
3097 | if (cm_id->local_addr.sin_addr.s_addr != | 3315 | if (cm_id->local_addr.sin_addr.s_addr != |
3098 | cm_id->remote_addr.sin_addr.s_addr) { | 3316 | cm_id->remote_addr.sin_addr.s_addr) { |
3099 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | 3317 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), |
3100 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); | 3318 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); |
3101 | apbvt_set = 1; | 3319 | apbvt_set = 1; |
3102 | } | 3320 | } |
3103 | 3321 | ||
@@ -3113,13 +3331,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3113 | 3331 | ||
3114 | /* create a connect CM node connection */ | 3332 | /* create a connect CM node connection */ |
3115 | cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, | 3333 | cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, |
3116 | conn_param->private_data_len, (void *)conn_param->private_data, | 3334 | conn_param->private_data_len, (void *)conn_param->private_data, |
3117 | &cm_info); | 3335 | &cm_info); |
3118 | if (!cm_node) { | 3336 | if (!cm_node) { |
3119 | if (apbvt_set) | 3337 | if (apbvt_set) |
3120 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | 3338 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), |
3121 | PCI_FUNC(nesdev->pcidev->devfn), | 3339 | PCI_FUNC(nesdev->pcidev->devfn), |
3122 | NES_MANAGE_APBVT_DEL); | 3340 | NES_MANAGE_APBVT_DEL); |
3123 | 3341 | ||
3124 | cm_id->rem_ref(cm_id); | 3342 | cm_id->rem_ref(cm_id); |
3125 | return -ENOMEM; | 3343 | return -ENOMEM; |
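If the CM node cannot be created, nes_connect() rolls back the APBVT port entry it added a few lines earlier, the usual acquire-then-undo-on-error shape. A compact sketch of the pattern with hypothetical helpers:

#include <stdio.h>

static int add_port(unsigned short port)  { printf("add %u\n", port); return 0; }
static void del_port(unsigned short port) { printf("del %u\n", port); }
static int make_node(void) { return -1; }	/* force the failure path */

int main(void)
{
	unsigned short port = 4711;
	int apbvt_set = 0;

	if (add_port(port) == 0)
		apbvt_set = 1;

	if (make_node() < 0) {
		if (apbvt_set)		/* undo the earlier side effect */
			del_port(port);
		return 1;
	}
	return 0;
}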
@@ -3169,7 +3387,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3169 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); | 3387 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); |
3170 | if (!cm_node) { | 3388 | if (!cm_node) { |
3171 | printk(KERN_ERR "%s[%u] Error returned from listen API call\n", | 3389 | printk(KERN_ERR "%s[%u] Error returned from listen API call\n", |
3172 | __func__, __LINE__); | 3390 | __func__, __LINE__); |
3173 | return -ENOMEM; | 3391 | return -ENOMEM; |
3174 | } | 3392 | } |
3175 | 3393 | ||
@@ -3177,12 +3395,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3177 | 3395 | ||
3178 | if (!cm_node->reused_node) { | 3396 | if (!cm_node->reused_node) { |
3179 | err = nes_manage_apbvt(nesvnic, | 3397 | err = nes_manage_apbvt(nesvnic, |
3180 | ntohs(cm_id->local_addr.sin_port), | 3398 | ntohs(cm_id->local_addr.sin_port), |
3181 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), | 3399 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), |
3182 | NES_MANAGE_APBVT_ADD); | 3400 | NES_MANAGE_APBVT_ADD); |
3183 | if (err) { | 3401 | if (err) { |
3184 | printk(KERN_ERR "nes_manage_apbvt call returned %d.\n", | 3402 | printk(KERN_ERR "nes_manage_apbvt call returned %d.\n", |
3185 | err); | 3403 | err); |
3186 | g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); | 3404 | g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); |
3187 | return err; | 3405 | return err; |
3188 | } | 3406 | } |
@@ -3219,13 +3437,13 @@ int nes_destroy_listen(struct iw_cm_id *cm_id) | |||
3219 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) | 3437 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) |
3220 | { | 3438 | { |
3221 | int rc = 0; | 3439 | int rc = 0; |
3440 | |||
3222 | cm_packets_received++; | 3441 | cm_packets_received++; |
3223 | if ((g_cm_core) && (g_cm_core->api)) { | 3442 | if ((g_cm_core) && (g_cm_core->api)) |
3224 | rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); | 3443 | rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); |
3225 | } else { | 3444 | else |
3226 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," | 3445 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," |
3227 | " cm is not setup properly.\n"); | 3446 | " cm is not setup properly.\n"); |
3228 | } | ||
3229 | 3447 | ||
3230 | return rc; | 3448 | return rc; |
3231 | } | 3449 | } |
@@ -3240,11 +3458,10 @@ int nes_cm_start(void) | |||
3240 | nes_debug(NES_DBG_CM, "\n"); | 3458 | nes_debug(NES_DBG_CM, "\n"); |
3241 | /* create the primary CM core, pass this handle to subsequent core inits */ | 3459 | /* create the primary CM core, pass this handle to subsequent core inits */ |
3242 | g_cm_core = nes_cm_alloc_core(); | 3460 | g_cm_core = nes_cm_alloc_core(); |
3243 | if (g_cm_core) { | 3461 | if (g_cm_core) |
3244 | return 0; | 3462 | return 0; |
3245 | } else { | 3463 | else |
3246 | return -ENOMEM; | 3464 | return -ENOMEM; |
3247 | } | ||
3248 | } | 3465 | } |
3249 | 3466 | ||
3250 | 3467 | ||
@@ -3265,7 +3482,6 @@ int nes_cm_stop(void) | |||
3265 | */ | 3482 | */ |
3266 | static void cm_event_connected(struct nes_cm_event *event) | 3483 | static void cm_event_connected(struct nes_cm_event *event) |
3267 | { | 3484 | { |
3268 | u64 u64temp; | ||
3269 | struct nes_qp *nesqp; | 3485 | struct nes_qp *nesqp; |
3270 | struct nes_vnic *nesvnic; | 3486 | struct nes_vnic *nesvnic; |
3271 | struct nes_device *nesdev; | 3487 | struct nes_device *nesdev; |
@@ -3274,7 +3490,6 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3274 | struct ib_qp_attr attr; | 3490 | struct ib_qp_attr attr; |
3275 | struct iw_cm_id *cm_id; | 3491 | struct iw_cm_id *cm_id; |
3276 | struct iw_cm_event cm_event; | 3492 | struct iw_cm_event cm_event; |
3277 | struct nes_hw_qp_wqe *wqe; | ||
3278 | struct nes_v4_quad nes_quad; | 3493 | struct nes_v4_quad nes_quad; |
3279 | u32 crc_value; | 3494 | u32 crc_value; |
3280 | int ret; | 3495 | int ret; |
@@ -3288,17 +3503,16 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3288 | nesdev = nesvnic->nesdev; | 3503 | nesdev = nesvnic->nesdev; |
3289 | nesadapter = nesdev->nesadapter; | 3504 | nesadapter = nesdev->nesadapter; |
3290 | 3505 | ||
3291 | if (nesqp->destroyed) { | 3506 | if (nesqp->destroyed) |
3292 | return; | 3507 | return; |
3293 | } | ||
3294 | atomic_inc(&cm_connecteds); | 3508 | atomic_inc(&cm_connecteds); |
3295 | nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" | 3509 | nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" |
3296 | " local port 0x%04X. jiffies = %lu.\n", | 3510 | " local port 0x%04X. jiffies = %lu.\n", |
3297 | nesqp->hwqp.qp_id, | 3511 | nesqp->hwqp.qp_id, |
3298 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | 3512 | ntohl(cm_id->remote_addr.sin_addr.s_addr), |
3299 | ntohs(cm_id->remote_addr.sin_port), | 3513 | ntohs(cm_id->remote_addr.sin_port), |
3300 | ntohs(cm_id->local_addr.sin_port), | 3514 | ntohs(cm_id->local_addr.sin_port), |
3301 | jiffies); | 3515 | jiffies); |
3302 | 3516 | ||
3303 | nes_cm_init_tsa_conn(nesqp, cm_node); | 3517 | nes_cm_init_tsa_conn(nesqp, cm_node); |
3304 | 3518 | ||
@@ -3329,40 +3543,12 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3329 | NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); | 3543 | NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); |
3330 | 3544 | ||
3331 | /* Adjust tail for not having a LSMM */ | 3545 | /* Adjust tail for not having a LSMM */ |
3332 | nesqp->hwqp.sq_tail = 1; | 3546 | /*nesqp->hwqp.sq_tail = 1;*/ |
3333 | |||
3334 | #if defined(NES_SEND_FIRST_WRITE) | ||
3335 | if (cm_node->send_write0) { | ||
3336 | nes_debug(NES_DBG_CM, "Sending first write.\n"); | ||
3337 | wqe = &nesqp->hwqp.sq_vbase[0]; | ||
3338 | u64temp = (unsigned long)nesqp; | ||
3339 | u64temp |= NES_SW_CONTEXT_ALIGN>>1; | ||
3340 | set_wqe_64bit_value(wqe->wqe_words, | ||
3341 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); | ||
3342 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
3343 | cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); | ||
3344 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; | ||
3345 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; | ||
3346 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; | ||
3347 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; | ||
3348 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; | ||
3349 | 3547 | ||
3350 | if (nesqp->sq_kmapped) { | 3548 | build_rdma0_msg(cm_node, &nesqp); |
3351 | nesqp->sq_kmapped = 0; | ||
3352 | kunmap(nesqp->page); | ||
3353 | } | ||
3354 | 3549 | ||
3355 | /* use the reserved spot on the WQ for the extra first WQE */ | 3550 | nes_write32(nesdev->regs + NES_WQE_ALLOC, |
3356 | nesqp->nesqp_context->ird_ord_sizes &= | 3551 | (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); |
3357 | cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | ||
3358 | NES_QPCONTEXT_ORDIRD_WRPDU | | ||
3359 | NES_QPCONTEXT_ORDIRD_ALSMM)); | ||
3360 | nesqp->skip_lsmm = 1; | ||
3361 | nesqp->hwqp.sq_tail = 0; | ||
3362 | nes_write32(nesdev->regs + NES_WQE_ALLOC, | ||
3363 | (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); | ||
3364 | } | ||
3365 | #endif | ||
3366 | 3552 | ||
3367 | memset(&nes_quad, 0, sizeof(nes_quad)); | 3553 | memset(&nes_quad, 0, sizeof(nes_quad)); |
3368 | 3554 | ||
@@ -3379,13 +3565,13 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3379 | crc_value = get_crc_value(&nes_quad); | 3565 | crc_value = get_crc_value(&nes_quad); |
3380 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); | 3566 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
3381 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | 3567 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", |
3382 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | 3568 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); |
3383 | 3569 | ||
3384 | nesqp->hte_index &= nesadapter->hte_index_mask; | 3570 | nesqp->hte_index &= nesadapter->hte_index_mask; |
3385 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | 3571 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); |
3386 | 3572 | ||
3387 | nesqp->ietf_frame = &cm_node->mpa_frame; | 3573 | nesqp->ietf_frame = &cm_node->mpa_frame; |
3388 | nesqp->private_data_len = (u8) cm_node->mpa_frame_size; | 3574 | nesqp->private_data_len = (u8)cm_node->mpa_frame_size; |
3389 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); | 3575 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); |
3390 | 3576 | ||
3391 | /* notify OF layer we successfully created the requested connection */ | 3577 | /* notify OF layer we successfully created the requested connection */ |
@@ -3397,7 +3583,9 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3397 | cm_event.remote_addr = cm_id->remote_addr; | 3583 | cm_event.remote_addr = cm_id->remote_addr; |
3398 | 3584 | ||
3399 | cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; | 3585 | cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; |
3400 | cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size; | 3586 | cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size; |
3587 | cm_event.ird = cm_node->ird_size; | ||
3588 | cm_event.ord = cm_node->ord_size; | ||
3401 | 3589 | ||
3402 | cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; | 3590 | cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; |
3403 | ret = cm_id->event_handler(cm_id, &cm_event); | 3591 | ret = cm_id->event_handler(cm_id, &cm_event); |
@@ -3405,12 +3593,12 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3405 | 3593 | ||
3406 | if (ret) | 3594 | if (ret) |
3407 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 3595 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
3408 | "ret=%d\n", __func__, __LINE__, ret); | 3596 | "ret=%d\n", __func__, __LINE__, ret); |
3409 | attr.qp_state = IB_QPS_RTS; | 3597 | attr.qp_state = IB_QPS_RTS; |
3410 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | 3598 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); |
3411 | 3599 | ||
3412 | nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = " | 3600 | nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = " |
3413 | "%lu\n", nesqp->hwqp.qp_id, jiffies); | 3601 | "%lu\n", nesqp->hwqp.qp_id, jiffies); |
3414 | 3602 | ||
3415 | return; | 3603 | return; |
3416 | } | 3604 | } |
@@ -3431,16 +3619,14 @@ static void cm_event_connect_error(struct nes_cm_event *event) | |||
3431 | return; | 3619 | return; |
3432 | 3620 | ||
3433 | cm_id = event->cm_node->cm_id; | 3621 | cm_id = event->cm_node->cm_id; |
3434 | if (!cm_id) { | 3622 | if (!cm_id) |
3435 | return; | 3623 | return; |
3436 | } | ||
3437 | 3624 | ||
3438 | nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id); | 3625 | nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id); |
3439 | nesqp = cm_id->provider_data; | 3626 | nesqp = cm_id->provider_data; |
3440 | 3627 | ||
3441 | if (!nesqp) { | 3628 | if (!nesqp) |
3442 | return; | 3629 | return; |
3443 | } | ||
3444 | 3630 | ||
3445 | /* notify OF layer about this connection error event */ | 3631 | /* notify OF layer about this connection error event */ |
3446 | /* cm_id->rem_ref(cm_id); */ | 3632 | /* cm_id->rem_ref(cm_id); */ |
@@ -3455,14 +3641,14 @@ static void cm_event_connect_error(struct nes_cm_event *event) | |||
3455 | cm_event.private_data_len = 0; | 3641 | cm_event.private_data_len = 0; |
3456 | 3642 | ||
3457 | nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, " | 3643 | nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, " |
3458 | "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr, | 3644 | "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr, |
3459 | cm_event.remote_addr.sin_addr.s_addr); | 3645 | cm_event.remote_addr.sin_addr.s_addr); |
3460 | 3646 | ||
3461 | ret = cm_id->event_handler(cm_id, &cm_event); | 3647 | ret = cm_id->event_handler(cm_id, &cm_event); |
3462 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | 3648 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); |
3463 | if (ret) | 3649 | if (ret) |
3464 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 3650 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
3465 | "ret=%d\n", __func__, __LINE__, ret); | 3651 | "ret=%d\n", __func__, __LINE__, ret); |
3466 | cm_id->rem_ref(cm_id); | 3652 | cm_id->rem_ref(cm_id); |
3467 | 3653 | ||
3468 | rem_ref_cm_node(event->cm_node->cm_core, event->cm_node); | 3654 | rem_ref_cm_node(event->cm_node->cm_core, event->cm_node); |
@@ -3532,7 +3718,7 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3532 | */ | 3718 | */ |
3533 | static void cm_event_mpa_req(struct nes_cm_event *event) | 3719 | static void cm_event_mpa_req(struct nes_cm_event *event) |
3534 | { | 3720 | { |
3535 | struct iw_cm_id *cm_id; | 3721 | struct iw_cm_id *cm_id; |
3536 | struct iw_cm_event cm_event; | 3722 | struct iw_cm_event cm_event; |
3537 | int ret; | 3723 | int ret; |
3538 | struct nes_cm_node *cm_node; | 3724 | struct nes_cm_node *cm_node; |
@@ -3544,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
3544 | 3730 | ||
3545 | atomic_inc(&cm_connect_reqs); | 3731 | atomic_inc(&cm_connect_reqs); |
3546 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", | 3732 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", |
3547 | cm_node, cm_id, jiffies); | 3733 | cm_node, cm_id, jiffies); |
3548 | 3734 | ||
3549 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; | 3735 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; |
3550 | cm_event.status = 0; | 3736 | cm_event.status = 0; |
@@ -3558,19 +3744,21 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
3558 | cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); | 3744 | cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); |
3559 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); | 3745 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); |
3560 | cm_event.private_data = cm_node->mpa_frame_buf; | 3746 | cm_event.private_data = cm_node->mpa_frame_buf; |
3561 | cm_event.private_data_len = (u8) cm_node->mpa_frame_size; | 3747 | cm_event.private_data_len = (u8)cm_node->mpa_frame_size; |
3748 | cm_event.ird = cm_node->ird_size; | ||
3749 | cm_event.ord = cm_node->ord_size; | ||
3562 | 3750 | ||
3563 | ret = cm_id->event_handler(cm_id, &cm_event); | 3751 | ret = cm_id->event_handler(cm_id, &cm_event); |
3564 | if (ret) | 3752 | if (ret) |
3565 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | 3753 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", |
3566 | __func__, __LINE__, ret); | 3754 | __func__, __LINE__, ret); |
3567 | return; | 3755 | return; |
3568 | } | 3756 | } |
3569 | 3757 | ||
3570 | 3758 | ||
3571 | static void cm_event_mpa_reject(struct nes_cm_event *event) | 3759 | static void cm_event_mpa_reject(struct nes_cm_event *event) |
3572 | { | 3760 | { |
3573 | struct iw_cm_id *cm_id; | 3761 | struct iw_cm_id *cm_id; |
3574 | struct iw_cm_event cm_event; | 3762 | struct iw_cm_event cm_event; |
3575 | struct nes_cm_node *cm_node; | 3763 | struct nes_cm_node *cm_node; |
3576 | int ret; | 3764 | int ret; |
@@ -3582,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) | |||
3582 | 3770 | ||
3583 | atomic_inc(&cm_connect_reqs); | 3771 | atomic_inc(&cm_connect_reqs); |
3584 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", | 3772 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", |
3585 | cm_node, cm_id, jiffies); | 3773 | cm_node, cm_id, jiffies); |
3586 | 3774 | ||
3587 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | 3775 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; |
3588 | cm_event.status = -ECONNREFUSED; | 3776 | cm_event.status = -ECONNREFUSED; |
@@ -3597,17 +3785,17 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) | |||
3597 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); | 3785 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); |
3598 | 3786 | ||
3599 | cm_event.private_data = cm_node->mpa_frame_buf; | 3787 | cm_event.private_data = cm_node->mpa_frame_buf; |
3600 | cm_event.private_data_len = (u8) cm_node->mpa_frame_size; | 3788 | cm_event.private_data_len = (u8)cm_node->mpa_frame_size; |
3601 | 3789 | ||
3602 | nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, " | 3790 | nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, " |
3603 | "remove_addr=%08x\n", | 3791 | "remove_addr=%08x\n", |
3604 | cm_event.local_addr.sin_addr.s_addr, | 3792 | cm_event.local_addr.sin_addr.s_addr, |
3605 | cm_event.remote_addr.sin_addr.s_addr); | 3793 | cm_event.remote_addr.sin_addr.s_addr); |
3606 | 3794 | ||
3607 | ret = cm_id->event_handler(cm_id, &cm_event); | 3795 | ret = cm_id->event_handler(cm_id, &cm_event); |
3608 | if (ret) | 3796 | if (ret) |
3609 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | 3797 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", |
3610 | __func__, __LINE__, ret); | 3798 | __func__, __LINE__, ret); |
3611 | 3799 | ||
3612 | return; | 3800 | return; |
3613 | } | 3801 | } |
@@ -3626,7 +3814,7 @@ static int nes_cm_post_event(struct nes_cm_event *event) | |||
3626 | event->cm_info.cm_id->add_ref(event->cm_info.cm_id); | 3814 | event->cm_info.cm_id->add_ref(event->cm_info.cm_id); |
3627 | INIT_WORK(&event->event_work, nes_cm_event_handler); | 3815 | INIT_WORK(&event->event_work, nes_cm_event_handler); |
3628 | nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n", | 3816 | nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n", |
3629 | event->cm_node, event); | 3817 | event->cm_node, event); |
3630 | 3818 | ||
3631 | queue_work(event->cm_node->cm_core->event_wq, &event->event_work); | 3819 | queue_work(event->cm_node->cm_core->event_wq, &event->event_work); |
3632 | 3820 | ||
@@ -3643,7 +3831,7 @@ static int nes_cm_post_event(struct nes_cm_event *event) | |||
3643 | static void nes_cm_event_handler(struct work_struct *work) | 3831 | static void nes_cm_event_handler(struct work_struct *work) |
3644 | { | 3832 | { |
3645 | struct nes_cm_event *event = container_of(work, struct nes_cm_event, | 3833 | struct nes_cm_event *event = container_of(work, struct nes_cm_event, |
3646 | event_work); | 3834 | event_work); |
3647 | struct nes_cm_core *cm_core; | 3835 | struct nes_cm_core *cm_core; |
3648 | 3836 | ||
3649 | if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) | 3837 | if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) |
@@ -3651,29 +3839,29 @@ static void nes_cm_event_handler(struct work_struct *work) | |||
3651 | 3839 | ||
3652 | cm_core = event->cm_node->cm_core; | 3840 | cm_core = event->cm_node->cm_core; |
3653 | nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", | 3841 | nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", |
3654 | event, event->type, atomic_read(&cm_core->events_posted)); | 3842 | event, event->type, atomic_read(&cm_core->events_posted)); |
3655 | 3843 | ||
3656 | switch (event->type) { | 3844 | switch (event->type) { |
3657 | case NES_CM_EVENT_MPA_REQ: | 3845 | case NES_CM_EVENT_MPA_REQ: |
3658 | cm_event_mpa_req(event); | 3846 | cm_event_mpa_req(event); |
3659 | nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n", | 3847 | nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n", |
3660 | event->cm_node); | 3848 | event->cm_node); |
3661 | break; | 3849 | break; |
3662 | case NES_CM_EVENT_RESET: | 3850 | case NES_CM_EVENT_RESET: |
3663 | nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n", | 3851 | nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n", |
3664 | event->cm_node); | 3852 | event->cm_node); |
3665 | cm_event_reset(event); | 3853 | cm_event_reset(event); |
3666 | break; | 3854 | break; |
3667 | case NES_CM_EVENT_CONNECTED: | 3855 | case NES_CM_EVENT_CONNECTED: |
3668 | if ((!event->cm_node->cm_id) || | 3856 | if ((!event->cm_node->cm_id) || |
3669 | (event->cm_node->state != NES_CM_STATE_TSA)) | 3857 | (event->cm_node->state != NES_CM_STATE_TSA)) |
3670 | break; | 3858 | break; |
3671 | cm_event_connected(event); | 3859 | cm_event_connected(event); |
3672 | nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); | 3860 | nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); |
3673 | break; | 3861 | break; |
3674 | case NES_CM_EVENT_MPA_REJECT: | 3862 | case NES_CM_EVENT_MPA_REJECT: |
3675 | if ((!event->cm_node->cm_id) || | 3863 | if ((!event->cm_node->cm_id) || |
3676 | (event->cm_node->state == NES_CM_STATE_TSA)) | 3864 | (event->cm_node->state == NES_CM_STATE_TSA)) |
3677 | break; | 3865 | break; |
3678 | cm_event_mpa_reject(event); | 3866 | cm_event_mpa_reject(event); |
3679 | nes_debug(NES_DBG_CM, "CM Event: REJECT\n"); | 3867 | nes_debug(NES_DBG_CM, "CM Event: REJECT\n"); |
@@ -3681,7 +3869,7 @@ static void nes_cm_event_handler(struct work_struct *work) | |||
3681 | 3869 | ||
3682 | case NES_CM_EVENT_ABORTED: | 3870 | case NES_CM_EVENT_ABORTED: |
3683 | if ((!event->cm_node->cm_id) || | 3871 | if ((!event->cm_node->cm_id) || |
3684 | (event->cm_node->state == NES_CM_STATE_TSA)) | 3872 | (event->cm_node->state == NES_CM_STATE_TSA)) |
3685 | break; | 3873 | break; |
3686 | cm_event_connect_error(event); | 3874 | cm_event_connect_error(event); |
3687 | nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); | 3875 | nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); |
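The nes_cm.c hunks above begin reporting the negotiated IRD/ORD values in the iw_cm_event delivered to the OFA core, both on the active-side connected event and on the passive-side MPA request. A minimal consumer sketch; the handler name and log line are illustrative, and only the ird/ord field reads come from this series:

#include <rdma/iw_cm.h>

/* Hypothetical handler sketch: only the ird/ord reads come from this patch. */
static int example_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	if (event->event == IW_CM_EVENT_CONNECT_REQUEST)
		pr_info("cm_id %p: peer advertised ird=%u ord=%u\n",
			cm_id, event->ird, event->ord);
	return 0;
}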
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 130c185cde0d..bdfa1fbb35fc 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -48,7 +48,16 @@ | |||
48 | #define IETF_MPA_KEY_SIZE 16 | 48 | #define IETF_MPA_KEY_SIZE 16 |
49 | #define IETF_MPA_VERSION 1 | 49 | #define IETF_MPA_VERSION 1 |
50 | #define IETF_MAX_PRIV_DATA_LEN 512 | 50 | #define IETF_MAX_PRIV_DATA_LEN 512 |
51 | #define IETF_MPA_FRAME_SIZE 20 | 51 | #define IETF_MPA_FRAME_SIZE 20 |
52 | #define IETF_RTR_MSG_SIZE 4 | ||
53 | #define IETF_MPA_V2_FLAG 0x10 | ||
54 | |||
55 | /* IETF RTR MSG Fields */ | ||
56 | #define IETF_PEER_TO_PEER 0x8000 | ||
57 | #define IETF_FLPDU_ZERO_LEN 0x4000 | ||
58 | #define IETF_RDMA0_WRITE 0x8000 | ||
59 | #define IETF_RDMA0_READ 0x4000 | ||
60 | #define IETF_NO_IRD_ORD 0x3FFF | ||
52 | 61 | ||
53 | enum ietf_mpa_flags { | 62 | enum ietf_mpa_flags { |
54 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ | 63 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ |
@@ -56,7 +65,7 @@ enum ietf_mpa_flags { | |||
56 | IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ | 65 | IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ |
57 | }; | 66 | }; |
58 | 67 | ||
59 | struct ietf_mpa_frame { | 68 | struct ietf_mpa_v1 { |
60 | u8 key[IETF_MPA_KEY_SIZE]; | 69 | u8 key[IETF_MPA_KEY_SIZE]; |
61 | u8 flags; | 70 | u8 flags; |
62 | u8 rev; | 71 | u8 rev; |
@@ -66,6 +75,20 @@ struct ietf_mpa_frame { | |||
66 | 75 | ||
67 | #define ietf_mpa_req_resp_frame ietf_mpa_frame | 76 | #define ietf_mpa_req_resp_frame ietf_mpa_frame |
68 | 77 | ||
78 | struct ietf_rtr_msg { | ||
79 | __be16 ctrl_ird; | ||
80 | __be16 ctrl_ord; | ||
81 | }; | ||
82 | |||
83 | struct ietf_mpa_v2 { | ||
84 | u8 key[IETF_MPA_KEY_SIZE]; | ||
85 | u8 flags; | ||
86 | u8 rev; | ||
87 | __be16 priv_data_len; | ||
88 | struct ietf_rtr_msg rtr_msg; | ||
89 | u8 priv_data[0]; | ||
90 | }; | ||
91 | |||
69 | struct nes_v4_quad { | 92 | struct nes_v4_quad { |
70 | u32 rsvd0; | 93 | u32 rsvd0; |
71 | __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */ | 94 | __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */ |
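The ietf_rtr_msg added above carries the MPA v2 IRD/ORD negotiation in two big-endian control words: IETF_PEER_TO_PEER and IETF_FLPDU_ZERO_LEN live in ctrl_ird, IETF_RDMA0_WRITE and IETF_RDMA0_READ in ctrl_ord, and the low 14 bits (IETF_NO_IRD_ORD) hold the counts. A decoding sketch using the definitions above (the helper itself is hypothetical):

/* Hypothetical helper: unpack an MPA v2 RTR message with the masks above. */
static void example_parse_rtr(const struct ietf_rtr_msg *rtr,
			      u16 *ird, u16 *ord, bool *peer_to_peer)
{
	u16 ctrl_ird = be16_to_cpu(rtr->ctrl_ird);
	u16 ctrl_ord = be16_to_cpu(rtr->ctrl_ord);

	*peer_to_peer = !!(ctrl_ird & IETF_PEER_TO_PEER);
	*ird = ctrl_ird & IETF_NO_IRD_ORD;	/* low 14 bits; 0x3FFF reads as "no value given" */
	*ord = ctrl_ord & IETF_NO_IRD_ORD;
	/* In peer-to-peer mode, ctrl_ord also selects whether RDMA0 is a
	 * zero-length write or read (IETF_RDMA0_WRITE / IETF_RDMA0_READ). */
}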
@@ -171,8 +194,7 @@ struct nes_timer_entry { | |||
171 | 194 | ||
172 | #define NES_CM_DEF_SEQ2 0x18ed5740 | 195 | #define NES_CM_DEF_SEQ2 0x18ed5740 |
173 | #define NES_CM_DEF_LOCAL_ID2 0xb807 | 196 | #define NES_CM_DEF_LOCAL_ID2 0xb807 |
174 | #define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN) | 197 | #define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_RTR_MSG_SIZE + IETF_MAX_PRIV_DATA_LEN) |
175 | |||
176 | 198 | ||
177 | typedef u32 nes_addr_t; | 199 | typedef u32 nes_addr_t; |
178 | 200 | ||
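The MAX_CM_BUFFER change follows directly from the new frame layout: the worst case is now the 20-byte MPA header plus the 4-byte RTR message plus up to 512 bytes of private data. A compile-time sanity check, as a sketch assuming the structures pack without padding (all fields are byte- or halfword-aligned, so they do):

#include <linux/bug.h>

/* Sketch: tie MAX_CM_BUFFER to the structures it must hold. */
static inline void example_mpa_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct ietf_mpa_v2) !=
		     IETF_MPA_FRAME_SIZE + IETF_RTR_MSG_SIZE);	/* 20 + 4 */
	BUILD_BUG_ON(MAX_CM_BUFFER <
		     sizeof(struct ietf_mpa_v2) + IETF_MAX_PRIV_DATA_LEN);
}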
@@ -204,6 +226,21 @@ enum nes_cm_node_state { | |||
204 | NES_CM_STATE_CLOSED | 226 | NES_CM_STATE_CLOSED |
205 | }; | 227 | }; |
206 | 228 | ||
229 | enum mpa_frame_version { | ||
230 | IETF_MPA_V1 = 1, | ||
231 | IETF_MPA_V2 = 2 | ||
232 | }; | ||
233 | |||
234 | enum mpa_frame_key { | ||
235 | MPA_KEY_REQUEST, | ||
236 | MPA_KEY_REPLY | ||
237 | }; | ||
238 | |||
239 | enum send_rdma0 { | ||
240 | SEND_RDMA_READ_ZERO = 1, | ||
241 | SEND_RDMA_WRITE_ZERO = 2 | ||
242 | }; | ||
243 | |||
207 | enum nes_tcpip_pkt_type { | 244 | enum nes_tcpip_pkt_type { |
208 | NES_PKT_TYPE_UNKNOWN, | 245 | NES_PKT_TYPE_UNKNOWN, |
209 | NES_PKT_TYPE_SYN, | 246 | NES_PKT_TYPE_SYN, |
@@ -245,9 +282,9 @@ struct nes_cm_tcp_context { | |||
245 | 282 | ||
246 | 283 | ||
247 | enum nes_cm_listener_state { | 284 | enum nes_cm_listener_state { |
248 | NES_CM_LISTENER_PASSIVE_STATE=1, | 285 | NES_CM_LISTENER_PASSIVE_STATE = 1, |
249 | NES_CM_LISTENER_ACTIVE_STATE=2, | 286 | NES_CM_LISTENER_ACTIVE_STATE = 2, |
250 | NES_CM_LISTENER_EITHER_STATE=3 | 287 | NES_CM_LISTENER_EITHER_STATE = 3 |
251 | }; | 288 | }; |
252 | 289 | ||
253 | struct nes_cm_listener { | 290 | struct nes_cm_listener { |
@@ -283,16 +320,20 @@ struct nes_cm_node { | |||
283 | 320 | ||
284 | struct nes_cm_node *loopbackpartner; | 321 | struct nes_cm_node *loopbackpartner; |
285 | 322 | ||
286 | struct nes_timer_entry *send_entry; | 323 | struct nes_timer_entry *send_entry; |
287 | 324 | struct nes_timer_entry *recv_entry; | |
288 | spinlock_t retrans_list_lock; | 325 | spinlock_t retrans_list_lock; |
289 | struct nes_timer_entry *recv_entry; | 326 | enum send_rdma0 send_rdma0_op; |
290 | 327 | ||
291 | int send_write0; | ||
292 | union { | 328 | union { |
293 | struct ietf_mpa_frame mpa_frame; | 329 | struct ietf_mpa_v1 mpa_frame; |
294 | u8 mpa_frame_buf[MAX_CM_BUFFER]; | 330 | struct ietf_mpa_v2 mpa_v2_frame; |
331 | u8 mpa_frame_buf[MAX_CM_BUFFER]; | ||
295 | }; | 332 | }; |
333 | enum mpa_frame_version mpa_frame_rev; | ||
334 | u16 ird_size; | ||
335 | u16 ord_size; | ||
336 | |||
296 | u16 mpa_frame_size; | 337 | u16 mpa_frame_size; |
297 | struct iw_cm_id *cm_id; | 338 | struct iw_cm_id *cm_id; |
298 | struct list_head list; | 339 | struct list_head list; |
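Because the v1 and v2 frames share one buffer through the anonymous union above, the RTR fields are only meaningful when mpa_frame_rev records a v2 exchange. A hypothetical accessor makes the invariant explicit:

/* Hypothetical accessor: the RTR message exists only in a v2 frame. */
static struct ietf_rtr_msg *example_rtr_of(struct nes_cm_node *cm_node)
{
	if (cm_node->mpa_frame_rev != IETF_MPA_V2)
		return NULL;
	return &cm_node->mpa_v2_frame.rtr_msg;
}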
@@ -399,10 +440,8 @@ struct nes_cm_ops { | |||
399 | struct nes_vnic *, u16, void *, | 440 | struct nes_vnic *, u16, void *, |
400 | struct nes_cm_info *); | 441 | struct nes_cm_info *); |
401 | int (*close)(struct nes_cm_core *, struct nes_cm_node *); | 442 | int (*close)(struct nes_cm_core *, struct nes_cm_node *); |
402 | int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *, | 443 | int (*accept)(struct nes_cm_core *, struct nes_cm_node *); |
403 | struct nes_cm_node *); | 444 | int (*reject)(struct nes_cm_core *, struct nes_cm_node *); |
404 | int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, | ||
405 | struct nes_cm_node *); | ||
406 | int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, | 445 | int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, |
407 | struct sk_buff *); | 446 | struct sk_buff *); |
408 | int (*destroy_cm_core)(struct nes_cm_core *); | 447 | int (*destroy_cm_core)(struct nes_cm_core *); |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index 280057353343..fe6b6e92fa90 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h | |||
@@ -139,7 +139,8 @@ struct nes_qp { | |||
139 | struct nes_cq *nesrcq; | 139 | struct nes_cq *nesrcq; |
140 | struct nes_pd *nespd; | 140 | struct nes_pd *nespd; |
141 | void *cm_node; /* handle of the node this QP is associated with */ | 141 | void *cm_node; /* handle of the node this QP is associated with */ |
142 | struct ietf_mpa_frame *ietf_frame; | 142 | void *ietf_frame; |
143 | u8 ietf_frame_size; | ||
143 | dma_addr_t ietf_frame_pbase; | 144 | dma_addr_t ietf_frame_pbase; |
144 | struct ib_mr *lsmm_mr; | 145 | struct ib_mr *lsmm_mr; |
145 | struct nes_hw_qp hwqp; | 146 | struct nes_hw_qp hwqp; |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index c9624ea87209..b881bdc401f5 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -171,7 +171,9 @@ struct qib_ctxtdata { | |||
171 | /* how many alloc_pages() chunks in rcvegrbuf_pages */ | 171 | /* how many alloc_pages() chunks in rcvegrbuf_pages */ |
172 | u32 rcvegrbuf_chunks; | 172 | u32 rcvegrbuf_chunks; |
173 | /* how many egrbufs per chunk */ | 173 | /* how many egrbufs per chunk */ |
174 | u32 rcvegrbufs_perchunk; | 174 | u16 rcvegrbufs_perchunk; |
175 | /* ilog2 of above */ | ||
176 | u16 rcvegrbufs_perchunk_shift; | ||
175 | /* order for rcvegrbuf_pages */ | 177 | /* order for rcvegrbuf_pages */ |
176 | size_t rcvegrbuf_size; | 178 | size_t rcvegrbuf_size; |
177 | /* rcvhdrq size (for freeing) */ | 179 | /* rcvhdrq size (for freeing) */ |
@@ -221,6 +223,9 @@ struct qib_ctxtdata { | |||
221 | /* ctxt rcvhdrq head offset */ | 223 | /* ctxt rcvhdrq head offset */ |
222 | u32 head; | 224 | u32 head; |
223 | u32 pkt_count; | 225 | u32 pkt_count; |
226 | /* lookaside fields */ | ||
227 | struct qib_qp *lookaside_qp; | ||
228 | u32 lookaside_qpn; | ||
224 | /* QPs waiting for context processing */ | 229 | /* QPs waiting for context processing */ |
225 | struct list_head qp_wait_list; | 230 | struct list_head qp_wait_list; |
226 | }; | 231 | }; |
@@ -807,6 +812,10 @@ struct qib_devdata { | |||
807 | * supports, less gives more pio bufs/ctxt, etc. | 812 | * supports, less gives more pio bufs/ctxt, etc. |
808 | */ | 813 | */ |
809 | u32 cfgctxts; | 814 | u32 cfgctxts; |
815 | /* | ||
816 | * number of ctxts available for PSM open | ||
817 | */ | ||
818 | u32 freectxts; | ||
810 | 819 | ||
811 | /* | 820 | /* |
812 | * hint that we should update pioavailshadow before | 821 | * hint that we should update pioavailshadow before |
@@ -936,7 +945,9 @@ struct qib_devdata { | |||
936 | /* chip address space used by 4k pio buffers */ | 945 | /* chip address space used by 4k pio buffers */ |
937 | u32 align4k; | 946 | u32 align4k; |
938 | /* size of each rcvegrbuffer */ | 947 | /* size of each rcvegrbuffer */ |
939 | u32 rcvegrbufsize; | 948 | u16 rcvegrbufsize; |
949 | /* log2 of above */ | ||
950 | u16 rcvegrbufsize_shift; | ||
940 | /* localbus width (1, 2,4,8,16,32) from config space */ | 951 | /* localbus width (1, 2,4,8,16,32) from config space */ |
941 | u32 lbus_width; | 952 | u32 lbus_width; |
942 | /* localbus speed in MHz */ | 953 | /* localbus speed in MHz */ |
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 23e584f4c36c..9a9047f385ae 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
@@ -279,10 +279,10 @@ bail: | |||
279 | */ | 279 | */ |
280 | static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) | 280 | static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) |
281 | { | 281 | { |
282 | const u32 chunk = etail / rcd->rcvegrbufs_perchunk; | 282 | const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift; |
283 | const u32 idx = etail % rcd->rcvegrbufs_perchunk; | 283 | const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1); |
284 | 284 | ||
285 | return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize; | 285 | return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift); |
286 | } | 286 | } |
287 | 287 | ||
288 | /* | 288 | /* |
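qib_get_egrbuf() runs once per received packet, so the divide and modulo become a shift and mask. That is only correct because rcvegrbufs_perchunk and rcvegrbufsize are forced to powers of two by the BUG_ON/ilog2 additions in the chip init code further down. A standalone check of the identity being relied on:

#include <assert.h>
#include <stdint.h>

/* For power-of-two n = 1 << s:  x / n == x >> s  and  x % n == x & (n - 1). */
int main(void)
{
	const uint32_t shift = 5, perchunk = 1u << 5;
	uint32_t etail;

	for (etail = 0; etail < 100000; etail++) {
		assert(etail / perchunk == etail >> shift);
		assert(etail % perchunk == (etail & (perchunk - 1)));
	}
	return 0;
}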
@@ -310,7 +310,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, | |||
310 | u32 opcode; | 310 | u32 opcode; |
311 | u32 psn; | 311 | u32 psn; |
312 | int diff; | 312 | int diff; |
313 | unsigned long flags; | ||
314 | 313 | ||
315 | /* Sanity check packet */ | 314 | /* Sanity check packet */ |
316 | if (tlen < 24) | 315 | if (tlen < 24) |
@@ -365,7 +364,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, | |||
365 | 364 | ||
366 | switch (qp->ibqp.qp_type) { | 365 | switch (qp->ibqp.qp_type) { |
367 | case IB_QPT_RC: | 366 | case IB_QPT_RC: |
368 | spin_lock_irqsave(&qp->s_lock, flags); | ||
369 | ruc_res = | 367 | ruc_res = |
370 | qib_ruc_check_hdr( | 368 | qib_ruc_check_hdr( |
371 | ibp, hdr, | 369 | ibp, hdr, |
@@ -373,11 +371,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, | |||
373 | qp, | 371 | qp, |
374 | be32_to_cpu(ohdr->bth[0])); | 372 | be32_to_cpu(ohdr->bth[0])); |
375 | if (ruc_res) { | 373 | if (ruc_res) { |
376 | spin_unlock_irqrestore(&qp->s_lock, | ||
377 | flags); | ||
378 | goto unlock; | 374 | goto unlock; |
379 | } | 375 | } |
380 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
381 | 376 | ||
382 | /* Only deal with RDMA Writes for now */ | 377 | /* Only deal with RDMA Writes for now */ |
383 | if (opcode < | 378 | if (opcode < |
@@ -547,6 +542,15 @@ move_along: | |||
547 | updegr = 0; | 542 | updegr = 0; |
548 | } | 543 | } |
549 | } | 544 | } |
545 | /* | ||
546 | * Notify qib_destroy_qp() if it is waiting | ||
547 | * for lookaside_qp to finish. | ||
548 | */ | ||
549 | if (rcd->lookaside_qp) { | ||
550 | if (atomic_dec_and_test(&rcd->lookaside_qp->refcount)) | ||
551 | wake_up(&rcd->lookaside_qp->wait); | ||
552 | rcd->lookaside_qp = NULL; | ||
553 | } | ||
550 | 554 | ||
551 | rcd->head = l; | 555 | rcd->head = l; |
552 | rcd->pkt_count += i; | 556 | rcd->pkt_count += i; |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 26253039d2c7..77633666f81c 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -1284,6 +1284,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, | |||
1284 | strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); | 1284 | strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); |
1285 | ctxt_fp(fp) = rcd; | 1285 | ctxt_fp(fp) = rcd; |
1286 | qib_stats.sps_ctxts++; | 1286 | qib_stats.sps_ctxts++; |
1287 | dd->freectxts++; | ||
1287 | ret = 0; | 1288 | ret = 0; |
1288 | goto bail; | 1289 | goto bail; |
1289 | 1290 | ||
@@ -1792,6 +1793,7 @@ static int qib_close(struct inode *in, struct file *fp) | |||
1792 | if (dd->pageshadow) | 1793 | if (dd->pageshadow) |
1793 | unlock_expected_tids(rcd); | 1794 | unlock_expected_tids(rcd); |
1794 | qib_stats.sps_ctxts--; | 1795 | qib_stats.sps_ctxts--; |
1796 | dd->freectxts--; | ||
1795 | } | 1797 | } |
1796 | 1798 | ||
1797 | mutex_unlock(&qib_mutex); | 1799 | mutex_unlock(&qib_mutex); |
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d8ca0a0b970d..781a802a321f 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -3273,6 +3273,8 @@ static int init_6120_variables(struct qib_devdata *dd) | |||
3273 | /* we always allocate at least 2048 bytes for eager buffers */ | 3273 | /* we always allocate at least 2048 bytes for eager buffers */ |
3274 | ret = ib_mtu_enum_to_int(qib_ibmtu); | 3274 | ret = ib_mtu_enum_to_int(qib_ibmtu); |
3275 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | 3275 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; |
3276 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); | ||
3277 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | ||
3276 | 3278 | ||
3277 | qib_6120_tidtemplate(dd); | 3279 | qib_6120_tidtemplate(dd); |
3278 | 3280 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index e1f947446c2a..3f1d562ba898 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
@@ -4085,6 +4085,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
4085 | /* we always allocate at least 2048 bytes for eager buffers */ | 4085 | /* we always allocate at least 2048 bytes for eager buffers */ |
4086 | ret = ib_mtu_enum_to_int(qib_ibmtu); | 4086 | ret = ib_mtu_enum_to_int(qib_ibmtu); |
4087 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | 4087 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; |
4088 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); | ||
4089 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | ||
4088 | 4090 | ||
4089 | qib_7220_tidtemplate(dd); | 4091 | qib_7220_tidtemplate(dd); |
4090 | 4092 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5ea9ece23b33..efd0a110091f 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -2310,12 +2310,15 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2310 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | 2310 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << |
2311 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | 2311 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); |
2312 | 2312 | ||
2313 | ppd->cpspec->ibcctrl_a = val; | ||
2313 | /* | 2314 | /* |
2314 | * Reset the PCS interface to the serdes (and also ibc, which is still | 2315 | * Reset the PCS interface to the serdes (and also ibc, which is still |
2315 | * in reset from above). Writes new value of ibcctrl_a as last step. | 2316 | * in reset from above). Writes new value of ibcctrl_a as last step. |
2316 | */ | 2317 | */ |
2317 | qib_7322_mini_pcs_reset(ppd); | 2318 | qib_7322_mini_pcs_reset(ppd); |
2318 | qib_write_kreg(dd, kr_scratch, 0ULL); | 2319 | qib_write_kreg(dd, kr_scratch, 0ULL); |
2320 | /* clear the linkinit cmds */ | ||
2321 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); | ||
2319 | 2322 | ||
2320 | if (!ppd->cpspec->ibcctrl_b) { | 2323 | if (!ppd->cpspec->ibcctrl_b) { |
2321 | unsigned lse = ppd->link_speed_enabled; | 2324 | unsigned lse = ppd->link_speed_enabled; |
@@ -2387,11 +2390,6 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2387 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | 2390 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); |
2388 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | 2391 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); |
2389 | 2392 | ||
2390 | /* Hold the link state machine for mezz boards */ | ||
2391 | if (IS_QMH(dd) || IS_QME(dd)) | ||
2392 | qib_set_ib_7322_lstate(ppd, 0, | ||
2393 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
2394 | |||
2395 | /* Also enable IBSTATUSCHG interrupt. */ | 2393 | /* Also enable IBSTATUSCHG interrupt. */ |
2396 | val = qib_read_kreg_port(ppd, krp_errmask); | 2394 | val = qib_read_kreg_port(ppd, krp_errmask); |
2397 | qib_write_kreg_port(ppd, krp_errmask, | 2395 | qib_write_kreg_port(ppd, krp_errmask, |
@@ -2853,9 +2851,8 @@ static irqreturn_t qib_7322intr(int irq, void *data) | |||
2853 | for (i = 0; i < dd->first_user_ctxt; i++) { | 2851 | for (i = 0; i < dd->first_user_ctxt; i++) { |
2854 | if (ctxtrbits & rmask) { | 2852 | if (ctxtrbits & rmask) { |
2855 | ctxtrbits &= ~rmask; | 2853 | ctxtrbits &= ~rmask; |
2856 | if (dd->rcd[i]) { | 2854 | if (dd->rcd[i]) |
2857 | qib_kreceive(dd->rcd[i], NULL, &npkts); | 2855 | qib_kreceive(dd->rcd[i], NULL, &npkts); |
2858 | } | ||
2859 | } | 2856 | } |
2860 | rmask <<= 1; | 2857 | rmask <<= 1; |
2861 | } | 2858 | } |
@@ -5230,6 +5227,8 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |||
5230 | QIBL_IB_AUTONEG_INPROG))) | 5227 | QIBL_IB_AUTONEG_INPROG))) |
5231 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | 5228 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); |
5232 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | 5229 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { |
5230 | struct qib_qsfp_data *qd = | ||
5231 | &ppd->cpspec->qsfp_data; | ||
5233 | /* unlock the Tx settings, speed may change */ | 5232 | /* unlock the Tx settings, speed may change */ |
5234 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | 5233 | qib_write_kreg_port(ppd, krp_tx_deemph_override, |
5235 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 5234 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
@@ -5237,6 +5236,12 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |||
5237 | qib_cancel_sends(ppd); | 5236 | qib_cancel_sends(ppd); |
5238 | /* on link down, ensure sane pcs state */ | 5237 | /* on link down, ensure sane pcs state */ |
5239 | qib_7322_mini_pcs_reset(ppd); | 5238 | qib_7322_mini_pcs_reset(ppd); |
5239 | /* schedule the qsfp refresh which should turn the link | ||
5240 | off */ | ||
5241 | if (ppd->dd->flags & QIB_HAS_QSFP) { | ||
5242 | qd->t_insert = get_jiffies_64(); | ||
5243 | schedule_work(&qd->work); | ||
5244 | } | ||
5240 | spin_lock_irqsave(&ppd->sdma_lock, flags); | 5245 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
5241 | if (__qib_sdma_running(ppd)) | 5246 | if (__qib_sdma_running(ppd)) |
5242 | __qib_sdma_process_event(ppd, | 5247 | __qib_sdma_process_event(ppd, |
@@ -5587,43 +5592,79 @@ static void qsfp_7322_event(struct work_struct *work) | |||
5587 | struct qib_qsfp_data *qd; | 5592 | struct qib_qsfp_data *qd; |
5588 | struct qib_pportdata *ppd; | 5593 | struct qib_pportdata *ppd; |
5589 | u64 pwrup; | 5594 | u64 pwrup; |
5595 | unsigned long flags; | ||
5590 | int ret; | 5596 | int ret; |
5591 | u32 le2; | 5597 | u32 le2; |
5592 | 5598 | ||
5593 | qd = container_of(work, struct qib_qsfp_data, work); | 5599 | qd = container_of(work, struct qib_qsfp_data, work); |
5594 | ppd = qd->ppd; | 5600 | ppd = qd->ppd; |
5595 | pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC); | 5601 | pwrup = qd->t_insert + |
5602 | msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC); | ||
5596 | 5603 | ||
5597 | /* | 5604 | /* Delay for 20 msecs to allow ModPrs resistor to setup */ |
5598 | * Some QSFP's not only do not respond until the full power-up | 5605 | mdelay(QSFP_MODPRS_LAG_MSEC); |
5599 | * time, but may behave badly if we try. So hold off responding | 5606 | |
5600 | * to insertion. | 5607 | if (!qib_qsfp_mod_present(ppd)) { |
5601 | */ | 5608 | ppd->cpspec->qsfp_data.modpresent = 0; |
5602 | while (1) { | 5609 | /* Set the physical link to disabled */ |
5603 | u64 now = get_jiffies_64(); | 5610 | qib_set_ib_7322_lstate(ppd, 0, |
5604 | if (time_after64(now, pwrup)) | 5611 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); |
5605 | break; | 5612 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
5606 | msleep(20); | 5613 | ppd->lflags &= ~QIBL_LINKV; |
5607 | } | 5614 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); |
5608 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); | 5615 | } else { |
5609 | /* | 5616 | /* |
5610 | * Need to change LE2 back to defaults if we couldn't | 5617 | * Some QSFP's not only do not respond until the full power-up |
5611 | * read the cable type (to handle cable swaps), so do this | 5618 | * time, but may behave badly if we try. So hold off responding |
5612 | * even on failure to read cable information. We don't | 5619 | * to insertion. |
5613 | * get here for QME, so IS_QME check not needed here. | 5620 | */ |
5614 | */ | 5621 | while (1) { |
5615 | if (!ret && !ppd->dd->cspec->r1) { | 5622 | u64 now = get_jiffies_64(); |
5616 | if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) | 5623 | if (time_after64(now, pwrup)) |
5617 | le2 = LE2_QME; | 5624 | break; |
5618 | else if (qd->cache.atten[1] >= qib_long_atten && | 5625 | msleep(20); |
5619 | QSFP_IS_CU(qd->cache.tech)) | 5626 | } |
5620 | le2 = LE2_5m; | 5627 | |
5621 | else | 5628 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); |
5629 | |||
5630 | /* | ||
5631 | * Need to change LE2 back to defaults if we couldn't | ||
5632 | * read the cable type (to handle cable swaps), so do this | ||
5633 | * even on failure to read cable information. We don't | ||
5634 | * get here for QME, so IS_QME check not needed here. | ||
5635 | */ | ||
5636 | if (!ret && !ppd->dd->cspec->r1) { | ||
5637 | if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) | ||
5638 | le2 = LE2_QME; | ||
5639 | else if (qd->cache.atten[1] >= qib_long_atten && | ||
5640 | QSFP_IS_CU(qd->cache.tech)) | ||
5641 | le2 = LE2_5m; | ||
5642 | else | ||
5643 | le2 = LE2_DEFAULT; | ||
5644 | } else | ||
5622 | le2 = LE2_DEFAULT; | 5645 | le2 = LE2_DEFAULT; |
5623 | } else | 5646 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); |
5624 | le2 = LE2_DEFAULT; | 5647 | /* |
5625 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); | 5648 | * We always change parameters, since we can choose |
5626 | init_txdds_table(ppd, 0); | 5649 | * values for cables without eeproms, and the cable may have |
5650 | * changed from a cable with full or partial eeprom content | ||
5651 | * to one with partial or no content. | ||
5652 | */ | ||
5653 | init_txdds_table(ppd, 0); | ||
5654 | /* The physical link is being re-enabled only when the | ||
5655 | * previous state was DISABLED and the VALID bit is not | ||
5656 | * set. This should only happen when the cable has been | ||
5657 | * physically pulled. */ | ||
5658 | if (!ppd->cpspec->qsfp_data.modpresent && | ||
5659 | (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) { | ||
5660 | ppd->cpspec->qsfp_data.modpresent = 1; | ||
5661 | qib_set_ib_7322_lstate(ppd, 0, | ||
5662 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | ||
5663 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5664 | ppd->lflags |= QIBL_LINKV; | ||
5665 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5666 | } | ||
5667 | } | ||
5627 | } | 5668 | } |
5628 | 5669 | ||
5629 | /* | 5670 | /* |
@@ -5727,7 +5768,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
5727 | /* now change the IBC and serdes, overriding generic */ | 5768 | /* now change the IBC and serdes, overriding generic */ |
5728 | init_txdds_table(ppd, 1); | 5769 | init_txdds_table(ppd, 1); |
5729 | /* Re-enable the physical state machine on mezz boards | 5770 | /* Re-enable the physical state machine on mezz boards |
5730 | * now that the correct settings have been set. */ | 5771 | * now that the correct settings have been set. |
5772 | * QSFP boards are handled by the QSFP event handler */ |
5731 | if (IS_QMH(dd) || IS_QME(dd)) | 5773 | if (IS_QMH(dd) || IS_QME(dd)) |
5732 | qib_set_ib_7322_lstate(ppd, 0, | 5774 | qib_set_ib_7322_lstate(ppd, 0, |
5733 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | 5775 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); |
@@ -6205,6 +6247,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6205 | 6247 | ||
6206 | /* we always allocate at least 2048 bytes for eager buffers */ | 6248 | /* we always allocate at least 2048 bytes for eager buffers */ |
6207 | dd->rcvegrbufsize = max(mtu, 2048); | 6249 | dd->rcvegrbufsize = max(mtu, 2048); |
6250 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); | ||
6251 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | ||
6208 | 6252 | ||
6209 | qib_7322_tidtemplate(dd); | 6253 | qib_7322_tidtemplate(dd); |
6210 | 6254 | ||
@@ -7147,7 +7191,8 @@ static void find_best_ent(struct qib_pportdata *ppd, | |||
7147 | } | 7191 | } |
7148 | } | 7192 | } |
7149 | 7193 | ||
7150 | /* Lookup serdes setting by cable type and attenuation */ | 7194 | /* Active cables don't have attenuation so we only set SERDES |
7195 | * settings to account for the attenuation of the board traces. */ | ||
7151 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { | 7196 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { |
7152 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; | 7197 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; |
7153 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; | 7198 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; |
@@ -7464,12 +7509,6 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7464 | u32 le_val, rxcaldone; | 7509 | u32 le_val, rxcaldone; |
7465 | int chan, chan_done = (1 << SERDES_CHANS) - 1; | 7510 | int chan, chan_done = (1 << SERDES_CHANS) - 1; |
7466 | 7511 | ||
7467 | /* | ||
7468 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
7469 | * for adapters with QSFP | ||
7470 | */ | ||
7471 | init_txdds_table(ppd, 0); | ||
7472 | |||
7473 | /* Clear cmode-override, may be set from older driver */ | 7512 | /* Clear cmode-override, may be set from older driver */ |
7474 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | 7513 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); |
7475 | 7514 | ||
@@ -7655,6 +7694,12 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7655 | /* VGA output common mode */ | 7694 | /* VGA output common mode */ |
7656 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); | 7695 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); |
7657 | 7696 | ||
7697 | /* | ||
7698 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
7699 | * for adapters with QSFP | ||
7700 | */ | ||
7701 | init_txdds_table(ppd, 0); | ||
7702 | |||
7658 | return 0; | 7703 | return 0; |
7659 | } | 7704 | } |
7660 | 7705 | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index a01f3fce8eb3..b093a0b53b2f 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -183,6 +183,9 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) | |||
183 | rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + | 183 | rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + |
184 | rcd->rcvegrbufs_perchunk - 1) / | 184 | rcd->rcvegrbufs_perchunk - 1) / |
185 | rcd->rcvegrbufs_perchunk; | 185 | rcd->rcvegrbufs_perchunk; |
186 | BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk)); | ||
187 | rcd->rcvegrbufs_perchunk_shift = | ||
188 | ilog2(rcd->rcvegrbufs_perchunk); | ||
186 | } | 189 | } |
187 | return rcd; | 190 | return rcd; |
188 | } | 191 | } |
@@ -398,6 +401,7 @@ static void enable_chip(struct qib_devdata *dd) | |||
398 | if (rcd) | 401 | if (rcd) |
399 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); | 402 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); |
400 | } | 403 | } |
404 | dd->freectxts = dd->cfgctxts - dd->first_user_ctxt; | ||
401 | } | 405 | } |
402 | 406 | ||
403 | static void verify_interrupt(unsigned long opaque) | 407 | static void verify_interrupt(unsigned long opaque) |
@@ -581,10 +585,6 @@ int qib_init(struct qib_devdata *dd, int reinit) | |||
581 | continue; | 585 | continue; |
582 | } | 586 | } |
583 | 587 | ||
584 | /* let link come up, and enable IBC */ | ||
585 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
586 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
587 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
588 | portok++; | 588 | portok++; |
589 | } | 589 | } |
590 | 590 | ||
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index e16751f8639e..7e7e16fbee99 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <linux/err.h> | 35 | #include <linux/err.h> |
36 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
37 | #include <linux/jhash.h> | ||
37 | 38 | ||
38 | #include "qib.h" | 39 | #include "qib.h" |
39 | 40 | ||
@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) | |||
204 | clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); | 205 | clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); |
205 | } | 206 | } |
206 | 207 | ||
208 | static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn) | ||
209 | { | ||
210 | return jhash_1word(qpn, dev->qp_rnd) & | ||
211 | (dev->qp_table_size - 1); | ||
212 | } | ||
213 | |||
214 | |||
207 | /* | 215 | /* |
208 | * Put the QP into the hash table. | 216 | * Put the QP into the hash table. |
209 | * The hash table holds a reference to the QP. | 217 | * The hash table holds a reference to the QP. |
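qpn_hash() replaces indexing by qpn % qp_table_size with jhash_1word() seeded by a per-device random value, so sequentially allocated QPNs no longer land in adjacent buckets; the mask form assumes qp_table_size is a power of two. The bucket selection in isolation:

#include <linux/jhash.h>

/* Sketch: seeded hash, then mask; valid only for power-of-two table sizes. */
static unsigned int example_bucket(u32 qpn, u32 seed, unsigned int table_size)
{
	return jhash_1word(qpn, seed) & (table_size - 1);
}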
@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) | |||
211 | static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | 219 | static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) |
212 | { | 220 | { |
213 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | 221 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); |
214 | unsigned n = qp->ibqp.qp_num % dev->qp_table_size; | ||
215 | unsigned long flags; | 222 | unsigned long flags; |
223 | unsigned n = qpn_hash(dev, qp->ibqp.qp_num); | ||
216 | 224 | ||
217 | spin_lock_irqsave(&dev->qpt_lock, flags); | 225 | spin_lock_irqsave(&dev->qpt_lock, flags); |
226 | atomic_inc(&qp->refcount); | ||
218 | 227 | ||
219 | if (qp->ibqp.qp_num == 0) | 228 | if (qp->ibqp.qp_num == 0) |
220 | ibp->qp0 = qp; | 229 | rcu_assign_pointer(ibp->qp0, qp); |
221 | else if (qp->ibqp.qp_num == 1) | 230 | else if (qp->ibqp.qp_num == 1) |
222 | ibp->qp1 = qp; | 231 | rcu_assign_pointer(ibp->qp1, qp); |
223 | else { | 232 | else { |
224 | qp->next = dev->qp_table[n]; | 233 | qp->next = dev->qp_table[n]; |
225 | dev->qp_table[n] = qp; | 234 | rcu_assign_pointer(dev->qp_table[n], qp); |
226 | } | 235 | } |
227 | atomic_inc(&qp->refcount); | ||
228 | 236 | ||
229 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 237 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
238 | synchronize_rcu(); | ||
230 | } | 239 | } |
231 | 240 | ||
232 | /* | 241 | /* |
@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
236 | static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | 245 | static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) |
237 | { | 246 | { |
238 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | 247 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); |
239 | struct qib_qp *q, **qpp; | 248 | unsigned n = qpn_hash(dev, qp->ibqp.qp_num); |
240 | unsigned long flags; | 249 | unsigned long flags; |
241 | 250 | ||
242 | qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size]; | ||
243 | |||
244 | spin_lock_irqsave(&dev->qpt_lock, flags); | 251 | spin_lock_irqsave(&dev->qpt_lock, flags); |
245 | 252 | ||
246 | if (ibp->qp0 == qp) { | 253 | if (ibp->qp0 == qp) { |
247 | ibp->qp0 = NULL; | ||
248 | atomic_dec(&qp->refcount); | 254 | atomic_dec(&qp->refcount); |
255 | rcu_assign_pointer(ibp->qp0, NULL); | ||
249 | } else if (ibp->qp1 == qp) { | 256 | } else if (ibp->qp1 == qp) { |
250 | ibp->qp1 = NULL; | ||
251 | atomic_dec(&qp->refcount); | 257 | atomic_dec(&qp->refcount); |
252 | } else | 258 | rcu_assign_pointer(ibp->qp1, NULL); |
259 | } else { | ||
260 | struct qib_qp *q, **qpp; | ||
261 | |||
262 | qpp = &dev->qp_table[n]; | ||
253 | for (; (q = *qpp) != NULL; qpp = &q->next) | 263 | for (; (q = *qpp) != NULL; qpp = &q->next) |
254 | if (q == qp) { | 264 | if (q == qp) { |
255 | *qpp = qp->next; | ||
256 | qp->next = NULL; | ||
257 | atomic_dec(&qp->refcount); | 265 | atomic_dec(&qp->refcount); |
266 | rcu_assign_pointer(*qpp, qp->next); | ||
267 | qp->next = NULL; | ||
258 | break; | 268 | break; |
259 | } | 269 | } |
270 | } | ||
260 | 271 | ||
261 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 272 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
273 | synchronize_rcu(); | ||
262 | } | 274 | } |
263 | 275 | ||
264 | /** | 276 | /** |
@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) | |||
280 | 292 | ||
281 | if (!qib_mcast_tree_empty(ibp)) | 293 | if (!qib_mcast_tree_empty(ibp)) |
282 | qp_inuse++; | 294 | qp_inuse++; |
283 | if (ibp->qp0) | 295 | rcu_read_lock(); |
296 | if (rcu_dereference(ibp->qp0)) | ||
284 | qp_inuse++; | 297 | qp_inuse++; |
285 | if (ibp->qp1) | 298 | if (rcu_dereference(ibp->qp1)) |
286 | qp_inuse++; | 299 | qp_inuse++; |
300 | rcu_read_unlock(); | ||
287 | } | 301 | } |
288 | 302 | ||
289 | spin_lock_irqsave(&dev->qpt_lock, flags); | 303 | spin_lock_irqsave(&dev->qpt_lock, flags); |
290 | for (n = 0; n < dev->qp_table_size; n++) { | 304 | for (n = 0; n < dev->qp_table_size; n++) { |
291 | qp = dev->qp_table[n]; | 305 | qp = dev->qp_table[n]; |
292 | dev->qp_table[n] = NULL; | 306 | rcu_assign_pointer(dev->qp_table[n], NULL); |
293 | 307 | ||
294 | for (; qp; qp = qp->next) | 308 | for (; qp; qp = qp->next) |
295 | qp_inuse++; | 309 | qp_inuse++; |
296 | } | 310 | } |
297 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 311 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
312 | synchronize_rcu(); | ||
298 | 313 | ||
299 | return qp_inuse; | 314 | return qp_inuse; |
300 | } | 315 | } |
@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) | |||
309 | */ | 324 | */ |
310 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) | 325 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) |
311 | { | 326 | { |
312 | struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; | 327 | struct qib_qp *qp = NULL; |
313 | unsigned long flags; | ||
314 | struct qib_qp *qp; | ||
315 | 328 | ||
316 | spin_lock_irqsave(&dev->qpt_lock, flags); | 329 | if (unlikely(qpn <= 1)) { |
330 | rcu_read_lock(); | ||
331 | if (qpn == 0) | ||
332 | qp = rcu_dereference(ibp->qp0); | ||
333 | else | ||
334 | qp = rcu_dereference(ibp->qp1); | ||
335 | } else { | ||
336 | struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; | ||
337 | unsigned n = qpn_hash(dev, qpn); | ||
317 | 338 | ||
318 | if (qpn == 0) | 339 | rcu_read_lock(); |
319 | qp = ibp->qp0; | 340 | for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next) |
320 | else if (qpn == 1) | ||
321 | qp = ibp->qp1; | ||
322 | else | ||
323 | for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp; | ||
324 | qp = qp->next) | ||
325 | if (qp->ibqp.qp_num == qpn) | 341 | if (qp->ibqp.qp_num == qpn) |
326 | break; | 342 | break; |
343 | } | ||
327 | if (qp) | 344 | if (qp) |
328 | atomic_inc(&qp->refcount); | 345 | if (unlikely(!atomic_inc_not_zero(&qp->refcount))) |
346 | qp = NULL; | ||
329 | 347 | ||
330 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 348 | rcu_read_unlock(); |
331 | return qp; | 349 | return qp; |
332 | } | 350 | } |
333 | 351 | ||
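The lookup path becomes the standard RCU read-side pattern: traverse under rcu_read_lock() with rcu_dereference(), and take a reference only via atomic_inc_not_zero() so a QP whose refcount already hit zero is never revived; writers publish with rcu_assign_pointer() and wait in synchronize_rcu(). A generic sketch of the read side (types and names are illustrative, not the qib ones):

#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct obj {
	u32 key;
	struct obj __rcu *next;
	atomic_t refcount;
};

/* Sketch of the lock-free read side this patch adopts. */
static struct obj *example_lookup(struct obj __rcu **table,
				  unsigned int bucket, u32 key)
{
	struct obj *o;

	rcu_read_lock();
	for (o = rcu_dereference(table[bucket]); o;
	     o = rcu_dereference(o->next))
		if (o->key == key)
			break;
	/* Never revive an object whose last reference is already gone. */
	if (o && !atomic_inc_not_zero(&o->refcount))
		o = NULL;
	rcu_read_unlock();
	return o;
}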
@@ -765,8 +783,10 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
765 | } | 783 | } |
766 | } | 784 | } |
767 | 785 | ||
768 | if (attr_mask & IB_QP_PATH_MTU) | 786 | if (attr_mask & IB_QP_PATH_MTU) { |
769 | qp->path_mtu = pmtu; | 787 | qp->path_mtu = pmtu; |
788 | qp->pmtu = ib_mtu_enum_to_int(pmtu); | ||
789 | } | ||
770 | 790 | ||
771 | if (attr_mask & IB_QP_RETRY_CNT) { | 791 | if (attr_mask & IB_QP_RETRY_CNT) { |
772 | qp->s_retry_cnt = attr->retry_cnt; | 792 | qp->s_retry_cnt = attr->retry_cnt; |
@@ -781,8 +801,12 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
781 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | 801 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
782 | qp->r_min_rnr_timer = attr->min_rnr_timer; | 802 | qp->r_min_rnr_timer = attr->min_rnr_timer; |
783 | 803 | ||
784 | if (attr_mask & IB_QP_TIMEOUT) | 804 | if (attr_mask & IB_QP_TIMEOUT) { |
785 | qp->timeout = attr->timeout; | 805 | qp->timeout = attr->timeout; |
806 | qp->timeout_jiffies = | ||
807 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
808 | 1000UL); | ||
809 | } | ||
786 | 810 | ||
787 | if (attr_mask & IB_QP_QKEY) | 811 | if (attr_mask & IB_QP_QKEY) |
788 | qp->qkey = attr->qkey; | 812 | qp->qkey = attr->qkey; |
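Caching timeout_jiffies avoids recomputing the conversion each time the retransmit timer is armed. The IB local ACK timeout field encodes 4.096 usec * 2^timeout, so the driver scales 4096 ns by 2^timeout and divides by 1000 before usecs_to_jiffies(). The arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

/* IB timeout field to microseconds: 4.096 usec * 2^timeout. */
static uint64_t ib_timeout_usecs(unsigned timeout)
{
	return (4096ULL << timeout) / 1000ULL;	/* compute in ns, then to us */
}

int main(void)
{
	/* e.g. timeout=14 gives 67108 us, roughly 67 ms per retry wait */
	printf("timeout=14 -> %llu us\n",
	       (unsigned long long)ib_timeout_usecs(14));
	return 0;
}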
@@ -1013,6 +1037,10 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |||
1013 | ret = ERR_PTR(-ENOMEM); | 1037 | ret = ERR_PTR(-ENOMEM); |
1014 | goto bail_swq; | 1038 | goto bail_swq; |
1015 | } | 1039 | } |
1040 | RCU_INIT_POINTER(qp->next, NULL); | ||
1041 | qp->timeout_jiffies = | ||
1042 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
1043 | 1000UL); | ||
1016 | if (init_attr->srq) | 1044 | if (init_attr->srq) |
1017 | sz = 0; | 1045 | sz = 0; |
1018 | else { | 1046 | else { |
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index 3374a52232c1..e06c4ed383f1 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c | |||
@@ -273,18 +273,12 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) | |||
273 | int ret; | 273 | int ret; |
274 | int idx; | 274 | int idx; |
275 | u16 cks; | 275 | u16 cks; |
276 | u32 mask; | ||
277 | u8 peek[4]; | 276 | u8 peek[4]; |
278 | 277 | ||
279 | /* ensure sane contents on invalid reads, for cable swaps */ | 278 | /* ensure sane contents on invalid reads, for cable swaps */ |
280 | memset(cp, 0, sizeof(*cp)); | 279 | memset(cp, 0, sizeof(*cp)); |
281 | 280 | ||
282 | mask = QSFP_GPIO_MOD_PRS_N; | 281 | if (!qib_qsfp_mod_present(ppd)) { |
283 | if (ppd->hw_pidx) | ||
284 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
285 | |||
286 | ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0); | ||
287 | if (ret & mask) { | ||
288 | ret = -ENODEV; | 282 | ret = -ENODEV; |
289 | goto bail; | 283 | goto bail; |
290 | } | 284 | } |
@@ -444,6 +438,19 @@ const char * const qib_qsfp_devtech[16] = { | |||
444 | 438 | ||
445 | static const char *pwr_codes = "1.5W2.0W2.5W3.5W"; | 439 | static const char *pwr_codes = "1.5W2.0W2.5W3.5W"; |
446 | 440 | ||
441 | int qib_qsfp_mod_present(struct qib_pportdata *ppd) | ||
442 | { | ||
443 | u32 mask; | ||
444 | int ret; | ||
445 | |||
446 | mask = QSFP_GPIO_MOD_PRS_N << | ||
447 | (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT); | ||
448 | ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0); | ||
449 | |||
450 | return !((ret & mask) >> | ||
451 | ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3)); | ||
452 | } | ||
453 | |||
447 | /* | 454 | /* |
448 | * Initialize structures that control access to QSFP. Called once per port | 455 | * Initialize structures that control access to QSFP. Called once per port |
449 | * on cards that support QSFP. | 456 | * on cards that support QSFP. |
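qib_qsfp_mod_present() factors out the active-low ModPrsN test previously open-coded here and in qib_qsfp_init(): the port-1 bit is shifted up by hw_pidx * QSFP_GPIO_PORT2_SHIFT, and a set bit means no module. The "+ 3" shift in the return is equivalent to a plain boolean test when ModPrsN is a single bit at position 3. A simplified sketch of the decode (the two constants are assumed values, for illustration only):

#include <stdbool.h>
#include <stdint.h>

/* Assumed for illustration: ModPrsN at bit 3, port 2 bank 5 bits higher. */
#define EX_MOD_PRS_N	0x8
#define EX_PORT2_SHIFT	5

static bool example_mod_present(uint32_t gpio, unsigned hw_pidx)
{
	uint32_t mask = EX_MOD_PRS_N << (hw_pidx * EX_PORT2_SHIFT);

	return !(gpio & mask);	/* active low: bit set means absent */
}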
@@ -452,7 +459,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, | |||
452 | void (*fevent)(struct work_struct *)) | 459 | void (*fevent)(struct work_struct *)) |
453 | { | 460 | { |
454 | u32 mask, highs; | 461 | u32 mask, highs; |
455 | int pins; | ||
456 | 462 | ||
457 | struct qib_devdata *dd = qd->ppd->dd; | 463 | struct qib_devdata *dd = qd->ppd->dd; |
458 | 464 | ||
@@ -480,8 +486,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, | |||
480 | mask <<= QSFP_GPIO_PORT2_SHIFT; | 486 | mask <<= QSFP_GPIO_PORT2_SHIFT; |
481 | 487 | ||
482 | /* Do not try to wait here. Better to let event handle it */ | 488 | /* Do not try to wait here. Better to let event handle it */ |
483 | pins = dd->f_gpio_mod(dd, 0, 0, 0); | 489 | if (!qib_qsfp_mod_present(qd->ppd)) |
484 | if (pins & mask) | ||
485 | goto bail; | 490 | goto bail; |
486 | /* We see a module, but it may be unwise to look yet. Just schedule */ | 491 | /* We see a module, but it may be unwise to look yet. Just schedule */ |
487 | qd->t_insert = get_jiffies_64(); | 492 | qd->t_insert = get_jiffies_64(); |
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h index c109bbdc90ac..46002a9417c0 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.h +++ b/drivers/infiniband/hw/qib/qib_qsfp.h | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #define QSFP_DEV 0xA0 | 35 | #define QSFP_DEV 0xA0 |
36 | #define QSFP_PWR_LAG_MSEC 2000 | 36 | #define QSFP_PWR_LAG_MSEC 2000 |
37 | #define QSFP_MODPRS_LAG_MSEC 20 | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Below are masks for various QSFP signals, for Port 1. | 40 | * Below are masks for various QSFP signals, for Port 1. |
@@ -177,10 +178,12 @@ struct qib_qsfp_data { | |||
177 | struct work_struct work; | 178 | struct work_struct work; |
178 | struct qib_qsfp_cache cache; | 179 | struct qib_qsfp_cache cache; |
179 | u64 t_insert; | 180 | u64 t_insert; |
181 | u8 modpresent; | ||
180 | }; | 182 | }; |
181 | 183 | ||
182 | extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, | 184 | extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, |
183 | struct qib_qsfp_cache *cp); | 185 | struct qib_qsfp_cache *cp); |
186 | extern int qib_qsfp_mod_present(struct qib_pportdata *ppd); | ||
184 | extern void qib_qsfp_init(struct qib_qsfp_data *qd, | 187 | extern void qib_qsfp_init(struct qib_qsfp_data *qd, |
185 | void (*fevent)(struct work_struct *)); | 188 | void (*fevent)(struct work_struct *)); |
186 | extern void qib_qsfp_deinit(struct qib_qsfp_data *qd); | 189 | extern void qib_qsfp_deinit(struct qib_qsfp_data *qd); |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index eca0c41f1226..afaf4ac79f42 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -59,8 +59,7 @@ static void start_timer(struct qib_qp *qp) | |||
59 | qp->s_flags |= QIB_S_TIMER; | 59 | qp->s_flags |= QIB_S_TIMER; |
60 | qp->s_timer.function = rc_timeout; | 60 | qp->s_timer.function = rc_timeout; |
61 | /* 4.096 usec. * (1 << qp->timeout) */ | 61 | /* 4.096 usec. * (1 << qp->timeout) */ |
62 | qp->s_timer.expires = jiffies + | 62 | qp->s_timer.expires = jiffies + qp->timeout_jiffies; |
63 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL); | ||
64 | add_timer(&qp->s_timer); | 63 | add_timer(&qp->s_timer); |
65 | } | 64 | } |
66 | 65 | ||
@@ -239,7 +238,7 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
239 | u32 len; | 238 | u32 len; |
240 | u32 bth0; | 239 | u32 bth0; |
241 | u32 bth2; | 240 | u32 bth2; |
242 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 241 | u32 pmtu = qp->pmtu; |
243 | char newreq; | 242 | char newreq; |
244 | unsigned long flags; | 243 | unsigned long flags; |
245 | int ret = 0; | 244 | int ret = 0; |
@@ -1519,9 +1518,7 @@ read_middle: | |||
1519 | * 4.096 usec. * (1 << qp->timeout) | 1518 | * 4.096 usec. * (1 << qp->timeout) |
1520 | */ | 1519 | */ |
1521 | qp->s_flags |= QIB_S_TIMER; | 1520 | qp->s_flags |= QIB_S_TIMER; |
1522 | mod_timer(&qp->s_timer, jiffies + | 1521 | mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); |
1523 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
1524 | 1000UL)); | ||
1525 | if (qp->s_flags & QIB_S_WAIT_ACK) { | 1522 | if (qp->s_flags & QIB_S_WAIT_ACK) { |
1526 | qp->s_flags &= ~QIB_S_WAIT_ACK; | 1523 | qp->s_flags &= ~QIB_S_WAIT_ACK; |
1527 | qib_schedule_send(qp); | 1524 | qib_schedule_send(qp); |
@@ -1732,7 +1729,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, | |||
1732 | * same request. | 1729 | * same request. |
1733 | */ | 1730 | */ |
1734 | offset = ((psn - e->psn) & QIB_PSN_MASK) * | 1731 | offset = ((psn - e->psn) & QIB_PSN_MASK) * |
1735 | ib_mtu_enum_to_int(qp->path_mtu); | 1732 | qp->pmtu; |
1736 | len = be32_to_cpu(reth->length); | 1733 | len = be32_to_cpu(reth->length); |
1737 | if (unlikely(offset + len != e->rdma_sge.sge_length)) | 1734 | if (unlikely(offset + len != e->rdma_sge.sge_length)) |
1738 | goto unlock_done; | 1735 | goto unlock_done; |
@@ -1876,7 +1873,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1876 | u32 psn; | 1873 | u32 psn; |
1877 | u32 pad; | 1874 | u32 pad; |
1878 | struct ib_wc wc; | 1875 | struct ib_wc wc; |
1879 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 1876 | u32 pmtu = qp->pmtu; |
1880 | int diff; | 1877 | int diff; |
1881 | struct ib_reth *reth; | 1878 | struct ib_reth *reth; |
1882 | unsigned long flags; | 1879 | unsigned long flags; |
@@ -1892,10 +1889,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1892 | } | 1889 | } |
1893 | 1890 | ||
1894 | opcode = be32_to_cpu(ohdr->bth[0]); | 1891 | opcode = be32_to_cpu(ohdr->bth[0]); |
1895 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1896 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) | 1892 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) |
1897 | goto sunlock; | 1893 | return; |
1898 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1899 | 1894 | ||
1900 | psn = be32_to_cpu(ohdr->bth[2]); | 1895 | psn = be32_to_cpu(ohdr->bth[2]); |
1901 | opcode >>= 24; | 1896 | opcode >>= 24; |
@@ -1955,8 +1950,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1955 | break; | 1950 | break; |
1956 | } | 1951 | } |
1957 | 1952 | ||
1958 | memset(&wc, 0, sizeof wc); | ||
1959 | |||
1960 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { | 1953 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { |
1961 | qp->r_flags |= QIB_R_COMM_EST; | 1954 | qp->r_flags |= QIB_R_COMM_EST; |
1962 | if (qp->ibqp.event_handler) { | 1955 | if (qp->ibqp.event_handler) { |
@@ -2009,16 +2002,19 @@ send_middle: | |||
2009 | goto rnr_nak; | 2002 | goto rnr_nak; |
2010 | qp->r_rcv_len = 0; | 2003 | qp->r_rcv_len = 0; |
2011 | if (opcode == OP(SEND_ONLY)) | 2004 | if (opcode == OP(SEND_ONLY)) |
2012 | goto send_last; | 2005 | goto no_immediate_data; |
2013 | /* FALLTHROUGH */ | 2006 | /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */ |
2014 | case OP(SEND_LAST_WITH_IMMEDIATE): | 2007 | case OP(SEND_LAST_WITH_IMMEDIATE): |
2015 | send_last_imm: | 2008 | send_last_imm: |
2016 | wc.ex.imm_data = ohdr->u.imm_data; | 2009 | wc.ex.imm_data = ohdr->u.imm_data; |
2017 | hdrsize += 4; | 2010 | hdrsize += 4; |
2018 | wc.wc_flags = IB_WC_WITH_IMM; | 2011 | wc.wc_flags = IB_WC_WITH_IMM; |
2019 | /* FALLTHROUGH */ | 2012 | goto send_last; |
2020 | case OP(SEND_LAST): | 2013 | case OP(SEND_LAST): |
2021 | case OP(RDMA_WRITE_LAST): | 2014 | case OP(RDMA_WRITE_LAST): |
2015 | no_immediate_data: | ||
2016 | wc.wc_flags = 0; | ||
2017 | wc.ex.imm_data = 0; | ||
2022 | send_last: | 2018 | send_last: |
2023 | /* Get the number of bytes the message was padded by. */ | 2019 | /* Get the number of bytes the message was padded by. */ |
2024 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 2020 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
@@ -2051,6 +2047,12 @@ send_last: | |||
2051 | wc.src_qp = qp->remote_qpn; | 2047 | wc.src_qp = qp->remote_qpn; |
2052 | wc.slid = qp->remote_ah_attr.dlid; | 2048 | wc.slid = qp->remote_ah_attr.dlid; |
2053 | wc.sl = qp->remote_ah_attr.sl; | 2049 | wc.sl = qp->remote_ah_attr.sl; |
2050 | /* zero fields that are N/A */ | ||
2051 | wc.vendor_err = 0; | ||
2052 | wc.pkey_index = 0; | ||
2053 | wc.dlid_path_bits = 0; | ||
2054 | wc.port_num = 0; | ||
2055 | wc.csum_ok = 0; | ||
2054 | /* Signal completion event if the solicited bit is set. */ | 2056 | /* Signal completion event if the solicited bit is set. */ |
2055 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 2057 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
2056 | (ohdr->bth[0] & | 2058 | (ohdr->bth[0] & |
@@ -2089,7 +2091,7 @@ send_last: | |||
2089 | if (opcode == OP(RDMA_WRITE_FIRST)) | 2091 | if (opcode == OP(RDMA_WRITE_FIRST)) |
2090 | goto send_middle; | 2092 | goto send_middle; |
2091 | else if (opcode == OP(RDMA_WRITE_ONLY)) | 2093 | else if (opcode == OP(RDMA_WRITE_ONLY)) |
2092 | goto send_last; | 2094 | goto no_immediate_data; |
2093 | ret = qib_get_rwqe(qp, 1); | 2095 | ret = qib_get_rwqe(qp, 1); |
2094 | if (ret < 0) | 2096 | if (ret < 0) |
2095 | goto nack_op_err; | 2097 | goto nack_op_err; |
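
With the up-front memset(&wc, 0, sizeof wc) gone, every path that reaches send_last must have initialized the immediate-data fields itself; that is what the new no_immediate_data label provides. A condensed sketch of the resulting dispatch (illustrative, not the full receive path; OP() and the variables are as declared in qib_rc.c):

    switch (opcode) {
    case OP(SEND_LAST_WITH_IMMEDIATE):
            wc.ex.imm_data = ohdr->u.imm_data;
            wc.wc_flags = IB_WC_WITH_IMM;
            goto send_last;                 /* immediate data already set */
    case OP(SEND_LAST):
    case OP(RDMA_WRITE_LAST):
    no_immediate_data:
            wc.ex.imm_data = 0;             /* what the memset() used to do */
            wc.wc_flags = 0;
            /* FALLTHROUGH */
    send_last:
            break;                          /* deliver the completion */
    }
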
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index eb78d9367f06..b4b37e47321a 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -260,12 +260,15 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) | |||
260 | 260 | ||
261 | /* | 261 | /* |
262 | * | 262 | * |
263 | * This should be called with the QP s_lock held. | 263 | * This should be called with the QP r_lock held. |
264 | * | ||
265 | * The s_lock will be acquired around the qib_migrate_qp() call. | ||
264 | */ | 266 | */ |
265 | int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, | 267 | int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, |
266 | int has_grh, struct qib_qp *qp, u32 bth0) | 268 | int has_grh, struct qib_qp *qp, u32 bth0) |
267 | { | 269 | { |
268 | __be64 guid; | 270 | __be64 guid; |
271 | unsigned long flags; | ||
269 | 272 | ||
270 | if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { | 273 | if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { |
271 | if (!has_grh) { | 274 | if (!has_grh) { |
@@ -295,7 +298,9 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
295 | if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || | 298 | if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || |
296 | ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) | 299 | ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) |
297 | goto err; | 300 | goto err; |
301 | spin_lock_irqsave(&qp->s_lock, flags); | ||
298 | qib_migrate_qp(qp); | 302 | qib_migrate_qp(qp); |
303 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
299 | } else { | 304 | } else { |
300 | if (!has_grh) { | 305 | if (!has_grh) { |
301 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | 306 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) |
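
The comment change encodes a lock-ordering rule: the receive path already holds r_lock when the header check runs, and s_lock now nests inside it only for the duration of the migration. Roughly (the assertion is illustrative, not in the patch):

    unsigned long flags;

    lockdep_assert_held(&qp->r_lock);       /* caller's receive-side lock */

    spin_lock_irqsave(&qp->s_lock, flags);  /* s_lock nests inside r_lock */
    qib_migrate_qp(qp);
    spin_unlock_irqrestore(&qp->s_lock, flags);
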
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c index c3ec8efc2ed8..d6235931a1ba 100644 --- a/drivers/infiniband/hw/qib/qib_srq.c +++ b/drivers/infiniband/hw/qib/qib_srq.c | |||
@@ -107,6 +107,11 @@ struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | |||
107 | u32 sz; | 107 | u32 sz; |
108 | struct ib_srq *ret; | 108 | struct ib_srq *ret; |
109 | 109 | ||
110 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { | ||
111 | ret = ERR_PTR(-ENOSYS); | ||
112 | goto done; | ||
113 | } | ||
114 | |||
110 | if (srq_init_attr->attr.max_sge == 0 || | 115 | if (srq_init_attr->attr.max_sge == 0 || |
111 | srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || | 116 | srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || |
112 | srq_init_attr->attr.max_wr == 0 || | 117 | srq_init_attr->attr.max_wr == 0 || |
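
With srq_type part of the API, every consumer must declare which SRQ flavor it wants; qib implements only the classic semantics and rejects anything else early. A caller-side sketch (sizes are illustrative):

    struct ib_srq_init_attr init = {
            .srq_type = IB_SRQT_BASIC,
            .attr = {
                    .max_wr  = 128,
                    .max_sge = 1,
            },
    };
    struct ib_srq *srq = ib_create_srq(pd, &init);

    /* on qib, srq_type == IB_SRQT_XRC would yield ERR_PTR(-ENOSYS) */
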
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 14d129de4320..78fbd56879d4 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
@@ -515,8 +515,7 @@ static ssize_t show_nfreectxts(struct device *device, | |||
515 | struct qib_devdata *dd = dd_from_dev(dev); | 515 | struct qib_devdata *dd = dd_from_dev(dev); |
516 | 516 | ||
517 | /* Return the number of free user ports (contexts) available. */ | 517 | /* Return the number of free user ports (contexts) available. */ |
518 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts - | 518 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts); |
519 | dd->first_user_ctxt - (u32)qib_stats.sps_ctxts); | ||
520 | } | 519 | } |
521 | 520 | ||
522 | static ssize_t show_serial(struct device *device, | 521 | static ssize_t show_serial(struct device *device, |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 32ccf3c824ca..847e7afdfd94 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -51,7 +51,7 @@ int qib_make_uc_req(struct qib_qp *qp) | |||
51 | u32 hwords; | 51 | u32 hwords; |
52 | u32 bth0; | 52 | u32 bth0; |
53 | u32 len; | 53 | u32 len; |
54 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 54 | u32 pmtu = qp->pmtu; |
55 | int ret = 0; | 55 | int ret = 0; |
56 | 56 | ||
57 | spin_lock_irqsave(&qp->s_lock, flags); | 57 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -243,13 +243,12 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
243 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) | 243 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) |
244 | { | 244 | { |
245 | struct qib_other_headers *ohdr; | 245 | struct qib_other_headers *ohdr; |
246 | unsigned long flags; | ||
247 | u32 opcode; | 246 | u32 opcode; |
248 | u32 hdrsize; | 247 | u32 hdrsize; |
249 | u32 psn; | 248 | u32 psn; |
250 | u32 pad; | 249 | u32 pad; |
251 | struct ib_wc wc; | 250 | struct ib_wc wc; |
252 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 251 | u32 pmtu = qp->pmtu; |
253 | struct ib_reth *reth; | 252 | struct ib_reth *reth; |
254 | int ret; | 253 | int ret; |
255 | 254 | ||
@@ -263,14 +262,11 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
263 | } | 262 | } |
264 | 263 | ||
265 | opcode = be32_to_cpu(ohdr->bth[0]); | 264 | opcode = be32_to_cpu(ohdr->bth[0]); |
266 | spin_lock_irqsave(&qp->s_lock, flags); | ||
267 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) | 265 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) |
268 | goto sunlock; | 266 | return; |
269 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
270 | 267 | ||
271 | psn = be32_to_cpu(ohdr->bth[2]); | 268 | psn = be32_to_cpu(ohdr->bth[2]); |
272 | opcode >>= 24; | 269 | opcode >>= 24; |
273 | memset(&wc, 0, sizeof wc); | ||
274 | 270 | ||
275 | /* Compare the PSN versus the expected PSN. */ | 271 | /* Compare the PSN versus the expected PSN. */
276 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { | 272 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { |
@@ -370,7 +366,7 @@ send_first: | |||
370 | } | 366 | } |
371 | qp->r_rcv_len = 0; | 367 | qp->r_rcv_len = 0; |
372 | if (opcode == OP(SEND_ONLY)) | 368 | if (opcode == OP(SEND_ONLY)) |
373 | goto send_last; | 369 | goto no_immediate_data; |
374 | else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) | 370 | else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) |
375 | goto send_last_imm; | 371 | goto send_last_imm; |
376 | /* FALLTHROUGH */ | 372 | /* FALLTHROUGH */ |
@@ -389,8 +385,11 @@ send_last_imm: | |||
389 | wc.ex.imm_data = ohdr->u.imm_data; | 385 | wc.ex.imm_data = ohdr->u.imm_data; |
390 | hdrsize += 4; | 386 | hdrsize += 4; |
391 | wc.wc_flags = IB_WC_WITH_IMM; | 387 | wc.wc_flags = IB_WC_WITH_IMM; |
392 | /* FALLTHROUGH */ | 388 | goto send_last; |
393 | case OP(SEND_LAST): | 389 | case OP(SEND_LAST): |
390 | no_immediate_data: | ||
391 | wc.ex.imm_data = 0; | ||
392 | wc.wc_flags = 0; | ||
394 | send_last: | 393 | send_last: |
395 | /* Get the number of bytes the message was padded by. */ | 394 | /* Get the number of bytes the message was padded by. */ |
396 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 395 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
@@ -418,6 +417,12 @@ last_imm: | |||
418 | wc.src_qp = qp->remote_qpn; | 417 | wc.src_qp = qp->remote_qpn; |
419 | wc.slid = qp->remote_ah_attr.dlid; | 418 | wc.slid = qp->remote_ah_attr.dlid; |
420 | wc.sl = qp->remote_ah_attr.sl; | 419 | wc.sl = qp->remote_ah_attr.sl; |
420 | /* zero fields that are N/A */ | ||
421 | wc.vendor_err = 0; | ||
422 | wc.pkey_index = 0; | ||
423 | wc.dlid_path_bits = 0; | ||
424 | wc.port_num = 0; | ||
425 | wc.csum_ok = 0; | ||
421 | /* Signal completion event if the solicited bit is set. */ | 426 | /* Signal completion event if the solicited bit is set. */ |
422 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 427 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
423 | (ohdr->bth[0] & | 428 | (ohdr->bth[0] & |
@@ -546,6 +551,4 @@ op_err: | |||
546 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | 551 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); |
547 | return; | 552 | return; |
548 | 553 | ||
549 | sunlock: | ||
550 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
551 | } | 554 | } |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 9fab40488850..9627cb737125 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -38,11 +38,12 @@ | |||
38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
39 | #include <linux/rculist.h> | 39 | #include <linux/rculist.h> |
40 | #include <linux/mm.h> | 40 | #include <linux/mm.h> |
41 | #include <linux/random.h> | ||
41 | 42 | ||
42 | #include "qib.h" | 43 | #include "qib.h" |
43 | #include "qib_common.h" | 44 | #include "qib_common.h" |
44 | 45 | ||
45 | static unsigned int ib_qib_qp_table_size = 251; | 46 | static unsigned int ib_qib_qp_table_size = 256; |
46 | module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); | 47 | module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); |
47 | MODULE_PARM_DESC(qp_table_size, "QP table size"); | 48 | MODULE_PARM_DESC(qp_table_size, "QP table size"); |
48 | 49 | ||
@@ -659,17 +660,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | |||
659 | if (atomic_dec_return(&mcast->refcount) <= 1) | 660 | if (atomic_dec_return(&mcast->refcount) <= 1) |
660 | wake_up(&mcast->wait); | 661 | wake_up(&mcast->wait); |
661 | } else { | 662 | } else { |
662 | qp = qib_lookup_qpn(ibp, qp_num); | 663 | if (rcd->lookaside_qp) { |
663 | if (!qp) | 664 | if (rcd->lookaside_qpn != qp_num) { |
664 | goto drop; | 665 | if (atomic_dec_and_test( |
666 | &rcd->lookaside_qp->refcount)) | ||
667 | wake_up( | ||
668 | &rcd->lookaside_qp->wait); | ||
669 | rcd->lookaside_qp = NULL; | ||
670 | } | ||
671 | } | ||
672 | if (!rcd->lookaside_qp) { | ||
673 | qp = qib_lookup_qpn(ibp, qp_num); | ||
674 | if (!qp) | ||
675 | goto drop; | ||
676 | rcd->lookaside_qp = qp; | ||
677 | rcd->lookaside_qpn = qp_num; | ||
678 | } else | ||
679 | qp = rcd->lookaside_qp; | ||
665 | ibp->n_unicast_rcv++; | 680 | ibp->n_unicast_rcv++; |
666 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); | 681 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); |
667 | /* | ||
668 | * Notify qib_destroy_qp() if it is waiting | ||
669 | * for us to finish. | ||
670 | */ | ||
671 | if (atomic_dec_and_test(&qp->refcount)) | ||
672 | wake_up(&qp->wait); | ||
673 | } | 682 | } |
674 | return; | 683 | return; |
675 | 684 | ||
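
The lookaside cache keeps one referenced QP per receive context, so a burst of packets for the same QP skips both the hash lookup and the per-packet refcount put/wake_up. The reference the cache holds must still be dropped when the cache entry is retired; a sketch of that flush, assuming a teardown hook outside this hunk (the helper name is hypothetical):

    static void qib_flush_lookaside_qp(struct qib_ctxtdata *rcd)
    {
            struct qib_qp *qp = rcd->lookaside_qp;

            if (!qp)
                    return;
            rcd->lookaside_qp = NULL;
            /* drop the reference the cache was holding */
            if (atomic_dec_and_test(&qp->refcount))
                    wake_up(&qp->wait);
    }
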
@@ -1974,6 +1983,8 @@ static void init_ibport(struct qib_pportdata *ppd) | |||
1974 | ibp->z_excessive_buffer_overrun_errors = | 1983 | ibp->z_excessive_buffer_overrun_errors = |
1975 | cntrs.excessive_buffer_overrun_errors; | 1984 | cntrs.excessive_buffer_overrun_errors; |
1976 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | 1985 | ibp->z_vl15_dropped = cntrs.vl15_dropped; |
1986 | RCU_INIT_POINTER(ibp->qp0, NULL); | ||
1987 | RCU_INIT_POINTER(ibp->qp1, NULL); | ||
1977 | } | 1988 | } |
1978 | 1989 | ||
1979 | /** | 1990 | /** |
@@ -1990,12 +2001,15 @@ int qib_register_ib_device(struct qib_devdata *dd) | |||
1990 | int ret; | 2001 | int ret; |
1991 | 2002 | ||
1992 | dev->qp_table_size = ib_qib_qp_table_size; | 2003 | dev->qp_table_size = ib_qib_qp_table_size; |
1993 | dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table, | 2004 | get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); |
2005 | dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, | ||
1994 | GFP_KERNEL); | 2006 | GFP_KERNEL); |
1995 | if (!dev->qp_table) { | 2007 | if (!dev->qp_table) { |
1996 | ret = -ENOMEM; | 2008 | ret = -ENOMEM; |
1997 | goto err_qpt; | 2009 | goto err_qpt; |
1998 | } | 2010 | } |
2011 | for (i = 0; i < dev->qp_table_size; i++) | ||
2012 | RCU_INIT_POINTER(dev->qp_table[i], NULL); | ||
1999 | 2013 | ||
2000 | for (i = 0; i < dd->num_pports; i++) | 2014 | for (i = 0; i < dd->num_pports; i++) |
2001 | init_ibport(ppd + i); | 2015 | init_ibport(ppd + i); |
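
Bumping qp_table_size from 251 to 256 pairs with the new qp_rnd seed: a power-of-two table lets the bucket be computed with a mask, and seeding the hash with random bytes keeps the QPN-to-bucket mapping unpredictable from user space. A plausible shape for the hash (the actual function lives in qib_qp.c, not in this diff):

    #include <linux/jhash.h>

    static unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
    {
            return jhash_1word(qpn, dev->qp_rnd) & (dev->qp_table_size - 1);
    }
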
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 95e5b47223b3..0c19ef0c4123 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
@@ -485,6 +485,7 @@ struct qib_qp { | |||
485 | u8 alt_timeout; /* Alternate path timeout for this QP */ | 485 | u8 alt_timeout; /* Alternate path timeout for this QP */ |
486 | u8 port_num; | 486 | u8 port_num; |
487 | enum ib_mtu path_mtu; | 487 | enum ib_mtu path_mtu; |
488 | u32 pmtu; /* decoded from path_mtu */ | ||
488 | u32 remote_qpn; | 489 | u32 remote_qpn; |
489 | u32 qkey; /* QKEY for this QP (for UD or RD) */ | 490 | u32 qkey; /* QKEY for this QP (for UD or RD) */ |
490 | u32 s_size; /* send work queue size */ | 491 | u32 s_size; /* send work queue size */ |
@@ -495,6 +496,7 @@ struct qib_qp { | |||
495 | u32 s_last; /* last completed entry */ | 496 | u32 s_last; /* last completed entry */ |
496 | u32 s_ssn; /* SSN of tail entry */ | 497 | u32 s_ssn; /* SSN of tail entry */ |
497 | u32 s_lsn; /* limit sequence number (credit) */ | 498 | u32 s_lsn; /* limit sequence number (credit) */ |
499 | unsigned long timeout_jiffies; /* computed from timeout */ | ||
498 | struct qib_swqe *s_wq; /* send work queue */ | 500 | struct qib_swqe *s_wq; /* send work queue */ |
499 | struct qib_swqe *s_wqe; | 501 | struct qib_swqe *s_wqe; |
500 | struct qib_rq r_rq; /* receive work queue */ | 502 | struct qib_rq r_rq; /* receive work queue */ |
@@ -723,7 +725,8 @@ struct qib_ibdev { | |||
723 | dma_addr_t pio_hdrs_phys; | 725 | dma_addr_t pio_hdrs_phys; |
724 | /* list of QPs waiting for RNR timer */ | 726 | /* list of QPs waiting for RNR timer */ |
725 | spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ | 727 | spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ |
726 | unsigned qp_table_size; /* size of the hash table */ | 728 | u32 qp_table_size; /* size of the hash table */ |
729 | u32 qp_rnd; /* random bytes for hash */ | ||
727 | spinlock_t qpt_lock; | 730 | spinlock_t qpt_lock; |
728 | 731 | ||
729 | u32 n_piowait; | 732 | u32 n_piowait; |
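
qp->pmtu is a derived cache of the path_mtu enum, recomputed once per attribute change so the per-packet paths shown earlier can use a plain load instead of calling ib_mtu_enum_to_int() on every packet. Assuming the usual modify-QP flow, the update would look roughly like:

    if (attr_mask & IB_QP_PATH_MTU) {
            qp->path_mtu = attr->path_mtu;                 /* enum, for queries */
            qp->pmtu = ib_mtu_enum_to_int(attr->path_mtu); /* bytes, hot path */
    }
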
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 39913a065f99..fe48677fd748 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -84,7 +84,7 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, | |||
84 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); | 84 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); |
85 | 85 | ||
86 | for (i = 0; i < frags; ++i) | 86 | for (i = 0; i < frags; ++i) |
87 | ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); | 87 | ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); |
88 | } | 88 | } |
89 | 89 | ||
90 | static int ipoib_cm_post_receive_srq(struct net_device *dev, int id) | 90 | static int ipoib_cm_post_receive_srq(struct net_device *dev, int id) |
@@ -183,7 +183,7 @@ partial_error: | |||
183 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); | 183 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); |
184 | 184 | ||
185 | for (; i > 0; --i) | 185 | for (; i > 0; --i) |
186 | ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); | 186 | ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); |
187 | 187 | ||
188 | dev_kfree_skb_any(skb); | 188 | dev_kfree_skb_any(skb); |
189 | return NULL; | 189 | return NULL; |
@@ -1496,6 +1496,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge) | |||
1496 | { | 1496 | { |
1497 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 1497 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
1498 | struct ib_srq_init_attr srq_init_attr = { | 1498 | struct ib_srq_init_attr srq_init_attr = { |
1499 | .srq_type = IB_SRQT_BASIC, | ||
1499 | .attr = { | 1500 | .attr = { |
1500 | .max_wr = ipoib_recvq_size, | 1501 | .max_wr = ipoib_recvq_size, |
1501 | .max_sge = max_sge | 1502 | .max_sge = max_sge |
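
The unmap fixes matter because the DMA API requires map and unmap calls to pair by type: the fragment buffers are mapped as pages, so tearing them down with ib_dma_unmap_single() trips CONFIG_DMA_API_DEBUG checks. The correct pairing, sketched from the allocation side:

    /* map side (as in ipoib_cm_alloc_rx_skb) */
    mapping[i + 1] = ib_dma_map_page(priv->ca, page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
    if (ib_dma_mapping_error(priv->ca, mapping[i + 1]))
            goto partial_error;

    /* unmap side must match the map type */
    ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
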
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index 86eae229dc49..0e2fe4631ba8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c | |||
@@ -212,16 +212,15 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr) | |||
212 | gid_buf, path.pathrec.dlid ? "yes" : "no"); | 212 | gid_buf, path.pathrec.dlid ? "yes" : "no"); |
213 | 213 | ||
214 | if (path.pathrec.dlid) { | 214 | if (path.pathrec.dlid) { |
215 | rate = ib_rate_to_mult(path.pathrec.rate) * 25; | 215 | rate = ib_rate_to_mbps(path.pathrec.rate); |
216 | 216 | ||
217 | seq_printf(file, | 217 | seq_printf(file, |
218 | " DLID: 0x%04x\n" | 218 | " DLID: 0x%04x\n" |
219 | " SL: %12d\n" | 219 | " SL: %12d\n" |
220 | " rate: %*d%s Gb/sec\n", | 220 | " rate: %8d.%d Gb/sec\n", |
221 | be16_to_cpu(path.pathrec.dlid), | 221 | be16_to_cpu(path.pathrec.dlid), |
222 | path.pathrec.sl, | 222 | path.pathrec.sl, |
223 | 10 - ((rate % 10) ? 2 : 0), | 223 | rate / 1000, rate % 1000); |
224 | rate / 10, rate % 10 ? ".5" : ""); | ||
225 | } | 224 | } |
226 | 225 | ||
227 | seq_putc(file, '\n'); | 226 | seq_putc(file, '\n'); |
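
The old arithmetic assumed every IB rate is a multiple of 2.5 Gb/sec (ib_rate_to_mult() * 25, printed with an optional ".5"); the extended rates added later in this patch set (14, 25, 56 Gb/sec and so on) break that assumption, hence the switch to Mbps. A worked example using the documented conversion:

    int rate = ib_rate_to_mbps(IB_RATE_2_5_GBPS);   /* 2500 */

    /* 2500 / 1000 = 2 and 2500 % 1000 = 500 -> "2.500 Gb/sec"; the
     * zero-padded %03d keeps fractions such as .062 three digits wide */
    seq_printf(file, " rate: %8d.%03d Gb/sec\n", rate / 1000, rate % 1000);
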
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 1ad1f6029af8..869a2c220a7b 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -484,7 +484,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, | |||
484 | 484 | ||
485 | mlx4_mtt_cleanup(dev, &eq->mtt); | 485 | mlx4_mtt_cleanup(dev, &eq->mtt); |
486 | for (i = 0; i < npages; ++i) | 486 | for (i = 0; i < npages; ++i) |
487 | pci_free_consistent(dev->pdev, PAGE_SIZE, | 487 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, |
488 | eq->page_list[i].buf, | 488 | eq->page_list[i].buf, |
489 | eq->page_list[i].map); | 489 | eq->page_list[i].map); |
490 | 490 | ||
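
pci_free_consistent() is a thin legacy wrapper over the generic DMA API; freeing through dma_free_coherent() keeps allocation and free on the same interface. The pairing, sketched:

    dma_addr_t t;
    void *buf;

    buf = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, &t, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    /* ... hand buf/t to the HCA ... */
    dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf, t);
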
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 7eb8ba822e97..875838b8799c 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -204,6 +204,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
204 | #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 | 204 | #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 |
205 | #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 | 205 | #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 |
206 | #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 | 206 | #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 |
207 | #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 | ||
208 | #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 | ||
207 | #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 | 209 | #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 |
208 | #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 | 210 | #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 |
209 | #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 | 211 | #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 |
@@ -318,6 +320,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
318 | dev_cap->reserved_pds = field >> 4; | 320 | dev_cap->reserved_pds = field >> 4; |
319 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); | 321 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); |
320 | dev_cap->max_pds = 1 << (field & 0x3f); | 322 | dev_cap->max_pds = 1 << (field & 0x3f); |
323 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); | ||
324 | dev_cap->reserved_xrcds = field >> 4; | ||
325 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); | ||
326 | dev_cap->max_xrcds = 1 << (field & 0x1f); | ||
321 | 327 | ||
322 | MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); | 328 | MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); |
323 | dev_cap->rdmarc_entry_sz = size; | 329 | dev_cap->rdmarc_entry_sz = size; |
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index 1e8ecc3708e2..bf5ec2286528 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h | |||
@@ -93,6 +93,8 @@ struct mlx4_dev_cap { | |||
93 | int max_mcgs; | 93 | int max_mcgs; |
94 | int reserved_pds; | 94 | int reserved_pds; |
95 | int max_pds; | 95 | int max_pds; |
96 | int reserved_xrcds; | ||
97 | int max_xrcds; | ||
96 | int qpc_entry_sz; | 98 | int qpc_entry_sz; |
97 | int rdmarc_entry_sz; | 99 | int rdmarc_entry_sz; |
98 | int altc_entry_sz; | 100 | int altc_entry_sz; |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index f0ee35df4dd7..94bbc85a532d 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -96,6 +96,8 @@ MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); | |||
96 | static int log_num_vlan; | 96 | static int log_num_vlan; |
97 | module_param_named(log_num_vlan, log_num_vlan, int, 0444); | 97 | module_param_named(log_num_vlan, log_num_vlan, int, 0444); |
98 | MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); | 98 | MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); |
99 | /* Log2 max number of VLANs per ETH port (0-7) */ | ||
100 | #define MLX4_LOG_NUM_VLANS 7 | ||
99 | 101 | ||
100 | static int use_prio; | 102 | static int use_prio; |
101 | module_param_named(use_prio, use_prio, bool, 0444); | 103 | module_param_named(use_prio, use_prio, bool, 0444); |
@@ -220,6 +222,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
220 | dev->caps.reserved_mrws = dev_cap->reserved_mrws; | 222 | dev->caps.reserved_mrws = dev_cap->reserved_mrws; |
221 | dev->caps.reserved_uars = dev_cap->reserved_uars; | 223 | dev->caps.reserved_uars = dev_cap->reserved_uars; |
222 | dev->caps.reserved_pds = dev_cap->reserved_pds; | 224 | dev->caps.reserved_pds = dev_cap->reserved_pds; |
225 | dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? | ||
226 | dev_cap->reserved_xrcds : 0; | ||
227 | dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? | ||
228 | dev_cap->max_xrcds : 0; | ||
223 | dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; | 229 | dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; |
224 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; | 230 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; |
225 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); | 231 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); |
@@ -230,7 +236,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
230 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 236 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
231 | 237 | ||
232 | dev->caps.log_num_macs = log_num_mac; | 238 | dev->caps.log_num_macs = log_num_mac; |
233 | dev->caps.log_num_vlans = log_num_vlan; | 239 | dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; |
234 | dev->caps.log_num_prios = use_prio ? 3 : 0; | 240 | dev->caps.log_num_prios = use_prio ? 3 : 0; |
235 | 241 | ||
236 | for (i = 1; i <= dev->caps.num_ports; ++i) { | 242 | for (i = 1; i <= dev->caps.num_ports; ++i) { |
@@ -912,11 +918,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
912 | goto err_kar_unmap; | 918 | goto err_kar_unmap; |
913 | } | 919 | } |
914 | 920 | ||
921 | err = mlx4_init_xrcd_table(dev); | ||
922 | if (err) { | ||
923 | mlx4_err(dev, "Failed to initialize " | ||
924 | "reliable connection domain table, aborting.\n"); | ||
925 | goto err_pd_table_free; | ||
926 | } | ||
927 | |||
915 | err = mlx4_init_mr_table(dev); | 928 | err = mlx4_init_mr_table(dev); |
916 | if (err) { | 929 | if (err) { |
917 | mlx4_err(dev, "Failed to initialize " | 930 | mlx4_err(dev, "Failed to initialize " |
918 | "memory region table, aborting.\n"); | 931 | "memory region table, aborting.\n"); |
919 | goto err_pd_table_free; | 932 | goto err_xrcd_table_free; |
920 | } | 933 | } |
921 | 934 | ||
922 | err = mlx4_init_eq_table(dev); | 935 | err = mlx4_init_eq_table(dev); |
@@ -998,6 +1011,13 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
998 | "ib capabilities (%d). Continuing with " | 1011 | "ib capabilities (%d). Continuing with " |
999 | "caps = 0\n", port, err); | 1012 | "caps = 0\n", port, err); |
1000 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; | 1013 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; |
1014 | |||
1015 | err = mlx4_check_ext_port_caps(dev, port); | ||
1016 | if (err) | ||
1017 | mlx4_warn(dev, "failed to get port %d extended " | ||
1018 | "port capabilities support info (%d)." | ||
1019 | " Assuming not supported\n", port, err); | ||
1020 | |||
1001 | err = mlx4_SET_PORT(dev, port); | 1021 | err = mlx4_SET_PORT(dev, port); |
1002 | if (err) { | 1022 | if (err) { |
1003 | mlx4_err(dev, "Failed to set port %d, aborting\n", | 1023 | mlx4_err(dev, "Failed to set port %d, aborting\n", |
@@ -1033,6 +1053,9 @@ err_eq_table_free: | |||
1033 | err_mr_table_free: | 1053 | err_mr_table_free: |
1034 | mlx4_cleanup_mr_table(dev); | 1054 | mlx4_cleanup_mr_table(dev); |
1035 | 1055 | ||
1056 | err_xrcd_table_free: | ||
1057 | mlx4_cleanup_xrcd_table(dev); | ||
1058 | |||
1036 | err_pd_table_free: | 1059 | err_pd_table_free: |
1037 | mlx4_cleanup_pd_table(dev); | 1060 | mlx4_cleanup_pd_table(dev); |
1038 | 1061 | ||
@@ -1355,6 +1378,7 @@ err_port: | |||
1355 | mlx4_cmd_use_polling(dev); | 1378 | mlx4_cmd_use_polling(dev); |
1356 | mlx4_cleanup_eq_table(dev); | 1379 | mlx4_cleanup_eq_table(dev); |
1357 | mlx4_cleanup_mr_table(dev); | 1380 | mlx4_cleanup_mr_table(dev); |
1381 | mlx4_cleanup_xrcd_table(dev); | ||
1358 | mlx4_cleanup_pd_table(dev); | 1382 | mlx4_cleanup_pd_table(dev); |
1359 | mlx4_cleanup_uar_table(dev); | 1383 | mlx4_cleanup_uar_table(dev); |
1360 | 1384 | ||
@@ -1416,6 +1440,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
1416 | mlx4_cmd_use_polling(dev); | 1440 | mlx4_cmd_use_polling(dev); |
1417 | mlx4_cleanup_eq_table(dev); | 1441 | mlx4_cleanup_eq_table(dev); |
1418 | mlx4_cleanup_mr_table(dev); | 1442 | mlx4_cleanup_mr_table(dev); |
1443 | mlx4_cleanup_xrcd_table(dev); | ||
1419 | mlx4_cleanup_pd_table(dev); | 1444 | mlx4_cleanup_pd_table(dev); |
1420 | 1445 | ||
1421 | iounmap(priv->kar); | 1446 | iounmap(priv->kar); |
@@ -1489,10 +1514,9 @@ static int __init mlx4_verify_params(void) | |||
1489 | return -1; | 1514 | return -1; |
1490 | } | 1515 | } |
1491 | 1516 | ||
1492 | if ((log_num_vlan < 0) || (log_num_vlan > 7)) { | 1517 | if (log_num_vlan != 0) |
1493 | pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan); | 1518 | pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", |
1494 | return -1; | 1519 | MLX4_LOG_NUM_VLANS); |
1495 | } | ||
1496 | 1520 | ||
1497 | if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { | 1521 | if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { |
1498 | pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); | 1522 | pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index a2fcd8402d37..5dfa68ffc11c 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -335,6 +335,7 @@ struct mlx4_priv { | |||
335 | struct mlx4_cmd cmd; | 335 | struct mlx4_cmd cmd; |
336 | 336 | ||
337 | struct mlx4_bitmap pd_bitmap; | 337 | struct mlx4_bitmap pd_bitmap; |
338 | struct mlx4_bitmap xrcd_bitmap; | ||
338 | struct mlx4_uar_table uar_table; | 339 | struct mlx4_uar_table uar_table; |
339 | struct mlx4_mr_table mr_table; | 340 | struct mlx4_mr_table mr_table; |
340 | struct mlx4_cq_table cq_table; | 341 | struct mlx4_cq_table cq_table; |
@@ -384,6 +385,7 @@ int mlx4_alloc_eq_table(struct mlx4_dev *dev); | |||
384 | void mlx4_free_eq_table(struct mlx4_dev *dev); | 385 | void mlx4_free_eq_table(struct mlx4_dev *dev); |
385 | 386 | ||
386 | int mlx4_init_pd_table(struct mlx4_dev *dev); | 387 | int mlx4_init_pd_table(struct mlx4_dev *dev); |
388 | int mlx4_init_xrcd_table(struct mlx4_dev *dev); | ||
387 | int mlx4_init_uar_table(struct mlx4_dev *dev); | 389 | int mlx4_init_uar_table(struct mlx4_dev *dev); |
388 | int mlx4_init_mr_table(struct mlx4_dev *dev); | 390 | int mlx4_init_mr_table(struct mlx4_dev *dev); |
389 | int mlx4_init_eq_table(struct mlx4_dev *dev); | 391 | int mlx4_init_eq_table(struct mlx4_dev *dev); |
@@ -393,6 +395,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev); | |||
393 | int mlx4_init_mcg_table(struct mlx4_dev *dev); | 395 | int mlx4_init_mcg_table(struct mlx4_dev *dev); |
394 | 396 | ||
395 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev); | 397 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev); |
398 | void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev); | ||
396 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev); | 399 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev); |
397 | void mlx4_cleanup_mr_table(struct mlx4_dev *dev); | 400 | void mlx4_cleanup_mr_table(struct mlx4_dev *dev); |
398 | void mlx4_cleanup_eq_table(struct mlx4_dev *dev); | 401 | void mlx4_cleanup_eq_table(struct mlx4_dev *dev); |
@@ -450,6 +453,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | |||
450 | 453 | ||
451 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | 454 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); |
452 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); | 455 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); |
456 | int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port); | ||
453 | 457 | ||
454 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 458 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
455 | enum mlx4_protocol prot, enum mlx4_steer_type steer); | 459 | enum mlx4_protocol prot, enum mlx4_steer_type steer); |
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 9c188bdd7f4f..ab639cfef78e 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -139,7 +139,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) | |||
139 | 139 | ||
140 | buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), | 140 | buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), |
141 | GFP_KERNEL); | 141 | GFP_KERNEL); |
142 | buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), | 142 | buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, |
143 | GFP_KERNEL); | 143 | GFP_KERNEL); |
144 | if (!buddy->bits || !buddy->num_free) | 144 | if (!buddy->bits || !buddy->num_free) |
145 | goto err_out; | 145 | goto err_out; |
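
The old line sized num_free by sizeof(int *) rather than the element type: harmless over-allocation on 64-bit, but a type mismatch all the same. kcalloc() with sizeof *ptr ties the element size to the pointee and adds an overflow check on the multiplication. The general idiom:

    int *counts;

    counts = kcalloc(nelems, sizeof(*counts), GFP_KERNEL); /* zeroed array */
    if (!counts)
            return -ENOMEM;
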
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index 1286b886dcea..3736163e30e9 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c | |||
@@ -61,6 +61,24 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) | |||
61 | } | 61 | } |
62 | EXPORT_SYMBOL_GPL(mlx4_pd_free); | 62 | EXPORT_SYMBOL_GPL(mlx4_pd_free); |
63 | 63 | ||
64 | int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) | ||
65 | { | ||
66 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
67 | |||
68 | *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap); | ||
69 | if (*xrcdn == -1) | ||
70 | return -ENOMEM; | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); | ||
75 | |||
76 | void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) | ||
77 | { | ||
78 | mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn); | ||
79 | } | ||
80 | EXPORT_SYMBOL_GPL(mlx4_xrcd_free); | ||
81 | |||
64 | int mlx4_init_pd_table(struct mlx4_dev *dev) | 82 | int mlx4_init_pd_table(struct mlx4_dev *dev) |
65 | { | 83 | { |
66 | struct mlx4_priv *priv = mlx4_priv(dev); | 84 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -74,6 +92,18 @@ void mlx4_cleanup_pd_table(struct mlx4_dev *dev) | |||
74 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); | 92 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); |
75 | } | 93 | } |
76 | 94 | ||
95 | int mlx4_init_xrcd_table(struct mlx4_dev *dev) | ||
96 | { | ||
97 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
98 | |||
99 | return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16), | ||
100 | (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0); | ||
101 | } | ||
102 | |||
103 | void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) | ||
104 | { | ||
105 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap); | ||
106 | } | ||
77 | 107 | ||
78 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) | 108 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) |
79 | { | 109 | { |
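
XRC domain numbers come from a plain bitmap, exactly like PDs. Consumer-side sketch (illustrative; the mlx4 InfiniBand driver is the expected caller):

    u32 xrcdn;
    int err;

    err = mlx4_xrcd_alloc(dev, &xrcdn);
    if (err)
            return err;
    /* ... program xrcdn into SRQ and QP contexts ... */
    mlx4_xrcd_free(dev, xrcdn);
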
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 609e0ec14cee..881592eec614 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c | |||
@@ -148,22 +148,26 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) | |||
148 | 148 | ||
149 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { | 149 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { |
150 | err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); | 150 | err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); |
151 | if (!err) { | 151 | if (err) |
152 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | ||
153 | if (!entry) { | ||
154 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
155 | return -ENOMEM; | ||
156 | } | ||
157 | entry->mac = mac; | ||
158 | err = radix_tree_insert(&info->mac_tree, *qpn, entry); | ||
159 | if (err) { | ||
160 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
161 | return err; | ||
162 | } | ||
163 | } else | ||
164 | return err; | 152 | return err; |
153 | |||
154 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | ||
155 | if (!entry) { | ||
156 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
157 | return -ENOMEM; | ||
158 | } | ||
159 | |||
160 | entry->mac = mac; | ||
161 | err = radix_tree_insert(&info->mac_tree, *qpn, entry); | ||
162 | if (err) { | ||
163 | kfree(entry); | ||
164 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
165 | return err; | ||
166 | } | ||
165 | } | 167 | } |
168 | |||
166 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); | 169 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); |
170 | |||
167 | mutex_lock(&table->mutex); | 171 | mutex_lock(&table->mutex); |
168 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { | 172 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { |
169 | if (free < 0 && !table->refs[i]) { | 173 | if (free < 0 && !table->refs[i]) { |
@@ -464,6 +468,48 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) | |||
464 | return err; | 468 | return err; |
465 | } | 469 | } |
466 | 470 | ||
471 | int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) | ||
472 | { | ||
473 | struct mlx4_cmd_mailbox *inmailbox, *outmailbox; | ||
474 | u8 *inbuf, *outbuf; | ||
475 | int err, packet_error; | ||
476 | |||
477 | inmailbox = mlx4_alloc_cmd_mailbox(dev); | ||
478 | if (IS_ERR(inmailbox)) | ||
479 | return PTR_ERR(inmailbox); | ||
480 | |||
481 | outmailbox = mlx4_alloc_cmd_mailbox(dev); | ||
482 | if (IS_ERR(outmailbox)) { | ||
483 | mlx4_free_cmd_mailbox(dev, inmailbox); | ||
484 | return PTR_ERR(outmailbox); | ||
485 | } | ||
486 | |||
487 | inbuf = inmailbox->buf; | ||
488 | outbuf = outmailbox->buf; | ||
489 | memset(inbuf, 0, 256); | ||
490 | memset(outbuf, 0, 256); | ||
491 | inbuf[0] = 1; | ||
492 | inbuf[1] = 1; | ||
493 | inbuf[2] = 1; | ||
494 | inbuf[3] = 1; | ||
495 | |||
496 | *(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO; | ||
497 | *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); | ||
498 | |||
499 | err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, | ||
500 | MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); | ||
501 | |||
502 | packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4)); | ||
503 | |||
504 | dev->caps.ext_port_cap[port] = (!err && !packet_error) ? | ||
505 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO | ||
506 | : 0; | ||
507 | |||
508 | mlx4_free_cmd_mailbox(dev, inmailbox); | ||
509 | mlx4_free_cmd_mailbox(dev, outmailbox); | ||
510 | return err; | ||
511 | } | ||
512 | |||
467 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | 513 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) |
468 | { | 514 | { |
469 | struct mlx4_cmd_mailbox *mailbox; | 515 | struct mlx4_cmd_mailbox *mailbox; |
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index ec9350e5f21a..51c53898c35f 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c | |||
@@ -280,6 +280,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) | |||
280 | * We reserve 2 extra QPs per port for the special QPs. The | 280 | * We reserve 2 extra QPs per port for the special QPs. The |
281 | * block of special QPs must be aligned to a multiple of 8, so | 281 | * block of special QPs must be aligned to a multiple of 8, so |
282 | * round up. | 282 | * round up. |
283 | * | ||
284 | * We also reserve the MSB of the 24-bit QP number to indicate | ||
285 | * that a QP is an XRC QP. | ||
283 | */ | 286 | */ |
284 | dev->caps.sqp_start = | 287 | dev->caps.sqp_start = |
285 | ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); | 288 | ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); |
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index 3b07b80a0456..a20b141dbb5c 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c | |||
@@ -40,20 +40,20 @@ | |||
40 | struct mlx4_srq_context { | 40 | struct mlx4_srq_context { |
41 | __be32 state_logsize_srqn; | 41 | __be32 state_logsize_srqn; |
42 | u8 logstride; | 42 | u8 logstride; |
43 | u8 reserved1[3]; | 43 | u8 reserved1; |
44 | u8 pg_offset; | 44 | __be16 xrcd; |
45 | u8 reserved2[3]; | 45 | __be32 pg_offset_cqn; |
46 | u32 reserved3; | 46 | u32 reserved2; |
47 | u8 log_page_size; | 47 | u8 log_page_size; |
48 | u8 reserved4[2]; | 48 | u8 reserved3[2]; |
49 | u8 mtt_base_addr_h; | 49 | u8 mtt_base_addr_h; |
50 | __be32 mtt_base_addr_l; | 50 | __be32 mtt_base_addr_l; |
51 | __be32 pd; | 51 | __be32 pd; |
52 | __be16 limit_watermark; | 52 | __be16 limit_watermark; |
53 | __be16 wqe_cnt; | 53 | __be16 wqe_cnt; |
54 | u16 reserved5; | 54 | u16 reserved4; |
55 | __be16 wqe_counter; | 55 | __be16 wqe_counter; |
56 | u32 reserved6; | 56 | u32 reserved5; |
57 | __be64 db_rec_addr; | 57 | __be64 db_rec_addr; |
58 | }; | 58 | }; |
59 | 59 | ||
@@ -109,8 +109,8 @@ static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
109 | MLX4_CMD_TIME_CLASS_A); | 109 | MLX4_CMD_TIME_CLASS_A); |
110 | } | 110 | } |
111 | 111 | ||
112 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, | 112 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, |
113 | u64 db_rec, struct mlx4_srq *srq) | 113 | struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) |
114 | { | 114 | { |
115 | struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; | 115 | struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; |
116 | struct mlx4_cmd_mailbox *mailbox; | 116 | struct mlx4_cmd_mailbox *mailbox; |
@@ -148,6 +148,8 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, | |||
148 | srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | | 148 | srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | |
149 | srq->srqn); | 149 | srq->srqn); |
150 | srq_context->logstride = srq->wqe_shift - 4; | 150 | srq_context->logstride = srq->wqe_shift - 4; |
151 | srq_context->xrcd = cpu_to_be16(xrcd); | ||
152 | srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff); | ||
151 | srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; | 153 | srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; |
152 | 154 | ||
153 | mtt_addr = mlx4_mtt_addr(dev, mtt); | 155 | mtt_addr = mlx4_mtt_addr(dev, mtt); |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 53ef894bfa05..ff3ccd5c44d6 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -61,6 +61,7 @@ enum { | |||
61 | MLX4_DEV_CAP_FLAG_RC = 1LL << 0, | 61 | MLX4_DEV_CAP_FLAG_RC = 1LL << 0, |
62 | MLX4_DEV_CAP_FLAG_UC = 1LL << 1, | 62 | MLX4_DEV_CAP_FLAG_UC = 1LL << 1, |
63 | MLX4_DEV_CAP_FLAG_UD = 1LL << 2, | 63 | MLX4_DEV_CAP_FLAG_UD = 1LL << 2, |
64 | MLX4_DEV_CAP_FLAG_XRC = 1LL << 3, | ||
64 | MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, | 65 | MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, |
65 | MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, | 66 | MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, |
66 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, | 67 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, |
@@ -82,6 +83,12 @@ enum { | |||
82 | MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 | 83 | MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 |
83 | }; | 84 | }; |
84 | 85 | ||
86 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) | ||
87 | |||
88 | enum { | ||
89 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 | ||
90 | }; | ||
91 | |||
85 | enum { | 92 | enum { |
86 | MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, | 93 | MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, |
87 | MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, | 94 | MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, |
@@ -256,6 +263,8 @@ struct mlx4_caps { | |||
256 | int num_qp_per_mgm; | 263 | int num_qp_per_mgm; |
257 | int num_pds; | 264 | int num_pds; |
258 | int reserved_pds; | 265 | int reserved_pds; |
266 | int max_xrcds; | ||
267 | int reserved_xrcds; | ||
259 | int mtt_entry_sz; | 268 | int mtt_entry_sz; |
260 | u32 max_msg_sz; | 269 | u32 max_msg_sz; |
261 | u32 page_size_cap; | 270 | u32 page_size_cap; |
@@ -276,6 +285,7 @@ struct mlx4_caps { | |||
276 | u32 port_mask; | 285 | u32 port_mask; |
277 | enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; | 286 | enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; |
278 | u32 max_counters; | 287 | u32 max_counters; |
288 | u8 ext_port_cap[MLX4_MAX_PORTS + 1]; | ||
279 | }; | 289 | }; |
280 | 290 | ||
281 | struct mlx4_buf_list { | 291 | struct mlx4_buf_list { |
@@ -499,6 +509,8 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) | |||
499 | 509 | ||
500 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); | 510 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); |
501 | void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); | 511 | void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); |
512 | int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); | ||
513 | void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); | ||
502 | 514 | ||
503 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); | 515 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); |
504 | void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); | 516 | void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); |
@@ -538,8 +550,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | |||
538 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); | 550 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); |
539 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); | 551 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); |
540 | 552 | ||
541 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, | 553 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, |
542 | u64 db_rec, struct mlx4_srq *srq); | 554 | struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq); |
543 | void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); | 555 | void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); |
544 | int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); | 556 | int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); |
545 | int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); | 557 | int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 4001c8249dbb..48cc4cb97858 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -75,6 +75,7 @@ enum { | |||
75 | MLX4_QP_ST_UC = 0x1, | 75 | MLX4_QP_ST_UC = 0x1, |
76 | MLX4_QP_ST_RD = 0x2, | 76 | MLX4_QP_ST_RD = 0x2, |
77 | MLX4_QP_ST_UD = 0x3, | 77 | MLX4_QP_ST_UD = 0x3, |
78 | MLX4_QP_ST_XRC = 0x6, | ||
78 | MLX4_QP_ST_MLX = 0x7 | 79 | MLX4_QP_ST_MLX = 0x7 |
79 | }; | 80 | }; |
80 | 81 | ||
@@ -137,7 +138,7 @@ struct mlx4_qp_context { | |||
137 | __be32 ssn; | 138 | __be32 ssn; |
138 | __be32 params2; | 139 | __be32 params2; |
139 | __be32 rnr_nextrecvpsn; | 140 | __be32 rnr_nextrecvpsn; |
140 | __be32 srcd; | 141 | __be32 xrcd; |
141 | __be32 cqn_recv; | 142 | __be32 cqn_recv; |
142 | __be64 db_rec_addr; | 143 | __be64 db_rec_addr; |
143 | __be32 qkey; | 144 | __be32 qkey; |
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h index fe5b05177a2c..81aba3a73aa3 100644 --- a/include/rdma/ib_user_verbs.h +++ b/include/rdma/ib_user_verbs.h | |||
@@ -81,7 +81,11 @@ enum { | |||
81 | IB_USER_VERBS_CMD_MODIFY_SRQ, | 81 | IB_USER_VERBS_CMD_MODIFY_SRQ, |
82 | IB_USER_VERBS_CMD_QUERY_SRQ, | 82 | IB_USER_VERBS_CMD_QUERY_SRQ, |
83 | IB_USER_VERBS_CMD_DESTROY_SRQ, | 83 | IB_USER_VERBS_CMD_DESTROY_SRQ, |
84 | IB_USER_VERBS_CMD_POST_SRQ_RECV | 84 | IB_USER_VERBS_CMD_POST_SRQ_RECV, |
85 | IB_USER_VERBS_CMD_OPEN_XRCD, | ||
86 | IB_USER_VERBS_CMD_CLOSE_XRCD, | ||
87 | IB_USER_VERBS_CMD_CREATE_XSRQ, | ||
88 | IB_USER_VERBS_CMD_OPEN_QP | ||
85 | }; | 89 | }; |
86 | 90 | ||
87 | /* | 91 | /* |
@@ -222,6 +226,21 @@ struct ib_uverbs_dealloc_pd { | |||
222 | __u32 pd_handle; | 226 | __u32 pd_handle; |
223 | }; | 227 | }; |
224 | 228 | ||
229 | struct ib_uverbs_open_xrcd { | ||
230 | __u64 response; | ||
231 | __u32 fd; | ||
232 | __u32 oflags; | ||
233 | __u64 driver_data[0]; | ||
234 | }; | ||
235 | |||
236 | struct ib_uverbs_open_xrcd_resp { | ||
237 | __u32 xrcd_handle; | ||
238 | }; | ||
239 | |||
240 | struct ib_uverbs_close_xrcd { | ||
241 | __u32 xrcd_handle; | ||
242 | }; | ||
243 | |||
225 | struct ib_uverbs_reg_mr { | 244 | struct ib_uverbs_reg_mr { |
226 | __u64 response; | 245 | __u64 response; |
227 | __u64 start; | 246 | __u64 start; |
@@ -404,6 +423,17 @@ struct ib_uverbs_create_qp { | |||
404 | __u64 driver_data[0]; | 423 | __u64 driver_data[0]; |
405 | }; | 424 | }; |
406 | 425 | ||
426 | struct ib_uverbs_open_qp { | ||
427 | __u64 response; | ||
428 | __u64 user_handle; | ||
429 | __u32 pd_handle; | ||
430 | __u32 qpn; | ||
431 | __u8 qp_type; | ||
432 | __u8 reserved[7]; | ||
433 | __u64 driver_data[0]; | ||
434 | }; | ||
435 | |||
436 | /* also used for open response */ | ||
407 | struct ib_uverbs_create_qp_resp { | 437 | struct ib_uverbs_create_qp_resp { |
408 | __u32 qp_handle; | 438 | __u32 qp_handle; |
409 | __u32 qpn; | 439 | __u32 qpn; |
@@ -648,11 +678,25 @@ struct ib_uverbs_create_srq { | |||
648 | __u64 driver_data[0]; | 678 | __u64 driver_data[0]; |
649 | }; | 679 | }; |
650 | 680 | ||
681 | struct ib_uverbs_create_xsrq { | ||
682 | __u64 response; | ||
683 | __u64 user_handle; | ||
684 | __u32 srq_type; | ||
685 | __u32 pd_handle; | ||
686 | __u32 max_wr; | ||
687 | __u32 max_sge; | ||
688 | __u32 srq_limit; | ||
689 | __u32 reserved; | ||
690 | __u32 xrcd_handle; | ||
691 | __u32 cq_handle; | ||
692 | __u64 driver_data[0]; | ||
693 | }; | ||
694 | |||
651 | struct ib_uverbs_create_srq_resp { | 695 | struct ib_uverbs_create_srq_resp { |
652 | __u32 srq_handle; | 696 | __u32 srq_handle; |
653 | __u32 max_wr; | 697 | __u32 max_wr; |
654 | __u32 max_sge; | 698 | __u32 max_sge; |
655 | __u32 reserved; | 699 | __u32 srqn; |
656 | }; | 700 | }; |
657 | 701 | ||
658 | struct ib_uverbs_modify_srq { | 702 | struct ib_uverbs_modify_srq { |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 228be3e220d9..bf5daafe8ecc 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -112,6 +112,7 @@ enum ib_device_cap_flags { | |||
112 | */ | 112 | */ |
113 | IB_DEVICE_UD_IP_CSUM = (1<<18), | 113 | IB_DEVICE_UD_IP_CSUM = (1<<18), |
114 | IB_DEVICE_UD_TSO = (1<<19), | 114 | IB_DEVICE_UD_TSO = (1<<19), |
115 | IB_DEVICE_XRC = (1<<20), | ||
115 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), | 116 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), |
116 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), | 117 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), |
117 | }; | 118 | }; |
@@ -207,6 +208,7 @@ enum ib_port_cap_flags { | |||
207 | IB_PORT_SM_DISABLED = 1 << 10, | 208 | IB_PORT_SM_DISABLED = 1 << 10, |
208 | IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, | 209 | IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, |
209 | IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, | 210 | IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, |
211 | IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, | ||
210 | IB_PORT_CM_SUP = 1 << 16, | 212 | IB_PORT_CM_SUP = 1 << 16, |
211 | IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, | 213 | IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, |
212 | IB_PORT_REINIT_SUP = 1 << 18, | 214 | IB_PORT_REINIT_SUP = 1 << 18, |
@@ -415,7 +417,15 @@ enum ib_rate { | |||
415 | IB_RATE_40_GBPS = 7, | 417 | IB_RATE_40_GBPS = 7, |
416 | IB_RATE_60_GBPS = 8, | 418 | IB_RATE_60_GBPS = 8, |
417 | IB_RATE_80_GBPS = 9, | 419 | IB_RATE_80_GBPS = 9, |
418 | IB_RATE_120_GBPS = 10 | 420 | IB_RATE_120_GBPS = 10, |
421 | IB_RATE_14_GBPS = 11, | ||
422 | IB_RATE_56_GBPS = 12, | ||
423 | IB_RATE_112_GBPS = 13, | ||
424 | IB_RATE_168_GBPS = 14, | ||
425 | IB_RATE_25_GBPS = 15, | ||
426 | IB_RATE_100_GBPS = 16, | ||
427 | IB_RATE_200_GBPS = 17, | ||
428 | IB_RATE_300_GBPS = 18 | ||
419 | }; | 429 | }; |
420 | 430 | ||
421 | /** | 431 | /** |
@@ -427,6 +437,13 @@ enum ib_rate { | |||
427 | int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; | 437 | int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; |
428 | 438 | ||
429 | /** | 439 | /** |
440 | * ib_rate_to_mbps - Convert the IB rate enum to Mbps. | ||
441 | * For example, IB_RATE_2_5_GBPS will be converted to 2500. | ||
442 | * @rate: rate to convert. | ||
443 | */ | ||
444 | int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__; | ||
445 | |||
446 | /** | ||
430 | * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate | 447 | * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate |
431 | * enum. | 448 | * enum. |
432 | * @mult: multiple to convert. | 449 | * @mult: multiple to convert. |
@@ -522,6 +539,11 @@ enum ib_cq_notify_flags { | |||
522 | IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, | 539 | IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, |
523 | }; | 540 | }; |
524 | 541 | ||
542 | enum ib_srq_type { | ||
543 | IB_SRQT_BASIC, | ||
544 | IB_SRQT_XRC | ||
545 | }; | ||
546 | |||
525 | enum ib_srq_attr_mask { | 547 | enum ib_srq_attr_mask { |
526 | IB_SRQ_MAX_WR = 1 << 0, | 548 | IB_SRQ_MAX_WR = 1 << 0, |
527 | IB_SRQ_LIMIT = 1 << 1, | 549 | IB_SRQ_LIMIT = 1 << 1, |
@@ -537,6 +559,14 @@ struct ib_srq_init_attr { | |||
537 | void (*event_handler)(struct ib_event *, void *); | 559 | void (*event_handler)(struct ib_event *, void *); |
538 | void *srq_context; | 560 | void *srq_context; |
539 | struct ib_srq_attr attr; | 561 | struct ib_srq_attr attr; |
562 | enum ib_srq_type srq_type; | ||
563 | |||
564 | union { | ||
565 | struct { | ||
566 | struct ib_xrcd *xrcd; | ||
567 | struct ib_cq *cq; | ||
568 | } xrc; | ||
569 | } ext; | ||
540 | }; | 570 | }; |
541 | 571 | ||
542 | struct ib_qp_cap { | 572 | struct ib_qp_cap { |
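
For IB_SRQT_XRC the creator must now supply the XRC domain and a completion queue through the new ext union; basic SRQs leave it untouched. Caller sketch (values illustrative):

    struct ib_srq_init_attr init = {
            .srq_type = IB_SRQT_XRC,
            .attr = {
                    .max_wr  = 256,
                    .max_sge = 1,
            },
            .ext.xrc = {
                    .xrcd = xrcd,
                    .cq   = cq,
            },
    };
    struct ib_srq *srq = ib_create_srq(pd, &init);
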
@@ -565,7 +595,11 @@ enum ib_qp_type { | |||
565 | IB_QPT_UC, | 595 | IB_QPT_UC, |
566 | IB_QPT_UD, | 596 | IB_QPT_UD, |
567 | IB_QPT_RAW_IPV6, | 597 | IB_QPT_RAW_IPV6, |
568 | IB_QPT_RAW_ETHERTYPE | 598 | IB_QPT_RAW_ETHERTYPE, |
599 | /* Save 8 for RAW_PACKET */ | ||
600 | IB_QPT_XRC_INI = 9, | ||
601 | IB_QPT_XRC_TGT, | ||
602 | IB_QPT_MAX | ||
569 | }; | 603 | }; |
570 | 604 | ||
571 | enum ib_qp_create_flags { | 605 | enum ib_qp_create_flags { |
@@ -579,6 +613,7 @@ struct ib_qp_init_attr { | |||
579 | struct ib_cq *send_cq; | 613 | struct ib_cq *send_cq; |
580 | struct ib_cq *recv_cq; | 614 | struct ib_cq *recv_cq; |
581 | struct ib_srq *srq; | 615 | struct ib_srq *srq; |
616 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ | ||
582 | struct ib_qp_cap cap; | 617 | struct ib_qp_cap cap; |
583 | enum ib_sig_type sq_sig_type; | 618 | enum ib_sig_type sq_sig_type; |
584 | enum ib_qp_type qp_type; | 619 | enum ib_qp_type qp_type; |
@@ -586,6 +621,13 @@ struct ib_qp_init_attr { | |||
586 | u8 port_num; /* special QP types only */ | 621 | u8 port_num; /* special QP types only */ |
587 | }; | 622 | }; |
588 | 623 | ||
624 | struct ib_qp_open_attr { | ||
625 | void (*event_handler)(struct ib_event *, void *); | ||
626 | void *qp_context; | ||
627 | u32 qp_num; | ||
628 | enum ib_qp_type qp_type; | ||
629 | }; | ||
630 | |||
589 | enum ib_rnr_timeout { | 631 | enum ib_rnr_timeout { |
590 | IB_RNR_TIMER_655_36 = 0, | 632 | IB_RNR_TIMER_655_36 = 0, |
591 | IB_RNR_TIMER_000_01 = 1, | 633 | IB_RNR_TIMER_000_01 = 1, |
@@ -770,6 +812,7 @@ struct ib_send_wr { | |||
770 | u32 rkey; | 812 | u32 rkey; |
771 | } fast_reg; | 813 | } fast_reg; |
772 | } wr; | 814 | } wr; |
815 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ | ||
773 | }; | 816 | }; |
774 | 817 | ||
775 | struct ib_recv_wr { | 818 | struct ib_recv_wr { |
@@ -831,6 +874,7 @@ struct ib_ucontext { | |||
831 | struct list_head qp_list; | 874 | struct list_head qp_list; |
832 | struct list_head srq_list; | 875 | struct list_head srq_list; |
833 | struct list_head ah_list; | 876 | struct list_head ah_list; |
877 | struct list_head xrcd_list; | ||
834 | int closing; | 878 | int closing; |
835 | }; | 879 | }; |
836 | 880 | ||
@@ -858,6 +902,15 @@ struct ib_pd { | |||
858 | atomic_t usecnt; /* count all resources */ | 902 | atomic_t usecnt; /* count all resources */ |
859 | }; | 903 | }; |
860 | 904 | ||
905 | struct ib_xrcd { | ||
906 | struct ib_device *device; | ||
907 | atomic_t usecnt; /* count all exposed resources */ | ||
908 | struct inode *inode; | ||
909 | |||
910 | struct mutex tgt_qp_mutex; | ||
911 | struct list_head tgt_qp_list; | ||
912 | }; | ||
913 | |||
861 | struct ib_ah { | 914 | struct ib_ah { |
862 | struct ib_device *device; | 915 | struct ib_device *device; |
863 | struct ib_pd *pd; | 916 | struct ib_pd *pd; |
@@ -882,7 +935,16 @@ struct ib_srq { | |||
882 | struct ib_uobject *uobject; | 935 | struct ib_uobject *uobject; |
883 | void (*event_handler)(struct ib_event *, void *); | 936 | void (*event_handler)(struct ib_event *, void *); |
884 | void *srq_context; | 937 | void *srq_context; |
938 | enum ib_srq_type srq_type; | ||
885 | atomic_t usecnt; | 939 | atomic_t usecnt; |
940 | |||
941 | union { | ||
942 | struct { | ||
943 | struct ib_xrcd *xrcd; | ||
944 | struct ib_cq *cq; | ||
945 | u32 srq_num; | ||
946 | } xrc; | ||
947 | } ext; | ||
886 | }; | 948 | }; |
887 | 949 | ||
888 | struct ib_qp { | 950 | struct ib_qp { |
@@ -891,6 +953,11 @@ struct ib_qp { | |||
891 | struct ib_cq *send_cq; | 953 | struct ib_cq *send_cq; |
892 | struct ib_cq *recv_cq; | 954 | struct ib_cq *recv_cq; |
893 | struct ib_srq *srq; | 955 | struct ib_srq *srq; |
956 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ | ||
957 | struct list_head xrcd_list; | ||
958 | atomic_t usecnt; /* count times opened */ | ||
959 | struct list_head open_list; | ||
960 | struct ib_qp *real_qp; | ||
894 | struct ib_uobject *uobject; | 961 | struct ib_uobject *uobject; |
895 | void (*event_handler)(struct ib_event *, void *); | 962 | void (*event_handler)(struct ib_event *, void *); |
896 | void *qp_context; | 963 | void *qp_context; |
@@ -1149,6 +1216,10 @@ struct ib_device { | |||
1149 | struct ib_grh *in_grh, | 1216 | struct ib_grh *in_grh, |
1150 | struct ib_mad *in_mad, | 1217 | struct ib_mad *in_mad, |
1151 | struct ib_mad *out_mad); | 1218 | struct ib_mad *out_mad); |
1219 | struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, | ||
1220 | struct ib_ucontext *ucontext, | ||
1221 | struct ib_udata *udata); | ||
1222 | int (*dealloc_xrcd)(struct ib_xrcd *xrcd); | ||
1152 | 1223 | ||
1153 | struct ib_dma_mapping_ops *dma_ops; | 1224 | struct ib_dma_mapping_ops *dma_ops; |
1154 | 1225 | ||
@@ -1443,6 +1514,25 @@ int ib_query_qp(struct ib_qp *qp, | |||
1443 | int ib_destroy_qp(struct ib_qp *qp); | 1514 | int ib_destroy_qp(struct ib_qp *qp); |
1444 | 1515 | ||
1445 | /** | 1516 | /** |
1517 | * ib_open_qp - Obtain a reference to an existing sharable QP. | ||
1518 | * @xrcd: XRC domain | ||
1519 | * @qp_open_attr: Attributes identifying the QP to open. | ||
1520 | * | ||
1521 | * Returns a reference to a sharable QP. | ||
1522 | */ | ||
1523 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, | ||
1524 | struct ib_qp_open_attr *qp_open_attr); | ||
1525 | |||
1526 | /** | ||
1527 | * ib_close_qp - Release an external reference to a QP. | ||
1528 | * @qp: The QP handle to release | ||
1529 | * | ||
1530 | * Releases the QP handle obtained with ib_open_qp(). The underlying | ||
1531 | * shared QP is not destroyed until all internal references are released. | ||
1532 | */ | ||
1533 | int ib_close_qp(struct ib_qp *qp); | ||
1534 | |||
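A consumer that wants receive access to a TGT QP created by another process opens it through the shared XRC domain rather than creating its own. A minimal sketch, assuming the QP number was exchanged out of band:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: attach to a TGT QP another consumer created
 * in the same XRC domain; qp_num is shared out of band. */
static struct ib_qp *attach_tgt_qp(struct ib_xrcd *xrcd, u32 qp_num,
				   void (*handler)(struct ib_event *, void *),
				   void *ctx)
{
	struct ib_qp_open_attr attr = {
		.event_handler = handler,
		.qp_context    = ctx,
		.qp_num	       = qp_num,
		.qp_type       = IB_QPT_XRC_TGT,
	};

	return ib_open_qp(xrcd, &attr);
}

When the consumer is done it calls ib_close_qp() on the returned handle; per the comment above, the real QP survives until all internal references are gone.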
1535 | /** | ||
1446 | * ib_post_send - Posts a list of work requests to the send queue of | 1536 | * ib_post_send - Posts a list of work requests to the send queue of |
1447 | * the specified QP. | 1537 | * the specified QP. |
1448 | * @qp: The QP to post the work request on. | 1538 | * @qp: The QP to post the work request on. |
@@ -2060,4 +2150,16 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); | |||
2060 | */ | 2150 | */ |
2061 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); | 2151 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
2062 | 2152 | ||
2153 | /** | ||
2154 | * ib_alloc_xrcd - Allocates an XRC domain. | ||
2155 | * @device: The device on which to allocate the XRC domain. | ||
2156 | */ | ||
2157 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); | ||
2158 | |||
2159 | /** | ||
2160 | * ib_dealloc_xrcd - Deallocates an XRC domain. | ||
2161 | * @xrcd: The XRC domain to deallocate. | ||
2162 | */ | ||
2163 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd); | ||
2164 | |||
2063 | #endif /* IB_VERBS_H */ | 2165 | #endif /* IB_VERBS_H */ |
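Tying the ib_verbs.h changes together, a kernel consumer's XRC domain lifetime might look like the hedged sketch below. The -EBUSY behavior is inferred from the usecnt comment on struct ib_xrcd above ("count all exposed resources"), not spelled out in this header:

#include <rdma/ib_verbs.h>

/* Sketch: XRC domain lifetime. device is an ib_device obtained via
 * the usual client registration path (assumed). */
struct ib_xrcd *xrcd = ib_alloc_xrcd(device);

if (IS_ERR(xrcd))
	return PTR_ERR(xrcd);

/* ... create XRC SRQs, INI QPs, and TGT QPs against xrcd ... */

ret = ib_dealloc_xrcd(xrcd);	/* expected -EBUSY while resources remain */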
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index 2d0191c90f9e..1a046b1595cc 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h | |||
@@ -52,8 +52,10 @@ struct iw_cm_event { | |||
52 | struct sockaddr_in local_addr; | 52 | struct sockaddr_in local_addr; |
53 | struct sockaddr_in remote_addr; | 53 | struct sockaddr_in remote_addr; |
54 | void *private_data; | 54 | void *private_data; |
55 | u8 private_data_len; | ||
56 | void *provider_data; | 55 | void *provider_data; |
56 | u8 private_data_len; | ||
57 | u8 ord; | ||
58 | u8 ird; | ||
57 | }; | 59 | }; |
58 | 60 | ||
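The two new bytes let iWARP drivers report the peer's negotiated ORD/IRD depths up through the connection event. A hypothetical handler reading them (handler name assumed):

#include <rdma/iw_cm.h>

/* Sketch of an iw_cm event handler consuming the new ord/ird fields. */
static int my_iw_cm_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *event)
{
	if (event->event == IW_CM_EVENT_CONNECT_REQUEST)
		pr_info("connect request: ord %u ird %u\n",
			event->ord, event->ird);
	return 0;
}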
59 | /** | 61 | /** |
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 26977c149c41..51988f808181 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -65,6 +65,7 @@ enum rdma_cm_event_type { | |||
65 | enum rdma_port_space { | 65 | enum rdma_port_space { |
66 | RDMA_PS_SDP = 0x0001, | 66 | RDMA_PS_SDP = 0x0001, |
67 | RDMA_PS_IPOIB = 0x0002, | 67 | RDMA_PS_IPOIB = 0x0002, |
68 | RDMA_PS_IB = 0x013F, | ||
68 | RDMA_PS_TCP = 0x0106, | 69 | RDMA_PS_TCP = 0x0106, |
69 | RDMA_PS_UDP = 0x0111, | 70 | RDMA_PS_UDP = 0x0111, |
70 | }; | 71 | }; |
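RDMA_PS_IB gives consumers a native IB port space, and a companion patch in this series passes the QP type into rdma_create_id() so the CM no longer infers it from the port space. A hedged sketch against that four-argument signature, with the handler and context names assumed:

#include <rdma/rdma_cm.h>

/* my_cma_handler is an assumed rdma_cm_event_handler. */
struct rdma_cm_id *id = rdma_create_id(my_cma_handler, my_ctx,
				       RDMA_PS_IB, IB_QPT_UD);
if (IS_ERR(id))
	return PTR_ERR(id);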
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index fc82c1896f75..5348a000c8f3 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h | |||
@@ -77,7 +77,8 @@ struct rdma_ucm_create_id { | |||
77 | __u64 uid; | 77 | __u64 uid; |
78 | __u64 response; | 78 | __u64 response; |
79 | __u16 ps; | 79 | __u16 ps; |
80 | __u8 reserved[6]; | 80 | __u8 qp_type; |
81 | __u8 reserved[5]; | ||
81 | }; | 82 | }; |
82 | 83 | ||
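On the userspace side, qp_type replaces one reserved byte, so the command size and ABI layout are unchanged. A hedged sketch of filling the command; the struct is written to the rdma_cm device behind the usual ucma command header, which is omitted here, and my_ctx/resp are assumed locals:

struct rdma_ucm_create_id cmd = {
	.uid	  = (__u64) (uintptr_t) my_ctx,	/* caller's cookie */
	.response = (__u64) (uintptr_t) &resp,	/* rdma_ucm_create_id_resp */
	.ps	  = RDMA_PS_IB,
	.qp_type  = IB_QPT_RC,
};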
83 | struct rdma_ucm_create_id_resp { | 84 | struct rdma_ucm_create_id_resp { |