Diffstat (limited to 'drivers/infiniband')
74 files changed, 4490 insertions, 980 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 82013946efc9..8b72f39202fb 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -890,6 +890,8 @@ retest:
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irq(&cm_id_priv->lock);
+		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+			break;
 		ib_send_cm_dreq(cm_id, NULL, 0);
 		goto retest;
 	case IB_CM_DREQ_SENT:
@@ -1009,7 +1011,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	req_msg->service_id = param->service_id;
 	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
-	cm_req_set_resp_res(req_msg, param->responder_resources);
 	cm_req_set_init_depth(req_msg, param->initiator_depth);
 	cm_req_set_remote_resp_timeout(req_msg,
 				       param->remote_cm_response_timeout);
@@ -1018,12 +1019,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
 	cm_req_set_local_resp_timeout(req_msg,
 				      param->local_cm_response_timeout);
-	cm_req_set_retry_count(req_msg, param->retry_count);
 	req_msg->pkey = param->primary_path->pkey;
 	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
-	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
 	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
-	cm_req_set_srq(req_msg, param->srq);
+
+	if (param->qp_type != IB_QPT_XRC_INI) {
+		cm_req_set_resp_res(req_msg, param->responder_resources);
+		cm_req_set_retry_count(req_msg, param->retry_count);
+		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
+		cm_req_set_srq(req_msg, param->srq);
+	}
 
 	if (pri_path->hop_limit <= 1) {
 		req_msg->primary_local_lid = pri_path->slid;
@@ -1081,7 +1086,8 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
 	if (!param->primary_path)
 		return -EINVAL;
 
-	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
+	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
+	    param->qp_type != IB_QPT_XRC_INI)
 		return -EINVAL;
 
 	if (param->private_data &&
@@ -1602,18 +1608,24 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
 	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
 	rep_msg->local_comm_id = cm_id_priv->id.local_id;
 	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
 	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
 	rep_msg->resp_resources = param->responder_resources;
-	rep_msg->initiator_depth = param->initiator_depth;
 	cm_rep_set_target_ack_delay(rep_msg,
 				    cm_id_priv->av.port->cm_dev->ack_delay);
 	cm_rep_set_failover(rep_msg, param->failover_accepted);
-	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
-	cm_rep_set_srq(rep_msg, param->srq);
 	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 
+	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
+		rep_msg->initiator_depth = param->initiator_depth;
+		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
+		cm_rep_set_srq(rep_msg, param->srq);
+		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+	} else {
+		cm_rep_set_srq(rep_msg, 1);
+		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+	}
+
 	if (param->private_data && param->private_data_len)
 		memcpy(rep_msg->private_data, param->private_data,
 		       param->private_data_len);
@@ -1661,7 +1673,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
 	cm_id_priv->initiator_depth = param->initiator_depth;
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
-	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
 
 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 	return ret;
@@ -1732,7 +1744,7 @@ error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_rtu);
 
-static void cm_format_rep_event(struct cm_work *work)
+static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
 {
 	struct cm_rep_msg *rep_msg;
 	struct ib_cm_rep_event_param *param;
@@ -1741,7 +1753,7 @@ static void cm_format_rep_event(struct cm_work *work)
 	param = &work->cm_event.param.rep_rcvd;
 	param->remote_ca_guid = rep_msg->local_ca_guid;
 	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
-	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
+	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
 	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
 	param->responder_resources = rep_msg->initiator_depth;
 	param->initiator_depth = rep_msg->resp_resources;
@@ -1809,7 +1821,7 @@ static int cm_rep_handler(struct cm_work *work)
 		return -EINVAL;
 	}
 
-	cm_format_rep_event(work);
+	cm_format_rep_event(work, cm_id_priv->qp_type);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
@@ -1824,7 +1836,7 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
 	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
-	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 
 	spin_lock(&cm.lock);
 	/* Check for duplicate REP. */
@@ -1851,7 +1863,7 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_id_priv->id.state = IB_CM_REP_RCVD;
 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
-	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 	cm_id_priv->initiator_depth = rep_msg->resp_resources;
 	cm_id_priv->responder_resources = rep_msg->initiator_depth;
 	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
@@ -3493,7 +3505,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
+		if (cm_id_priv->qp_type == IB_QPT_RC ||
+		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
 			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
 					 IB_QP_MIN_RNR_TIMER;
 			qp_attr->max_dest_rd_atomic =
@@ -3538,15 +3551,21 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
 		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
 		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
+		switch (cm_id_priv->qp_type) {
+		case IB_QPT_RC:
+		case IB_QPT_XRC_INI:
+			*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
 					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->av.timeout;
 			qp_attr->retry_cnt = cm_id_priv->retry_count;
 			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic =
-				cm_id_priv->initiator_depth;
+			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
+			/* fall through */
+		case IB_QPT_XRC_TGT:
+			*qp_attr_mask |= IB_QP_TIMEOUT;
+			qp_attr->timeout = cm_id_priv->av.timeout;
+			break;
+		default:
+			break;
 		}
 		if (cm_id_priv->alt_av.ah_attr.dlid) {
 			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 7e63c08f697c..505db2a59e7f 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
  *
@@ -86,7 +86,7 @@ struct cm_req_msg {
 	__be16 pkey;
 	/* path MTU:4, RDC exists:1, RNR retry count:3. */
 	u8 offset50;
-	/* max CM Retries:4, SRQ:1, rsvd:3 */
+	/* max CM Retries:4, SRQ:1, extended transport type:3 */
 	u8 offset51;
 
 	__be16 primary_local_lid;
@@ -175,6 +175,11 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 	switch(transport_type) {
 	case 0: return IB_QPT_RC;
 	case 1: return IB_QPT_UC;
+	case 3:
+		switch (req_msg->offset51 & 0x7) {
+		case 1: return IB_QPT_XRC_TGT;
+		default: return 0;
+		}
 	default: return 0;
 	}
 }
@@ -188,6 +193,12 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 						  req_msg->offset40) &
 					   0xFFFFFFF9) | 0x2);
 		break;
+	case IB_QPT_XRC_INI:
+		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
+						  req_msg->offset40) &
+					   0xFFFFFFF9) | 0x6);
+		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
 						 req_msg->offset40) &
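Note: per the two hunks above, an XRC REQ is flagged on the wire by transport type 3 ("extended") in bits 1-2 of the offset40 word plus extended transport type 1 (XRC) in the low 3 bits of offset51; the passive side decodes that as IB_QPT_XRC_TGT, since the recipient owns the target QP. A minimal round-trip sketch of the bit layout (byte-order conversions elided):

/* Illustration only: the REQ transport-type encoding used above. */
#include <assert.h>
#include <stdint.h>

static void set_xrc_ini(uint32_t *offset40, uint8_t *offset51)
{
        *offset40 = (*offset40 & 0xFFFFFFF9) | 0x6;     /* transport type 3 */
        *offset51 = (*offset51 & 0xF8) | 1;             /* ext transport 1 = XRC */
}

static int is_xrc_req(uint32_t offset40, uint8_t offset51)
{
        return ((offset40 >> 1) & 0x3) == 3 && (offset51 & 0x7) == 1;
}

int main(void)
{
        uint32_t offset40 = 0;
        uint8_t offset51 = 0;

        set_xrc_ini(&offset40, &offset51);
        assert(is_xrc_req(offset40, offset51));
        return 0;
}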
@@ -527,6 +538,23 @@ static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 					 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }
 
+static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
+{
+	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
+}
+
+static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
+{
+	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
+					(be32_to_cpu(rep_msg->offset16) & 0x000000FF));
+}
+
+static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
+{
+	return (qp_type == IB_QPT_XRC_INI) ?
+		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
+}
+
 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
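Note: for XRC connections the REP returns the target QP number in the 24-bit local EECN field (offset16) rather than the local QPN field, which is why cm_rep_get_qpn() selects a field based on the reader's QP type. The packing itself is the usual value-in-the-top-three-bytes layout; a small sketch:

/* Illustration only: 24-bit field packed into the top three bytes of a
 * 32-bit word, preserving the low byte.  Byte-order conversions elided.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t set24(uint32_t word, uint32_t val)
{
        return (val << 8) | (word & 0x000000FF);
}

static uint32_t get24(uint32_t word)
{
        return word >> 8;
}

int main(void)
{
        uint32_t offset16 = 0x000000AB;         /* low byte holds other fields */

        offset16 = set24(offset16, 0x123456);
        assert(get24(offset16) == 0x123456);
        assert((offset16 & 0xFF) == 0xAB);
        return 0;
}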
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b05716300d2f..75ff821c0af0 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -82,6 +82,7 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
+static DEFINE_IDR(ib_ps);
 
 struct cma_device {
 	struct list_head	list;
@@ -1180,6 +1181,15 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 	event->param.conn.qp_num = req_data->remote_qpn;
 }
 
+static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+{
+	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
+		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
+		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
+		 (id->qp_type == IB_QPT_UD)) ||
+		(!id->qp_type));
+}
+
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id;
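Note: cma_check_req_qp_type() gates incoming requests at the listener: a connection REQ must carry the listener's QP type, a SIDR REQ is only acceptable on UD listeners, and a listener with no QP type set accepts anything. The same rule as a small, checkable truth table (stand-in enums):

/* Illustration only: the acceptance rule above. */
#include <assert.h>

enum ev { REQ, SIDR_REQ };
enum qpt { QPT_NONE, QPT_UD, QPT_RC, QPT_XRC_TGT };

static int check(enum ev e, enum qpt req_type, enum qpt listen_type)
{
        return ((e == REQ && req_type == listen_type) ||
                (e == SIDR_REQ && listen_type == QPT_UD) ||
                listen_type == QPT_NONE);
}

int main(void)
{
        assert(check(REQ, QPT_RC, QPT_RC));             /* type match */
        assert(!check(REQ, QPT_XRC_TGT, QPT_RC));       /* mismatch rejected */
        assert(check(SIDR_REQ, QPT_UD, QPT_UD));        /* SIDR implies UD */
        assert(check(REQ, QPT_RC, QPT_NONE));           /* untyped listener */
        return 0;
}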
@@ -1187,13 +1197,16 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
+	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
+		return -EINVAL;
+
 	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (listen_id->id.qp_type == IB_QPT_UD) {
+	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1329,6 +1342,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		switch (iw_event->status) {
 		case 0:
 			event.event = RDMA_CM_EVENT_ESTABLISHED;
+			event.param.conn.initiator_depth = iw_event->ird;
+			event.param.conn.responder_resources = iw_event->ord;
 			break;
 		case -ECONNRESET:
 		case -ECONNREFUSED:
@@ -1344,6 +1359,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.param.conn.initiator_depth = iw_event->ird;
+		event.param.conn.responder_resources = iw_event->ord;
 		break;
 	default:
 		BUG_ON(1);
@@ -1434,8 +1451,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	event.param.conn.private_data = iw_event->private_data;
 	event.param.conn.private_data_len = iw_event->private_data_len;
-	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
-	event.param.conn.responder_resources = attr.max_qp_rd_atom;
+	event.param.conn.initiator_depth = iw_event->ird;
+	event.param.conn.responder_resources = iw_event->ord;
 
 	/*
 	 * Protect against the user destroying conn_id from another thread
@@ -2235,6 +2252,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	case RDMA_PS_IPOIB:
 		ps = &ipoib_ps;
 		break;
+	case RDMA_PS_IB:
+		ps = &ib_ps;
+		break;
 	default:
 		return -EPROTONOSUPPORT;
 	}
@@ -2570,7 +2590,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.service_id = cma_get_service_id(id_priv->id.ps,
 					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
-	req.qp_type = IB_QPT_RC;
+	req.qp_type = id_priv->id.qp_type;
 	req.starting_psn = id_priv->seq_num;
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
@@ -2617,14 +2637,16 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	iw_param.ord = conn_param->initiator_depth;
-	iw_param.ird = conn_param->responder_resources;
-	iw_param.private_data = conn_param->private_data;
-	iw_param.private_data_len = conn_param->private_data_len;
-	if (id_priv->id.qp)
+	if (conn_param) {
+		iw_param.ord = conn_param->initiator_depth;
+		iw_param.ird = conn_param->responder_resources;
+		iw_param.private_data = conn_param->private_data;
+		iw_param.private_data_len = conn_param->private_data_len;
+		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
+	} else {
+		memset(&iw_param, 0, sizeof iw_param);
 		iw_param.qpn = id_priv->qp_num;
-	else
-		iw_param.qpn = conn_param->qp_num;
+	}
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
 	if (ret) {
@@ -2766,14 +2788,20 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (id->qp_type == IB_QPT_UD)
-			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
-						conn_param->private_data,
-						conn_param->private_data_len);
-		else if (conn_param)
-			ret = cma_accept_ib(id_priv, conn_param);
-		else
-			ret = cma_rep_recv(id_priv);
+		if (id->qp_type == IB_QPT_UD) {
+			if (conn_param)
+				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+							conn_param->private_data,
+							conn_param->private_data_len);
+			else
+				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+							NULL, 0);
+		} else {
+			if (conn_param)
+				ret = cma_accept_ib(id_priv, conn_param);
+			else
+				ret = cma_rep_recv(id_priv);
+		}
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_accept_iw(id_priv, conn_param);
@@ -3461,6 +3489,7 @@ static void __exit cma_cleanup(void)
 	idr_destroy(&tcp_ps);
 	idr_destroy(&udp_ps);
 	idr_destroy(&ipoib_ps);
+	idr_destroy(&ib_ps);
 }
 
 module_init(cma_init);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index bb3dad20bb83..2fe428bba54c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1597,6 +1597,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 				mad->mad_hdr.class_version].class;
 		if (!class)
 			goto out;
+		if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+		    IB_MGMT_MAX_METHODS)
+			goto out;
 		method = class->method_table[convert_mgmt_class(
 							mad->mad_hdr.mgmt_class)];
 		if (method)
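Note: mgmt_class arrives off the wire, so it has to be range-checked before being used as an index into the per-class method table. The pattern in miniature (sizes are stand-ins):

/* Illustration only: validate an untrusted index before array access. */
#include <stdio.h>

#define MAX_METHODS 128

static void *method_table[MAX_METHODS];

static void *lookup(unsigned int wire_class)
{
        if (wire_class >= MAX_METHODS)  /* reject out-of-range input */
                return NULL;
        return method_table[wire_class];
}

int main(void)
{
        printf("%p %p\n", lookup(5), lookup(200));      /* second is rejected */
        return 0;
}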
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ab7fc60aeb5d..c61bca30fd2d 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -186,17 +186,35 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 	if (ret)
 		return ret;
 
+	rate = (25 * attr.active_speed) / 10;
+
 	switch (attr.active_speed) {
-	case 2: speed = " DDR"; break;
-	case 4: speed = " QDR"; break;
+	case 2:
+		speed = " DDR";
+		break;
+	case 4:
+		speed = " QDR";
+		break;
+	case 8:
+		speed = " FDR10";
+		rate = 10;
+		break;
+	case 16:
+		speed = " FDR";
+		rate = 14;
+		break;
+	case 32:
+		speed = " EDR";
+		rate = 25;
+		break;
 	}
 
-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+	rate *= ib_width_enum_to_int(attr.active_width);
 	if (rate < 0)
 		return -EINVAL;
 
 	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
-		       rate / 10, rate % 10 ? ".5" : "",
+		       rate, (attr.active_speed == 1) ? ".5" : "",
 		       ib_width_enum_to_int(attr.active_width), speed);
 }
 
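Note: the active_speed encoding doubles per generation (1 = SDR at 2.5 Gb/s per lane, 2 = DDR, 4 = QDR), so (25 * speed) / 10 recovers whole Gb/s per lane; FDR10, FDR and EDR do not fit that progression, so the switch overrides rate directly before it is scaled by the link width. A runnable sketch of the computation with a stand-in width helper:

/* Illustration only: the rate arithmetic above.
 * E.g. QDR x4 -> 40 Gb/sec, FDR x4 -> 56 Gb/sec.
 */
#include <stdio.h>

static int width_to_int(int width)
{
        return width;   /* stand-in: assume the enum is already 1/4/8/12 */
}

static void show_rate(int active_speed, int width)
{
        int rate = (25 * active_speed) / 10;
        const char *speed = "";

        switch (active_speed) {
        case 2: speed = " DDR"; break;
        case 4: speed = " QDR"; break;
        case 8: speed = " FDR10"; rate = 10; break;
        case 16: speed = " FDR"; rate = 14; break;
        case 32: speed = " EDR"; rate = 25; break;
        }
        rate *= width_to_int(width);
        printf("%d%s Gb/sec (%dX%s)\n",
               rate, (active_speed == 1) ? ".5" : "", width, speed);
}

int main(void)
{
        show_rate(4, 4);        /* 40 Gb/sec (4X QDR) */
        show_rate(16, 4);       /* 56 Gb/sec (4X FDR) */
        return 0;
}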
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 08f948df8fa9..b8a0b4a7811b 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1122,7 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
 		return -EFAULT;
 
-	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
+	if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
 		return -EINVAL;
 
 	if (hdr.in + sizeof(hdr) > len)
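Note: hdr.cmd is unsigned, so the dropped "hdr.cmd < 0" test could never be true; the upper-bound comparison alone is the whole range check (the same change appears in ucma.c and user_mad.c below). In miniature:

/* Illustration only: a "< 0" test on an unsigned value is always
 * false, so the range check reduces to the upper bound.
 */
#include <assert.h>
#include <stdint.h>

#define TABLE_SIZE 24   /* stand-in for ARRAY_SIZE(ucm_cmd_table) */

static int cmd_valid(uint32_t cmd)
{
        return cmd < TABLE_SIZE;        /* cmd >= 0 holds by type */
}

int main(void)
{
        assert(cmd_valid(0));
        assert(!cmd_valid(TABLE_SIZE));
        assert(!cmd_valid((uint32_t)-1));  /* wraps to a huge value, rejected */
        return 0;
}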
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index e29f7e1dcca0..b37b0c02a7b9 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -277,7 +277,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	ucma_set_event_context(ctx, event, uevent);
 	uevent->resp.event = event->event;
 	uevent->resp.status = event->status;
-	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
+	if (cm_id->qp_type == IB_QPT_UD)
 		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
 	else
 		ucma_copy_conn_event(&uevent->resp.param.conn,
@@ -378,6 +378,9 @@ static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_
 	case RDMA_PS_IPOIB:
 		*qp_type = IB_QPT_UD;
 		return 0;
+	case RDMA_PS_IB:
+		*qp_type = cmd->qp_type;
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -1271,7 +1274,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
 		return -EFAULT;
 
-	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
 		return -EINVAL;
 
 	if (hdr.in + sizeof(hdr) > len)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index cc92137b3e02..71f0c0f7df94 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -137,7 +137,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	down_write(&current->mm->mmap_sem);
 
-	locked     = npages + current->mm->locked_vm;
+	locked     = npages + current->mm->pinned_vm;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -207,7 +207,7 @@ out:
 		__ib_umem_release(context->device, umem, 0);
 		kfree(umem);
 	} else
-		current->mm->locked_vm = locked;
+		current->mm->pinned_vm = locked;
 
 	up_write(&current->mm->mmap_sem);
 	if (vma_list)
@@ -223,7 +223,7 @@ static void ib_umem_account(struct work_struct *work)
 	struct ib_umem *umem = container_of(work, struct ib_umem, work);
 
 	down_write(&umem->mm->mmap_sem);
-	umem->mm->locked_vm -= umem->diff;
+	umem->mm->pinned_vm -= umem->diff;
 	up_write(&umem->mm->mmap_sem);
 	mmput(umem->mm);
 	kfree(umem);
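Note: pages pinned through get_user_pages() for memory registration are now accounted in mm->pinned_vm rather than mm->locked_vm, keeping RDMA registrations separate from mlock()ed memory while still charging them against RLIMIT_MEMLOCK. A userspace view of the same limit arithmetic:

/* Illustration only: the kernel compares (pages already pinned + new
 * pages) against RLIMIT_MEMLOCK expressed in pages.
 */
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        struct rlimit rl;
        long page_size = sysconf(_SC_PAGESIZE);
        size_t buf_len = 1 << 20;       /* a 1 MiB registration */
        unsigned long npages = (buf_len + page_size - 1) / page_size;

        if (getrlimit(RLIMIT_MEMLOCK, &rl))
                return 1;
        printf("need %lu pages, limit %lu pages\n",
               npages, (unsigned long)(rl.rlim_cur / page_size));
        return 0;
}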
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8d261b6ea5fe..07db22997e97 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -458,8 +458,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err;
 	}
 
-	if (packet->mad.hdr.id < 0 ||
-	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
+	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -703,7 +702,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 	mutex_lock(&file->port->file_mutex);
 	mutex_lock(&file->mutex);
 
-	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index a078e5624d22..5bcb2afd3dcb 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -76,6 +76,8 @@ struct ib_uverbs_device {
 	struct ib_device	       *ib_dev;
 	int				devnum;
 	struct cdev			cdev;
+	struct rb_root			xrcd_tree;
+	struct mutex			xrcd_tree_mutex;
 };
 
 struct ib_uverbs_event_file {
@@ -120,6 +122,16 @@ struct ib_uevent_object {
 	u32			events_reported;
 };
 
+struct ib_uxrcd_object {
+	struct ib_uobject	uobject;
+	atomic_t		refcnt;
+};
+
+struct ib_usrq_object {
+	struct ib_uevent_object	uevent;
+	struct ib_uxrcd_object *uxrcd;
+};
+
 struct ib_uqp_object {
 	struct ib_uevent_object	uevent;
 	struct list_head	mcast_list;
@@ -142,6 +154,7 @@ extern struct idr ib_uverbs_ah_idr;
 extern struct idr ib_uverbs_cq_idr;
 extern struct idr ib_uverbs_qp_idr;
 extern struct idr ib_uverbs_srq_idr;
+extern struct idr ib_uverbs_xrcd_idr;
 
 void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
 
@@ -161,6 +174,7 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_event_handler(struct ib_event_handler *handler,
 			     struct ib_event *event);
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
 
 #define IB_UVERBS_DECLARE_CMD(name)					\
 	ssize_t ib_uverbs_##name(struct ib_uverbs_file *file,		\
@@ -181,6 +195,7 @@ IB_UVERBS_DECLARE_CMD(poll_cq);
 IB_UVERBS_DECLARE_CMD(req_notify_cq);
 IB_UVERBS_DECLARE_CMD(destroy_cq);
 IB_UVERBS_DECLARE_CMD(create_qp);
+IB_UVERBS_DECLARE_CMD(open_qp);
 IB_UVERBS_DECLARE_CMD(query_qp);
 IB_UVERBS_DECLARE_CMD(modify_qp);
 IB_UVERBS_DECLARE_CMD(destroy_qp);
@@ -195,5 +210,8 @@ IB_UVERBS_DECLARE_CMD(create_srq);
 IB_UVERBS_DECLARE_CMD(modify_srq);
 IB_UVERBS_DECLARE_CMD(query_srq);
 IB_UVERBS_DECLARE_CMD(destroy_srq);
+IB_UVERBS_DECLARE_CMD(create_xsrq);
+IB_UVERBS_DECLARE_CMD(open_xrcd);
+IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index c42699285f8e..254f1649c734 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -47,6 +47,7 @@ static struct lock_class_key cq_lock_key;
 static struct lock_class_key qp_lock_key;
 static struct lock_class_key ah_lock_key;
 static struct lock_class_key srq_lock_key;
+static struct lock_class_key xrcd_lock_key;
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
@@ -255,6 +256,18 @@ static void put_srq_read(struct ib_srq *srq)
 	put_uobj_read(srq->uobject);
 }
 
+static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
+				     struct ib_uobject **uobj)
+{
+	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
+	return *uobj ? (*uobj)->object : NULL;
+}
+
+static void put_xrcd_read(struct ib_uobject *uobj)
+{
+	put_uobj_read(uobj);
+}
+
 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 			      const char __user *buf,
 			      int in_len, int out_len)
@@ -298,6 +311,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&ucontext->qp_list);
 	INIT_LIST_HEAD(&ucontext->srq_list);
 	INIT_LIST_HEAD(&ucontext->ah_list);
+	INIT_LIST_HEAD(&ucontext->xrcd_list);
 	ucontext->closing = 0;
 
 	resp.num_comp_vectors = file->device->num_comp_vectors;
@@ -579,6 +593,310 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
 	return in_len;
 }
 
+struct xrcd_table_entry {
+	struct rb_node  node;
+	struct ib_xrcd *xrcd;
+	struct inode   *inode;
+};
+
+static int xrcd_table_insert(struct ib_uverbs_device *dev,
+			     struct inode *inode,
+			     struct ib_xrcd *xrcd)
+{
+	struct xrcd_table_entry *entry, *scan;
+	struct rb_node **p = &dev->xrcd_tree.rb_node;
+	struct rb_node *parent = NULL;
+
+	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->xrcd  = xrcd;
+	entry->inode = inode;
+
+	while (*p) {
+		parent = *p;
+		scan = rb_entry(parent, struct xrcd_table_entry, node);
+
+		if (inode < scan->inode) {
+			p = &(*p)->rb_left;
+		} else if (inode > scan->inode) {
+			p = &(*p)->rb_right;
+		} else {
+			kfree(entry);
+			return -EEXIST;
+		}
+	}
+
+	rb_link_node(&entry->node, parent, p);
+	rb_insert_color(&entry->node, &dev->xrcd_tree);
+	igrab(inode);
+	return 0;
+}
+
+static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
+						  struct inode *inode)
+{
+	struct xrcd_table_entry *entry;
+	struct rb_node *p = dev->xrcd_tree.rb_node;
+
+	while (p) {
+		entry = rb_entry(p, struct xrcd_table_entry, node);
+
+		if (inode < entry->inode)
+			p = p->rb_left;
+		else if (inode > entry->inode)
+			p = p->rb_right;
+		else
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
+{
+	struct xrcd_table_entry *entry;
+
+	entry = xrcd_table_search(dev, inode);
+	if (!entry)
+		return NULL;
+
+	return entry->xrcd;
+}
+
+static void xrcd_table_delete(struct ib_uverbs_device *dev,
+			      struct inode *inode)
+{
+	struct xrcd_table_entry *entry;
+
+	entry = xrcd_table_search(dev, inode);
+	if (entry) {
+		iput(inode);
+		rb_erase(&entry->node, &dev->xrcd_tree);
+		kfree(entry);
+	}
+}
+
+ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
+			    const char __user *buf, int in_len,
+			    int out_len)
+{
+	struct ib_uverbs_open_xrcd	cmd;
+	struct ib_uverbs_open_xrcd_resp	resp;
+	struct ib_udata			udata;
+	struct ib_uxrcd_object	       *obj;
+	struct ib_xrcd		       *xrcd = NULL;
+	struct file		       *f = NULL;
+	struct inode		       *inode = NULL;
+	int				ret = 0;
+	int				new_xrcd = 0;
+
+	if (out_len < sizeof resp)
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, buf, sizeof cmd))
+		return -EFAULT;
+
+	INIT_UDATA(&udata, buf + sizeof cmd,
+		   (unsigned long) cmd.response + sizeof resp,
+		   in_len - sizeof cmd, out_len - sizeof resp);
+
+	mutex_lock(&file->device->xrcd_tree_mutex);
+
+	if (cmd.fd != -1) {
+		/* search for file descriptor */
+		f = fget(cmd.fd);
+		if (!f) {
+			ret = -EBADF;
+			goto err_tree_mutex_unlock;
+		}
+
+		inode = f->f_dentry->d_inode;
+		if (!inode) {
+			ret = -EBADF;
+			goto err_tree_mutex_unlock;
+		}
+
+		xrcd = find_xrcd(file->device, inode);
+		if (!xrcd && !(cmd.oflags & O_CREAT)) {
+			/* no file descriptor. Need CREATE flag */
+			ret = -EAGAIN;
+			goto err_tree_mutex_unlock;
+		}
+
+		if (xrcd && cmd.oflags & O_EXCL) {
+			ret = -EINVAL;
+			goto err_tree_mutex_unlock;
+		}
+	}
+
+	obj = kmalloc(sizeof *obj, GFP_KERNEL);
+	if (!obj) {
+		ret = -ENOMEM;
+		goto err_tree_mutex_unlock;
+	}
+
+	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
+
+	down_write(&obj->uobject.mutex);
+
+	if (!xrcd) {
+		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
+							file->ucontext, &udata);
+		if (IS_ERR(xrcd)) {
+			ret = PTR_ERR(xrcd);
+			goto err;
+		}
+
+		xrcd->inode  = inode;
+		xrcd->device = file->device->ib_dev;
+		atomic_set(&xrcd->usecnt, 0);
+		mutex_init(&xrcd->tgt_qp_mutex);
+		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
+		new_xrcd = 1;
+	}
+
+	atomic_set(&obj->refcnt, 0);
+	obj->uobject.object = xrcd;
+	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+	if (ret)
+		goto err_idr;
+
+	memset(&resp, 0, sizeof resp);
+	resp.xrcd_handle = obj->uobject.id;
+
+	if (inode) {
+		if (new_xrcd) {
+			/* create new inode/xrcd table entry */
+			ret = xrcd_table_insert(file->device, inode, xrcd);
+			if (ret)
+				goto err_insert_xrcd;
+		}
+		atomic_inc(&xrcd->usecnt);
+	}
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp)) {
+		ret = -EFAULT;
+		goto err_copy;
+	}
+
+	if (f)
+		fput(f);
+
+	mutex_lock(&file->mutex);
+	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
+	mutex_unlock(&file->mutex);
+
+	obj->uobject.live = 1;
+	up_write(&obj->uobject.mutex);
+
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+	return in_len;
+
+err_copy:
+	if (inode) {
+		if (new_xrcd)
+			xrcd_table_delete(file->device, inode);
+		atomic_dec(&xrcd->usecnt);
+	}
+
+err_insert_xrcd:
+	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+
+err_idr:
+	ib_dealloc_xrcd(xrcd);
+
+err:
+	put_uobj_write(&obj->uobject);
+
+err_tree_mutex_unlock:
+	if (f)
+		fput(f);
+
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+
+	return ret;
+}
+
+ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
+			     const char __user *buf, int in_len,
+			     int out_len)
+{
+	struct ib_uverbs_close_xrcd	cmd;
+	struct ib_uobject	       *uobj;
+	struct ib_xrcd		       *xrcd = NULL;
+	struct inode		       *inode = NULL;
+	struct ib_uxrcd_object	       *obj;
+	int				live;
+	int				ret = 0;
+
+	if (copy_from_user(&cmd, buf, sizeof cmd))
+		return -EFAULT;
+
+	mutex_lock(&file->device->xrcd_tree_mutex);
+	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
+	if (!uobj) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	xrcd  = uobj->object;
+	inode = xrcd->inode;
+	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
+	if (atomic_read(&obj->refcnt)) {
+		put_uobj_write(uobj);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
+		ret = ib_dealloc_xrcd(uobj->object);
+		if (!ret)
+			uobj->live = 0;
+	}
+
+	live = uobj->live;
+	if (inode && ret)
+		atomic_inc(&xrcd->usecnt);
+
+	put_uobj_write(uobj);
+
+	if (ret)
+		goto out;
+
+	if (inode && !live)
+		xrcd_table_delete(file->device, inode);
+
+	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
+	mutex_lock(&file->mutex);
+	list_del(&uobj->list);
+	mutex_unlock(&file->mutex);
+
+	put_uobj(uobj);
+	ret = in_len;
+
+out:
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+	return ret;
+}
+
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
+			    struct ib_xrcd *xrcd)
+{
+	struct inode *inode;
+
+	inode = xrcd->inode;
+	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
+		return;
+
+	ib_dealloc_xrcd(xrcd);
+
+	if (inode)
+		xrcd_table_delete(dev, inode);
+}
+
 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 			 const char __user *buf, int in_len,
 			 int out_len)
@@ -1052,9 +1370,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata                 udata;
 	struct ib_uqp_object           *obj;
-	struct ib_pd                   *pd;
-	struct ib_cq                   *scq, *rcq;
-	struct ib_srq                  *srq;
+	struct ib_device	       *device;
+	struct ib_pd                   *pd = NULL;
+	struct ib_xrcd		       *xrcd = NULL;
+	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
+	struct ib_cq                   *scq = NULL, *rcq = NULL;
+	struct ib_srq                  *srq = NULL;
 	struct ib_qp                   *qp;
 	struct ib_qp_init_attr          attr;
 	int ret;
@@ -1076,15 +1397,39 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
 	down_write(&obj->uevent.uobject.mutex);
 
-	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
-	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
-	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
-	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
-		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
+	if (cmd.qp_type == IB_QPT_XRC_TGT) {
+		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
+		if (!xrcd) {
+			ret = -EINVAL;
+			goto err_put;
+		}
+		device = xrcd->device;
+	} else {
+		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
+		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
+		if (!pd || !scq) {
+			ret = -EINVAL;
+			goto err_put;
+		}
 
-	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
-		ret = -EINVAL;
-		goto err_put;
+		if (cmd.qp_type == IB_QPT_XRC_INI) {
+			cmd.max_recv_wr = cmd.max_recv_sge = 0;
+		} else {
+			if (cmd.is_srq) {
+				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
+				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
+					ret = -EINVAL;
+					goto err_put;
+				}
+			}
+			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
+			       scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
+			if (!rcq) {
+				ret = -EINVAL;
+				goto err_put;
+			}
+		}
+		device = pd->device;
 	}
 
 	attr.event_handler = ib_uverbs_qp_event_handler;
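Note: the rewritten setup makes the three creation paths explicit: XRC TGT QPs are created from an XRCD handle alone (no PD, CQs or SRQ), XRC INI QPs keep a PD and send CQ but have their receive queue forced empty, and everything else follows the old PD/CQ/SRQ path. A userspace sketch of the two XRC variants, assuming the libibverbs extended API (ibv_create_qp_ex; the kernel's IB_QPT_XRC_INI/TGT correspond to IBV_QPT_XRC_SEND/RECV); error handling trimmed:

#include <infiniband/verbs.h>

struct ibv_qp *make_xrc_send_qp(struct ibv_context *ctx, struct ibv_pd *pd,
                                struct ibv_cq *cq)
{
        struct ibv_qp_init_attr_ex attr = {
                .qp_type   = IBV_QPT_XRC_SEND,  /* kernel: IB_QPT_XRC_INI */
                .send_cq   = cq,                /* no recv queue on this side */
                .comp_mask = IBV_QP_INIT_ATTR_PD,
                .pd        = pd,
                .cap       = { .max_send_wr = 16, .max_send_sge = 1 },
        };
        return ibv_create_qp_ex(ctx, &attr);
}

struct ibv_qp *make_xrc_recv_qp(struct ibv_context *ctx, struct ibv_xrcd *xrcd)
{
        struct ibv_qp_init_attr_ex attr = {
                .qp_type   = IBV_QPT_XRC_RECV,  /* kernel: IB_QPT_XRC_TGT */
                .comp_mask = IBV_QP_INIT_ATTR_XRCD,     /* no PD or CQs */
                .xrcd      = xrcd,
        };
        return ibv_create_qp_ex(ctx, &attr);
}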
@@ -1092,6 +1437,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	attr.send_cq       = scq;
 	attr.recv_cq       = rcq;
 	attr.srq           = srq;
+	attr.xrcd	   = xrcd;
 	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
 	attr.qp_type       = cmd.qp_type;
 	attr.create_flags  = 0;
@@ -1106,26 +1452,34 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 	INIT_LIST_HEAD(&obj->mcast_list);
 
-	qp = pd->device->create_qp(pd, &attr, &udata);
+	if (cmd.qp_type == IB_QPT_XRC_TGT)
+		qp = ib_create_qp(pd, &attr);
+	else
+		qp = device->create_qp(pd, &attr, &udata);
+
 	if (IS_ERR(qp)) {
 		ret = PTR_ERR(qp);
 		goto err_put;
 	}
 
-	qp->device     = pd->device;
-	qp->pd         = pd;
-	qp->send_cq    = attr.send_cq;
-	qp->recv_cq    = attr.recv_cq;
-	qp->srq        = attr.srq;
-	qp->uobject    = &obj->uevent.uobject;
-	qp->event_handler = attr.event_handler;
-	qp->qp_context = attr.qp_context;
-	qp->qp_type    = attr.qp_type;
-	atomic_inc(&pd->usecnt);
-	atomic_inc(&attr.send_cq->usecnt);
-	atomic_inc(&attr.recv_cq->usecnt);
-	if (attr.srq)
-		atomic_inc(&attr.srq->usecnt);
+	if (cmd.qp_type != IB_QPT_XRC_TGT) {
+		qp->real_qp	  = qp;
+		qp->device	  = device;
+		qp->pd		  = pd;
+		qp->send_cq	  = attr.send_cq;
+		qp->recv_cq	  = attr.recv_cq;
+		qp->srq		  = attr.srq;
+		qp->event_handler = attr.event_handler;
+		qp->qp_context	  = attr.qp_context;
+		qp->qp_type	  = attr.qp_type;
+		atomic_inc(&pd->usecnt);
+		atomic_inc(&attr.send_cq->usecnt);
+		if (attr.recv_cq)
+			atomic_inc(&attr.recv_cq->usecnt);
+		if (attr.srq)
+			atomic_inc(&attr.srq->usecnt);
+	}
+	qp->uobject = &obj->uevent.uobject;
 
 	obj->uevent.uobject.object = qp;
 	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
@@ -1147,9 +1501,13 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		goto err_copy;
 	}
 
-	put_pd_read(pd);
-	put_cq_read(scq);
-	if (rcq != scq)
+	if (xrcd)
+		put_xrcd_read(xrcd_uobj);
+	if (pd)
+		put_pd_read(pd);
+	if (scq)
+		put_cq_read(scq);
+	if (rcq && rcq != scq)
 		put_cq_read(rcq);
 	if (srq)
 		put_srq_read(srq);
@@ -1171,6 +1529,8 @@ err_destroy:
 	ib_destroy_qp(qp);
 
 err_put:
+	if (xrcd)
+		put_xrcd_read(xrcd_uobj);
 	if (pd)
 		put_pd_read(pd);
 	if (scq)
@@ -1184,6 +1544,98 @@ err_put: | |||
1184 | return ret; | 1544 | return ret; |
1185 | } | 1545 | } |
1186 | 1546 | ||
1547 | ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, | ||
1548 | const char __user *buf, int in_len, int out_len) | ||
1549 | { | ||
1550 | struct ib_uverbs_open_qp cmd; | ||
1551 | struct ib_uverbs_create_qp_resp resp; | ||
1552 | struct ib_udata udata; | ||
1553 | struct ib_uqp_object *obj; | ||
1554 | struct ib_xrcd *xrcd; | ||
1555 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
1556 | struct ib_qp *qp; | ||
1557 | struct ib_qp_open_attr attr; | ||
1558 | int ret; | ||
1559 | |||
1560 | if (out_len < sizeof resp) | ||
1561 | return -ENOSPC; | ||
1562 | |||
1563 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
1564 | return -EFAULT; | ||
1565 | |||
1566 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
1567 | (unsigned long) cmd.response + sizeof resp, | ||
1568 | in_len - sizeof cmd, out_len - sizeof resp); | ||
1569 | |||
1570 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | ||
1571 | if (!obj) | ||
1572 | return -ENOMEM; | ||
1573 | |||
1574 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); | ||
1575 | down_write(&obj->uevent.uobject.mutex); | ||
1576 | |||
1577 | xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj); | ||
1578 | if (!xrcd) { | ||
1579 | ret = -EINVAL; | ||
1580 | goto err_put; | ||
1581 | } | ||
1582 | |||
1583 | attr.event_handler = ib_uverbs_qp_event_handler; | ||
1584 | attr.qp_context = file; | ||
1585 | attr.qp_num = cmd.qpn; | ||
1586 | attr.qp_type = cmd.qp_type; | ||
1587 | |||
1588 | obj->uevent.events_reported = 0; | ||
1589 | INIT_LIST_HEAD(&obj->uevent.event_list); | ||
1590 | INIT_LIST_HEAD(&obj->mcast_list); | ||
1591 | |||
1592 | qp = ib_open_qp(xrcd, &attr); | ||
1593 | if (IS_ERR(qp)) { | ||
1594 | ret = PTR_ERR(qp); | ||
1595 | goto err_put; | ||
1596 | } | ||
1597 | |||
1598 | qp->uobject = &obj->uevent.uobject; | ||
1599 | |||
1600 | obj->uevent.uobject.object = qp; | ||
1601 | ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | ||
1602 | if (ret) | ||
1603 | goto err_destroy; | ||
1604 | |||
1605 | memset(&resp, 0, sizeof resp); | ||
1606 | resp.qpn = qp->qp_num; | ||
1607 | resp.qp_handle = obj->uevent.uobject.id; | ||
1608 | |||
1609 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | ||
1610 | &resp, sizeof resp)) { | ||
1611 | ret = -EFAULT; | ||
1612 | goto err_remove; | ||
1613 | } | ||
1614 | |||
1615 | put_xrcd_read(xrcd_uobj); | ||
1616 | |||
1617 | mutex_lock(&file->mutex); | ||
1618 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); | ||
1619 | mutex_unlock(&file->mutex); | ||
1620 | |||
1621 | obj->uevent.uobject.live = 1; | ||
1622 | |||
1623 | up_write(&obj->uevent.uobject.mutex); | ||
1624 | |||
1625 | return in_len; | ||
1626 | |||
1627 | err_remove: | ||
1628 | idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | ||
1629 | |||
1630 | err_destroy: | ||
1631 | ib_destroy_qp(qp); | ||
1632 | |||
1633 | err_put: | ||
1634 | put_xrcd_read(xrcd_uobj); | ||
1635 | put_uobj_write(&obj->uevent.uobject); | ||
1636 | return ret; | ||
1637 | } | ||
1638 | |||
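Stripped of the uobject bookkeeping, the handler above reduces to a small amount of verbs-level work. A minimal in-kernel sketch of the same flow follows; open_shared_tgt_qp and its arguments are illustrative, not part of this patch, and the XRCD plus the target QP number are assumed to arrive from elsewhere (e.g. exchanged out of band between processes):

	/* Attach to an existing XRC target QP through its XRCD. */
	static struct ib_qp *open_shared_tgt_qp(struct ib_xrcd *xrcd, u32 qpn,
						void (*handler)(struct ib_event *, void *),
						void *ctx)
	{
		struct ib_qp_open_attr attr = {
			.event_handler	= handler,
			.qp_context	= ctx,
			.qp_num		= qpn,
			.qp_type	= IB_QPT_XRC_TGT,	/* the only type ib_open_qp() accepts */
		};

		/* On success the returned qp is a per-consumer handle whose
		 * real_qp points at the shared target QP. */
		return ib_open_qp(xrcd, &attr);
	}
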
1187 | ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, | 1639 | ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, |
1188 | const char __user *buf, int in_len, | 1640 | const char __user *buf, int in_len, |
1189 | int out_len) | 1641 | int out_len) |
@@ -1284,6 +1736,20 @@ out: | |||
1284 | return ret ? ret : in_len; | 1736 | return ret ? ret : in_len; |
1285 | } | 1737 | } |
1286 | 1738 | ||
1739 | /* Strip attribute-mask bits that do not apply to the given QP type */ ||
1740 | static int modify_qp_mask(enum ib_qp_type qp_type, int mask) | ||
1741 | { | ||
1742 | switch (qp_type) { | ||
1743 | case IB_QPT_XRC_INI: | ||
1744 | return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); | ||
1745 | case IB_QPT_XRC_TGT: | ||
1746 | return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | | ||
1747 | IB_QP_RNR_RETRY); | ||
1748 | default: | ||
1749 | return mask; | ||
1750 | } | ||
1751 | } | ||
1752 | |||
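A concrete mask makes the helper's effect clear; this worked example is illustrative only:

	/* For an XRC initiator, responder-side attributes are dropped
	 * before the mask reaches the driver: */
	int mask = modify_qp_mask(IB_QPT_XRC_INI,
				  IB_QP_STATE | IB_QP_MAX_DEST_RD_ATOMIC |
				  IB_QP_MIN_RNR_TIMER);
	/* mask == IB_QP_STATE: max_dest_rd_atomic and min_rnr_timer only
	 * make sense on the responding (target) side. */
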
1287 | ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | 1753 | ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, |
1288 | const char __user *buf, int in_len, | 1754 | const char __user *buf, int in_len, |
1289 | int out_len) | 1755 | int out_len) |
@@ -1356,7 +1822,12 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | |||
1356 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; | 1822 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; |
1357 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; | 1823 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; |
1358 | 1824 | ||
1359 | ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); | 1825 | if (qp->real_qp == qp) { |
1826 | ret = qp->device->modify_qp(qp, attr, | ||
1827 | modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); | ||
1828 | } else { | ||
1829 | ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); | ||
1830 | } | ||
1360 | 1831 | ||
1361 | put_qp_read(qp); | 1832 | put_qp_read(qp); |
1362 | 1833 | ||
@@ -1553,7 +2024,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | |||
1553 | } | 2024 | } |
1554 | 2025 | ||
1555 | resp.bad_wr = 0; | 2026 | resp.bad_wr = 0; |
1556 | ret = qp->device->post_send(qp, wr, &bad_wr); | 2027 | ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); |
1557 | if (ret) | 2028 | if (ret) |
1558 | for (next = wr; next; next = next->next) { | 2029 | for (next = wr; next; next = next->next) { |
1559 | ++resp.bad_wr; | 2030 | ++resp.bad_wr; |
@@ -1691,7 +2162,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, | |||
1691 | goto out; | 2162 | goto out; |
1692 | 2163 | ||
1693 | resp.bad_wr = 0; | 2164 | resp.bad_wr = 0; |
1694 | ret = qp->device->post_recv(qp, wr, &bad_wr); | 2165 | ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); |
1695 | 2166 | ||
1696 | put_qp_read(qp); | 2167 | put_qp_read(qp); |
1697 | 2168 | ||
@@ -1975,107 +2446,199 @@ out_put: | |||
1975 | return ret ? ret : in_len; | 2446 | return ret ? ret : in_len; |
1976 | } | 2447 | } |
1977 | 2448 | ||
1978 | ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | 2449 | int __uverbs_create_xsrq(struct ib_uverbs_file *file, |
1979 | const char __user *buf, int in_len, | 2450 | struct ib_uverbs_create_xsrq *cmd, |
1980 | int out_len) | 2451 | struct ib_udata *udata) |
1981 | { | 2452 | { |
1982 | struct ib_uverbs_create_srq cmd; | ||
1983 | struct ib_uverbs_create_srq_resp resp; | 2453 | struct ib_uverbs_create_srq_resp resp; |
1984 | struct ib_udata udata; | 2454 | struct ib_usrq_object *obj; |
1985 | struct ib_uevent_object *obj; | ||
1986 | struct ib_pd *pd; | 2455 | struct ib_pd *pd; |
1987 | struct ib_srq *srq; | 2456 | struct ib_srq *srq; |
2457 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
1988 | struct ib_srq_init_attr attr; | 2458 | struct ib_srq_init_attr attr; |
1989 | int ret; | 2459 | int ret; |
1990 | 2460 | ||
1991 | if (out_len < sizeof resp) | ||
1992 | return -ENOSPC; | ||
1993 | |||
1994 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
1995 | return -EFAULT; | ||
1996 | |||
1997 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
1998 | (unsigned long) cmd.response + sizeof resp, | ||
1999 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2000 | |||
2001 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | 2461 | obj = kmalloc(sizeof *obj, GFP_KERNEL); |
2002 | if (!obj) | 2462 | if (!obj) |
2003 | return -ENOMEM; | 2463 | return -ENOMEM; |
2004 | 2464 | ||
2005 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); | 2465 | init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key); |
2006 | down_write(&obj->uobject.mutex); | 2466 | down_write(&obj->uevent.uobject.mutex); |
2007 | 2467 | ||
2008 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 2468 | pd = idr_read_pd(cmd->pd_handle, file->ucontext); |
2009 | if (!pd) { | 2469 | if (!pd) { |
2010 | ret = -EINVAL; | 2470 | ret = -EINVAL; |
2011 | goto err; | 2471 | goto err; |
2012 | } | 2472 | } |
2013 | 2473 | ||
2474 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2475 | attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0); | ||
2476 | if (!attr.ext.xrc.cq) { | ||
2477 | ret = -EINVAL; | ||
2478 | goto err_put_pd; | ||
2479 | } | ||
2480 | |||
2481 | attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); | ||
2482 | if (!attr.ext.xrc.xrcd) { | ||
2483 | ret = -EINVAL; | ||
2484 | goto err_put_cq; | ||
2485 | } | ||
2486 | |||
2487 | obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); | ||
2488 | atomic_inc(&obj->uxrcd->refcnt); | ||
2489 | } | ||
2490 | |||
2014 | attr.event_handler = ib_uverbs_srq_event_handler; | 2491 | attr.event_handler = ib_uverbs_srq_event_handler; |
2015 | attr.srq_context = file; | 2492 | attr.srq_context = file; |
2016 | attr.attr.max_wr = cmd.max_wr; | 2493 | attr.srq_type = cmd->srq_type; |
2017 | attr.attr.max_sge = cmd.max_sge; | 2494 | attr.attr.max_wr = cmd->max_wr; |
2018 | attr.attr.srq_limit = cmd.srq_limit; | 2495 | attr.attr.max_sge = cmd->max_sge; |
2496 | attr.attr.srq_limit = cmd->srq_limit; | ||
2019 | 2497 | ||
2020 | obj->events_reported = 0; | 2498 | obj->uevent.events_reported = 0; |
2021 | INIT_LIST_HEAD(&obj->event_list); | 2499 | INIT_LIST_HEAD(&obj->uevent.event_list); |
2022 | 2500 | ||
2023 | srq = pd->device->create_srq(pd, &attr, &udata); | 2501 | srq = pd->device->create_srq(pd, &attr, udata); |
2024 | if (IS_ERR(srq)) { | 2502 | if (IS_ERR(srq)) { |
2025 | ret = PTR_ERR(srq); | 2503 | ret = PTR_ERR(srq); |
2026 | goto err_put; | 2504 | goto err_put; |
2027 | } | 2505 | } |
2028 | 2506 | ||
2029 | srq->device = pd->device; | 2507 | srq->device = pd->device; |
2030 | srq->pd = pd; | 2508 | srq->pd = pd; |
2031 | srq->uobject = &obj->uobject; | 2509 | srq->srq_type = cmd->srq_type; |
2510 | srq->uobject = &obj->uevent.uobject; | ||
2032 | srq->event_handler = attr.event_handler; | 2511 | srq->event_handler = attr.event_handler; |
2033 | srq->srq_context = attr.srq_context; | 2512 | srq->srq_context = attr.srq_context; |
2513 | |||
2514 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2515 | srq->ext.xrc.cq = attr.ext.xrc.cq; | ||
2516 | srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; | ||
2517 | atomic_inc(&attr.ext.xrc.cq->usecnt); | ||
2518 | atomic_inc(&attr.ext.xrc.xrcd->usecnt); | ||
2519 | } | ||
2520 | |||
2034 | atomic_inc(&pd->usecnt); | 2521 | atomic_inc(&pd->usecnt); |
2035 | atomic_set(&srq->usecnt, 0); | 2522 | atomic_set(&srq->usecnt, 0); |
2036 | 2523 | ||
2037 | obj->uobject.object = srq; | 2524 | obj->uevent.uobject.object = srq; |
2038 | ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject); | 2525 | ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); |
2039 | if (ret) | 2526 | if (ret) |
2040 | goto err_destroy; | 2527 | goto err_destroy; |
2041 | 2528 | ||
2042 | memset(&resp, 0, sizeof resp); | 2529 | memset(&resp, 0, sizeof resp); |
2043 | resp.srq_handle = obj->uobject.id; | 2530 | resp.srq_handle = obj->uevent.uobject.id; |
2044 | resp.max_wr = attr.attr.max_wr; | 2531 | resp.max_wr = attr.attr.max_wr; |
2045 | resp.max_sge = attr.attr.max_sge; | 2532 | resp.max_sge = attr.attr.max_sge; |
2533 | if (cmd->srq_type == IB_SRQT_XRC) | ||
2534 | resp.srqn = srq->ext.xrc.srq_num; | ||
2046 | 2535 | ||
2047 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 2536 | if (copy_to_user((void __user *) (unsigned long) cmd->response, |
2048 | &resp, sizeof resp)) { | 2537 | &resp, sizeof resp)) { |
2049 | ret = -EFAULT; | 2538 | ret = -EFAULT; |
2050 | goto err_copy; | 2539 | goto err_copy; |
2051 | } | 2540 | } |
2052 | 2541 | ||
2542 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2543 | put_uobj_read(xrcd_uobj); | ||
2544 | put_cq_read(attr.ext.xrc.cq); | ||
2545 | } | ||
2053 | put_pd_read(pd); | 2546 | put_pd_read(pd); |
2054 | 2547 | ||
2055 | mutex_lock(&file->mutex); | 2548 | mutex_lock(&file->mutex); |
2056 | list_add_tail(&obj->uobject.list, &file->ucontext->srq_list); | 2549 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list); |
2057 | mutex_unlock(&file->mutex); | 2550 | mutex_unlock(&file->mutex); |
2058 | 2551 | ||
2059 | obj->uobject.live = 1; | 2552 | obj->uevent.uobject.live = 1; |
2060 | 2553 | ||
2061 | up_write(&obj->uobject.mutex); | 2554 | up_write(&obj->uevent.uobject.mutex); |
2062 | 2555 | ||
2063 | return in_len; | 2556 | return 0; |
2064 | 2557 | ||
2065 | err_copy: | 2558 | err_copy: |
2066 | idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject); | 2559 | idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); |
2067 | 2560 | ||
2068 | err_destroy: | 2561 | err_destroy: |
2069 | ib_destroy_srq(srq); | 2562 | ib_destroy_srq(srq); |
2070 | 2563 | ||
2071 | err_put: | 2564 | err_put: |
2565 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2566 | atomic_dec(&obj->uxrcd->refcnt); | ||
2567 | put_uobj_read(xrcd_uobj); | ||
2568 | } | ||
2569 | |||
2570 | err_put_cq: | ||
2571 | if (cmd->srq_type == IB_SRQT_XRC) | ||
2572 | put_cq_read(attr.ext.xrc.cq); | ||
2573 | |||
2574 | err_put_pd: | ||
2072 | put_pd_read(pd); | 2575 | put_pd_read(pd); |
2073 | 2576 | ||
2074 | err: | 2577 | err: |
2075 | put_uobj_write(&obj->uobject); | 2578 | put_uobj_write(&obj->uevent.uobject); |
2076 | return ret; | 2579 | return ret; |
2077 | } | 2580 | } |
2078 | 2581 | ||
2582 | ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | ||
2583 | const char __user *buf, int in_len, | ||
2584 | int out_len) | ||
2585 | { | ||
2586 | struct ib_uverbs_create_srq cmd; | ||
2587 | struct ib_uverbs_create_xsrq xcmd; | ||
2588 | struct ib_uverbs_create_srq_resp resp; | ||
2589 | struct ib_udata udata; | ||
2590 | int ret; | ||
2591 | |||
2592 | if (out_len < sizeof resp) | ||
2593 | return -ENOSPC; | ||
2594 | |||
2595 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
2596 | return -EFAULT; | ||
2597 | |||
2598 | xcmd.response = cmd.response; | ||
2599 | xcmd.user_handle = cmd.user_handle; | ||
2600 | xcmd.srq_type = IB_SRQT_BASIC; | ||
2601 | xcmd.pd_handle = cmd.pd_handle; | ||
2602 | xcmd.max_wr = cmd.max_wr; | ||
2603 | xcmd.max_sge = cmd.max_sge; | ||
2604 | xcmd.srq_limit = cmd.srq_limit; | ||
2605 | |||
2606 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
2607 | (unsigned long) cmd.response + sizeof resp, | ||
2608 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2609 | |||
2610 | ret = __uverbs_create_xsrq(file, &xcmd, &udata); | ||
2611 | if (ret) | ||
2612 | return ret; | ||
2613 | |||
2614 | return in_len; | ||
2615 | } | ||
2616 | |||
2617 | ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, | ||
2618 | const char __user *buf, int in_len, int out_len) | ||
2619 | { | ||
2620 | struct ib_uverbs_create_xsrq cmd; | ||
2621 | struct ib_uverbs_create_srq_resp resp; | ||
2622 | struct ib_udata udata; | ||
2623 | int ret; | ||
2624 | |||
2625 | if (out_len < sizeof resp) | ||
2626 | return -ENOSPC; | ||
2627 | |||
2628 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
2629 | return -EFAULT; | ||
2630 | |||
2631 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
2632 | (unsigned long) cmd.response + sizeof resp, | ||
2633 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2634 | |||
2635 | ret = __uverbs_create_xsrq(file, &cmd, &udata); | ||
2636 | if (ret) | ||
2637 | return ret; | ||
2638 | |||
2639 | return in_len; | ||
2640 | } | ||
2641 | |||
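__uverbs_create_xsrq() mirrors what a kernel consumer would do directly against ib_create_srq(). A minimal sketch, assuming pd, cq and xrcd were set up earlier; the event handler and illustrative queue sizes are placeholders:

	struct ib_srq_init_attr init_attr = {
		.srq_type = IB_SRQT_XRC,
		.attr	  = { .max_wr = 256, .max_sge = 1 },	/* illustrative sizes */
	};
	struct ib_srq *srq;

	init_attr.ext.xrc.cq   = cq;	/* CQ that will see XRC receive completions */
	init_attr.ext.xrc.xrcd = xrcd;

	srq = ib_create_srq(pd, &init_attr);
	if (IS_ERR(srq))
		return PTR_ERR(srq);
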
2079 | ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | 2642 | ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, |
2080 | const char __user *buf, int in_len, | 2643 | const char __user *buf, int in_len, |
2081 | int out_len) | 2644 | int out_len) |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 56898b6578a4..879636746373 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -72,6 +72,7 @@ DEFINE_IDR(ib_uverbs_ah_idr); | |||
72 | DEFINE_IDR(ib_uverbs_cq_idr); | 72 | DEFINE_IDR(ib_uverbs_cq_idr); |
73 | DEFINE_IDR(ib_uverbs_qp_idr); | 73 | DEFINE_IDR(ib_uverbs_qp_idr); |
74 | DEFINE_IDR(ib_uverbs_srq_idr); | 74 | DEFINE_IDR(ib_uverbs_srq_idr); |
75 | DEFINE_IDR(ib_uverbs_xrcd_idr); | ||
75 | 76 | ||
76 | static DEFINE_SPINLOCK(map_lock); | 77 | static DEFINE_SPINLOCK(map_lock); |
77 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); | 78 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); |
@@ -107,6 +108,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, | |||
107 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, | 108 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, |
108 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, | 109 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, |
109 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, | 110 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, |
111 | [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd, | ||
112 | [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, | ||
113 | [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, | ||
114 | [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp | ||
110 | }; | 115 | }; |
111 | 116 | ||
112 | static void ib_uverbs_add_one(struct ib_device *device); | 117 | static void ib_uverbs_add_one(struct ib_device *device); |
@@ -202,8 +207,12 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
202 | container_of(uobj, struct ib_uqp_object, uevent.uobject); | 207 | container_of(uobj, struct ib_uqp_object, uevent.uobject); |
203 | 208 | ||
204 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); | 209 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); |
205 | ib_uverbs_detach_umcast(qp, uqp); | 210 | if (qp != qp->real_qp) { |
206 | ib_destroy_qp(qp); | 211 | ib_close_qp(qp); |
212 | } else { | ||
213 | ib_uverbs_detach_umcast(qp, uqp); | ||
214 | ib_destroy_qp(qp); | ||
215 | } | ||
207 | ib_uverbs_release_uevent(file, &uqp->uevent); | 216 | ib_uverbs_release_uevent(file, &uqp->uevent); |
208 | kfree(uqp); | 217 | kfree(uqp); |
209 | } | 218 | } |
@@ -241,6 +250,18 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
241 | kfree(uobj); | 250 | kfree(uobj); |
242 | } | 251 | } |
243 | 252 | ||
253 | mutex_lock(&file->device->xrcd_tree_mutex); | ||
254 | list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) { | ||
255 | struct ib_xrcd *xrcd = uobj->object; | ||
256 | struct ib_uxrcd_object *uxrcd = | ||
257 | container_of(uobj, struct ib_uxrcd_object, uobject); | ||
258 | |||
259 | idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); | ||
260 | ib_uverbs_dealloc_xrcd(file->device, xrcd); | ||
261 | kfree(uxrcd); | ||
262 | } | ||
263 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
264 | |||
244 | list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { | 265 | list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { |
245 | struct ib_pd *pd = uobj->object; | 266 | struct ib_pd *pd = uobj->object; |
246 | 267 | ||
@@ -557,8 +578,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
557 | if (hdr.in_words * 4 != count) | 578 | if (hdr.in_words * 4 != count) |
558 | return -EINVAL; | 579 | return -EINVAL; |
559 | 580 | ||
560 | if (hdr.command < 0 || | 581 | if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || |
561 | hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || | ||
562 | !uverbs_cmd_table[hdr.command]) | 582 | !uverbs_cmd_table[hdr.command]) |
563 | return -EINVAL; | 583 | return -EINVAL; |
564 | 584 | ||
@@ -741,6 +761,8 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
741 | 761 | ||
742 | kref_init(&uverbs_dev->ref); | 762 | kref_init(&uverbs_dev->ref); |
743 | init_completion(&uverbs_dev->comp); | 763 | init_completion(&uverbs_dev->comp); |
764 | uverbs_dev->xrcd_tree = RB_ROOT; | ||
765 | mutex_init(&uverbs_dev->xrcd_tree_mutex); | ||
744 | 766 | ||
745 | spin_lock(&map_lock); | 767 | spin_lock(&map_lock); |
746 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); | 768 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a8923ffc6459..602b1bd723a9 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/err.h> | 40 | #include <linux/err.h> |
41 | #include <linux/export.h> | 41 | #include <linux/export.h> |
42 | #include <linux/string.h> | 42 | #include <linux/string.h> |
43 | #include <linux/slab.h> | ||
43 | 44 | ||
44 | #include <rdma/ib_verbs.h> | 45 | #include <rdma/ib_verbs.h> |
45 | #include <rdma/ib_cache.h> | 46 | #include <rdma/ib_cache.h> |
@@ -78,6 +79,31 @@ enum ib_rate mult_to_ib_rate(int mult) | |||
78 | } | 79 | } |
79 | EXPORT_SYMBOL(mult_to_ib_rate); | 80 | EXPORT_SYMBOL(mult_to_ib_rate); |
80 | 81 | ||
82 | int ib_rate_to_mbps(enum ib_rate rate) | ||
83 | { | ||
84 | switch (rate) { | ||
85 | case IB_RATE_2_5_GBPS: return 2500; | ||
86 | case IB_RATE_5_GBPS: return 5000; | ||
87 | case IB_RATE_10_GBPS: return 10000; | ||
88 | case IB_RATE_20_GBPS: return 20000; | ||
89 | case IB_RATE_30_GBPS: return 30000; | ||
90 | case IB_RATE_40_GBPS: return 40000; | ||
91 | case IB_RATE_60_GBPS: return 60000; | ||
92 | case IB_RATE_80_GBPS: return 80000; | ||
93 | case IB_RATE_120_GBPS: return 120000; | ||
94 | case IB_RATE_14_GBPS: return 14062; | ||
95 | case IB_RATE_56_GBPS: return 56250; | ||
96 | case IB_RATE_112_GBPS: return 112500; | ||
97 | case IB_RATE_168_GBPS: return 168750; | ||
98 | case IB_RATE_25_GBPS: return 25781; | ||
99 | case IB_RATE_100_GBPS: return 103125; | ||
100 | case IB_RATE_200_GBPS: return 206250; | ||
101 | case IB_RATE_300_GBPS: return 309375; | ||
102 | default: return -1; | ||
103 | } | ||
104 | } | ||
105 | EXPORT_SYMBOL(ib_rate_to_mbps); | ||
106 | |||
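The FDR/EDR signalling rates are not integer multiples of 2.5 Gb/s (a 14.0625 Gb/s lane, for instance), which is why this table exists alongside the mult-based conversions. A usage sketch; the helper and its fallback policy are invented for illustration:

	static int rate_to_mbps_or_sdr(enum ib_rate rate)
	{
		int mbps = ib_rate_to_mbps(rate);	/* e.g. IB_RATE_56_GBPS -> 56250 (4x FDR) */

		return mbps > 0 ? mbps : 2500;		/* fall back to 1x SDR on unknown rates */
	}
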
81 | enum rdma_transport_type | 107 | enum rdma_transport_type |
82 | rdma_node_get_transport(enum rdma_node_type node_type) | 108 | rdma_node_get_transport(enum rdma_node_type node_type) |
83 | { | 109 | { |
@@ -251,6 +277,13 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd, | |||
251 | srq->uobject = NULL; | 277 | srq->uobject = NULL; |
252 | srq->event_handler = srq_init_attr->event_handler; | 278 | srq->event_handler = srq_init_attr->event_handler; |
253 | srq->srq_context = srq_init_attr->srq_context; | 279 | srq->srq_context = srq_init_attr->srq_context; |
280 | srq->srq_type = srq_init_attr->srq_type; | ||
281 | if (srq->srq_type == IB_SRQT_XRC) { | ||
282 | srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; | ||
283 | srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; | ||
284 | atomic_inc(&srq->ext.xrc.xrcd->usecnt); | ||
285 | atomic_inc(&srq->ext.xrc.cq->usecnt); | ||
286 | } | ||
254 | atomic_inc(&pd->usecnt); | 287 | atomic_inc(&pd->usecnt); |
255 | atomic_set(&srq->usecnt, 0); | 288 | atomic_set(&srq->usecnt, 0); |
256 | } | 289 | } |
@@ -280,16 +313,29 @@ EXPORT_SYMBOL(ib_query_srq); | |||
280 | int ib_destroy_srq(struct ib_srq *srq) | 313 | int ib_destroy_srq(struct ib_srq *srq) |
281 | { | 314 | { |
282 | struct ib_pd *pd; | 315 | struct ib_pd *pd; |
316 | enum ib_srq_type srq_type; | ||
317 | struct ib_xrcd *uninitialized_var(xrcd); | ||
318 | struct ib_cq *uninitialized_var(cq); | ||
283 | int ret; | 319 | int ret; |
284 | 320 | ||
285 | if (atomic_read(&srq->usecnt)) | 321 | if (atomic_read(&srq->usecnt)) |
286 | return -EBUSY; | 322 | return -EBUSY; |
287 | 323 | ||
288 | pd = srq->pd; | 324 | pd = srq->pd; |
325 | srq_type = srq->srq_type; | ||
326 | if (srq_type == IB_SRQT_XRC) { | ||
327 | xrcd = srq->ext.xrc.xrcd; | ||
328 | cq = srq->ext.xrc.cq; | ||
329 | } | ||
289 | 330 | ||
290 | ret = srq->device->destroy_srq(srq); | 331 | ret = srq->device->destroy_srq(srq); |
291 | if (!ret) | 332 | if (!ret) { |
292 | atomic_dec(&pd->usecnt); | 333 | atomic_dec(&pd->usecnt); |
334 | if (srq_type == IB_SRQT_XRC) { | ||
335 | atomic_dec(&xrcd->usecnt); | ||
336 | atomic_dec(&cq->usecnt); | ||
337 | } | ||
338 | } | ||
293 | 339 | ||
294 | return ret; | 340 | return ret; |
295 | } | 341 | } |
@@ -297,28 +343,123 @@ EXPORT_SYMBOL(ib_destroy_srq); | |||
297 | 343 | ||
298 | /* Queue pairs */ | 344 | /* Queue pairs */ |
299 | 345 | ||
346 | static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) | ||
347 | { | ||
348 | struct ib_qp *qp = context; | ||
349 | |||
350 | list_for_each_entry(event->element.qp, &qp->open_list, open_list) | ||
351 | event->element.qp->event_handler(event, event->element.qp->qp_context); | ||
352 | } | ||
353 | |||
354 | static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) | ||
355 | { | ||
356 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
357 | list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); | ||
358 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
359 | } | ||
360 | |||
361 | static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, | ||
362 | void (*event_handler)(struct ib_event *, void *), | ||
363 | void *qp_context) | ||
364 | { | ||
365 | struct ib_qp *qp; | ||
366 | unsigned long flags; | ||
367 | |||
368 | qp = kzalloc(sizeof *qp, GFP_KERNEL); | ||
369 | if (!qp) | ||
370 | return ERR_PTR(-ENOMEM); | ||
371 | |||
372 | qp->real_qp = real_qp; | ||
373 | atomic_inc(&real_qp->usecnt); | ||
374 | qp->device = real_qp->device; | ||
375 | qp->event_handler = event_handler; | ||
376 | qp->qp_context = qp_context; | ||
377 | qp->qp_num = real_qp->qp_num; | ||
378 | qp->qp_type = real_qp->qp_type; | ||
379 | |||
380 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | ||
381 | list_add(&qp->open_list, &real_qp->open_list); | ||
382 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | ||
383 | |||
384 | return qp; | ||
385 | } | ||
386 | |||
387 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, | ||
388 | struct ib_qp_open_attr *qp_open_attr) | ||
389 | { | ||
390 | struct ib_qp *qp, *real_qp; | ||
391 | |||
392 | if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) | ||
393 | return ERR_PTR(-EINVAL); | ||
394 | |||
395 | qp = ERR_PTR(-EINVAL); | ||
396 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
397 | list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { | ||
398 | if (real_qp->qp_num == qp_open_attr->qp_num) { | ||
399 | qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, | ||
400 | qp_open_attr->qp_context); | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
405 | return qp; | ||
406 | } | ||
407 | EXPORT_SYMBOL(ib_open_qp); | ||
408 | |||
300 | struct ib_qp *ib_create_qp(struct ib_pd *pd, | 409 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
301 | struct ib_qp_init_attr *qp_init_attr) | 410 | struct ib_qp_init_attr *qp_init_attr) |
302 | { | 411 | { |
303 | struct ib_qp *qp; | 412 | struct ib_qp *qp, *real_qp; |
413 | struct ib_device *device; | ||
304 | 414 | ||
305 | qp = pd->device->create_qp(pd, qp_init_attr, NULL); | 415 | device = pd ? pd->device : qp_init_attr->xrcd->device; |
416 | qp = device->create_qp(pd, qp_init_attr, NULL); | ||
306 | 417 | ||
307 | if (!IS_ERR(qp)) { | 418 | if (!IS_ERR(qp)) { |
308 | qp->device = pd->device; | 419 | qp->device = device; |
309 | qp->pd = pd; | 420 | qp->real_qp = qp; |
310 | qp->send_cq = qp_init_attr->send_cq; | 421 | qp->uobject = NULL; |
311 | qp->recv_cq = qp_init_attr->recv_cq; | 422 | qp->qp_type = qp_init_attr->qp_type; |
312 | qp->srq = qp_init_attr->srq; | 423 | |
313 | qp->uobject = NULL; | 424 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { |
314 | qp->event_handler = qp_init_attr->event_handler; | 425 | qp->event_handler = __ib_shared_qp_event_handler; |
315 | qp->qp_context = qp_init_attr->qp_context; | 426 | qp->qp_context = qp; |
316 | qp->qp_type = qp_init_attr->qp_type; | 427 | qp->pd = NULL; |
317 | atomic_inc(&pd->usecnt); | 428 | qp->send_cq = qp->recv_cq = NULL; |
318 | atomic_inc(&qp_init_attr->send_cq->usecnt); | 429 | qp->srq = NULL; |
319 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | 430 | qp->xrcd = qp_init_attr->xrcd; |
320 | if (qp_init_attr->srq) | 431 | atomic_inc(&qp_init_attr->xrcd->usecnt); |
321 | atomic_inc(&qp_init_attr->srq->usecnt); | 432 | INIT_LIST_HEAD(&qp->open_list); |
433 | atomic_set(&qp->usecnt, 0); | ||
434 | |||
435 | real_qp = qp; | ||
436 | qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, | ||
437 | qp_init_attr->qp_context); | ||
438 | if (!IS_ERR(qp)) | ||
439 | __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); | ||
440 | else | ||
441 | real_qp->device->destroy_qp(real_qp); | ||
442 | } else { | ||
443 | qp->event_handler = qp_init_attr->event_handler; | ||
444 | qp->qp_context = qp_init_attr->qp_context; | ||
445 | if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { | ||
446 | qp->recv_cq = NULL; | ||
447 | qp->srq = NULL; | ||
448 | } else { | ||
449 | qp->recv_cq = qp_init_attr->recv_cq; | ||
450 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | ||
451 | qp->srq = qp_init_attr->srq; | ||
452 | if (qp->srq) | ||
453 | atomic_inc(&qp_init_attr->srq->usecnt); | ||
454 | } | ||
455 | |||
456 | qp->pd = pd; | ||
457 | qp->send_cq = qp_init_attr->send_cq; | ||
458 | qp->xrcd = NULL; | ||
459 | |||
460 | atomic_inc(&pd->usecnt); | ||
461 | atomic_inc(&qp_init_attr->send_cq->usecnt); | ||
462 | } | ||
322 | } | 463 | } |
323 | 464 | ||
324 | return qp; | 465 | return qp; |
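On the creation side, an XRC target QP goes through the same ib_create_qp() entry point, with the XRCD standing in for the PD, CQs and SRQ. A minimal sketch, assuming an xrcd from ib_alloc_xrcd(); the event handler and context names are illustrative:

	struct ib_qp_init_attr init_attr = {
		.qp_type	= IB_QPT_XRC_TGT,
		.xrcd		= xrcd,			/* supplies the device; pd may be NULL */
		.event_handler	= my_qp_event_handler,	/* illustrative */
		.qp_context	= my_ctx,
	};
	struct ib_qp *qp;

	qp = ib_create_qp(NULL, &init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);
	/* qp is already an opened handle; the real QP sits on xrcd->tgt_qp_list
	 * so other consumers can ib_open_qp() it by QP number. */
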
@@ -327,8 +468,8 @@ EXPORT_SYMBOL(ib_create_qp); | |||
327 | 468 | ||
328 | static const struct { | 469 | static const struct { |
329 | int valid; | 470 | int valid; |
330 | enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1]; | 471 | enum ib_qp_attr_mask req_param[IB_QPT_MAX]; |
331 | enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1]; | 472 | enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; |
332 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { | 473 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
333 | [IB_QPS_RESET] = { | 474 | [IB_QPS_RESET] = { |
334 | [IB_QPS_RESET] = { .valid = 1 }, | 475 | [IB_QPS_RESET] = { .valid = 1 }, |
@@ -344,6 +485,12 @@ static const struct { | |||
344 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | 485 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
345 | IB_QP_PORT | | 486 | IB_QP_PORT | |
346 | IB_QP_ACCESS_FLAGS), | 487 | IB_QP_ACCESS_FLAGS), |
488 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | | ||
489 | IB_QP_PORT | | ||
490 | IB_QP_ACCESS_FLAGS), | ||
491 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | ||
492 | IB_QP_PORT | | ||
493 | IB_QP_ACCESS_FLAGS), | ||
347 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 494 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
348 | IB_QP_QKEY), | 495 | IB_QP_QKEY), |
349 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 496 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -366,6 +513,12 @@ static const struct { | |||
366 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | 513 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
367 | IB_QP_PORT | | 514 | IB_QP_PORT | |
368 | IB_QP_ACCESS_FLAGS), | 515 | IB_QP_ACCESS_FLAGS), |
516 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | | ||
517 | IB_QP_PORT | | ||
518 | IB_QP_ACCESS_FLAGS), | ||
519 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | ||
520 | IB_QP_PORT | | ||
521 | IB_QP_ACCESS_FLAGS), | ||
369 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 522 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
370 | IB_QP_QKEY), | 523 | IB_QP_QKEY), |
371 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 524 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -385,6 +538,16 @@ static const struct { | |||
385 | IB_QP_RQ_PSN | | 538 | IB_QP_RQ_PSN | |
386 | IB_QP_MAX_DEST_RD_ATOMIC | | 539 | IB_QP_MAX_DEST_RD_ATOMIC | |
387 | IB_QP_MIN_RNR_TIMER), | 540 | IB_QP_MIN_RNR_TIMER), |
541 | [IB_QPT_XRC_INI] = (IB_QP_AV | | ||
542 | IB_QP_PATH_MTU | | ||
543 | IB_QP_DEST_QPN | | ||
544 | IB_QP_RQ_PSN), | ||
545 | [IB_QPT_XRC_TGT] = (IB_QP_AV | | ||
546 | IB_QP_PATH_MTU | | ||
547 | IB_QP_DEST_QPN | | ||
548 | IB_QP_RQ_PSN | | ||
549 | IB_QP_MAX_DEST_RD_ATOMIC | | ||
550 | IB_QP_MIN_RNR_TIMER), | ||
388 | }, | 551 | }, |
389 | .opt_param = { | 552 | .opt_param = { |
390 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | 553 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
@@ -395,6 +558,12 @@ static const struct { | |||
395 | [IB_QPT_RC] = (IB_QP_ALT_PATH | | 558 | [IB_QPT_RC] = (IB_QP_ALT_PATH | |
396 | IB_QP_ACCESS_FLAGS | | 559 | IB_QP_ACCESS_FLAGS | |
397 | IB_QP_PKEY_INDEX), | 560 | IB_QP_PKEY_INDEX), |
561 | [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | | ||
562 | IB_QP_ACCESS_FLAGS | | ||
563 | IB_QP_PKEY_INDEX), | ||
564 | [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | | ||
565 | IB_QP_ACCESS_FLAGS | | ||
566 | IB_QP_PKEY_INDEX), | ||
398 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 567 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
399 | IB_QP_QKEY), | 568 | IB_QP_QKEY), |
400 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 569 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -415,6 +584,13 @@ static const struct { | |||
415 | IB_QP_RNR_RETRY | | 584 | IB_QP_RNR_RETRY | |
416 | IB_QP_SQ_PSN | | 585 | IB_QP_SQ_PSN | |
417 | IB_QP_MAX_QP_RD_ATOMIC), | 586 | IB_QP_MAX_QP_RD_ATOMIC), |
587 | [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | | ||
588 | IB_QP_RETRY_CNT | | ||
589 | IB_QP_RNR_RETRY | | ||
590 | IB_QP_SQ_PSN | | ||
591 | IB_QP_MAX_QP_RD_ATOMIC), | ||
592 | [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | | ||
593 | IB_QP_SQ_PSN), | ||
418 | [IB_QPT_SMI] = IB_QP_SQ_PSN, | 594 | [IB_QPT_SMI] = IB_QP_SQ_PSN, |
419 | [IB_QPT_GSI] = IB_QP_SQ_PSN, | 595 | [IB_QPT_GSI] = IB_QP_SQ_PSN, |
420 | }, | 596 | }, |
@@ -430,6 +606,15 @@ static const struct { | |||
430 | IB_QP_ACCESS_FLAGS | | 606 | IB_QP_ACCESS_FLAGS | |
431 | IB_QP_MIN_RNR_TIMER | | 607 | IB_QP_MIN_RNR_TIMER | |
432 | IB_QP_PATH_MIG_STATE), | 608 | IB_QP_PATH_MIG_STATE), |
609 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
610 | IB_QP_ALT_PATH | | ||
611 | IB_QP_ACCESS_FLAGS | | ||
612 | IB_QP_PATH_MIG_STATE), | ||
613 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
614 | IB_QP_ALT_PATH | | ||
615 | IB_QP_ACCESS_FLAGS | | ||
616 | IB_QP_MIN_RNR_TIMER | | ||
617 | IB_QP_PATH_MIG_STATE), | ||
433 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 618 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
434 | IB_QP_QKEY), | 619 | IB_QP_QKEY), |
435 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 620 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -454,6 +639,15 @@ static const struct { | |||
454 | IB_QP_ALT_PATH | | 639 | IB_QP_ALT_PATH | |
455 | IB_QP_PATH_MIG_STATE | | 640 | IB_QP_PATH_MIG_STATE | |
456 | IB_QP_MIN_RNR_TIMER), | 641 | IB_QP_MIN_RNR_TIMER), |
642 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
643 | IB_QP_ACCESS_FLAGS | | ||
644 | IB_QP_ALT_PATH | | ||
645 | IB_QP_PATH_MIG_STATE), | ||
646 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
647 | IB_QP_ACCESS_FLAGS | | ||
648 | IB_QP_ALT_PATH | | ||
649 | IB_QP_PATH_MIG_STATE | | ||
650 | IB_QP_MIN_RNR_TIMER), | ||
457 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 651 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
458 | IB_QP_QKEY), | 652 | IB_QP_QKEY), |
459 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 653 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -466,6 +660,8 @@ static const struct { | |||
466 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 660 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
467 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 661 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
468 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 662 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
663 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, | ||
664 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* unverified for XRC TGT */ ||
469 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 665 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
470 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY | 666 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY |
471 | } | 667 | } |
@@ -488,6 +684,15 @@ static const struct { | |||
488 | IB_QP_ACCESS_FLAGS | | 684 | IB_QP_ACCESS_FLAGS | |
489 | IB_QP_MIN_RNR_TIMER | | 685 | IB_QP_MIN_RNR_TIMER | |
490 | IB_QP_PATH_MIG_STATE), | 686 | IB_QP_PATH_MIG_STATE), |
687 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
688 | IB_QP_ALT_PATH | | ||
689 | IB_QP_ACCESS_FLAGS | | ||
690 | IB_QP_PATH_MIG_STATE), | ||
691 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
692 | IB_QP_ALT_PATH | | ||
693 | IB_QP_ACCESS_FLAGS | | ||
694 | IB_QP_MIN_RNR_TIMER | | ||
695 | IB_QP_PATH_MIG_STATE), | ||
491 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 696 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
492 | IB_QP_QKEY), | 697 | IB_QP_QKEY), |
493 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 698 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -516,6 +721,25 @@ static const struct { | |||
516 | IB_QP_PKEY_INDEX | | 721 | IB_QP_PKEY_INDEX | |
517 | IB_QP_MIN_RNR_TIMER | | 722 | IB_QP_MIN_RNR_TIMER | |
518 | IB_QP_PATH_MIG_STATE), | 723 | IB_QP_PATH_MIG_STATE), |
724 | [IB_QPT_XRC_INI] = (IB_QP_PORT | | ||
725 | IB_QP_AV | | ||
726 | IB_QP_TIMEOUT | | ||
727 | IB_QP_RETRY_CNT | | ||
728 | IB_QP_RNR_RETRY | | ||
729 | IB_QP_MAX_QP_RD_ATOMIC | | ||
730 | IB_QP_ALT_PATH | | ||
731 | IB_QP_ACCESS_FLAGS | | ||
732 | IB_QP_PKEY_INDEX | | ||
733 | IB_QP_PATH_MIG_STATE), | ||
734 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | | ||
735 | IB_QP_AV | | ||
736 | IB_QP_TIMEOUT | | ||
737 | IB_QP_MAX_DEST_RD_ATOMIC | | ||
738 | IB_QP_ALT_PATH | | ||
739 | IB_QP_ACCESS_FLAGS | | ||
740 | IB_QP_PKEY_INDEX | | ||
741 | IB_QP_MIN_RNR_TIMER | | ||
742 | IB_QP_PATH_MIG_STATE), | ||
519 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 743 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
520 | IB_QP_QKEY), | 744 | IB_QP_QKEY), |
521 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 745 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -580,7 +804,7 @@ int ib_modify_qp(struct ib_qp *qp, | |||
580 | struct ib_qp_attr *qp_attr, | 804 | struct ib_qp_attr *qp_attr, |
581 | int qp_attr_mask) | 805 | int qp_attr_mask) |
582 | { | 806 | { |
583 | return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); | 807 | return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); |
584 | } | 808 | } |
585 | EXPORT_SYMBOL(ib_modify_qp); | 809 | EXPORT_SYMBOL(ib_modify_qp); |
586 | 810 | ||
@@ -590,11 +814,59 @@ int ib_query_qp(struct ib_qp *qp, | |||
590 | struct ib_qp_init_attr *qp_init_attr) | 814 | struct ib_qp_init_attr *qp_init_attr) |
591 | { | 815 | { |
592 | return qp->device->query_qp ? | 816 | return qp->device->query_qp ? |
593 | qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : | 817 | qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : |
594 | -ENOSYS; | 818 | -ENOSYS; |
595 | } | 819 | } |
596 | EXPORT_SYMBOL(ib_query_qp); | 820 | EXPORT_SYMBOL(ib_query_qp); |
597 | 821 | ||
822 | int ib_close_qp(struct ib_qp *qp) | ||
823 | { | ||
824 | struct ib_qp *real_qp; | ||
825 | unsigned long flags; | ||
826 | |||
827 | real_qp = qp->real_qp; | ||
828 | if (real_qp == qp) | ||
829 | return -EINVAL; | ||
830 | |||
831 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | ||
832 | list_del(&qp->open_list); | ||
833 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | ||
834 | |||
835 | atomic_dec(&real_qp->usecnt); | ||
836 | kfree(qp); | ||
837 | |||
838 | return 0; | ||
839 | } | ||
840 | EXPORT_SYMBOL(ib_close_qp); | ||
841 | |||
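ib_close_qp() detaches only one opener's handle; the teardown-aware counterpart, reached through ib_destroy_qp(), is __ib_destroy_shared_qp() below. A sketch of the choice a consumer faces (the helper name is invented):

	static int drop_shared_qp_handle(struct ib_qp *qp, bool teardown)
	{
		if (teardown)
			return ib_destroy_qp(qp);	/* close this handle and destroy the
							 * real QP once no opener remains */
		return ib_close_qp(qp);			/* close this handle only; returns
							 * -EINVAL on the real QP itself */
	}
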
842 | static int __ib_destroy_shared_qp(struct ib_qp *qp) | ||
843 | { | ||
844 | struct ib_xrcd *xrcd; | ||
845 | struct ib_qp *real_qp; | ||
846 | int ret; | ||
847 | |||
848 | real_qp = qp->real_qp; | ||
849 | xrcd = real_qp->xrcd; | ||
850 | |||
851 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
852 | ib_close_qp(qp); | ||
853 | if (atomic_read(&real_qp->usecnt) == 0) | ||
854 | list_del(&real_qp->xrcd_list); | ||
855 | else | ||
856 | real_qp = NULL; | ||
857 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
858 | |||
859 | if (real_qp) { | ||
860 | ret = ib_destroy_qp(real_qp); | ||
861 | if (!ret) | ||
862 | atomic_dec(&xrcd->usecnt); | ||
863 | else | ||
864 | __ib_insert_xrcd_qp(xrcd, real_qp); | ||
865 | } | ||
866 | |||
867 | return 0; | ||
868 | } | ||
869 | |||
598 | int ib_destroy_qp(struct ib_qp *qp) | 870 | int ib_destroy_qp(struct ib_qp *qp) |
599 | { | 871 | { |
600 | struct ib_pd *pd; | 872 | struct ib_pd *pd; |
@@ -602,16 +874,25 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
602 | struct ib_srq *srq; | 874 | struct ib_srq *srq; |
603 | int ret; | 875 | int ret; |
604 | 876 | ||
605 | pd = qp->pd; | 877 | if (atomic_read(&qp->usecnt)) |
606 | scq = qp->send_cq; | 878 | return -EBUSY; |
607 | rcq = qp->recv_cq; | 879 | |
608 | srq = qp->srq; | 880 | if (qp->real_qp != qp) |
881 | return __ib_destroy_shared_qp(qp); | ||
882 | |||
883 | pd = qp->pd; | ||
884 | scq = qp->send_cq; | ||
885 | rcq = qp->recv_cq; | ||
886 | srq = qp->srq; | ||
609 | 887 | ||
610 | ret = qp->device->destroy_qp(qp); | 888 | ret = qp->device->destroy_qp(qp); |
611 | if (!ret) { | 889 | if (!ret) { |
612 | atomic_dec(&pd->usecnt); | 890 | if (pd) |
613 | atomic_dec(&scq->usecnt); | 891 | atomic_dec(&pd->usecnt); |
614 | atomic_dec(&rcq->usecnt); | 892 | if (scq) |
893 | atomic_dec(&scq->usecnt); | ||
894 | if (rcq) | ||
895 | atomic_dec(&rcq->usecnt); | ||
615 | if (srq) | 896 | if (srq) |
616 | atomic_dec(&srq->usecnt); | 897 | atomic_dec(&srq->usecnt); |
617 | } | 898 | } |
@@ -921,3 +1202,42 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) | |||
921 | return qp->device->detach_mcast(qp, gid, lid); | 1202 | return qp->device->detach_mcast(qp, gid, lid); |
922 | } | 1203 | } |
923 | EXPORT_SYMBOL(ib_detach_mcast); | 1204 | EXPORT_SYMBOL(ib_detach_mcast); |
1205 | |||
1206 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) | ||
1207 | { | ||
1208 | struct ib_xrcd *xrcd; | ||
1209 | |||
1210 | if (!device->alloc_xrcd) | ||
1211 | return ERR_PTR(-ENOSYS); | ||
1212 | |||
1213 | xrcd = device->alloc_xrcd(device, NULL, NULL); | ||
1214 | if (!IS_ERR(xrcd)) { | ||
1215 | xrcd->device = device; | ||
1216 | xrcd->inode = NULL; | ||
1217 | atomic_set(&xrcd->usecnt, 0); | ||
1218 | mutex_init(&xrcd->tgt_qp_mutex); | ||
1219 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); | ||
1220 | } | ||
1221 | |||
1222 | return xrcd; | ||
1223 | } | ||
1224 | EXPORT_SYMBOL(ib_alloc_xrcd); | ||
1225 | |||
1226 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | ||
1227 | { | ||
1228 | struct ib_qp *qp; | ||
1229 | int ret; | ||
1230 | |||
1231 | if (atomic_read(&xrcd->usecnt)) | ||
1232 | return -EBUSY; | ||
1233 | |||
1234 | while (!list_empty(&xrcd->tgt_qp_list)) { | ||
1235 | qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); | ||
1236 | ret = ib_destroy_qp(qp); | ||
1237 | if (ret) | ||
1238 | return ret; | ||
1239 | } | ||
1240 | |||
1241 | return xrcd->device->dealloc_xrcd(xrcd); | ||
1242 | } | ||
1243 | EXPORT_SYMBOL(ib_dealloc_xrcd); | ||
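Together, the two functions above bracket the XRCD lifetime. A minimal sketch, assuming a valid ib_device:

	struct ib_xrcd *xrcd;
	int ret;

	xrcd = ib_alloc_xrcd(device);	/* ERR_PTR(-ENOSYS) if the driver has no alloc_xrcd */
	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	/* ... create XRC SRQs and target QPs against xrcd ... */

	ret = ib_dealloc_xrcd(xrcd);	/* -EBUSY while SRQs or QPs still hold a reference;
					 * any target QPs left on tgt_qp_list are destroyed first */
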
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c index 24f9e3a90e8e..32d34e88d5cf 100644 --- a/drivers/infiniband/hw/amso1100/c2_ae.c +++ b/drivers/infiniband/hw/amso1100/c2_ae.c | |||
@@ -288,6 +288,11 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) | |||
288 | cm_event.private_data_len = | 288 | cm_event.private_data_len = |
289 | be32_to_cpu(req->private_data_length); | 289 | be32_to_cpu(req->private_data_length); |
290 | cm_event.private_data = req->private_data; | 290 | cm_event.private_data = req->private_data; |
291 | /* | ||
292 | * Until ird/ord negotiation via MPAv2 support is added, send | ||
293 | * max supported values | ||
294 | */ | ||
295 | cm_event.ird = cm_event.ord = 128; | ||
291 | 296 | ||
292 | if (cm_id->event_handler) | 297 | if (cm_id->event_handler) |
293 | cm_id->event_handler(cm_id, &cm_event); | 298 | cm_id->event_handler(cm_id, &cm_event); |
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c index 0ebe4e806b86..8951db4ae29d 100644 --- a/drivers/infiniband/hw/amso1100/c2_intr.c +++ b/drivers/infiniband/hw/amso1100/c2_intr.c | |||
@@ -183,6 +183,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index) | |||
183 | case IW_CM_EVENT_ESTABLISHED: | 183 | case IW_CM_EVENT_ESTABLISHED: |
184 | c2_set_qp_state(req->qp, | 184 | c2_set_qp_state(req->qp, |
185 | C2_QP_STATE_RTS); | 185 | C2_QP_STATE_RTS); |
186 | /* | ||
187 | * Until ird/ord negotiation via MPAv2 support is added, send | ||
188 | * max supported values | ||
189 | */ | ||
190 | cm_event.ird = cm_event.ord = 128; | ||
186 | case IW_CM_EVENT_CLOSE: | 191 | case IW_CM_EVENT_CLOSE: |
187 | 192 | ||
188 | /* | 193 | /* |
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index f101bb73be63..12f923d64e42 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -753,10 +753,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) | |||
753 | memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6); | 753 | memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6); |
754 | 754 | ||
755 | /* Print out the MAC address */ | 755 | /* Print out the MAC address */ |
756 | pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n", | 756 | pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr); |
757 | netdev->name, | ||
758 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | ||
759 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); | ||
760 | 757 | ||
761 | #if 0 | 758 | #if 0 |
762 | /* Disable network packets */ | 759 | /* Disable network packets */ |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 6cd642aaa4de..de6d0774e609 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -753,6 +753,11 @@ static void connect_request_upcall(struct iwch_ep *ep) | |||
753 | event.private_data_len = ep->plen; | 753 | event.private_data_len = ep->plen; |
754 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | 754 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); |
755 | event.provider_data = ep; | 755 | event.provider_data = ep; |
756 | /* | ||
757 | * Until ird/ord negotiation via MPAv2 support is added, send max | ||
758 | * supported values | ||
759 | */ | ||
760 | event.ird = event.ord = 8; | ||
756 | if (state_read(&ep->parent_ep->com) != DEAD) { | 761 | if (state_read(&ep->parent_ep->com) != DEAD) { |
757 | get_ep(&ep->com); | 762 | get_ep(&ep->com); |
758 | ep->parent_ep->com.cm_id->event_handler( | 763 | ep->parent_ep->com.cm_id->event_handler( |
@@ -770,6 +775,11 @@ static void established_upcall(struct iwch_ep *ep) | |||
770 | PDBG("%s ep %p\n", __func__, ep); | 775 | PDBG("%s ep %p\n", __func__, ep); |
771 | memset(&event, 0, sizeof(event)); | 776 | memset(&event, 0, sizeof(event)); |
772 | event.event = IW_CM_EVENT_ESTABLISHED; | 777 | event.event = IW_CM_EVENT_ESTABLISHED; |
778 | /* | ||
779 | * Until ird/ord negotiation via MPAv2 support is added, send max | ||
780 | * supported values | ||
781 | */ | ||
782 | event.ird = event.ord = 8; | ||
773 | if (ep->com.cm_id) { | 783 | if (ep->com.cm_id) { |
774 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); | 784 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); |
775 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | 785 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c index 71e0d845da3d..abcc9e76962b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_ev.c +++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c | |||
@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, | |||
46 | struct ib_event event; | 46 | struct ib_event event; |
47 | struct iwch_qp_attributes attrs; | 47 | struct iwch_qp_attributes attrs; |
48 | struct iwch_qp *qhp; | 48 | struct iwch_qp *qhp; |
49 | unsigned long flag; | ||
49 | 50 | ||
50 | spin_lock(&rnicp->lock); | 51 | spin_lock(&rnicp->lock); |
51 | qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); | 52 | qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); |
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, | |||
94 | if (qhp->ibqp.event_handler) | 95 | if (qhp->ibqp.event_handler) |
95 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); | 96 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); |
96 | 97 | ||
98 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
97 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 99 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
100 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | ||
98 | 101 | ||
99 | if (atomic_dec_and_test(&qhp->refcnt)) | 102 | if (atomic_dec_and_test(&qhp->refcnt)) |
100 | wake_up(&qhp->wait); | 103 | wake_up(&qhp->wait); |
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) | |||
107 | struct iwch_cq *chp; | 110 | struct iwch_cq *chp; |
108 | struct iwch_qp *qhp; | 111 | struct iwch_qp *qhp; |
109 | u32 cqid = RSPQ_CQID(rsp_msg); | 112 | u32 cqid = RSPQ_CQID(rsp_msg); |
113 | unsigned long flag; | ||
110 | 114 | ||
111 | rnicp = (struct iwch_dev *) rdev_p->ulp; | 115 | rnicp = (struct iwch_dev *) rdev_p->ulp; |
112 | spin_lock(&rnicp->lock); | 116 | spin_lock(&rnicp->lock); |
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) | |||
170 | */ | 174 | */ |
171 | if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) | 175 | if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) |
172 | dst_confirm(qhp->ep->dst); | 176 | dst_confirm(qhp->ep->dst); |
177 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
173 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 178 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
179 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | ||
174 | break; | 180 | break; |
175 | 181 | ||
176 | case TPT_ERR_STAG: | 182 | case TPT_ERR_STAG: |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index c7d9411f2954..37c224fc3ad9 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
190 | chp->rhp = rhp; | 190 | chp->rhp = rhp; |
191 | chp->ibcq.cqe = 1 << chp->cq.size_log2; | 191 | chp->ibcq.cqe = 1 << chp->cq.size_log2; |
192 | spin_lock_init(&chp->lock); | 192 | spin_lock_init(&chp->lock); |
193 | spin_lock_init(&chp->comp_handler_lock); | ||
193 | atomic_set(&chp->refcnt, 1); | 194 | atomic_set(&chp->refcnt, 1); |
194 | init_waitqueue_head(&chp->wait); | 195 | init_waitqueue_head(&chp->wait); |
195 | if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { | 196 | if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h index 9a342c9b220d..87c14b0c5ac0 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h | |||
@@ -103,6 +103,7 @@ struct iwch_cq { | |||
103 | struct iwch_dev *rhp; | 103 | struct iwch_dev *rhp; |
104 | struct t3_cq cq; | 104 | struct t3_cq cq; |
105 | spinlock_t lock; | 105 | spinlock_t lock; |
106 | spinlock_t comp_handler_lock; | ||
106 | atomic_t refcnt; | 107 | atomic_t refcnt; |
107 | wait_queue_head_t wait; | 108 | wait_queue_head_t wait; |
108 | u32 __user *user_rptr_addr; | 109 | u32 __user *user_rptr_addr; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index ecd313f359a4..bea5839d89ee 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, | |||
822 | flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); | 822 | flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); |
823 | spin_unlock(&qhp->lock); | 823 | spin_unlock(&qhp->lock); |
824 | spin_unlock_irqrestore(&rchp->lock, *flag); | 824 | spin_unlock_irqrestore(&rchp->lock, *flag); |
825 | if (flushed) | 825 | if (flushed) { |
826 | spin_lock_irqsave(&rchp->comp_handler_lock, *flag); | ||
826 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 827 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
828 | spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); | ||
829 | } | ||
827 | 830 | ||
828 | /* locking hierarchy: cq lock first, then qp lock. */ | 831 | /* locking hierarchy: cq lock first, then qp lock. */ |
829 | spin_lock_irqsave(&schp->lock, *flag); | 832 | spin_lock_irqsave(&schp->lock, *flag); |
@@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, | |||
833 | flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); | 836 | flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); |
834 | spin_unlock(&qhp->lock); | 837 | spin_unlock(&qhp->lock); |
835 | spin_unlock_irqrestore(&schp->lock, *flag); | 838 | spin_unlock_irqrestore(&schp->lock, *flag); |
836 | if (flushed) | 839 | if (flushed) { |
840 | spin_lock_irqsave(&schp->comp_handler_lock, *flag); | ||
837 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 841 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); |
842 | spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); | ||
843 | } | ||
838 | 844 | ||
839 | /* deref */ | 845 | /* deref */ |
840 | if (atomic_dec_and_test(&qhp->refcnt)) | 846 | if (atomic_dec_and_test(&qhp->refcnt)) |
@@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
853 | if (qhp->ibqp.uobject) { | 859 | if (qhp->ibqp.uobject) { |
854 | cxio_set_wq_in_error(&qhp->wq); | 860 | cxio_set_wq_in_error(&qhp->wq); |
855 | cxio_set_cq_in_error(&rchp->cq); | 861 | cxio_set_cq_in_error(&rchp->cq); |
862 | spin_lock_irqsave(&rchp->comp_handler_lock, *flag); | ||
856 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 863 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
864 | spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); | ||
857 | if (schp != rchp) { | 865 | if (schp != rchp) { |
858 | cxio_set_cq_in_error(&schp->cq); | 866 | cxio_set_cq_in_error(&schp->cq); |
867 | spin_lock_irqsave(&schp->comp_handler_lock, *flag); | ||
859 | (*schp->ibcq.comp_handler)(&schp->ibcq, | 868 | (*schp->ibcq.comp_handler)(&schp->ibcq, |
860 | schp->ibcq.cq_context); | 869 | schp->ibcq.cq_context); |
870 | spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); | ||
861 | } | 871 | } |
862 | return; | 872 | return; |
863 | } | 873 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 77f769d9227d..b36cdac9c558 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -103,7 +103,8 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " | |||
103 | static int mpa_rev = 1; | 103 | static int mpa_rev = 1; |
104 | module_param(mpa_rev, int, 0644); | 104 | module_param(mpa_rev, int, 0644); |
105 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " | 105 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " |
106 | "1 is spec compliant. (default=1)"); | 106 | "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" |
107 | " compliant (default=1)"); | ||
107 | 108 | ||
108 | static int markers_enabled; | 109 | static int markers_enabled; |
109 | module_param(markers_enabled, int, 0644); | 110 | module_param(markers_enabled, int, 0644); |
@@ -497,17 +498,21 @@ static int send_connect(struct c4iw_ep *ep) | |||
497 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 498 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
498 | } | 499 | } |
499 | 500 | ||
500 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) | 501 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, |
502 | u8 mpa_rev_to_use) | ||
501 | { | 503 | { |
502 | int mpalen, wrlen; | 504 | int mpalen, wrlen; |
503 | struct fw_ofld_tx_data_wr *req; | 505 | struct fw_ofld_tx_data_wr *req; |
504 | struct mpa_message *mpa; | 506 | struct mpa_message *mpa; |
507 | struct mpa_v2_conn_params mpa_v2_params; | ||
505 | 508 | ||
506 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | 509 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); |
507 | 510 | ||
508 | BUG_ON(skb_cloned(skb)); | 511 | BUG_ON(skb_cloned(skb)); |
509 | 512 | ||
510 | mpalen = sizeof(*mpa) + ep->plen; | 513 | mpalen = sizeof(*mpa) + ep->plen; |
514 | if (mpa_rev_to_use == 2) | ||
515 | mpalen += sizeof(struct mpa_v2_conn_params); | ||
511 | wrlen = roundup(mpalen + sizeof *req, 16); | 516 | wrlen = roundup(mpalen + sizeof *req, 16); |
512 | skb = get_skb(skb, wrlen, GFP_KERNEL); | 517 | skb = get_skb(skb, wrlen, GFP_KERNEL); |
513 | if (!skb) { | 518 | if (!skb) { |
@@ -533,12 +538,39 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) | |||
533 | mpa = (struct mpa_message *)(req + 1); | 538 | mpa = (struct mpa_message *)(req + 1); |
534 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); | 539 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); |
535 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | | 540 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | |
536 | (markers_enabled ? MPA_MARKERS : 0); | 541 | (markers_enabled ? MPA_MARKERS : 0) | |
542 | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); | ||
537 | mpa->private_data_size = htons(ep->plen); | 543 | mpa->private_data_size = htons(ep->plen); |
538 | mpa->revision = mpa_rev; | 544 | mpa->revision = mpa_rev_to_use; |
545 | if (mpa_rev_to_use == 1) | ||
546 | ep->tried_with_mpa_v1 = 1; | ||
547 | |||
548 | if (mpa_rev_to_use == 2) { | ||
549 | mpa->private_data_size += | ||
550 | htons(sizeof(struct mpa_v2_conn_params)); | ||
551 | mpa_v2_params.ird = htons((u16)ep->ird); | ||
552 | mpa_v2_params.ord = htons((u16)ep->ord); | ||
553 | |||
554 | if (peer2peer) { | ||
555 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | ||
556 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | ||
557 | mpa_v2_params.ord |= | ||
558 | htons(MPA_V2_RDMA_WRITE_RTR); | ||
559 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | ||
560 | mpa_v2_params.ord |= | ||
561 | htons(MPA_V2_RDMA_READ_RTR); | ||
562 | } | ||
563 | memcpy(mpa->private_data, &mpa_v2_params, | ||
564 | sizeof(struct mpa_v2_conn_params)); | ||
539 | 565 | ||
540 | if (ep->plen) | 566 | if (ep->plen) |
541 | memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); | 567 | memcpy(mpa->private_data + |
568 | sizeof(struct mpa_v2_conn_params), | ||
569 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | ||
570 | } else | ||
571 | if (ep->plen) | ||
572 | memcpy(mpa->private_data, | ||
573 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | ||
542 | 574 | ||
543 | /* | 575 | /* |
544 | * Reference the mpa skb. This ensures the data area | 576 | * Reference the mpa skb. This ensures the data area |
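To make the memcpy arithmetic above concrete: for v2 the frame is laid out as the fixed MPA header, then the IRD/ORD block, then any ULP private data. A minimal sketch of where the private data starts for each revision (hypothetical helper, not part of the patch; the struct names are the ones this series introduces):

/* v1: [struct mpa_message][private data]
 * v2: [struct mpa_message][struct mpa_v2_conn_params][private data]
 */
static u8 *mpa_private_data(struct mpa_message *mpa, u8 rev)
{
        if (rev == 2)
                return mpa->private_data +
                       sizeof(struct mpa_v2_conn_params);
        return mpa->private_data;
}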
@@ -562,10 +594,13 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
562 | struct fw_ofld_tx_data_wr *req; | 594 | struct fw_ofld_tx_data_wr *req; |
563 | struct mpa_message *mpa; | 595 | struct mpa_message *mpa; |
564 | struct sk_buff *skb; | 596 | struct sk_buff *skb; |
597 | struct mpa_v2_conn_params mpa_v2_params; | ||
565 | 598 | ||
566 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | 599 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); |
567 | 600 | ||
568 | mpalen = sizeof(*mpa) + plen; | 601 | mpalen = sizeof(*mpa) + plen; |
602 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) | ||
603 | mpalen += sizeof(struct mpa_v2_conn_params); | ||
569 | wrlen = roundup(mpalen + sizeof *req, 16); | 604 | wrlen = roundup(mpalen + sizeof *req, 16); |
570 | 605 | ||
571 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | 606 | skb = get_skb(NULL, wrlen, GFP_KERNEL); |
@@ -595,8 +630,29 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
595 | mpa->flags = MPA_REJECT; | 630 | mpa->flags = MPA_REJECT; |
596 | mpa->revision = mpa_rev; | 631 | mpa->revision = mpa_rev; |
597 | mpa->private_data_size = htons(plen); | 632 | mpa->private_data_size = htons(plen); |
598 | if (plen) | 633 | |
599 | memcpy(mpa->private_data, pdata, plen); | 634 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
635 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | ||
636 | mpa->private_data_size += | ||
637 | htons(sizeof(struct mpa_v2_conn_params)); | ||
638 | mpa_v2_params.ird = htons((u16)ep->ird); | ||
639 | mpa_v2_params.ord = htons((u16)ep->ord); | ||
640 | if (peer2peer) { | ||
641 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | ||
642 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | ||
643 | mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); | ||
644 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | ||
645 | mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); | ||
646 | } | ||
647 | memcpy(mpa->private_data, &mpa_v2_params, | ||
648 | sizeof(struct mpa_v2_conn_params)); | ||
649 | |||
650 | if (plen) | ||
651 | memcpy(mpa->private_data + | ||
652 | sizeof(struct mpa_v2_conn_params), pdata, plen); | ||
653 | } else | ||
654 | if (plen) | ||
655 | memcpy(mpa->private_data, pdata, plen); | ||
600 | 656 | ||
601 | /* | 657 | /* |
602 | * Reference the mpa skb again. This ensures the data area | 658 | * Reference the mpa skb again. This ensures the data area |
@@ -617,10 +673,13 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
617 | struct fw_ofld_tx_data_wr *req; | 673 | struct fw_ofld_tx_data_wr *req; |
618 | struct mpa_message *mpa; | 674 | struct mpa_message *mpa; |
619 | struct sk_buff *skb; | 675 | struct sk_buff *skb; |
676 | struct mpa_v2_conn_params mpa_v2_params; | ||
620 | 677 | ||
621 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | 678 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); |
622 | 679 | ||
623 | mpalen = sizeof(*mpa) + plen; | 680 | mpalen = sizeof(*mpa) + plen; |
681 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) | ||
682 | mpalen += sizeof(struct mpa_v2_conn_params); | ||
624 | wrlen = roundup(mpalen + sizeof *req, 16); | 683 | wrlen = roundup(mpalen + sizeof *req, 16); |
625 | 684 | ||
626 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | 685 | skb = get_skb(NULL, wrlen, GFP_KERNEL); |
@@ -649,10 +708,36 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
649 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | 708 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); |
650 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | | 709 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | |
651 | (markers_enabled ? MPA_MARKERS : 0); | 710 | (markers_enabled ? MPA_MARKERS : 0); |
652 | mpa->revision = mpa_rev; | 711 | mpa->revision = ep->mpa_attr.version; |
653 | mpa->private_data_size = htons(plen); | 712 | mpa->private_data_size = htons(plen); |
654 | if (plen) | 713 | |
655 | memcpy(mpa->private_data, pdata, plen); | 714 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
715 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | ||
716 | mpa->private_data_size += | ||
717 | htons(sizeof(struct mpa_v2_conn_params)); | ||
718 | mpa_v2_params.ird = htons((u16)ep->ird); | ||
719 | mpa_v2_params.ord = htons((u16)ep->ord); | ||
720 | if (peer2peer && (ep->mpa_attr.p2p_type != | ||
721 | FW_RI_INIT_P2PTYPE_DISABLED)) { | ||
722 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | ||
723 | |||
724 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | ||
725 | mpa_v2_params.ord |= | ||
726 | htons(MPA_V2_RDMA_WRITE_RTR); | ||
727 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | ||
728 | mpa_v2_params.ord |= | ||
729 | htons(MPA_V2_RDMA_READ_RTR); | ||
730 | } | ||
731 | |||
732 | memcpy(mpa->private_data, &mpa_v2_params, | ||
733 | sizeof(struct mpa_v2_conn_params)); | ||
734 | |||
735 | if (plen) | ||
736 | memcpy(mpa->private_data + | ||
737 | sizeof(struct mpa_v2_conn_params), pdata, plen); | ||
738 | } else | ||
739 | if (plen) | ||
740 | memcpy(mpa->private_data, pdata, plen); | ||
656 | 741 | ||
657 | /* | 742 | /* |
658 | * Reference the mpa skb. This ensures the data area | 743 | * Reference the mpa skb. This ensures the data area |
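A subtlety shared by all three senders: private_data_size is accumulated by adding two already-byte-swapped values (htons(plen) + htons(sizeof ...)). That equals htons(plen + sizeof ...) only while the addition cannot carry across a byte boundary, i.e. while the low byte of plen stays at or below 251. A more defensive pattern, sketched here under the same struct definitions (mpa_v2 is an assumed flag meaning "the v2 block is present"), does the arithmetic in host order and converts once:

u16 pd_size = plen;                     /* host order throughout */

if (mpa_v2)                             /* assumed flag: v2 block present */
        pd_size += sizeof(struct mpa_v2_conn_params);
mpa->private_data_size = htons(pd_size);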
@@ -695,7 +780,10 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |||
695 | 780 | ||
696 | /* start MPA negotiation */ | 781 | /* start MPA negotiation */ |
697 | send_flowc(ep, NULL); | 782 | send_flowc(ep, NULL); |
698 | send_mpa_req(ep, skb); | 783 | if (ep->retry_with_mpa_v1) |
784 | send_mpa_req(ep, skb, 1); | ||
785 | else | ||
786 | send_mpa_req(ep, skb, mpa_rev); | ||
699 | 787 | ||
700 | return 0; | 788 | return 0; |
701 | } | 789 | } |
@@ -769,8 +857,19 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status) | |||
769 | event.remote_addr = ep->com.remote_addr; | 857 | event.remote_addr = ep->com.remote_addr; |
770 | 858 | ||
771 | if ((status == 0) || (status == -ECONNREFUSED)) { | 859 | if ((status == 0) || (status == -ECONNREFUSED)) { |
772 | event.private_data_len = ep->plen; | 860 | if (!ep->tried_with_mpa_v1) { |
773 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | 861 | /* this means MPA_v2 is used */ |
862 | event.private_data_len = ep->plen - | ||
863 | sizeof(struct mpa_v2_conn_params); | ||
864 | event.private_data = ep->mpa_pkt + | ||
865 | sizeof(struct mpa_message) + | ||
866 | sizeof(struct mpa_v2_conn_params); | ||
867 | } else { | ||
868 | /* this means MPA_v1 is used */ | ||
869 | event.private_data_len = ep->plen; | ||
870 | event.private_data = ep->mpa_pkt + | ||
871 | sizeof(struct mpa_message); | ||
872 | } | ||
774 | } | 873 | } |
775 | 874 | ||
776 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, | 875 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, |
@@ -793,9 +892,22 @@ static void connect_request_upcall(struct c4iw_ep *ep) | |||
793 | event.event = IW_CM_EVENT_CONNECT_REQUEST; | 892 | event.event = IW_CM_EVENT_CONNECT_REQUEST; |
794 | event.local_addr = ep->com.local_addr; | 893 | event.local_addr = ep->com.local_addr; |
795 | event.remote_addr = ep->com.remote_addr; | 894 | event.remote_addr = ep->com.remote_addr; |
796 | event.private_data_len = ep->plen; | ||
797 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | ||
798 | event.provider_data = ep; | 895 | event.provider_data = ep; |
896 | if (!ep->tried_with_mpa_v1) { | ||
897 | /* this means MPA_v2 is used */ | ||
898 | event.ord = ep->ord; | ||
899 | event.ird = ep->ird; | ||
900 | event.private_data_len = ep->plen - | ||
901 | sizeof(struct mpa_v2_conn_params); | ||
902 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + | ||
903 | sizeof(struct mpa_v2_conn_params); | ||
904 | } else { | ||
905 | /* this means MPA_v1 is used. Send max supported */ | ||
906 | event.ord = c4iw_max_read_depth; | ||
907 | event.ird = c4iw_max_read_depth; | ||
908 | event.private_data_len = ep->plen; | ||
909 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | ||
910 | } | ||
799 | if (state_read(&ep->parent_ep->com) != DEAD) { | 911 | if (state_read(&ep->parent_ep->com) != DEAD) { |
800 | c4iw_get_ep(&ep->com); | 912 | c4iw_get_ep(&ep->com); |
801 | ep->parent_ep->com.cm_id->event_handler( | 913 | ep->parent_ep->com.cm_id->event_handler( |
@@ -813,6 +925,8 @@ static void established_upcall(struct c4iw_ep *ep) | |||
813 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 925 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
814 | memset(&event, 0, sizeof(event)); | 926 | memset(&event, 0, sizeof(event)); |
815 | event.event = IW_CM_EVENT_ESTABLISHED; | 927 | event.event = IW_CM_EVENT_ESTABLISHED; |
928 | event.ird = ep->ird; | ||
929 | event.ord = ep->ord; | ||
816 | if (ep->com.cm_id) { | 930 | if (ep->com.cm_id) { |
817 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 931 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
818 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | 932 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); |
@@ -848,7 +962,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |||
848 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | 962 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) |
849 | { | 963 | { |
850 | struct mpa_message *mpa; | 964 | struct mpa_message *mpa; |
965 | struct mpa_v2_conn_params *mpa_v2_params; | ||
851 | u16 plen; | 966 | u16 plen; |
967 | u16 resp_ird, resp_ord; | ||
968 | u8 rtr_mismatch = 0, insuff_ird = 0; | ||
852 | struct c4iw_qp_attributes attrs; | 969 | struct c4iw_qp_attributes attrs; |
853 | enum c4iw_qp_attr_mask mask; | 970 | enum c4iw_qp_attr_mask mask; |
854 | int err; | 971 | int err; |
@@ -888,7 +1005,9 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
888 | mpa = (struct mpa_message *) ep->mpa_pkt; | 1005 | mpa = (struct mpa_message *) ep->mpa_pkt; |
889 | 1006 | ||
890 | /* Validate MPA header. */ | 1007 | /* Validate MPA header. */ |
891 | if (mpa->revision != mpa_rev) { | 1008 | if (mpa->revision > mpa_rev) { |
1009 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | ||
1010 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | ||
892 | err = -EPROTO; | 1011 | err = -EPROTO; |
893 | goto err; | 1012 | goto err; |
894 | } | 1013 | } |
@@ -938,13 +1057,66 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
938 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | 1057 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; |
939 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | 1058 | ep->mpa_attr.recv_marker_enabled = markers_enabled; |
940 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | 1059 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; |
941 | ep->mpa_attr.version = mpa_rev; | 1060 | ep->mpa_attr.version = mpa->revision; |
942 | ep->mpa_attr.p2p_type = peer2peer ? p2p_type : | 1061 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; |
943 | FW_RI_INIT_P2PTYPE_DISABLED; | 1062 | |
1063 | if (mpa->revision == 2) { | ||
1064 | ep->mpa_attr.enhanced_rdma_conn = | ||
1065 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | ||
1066 | if (ep->mpa_attr.enhanced_rdma_conn) { | ||
1067 | mpa_v2_params = (struct mpa_v2_conn_params *) | ||
1068 | (ep->mpa_pkt + sizeof(*mpa)); | ||
1069 | resp_ird = ntohs(mpa_v2_params->ird) & | ||
1070 | MPA_V2_IRD_ORD_MASK; | ||
1071 | resp_ord = ntohs(mpa_v2_params->ord) & | ||
1072 | MPA_V2_IRD_ORD_MASK; | ||
1073 | |||
1074 | /* | ||
1075 | * This is a double-check; ideally the checks below are | ||
1076 | * not required, since the ird/ord negotiation has already | ||
1077 | * been handled in c4iw_accept_cr(). | ||
1078 | */ | ||
1079 | if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { | ||
1080 | err = -ENOMEM; | ||
1081 | ep->ird = resp_ord; | ||
1082 | ep->ord = resp_ird; | ||
1083 | insuff_ird = 1; | ||
1084 | } | ||
1085 | |||
1086 | if (ntohs(mpa_v2_params->ird) & | ||
1087 | MPA_V2_PEER2PEER_MODEL) { | ||
1088 | if (ntohs(mpa_v2_params->ord) & | ||
1089 | MPA_V2_RDMA_WRITE_RTR) | ||
1090 | ep->mpa_attr.p2p_type = | ||
1091 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | ||
1092 | else if (ntohs(mpa_v2_params->ord) & | ||
1093 | MPA_V2_RDMA_READ_RTR) | ||
1094 | ep->mpa_attr.p2p_type = | ||
1095 | FW_RI_INIT_P2PTYPE_READ_REQ; | ||
1096 | } | ||
1097 | } | ||
1098 | } else if (mpa->revision == 1) | ||
1099 | if (peer2peer) | ||
1100 | ep->mpa_attr.p2p_type = p2p_type; | ||
1101 | |||
944 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " | 1102 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
945 | "xmit_marker_enabled=%d, version=%d\n", __func__, | 1103 | "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " |
946 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | 1104 | "%d\n", __func__, ep->mpa_attr.crc_enabled, |
947 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); | 1105 | ep->mpa_attr.recv_marker_enabled, |
1106 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | ||
1107 | ep->mpa_attr.p2p_type, p2p_type); | ||
1108 | |||
1109 | /* | ||
1110 | * If the responder's RTR does not match the initiator's, set | ||
1111 | * FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that no | ||
1112 | * RTR is generated when the QP moves to RTS. | ||
1113 | * A TERM message will be sent once the QP has reached RTS. | ||
1114 | */ | ||
1115 | if ((ep->mpa_attr.version == 2) && | ||
1116 | (ep->mpa_attr.p2p_type != p2p_type)) { | ||
1117 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | ||
1118 | rtr_mismatch = 1; | ||
1119 | } | ||
948 | 1120 | ||
949 | attrs.mpa_attr = ep->mpa_attr; | 1121 | attrs.mpa_attr = ep->mpa_attr; |
950 | attrs.max_ird = ep->ird; | 1122 | attrs.max_ird = ep->ird; |
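A worked example of the double-check above: if this side sent IRD = 2 and ORD = 8, and the reply carries ORD = 4 (resp_ord) and IRD = 8 (resp_ird), then ep->ird (2) < resp_ord (4). The endpoint adopts the responder's values (ird = 4, ord = 8), records insuff_ird, and once the QP has been moved to RTS a TERM with ecode MPA_INSUFF_IRD is generated by the code added further down.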
@@ -961,6 +1133,39 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
961 | ep->com.qp, mask, &attrs, 1); | 1133 | ep->com.qp, mask, &attrs, 1); |
962 | if (err) | 1134 | if (err) |
963 | goto err; | 1135 | goto err; |
1136 | |||
1137 | /* | ||
1138 | * If responder's RTR requirement did not match with what initiator | ||
1139 | * supports, generate TERM message | ||
1140 | */ | ||
1141 | if (rtr_mismatch) { | ||
1142 | printk(KERN_ERR MOD "%s: RTR mismatch, sending TERM\n", __func__); | ||
1143 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | ||
1144 | attrs.ecode = MPA_NOMATCH_RTR; | ||
1145 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | ||
1146 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1147 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | ||
1148 | err = -ENOMEM; | ||
1149 | goto out; | ||
1150 | } | ||
1151 | |||
1152 | /* | ||
1153 | * Generate a TERM if the initiator's IRD is not sufficient for | ||
1154 | * the responder's ORD. Currently we behave the same way even | ||
1155 | * when the responder's IRD is insufficient with respect to the | ||
1156 | * initiator's ORD. | ||
1157 | */ | ||
1158 | if (insuff_ird) { | ||
1159 | printk(KERN_ERR MOD "%s: Insufficient IRD, sending TERM\n", | ||
1160 | __func__); | ||
1161 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | ||
1162 | attrs.ecode = MPA_INSUFF_IRD; | ||
1163 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | ||
1164 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1165 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | ||
1166 | err = -ENOMEM; | ||
1167 | goto out; | ||
1168 | } | ||
964 | goto out; | 1169 | goto out; |
965 | err: | 1170 | err: |
966 | state_set(&ep->com, ABORTING); | 1171 | state_set(&ep->com, ABORTING); |
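The layer_etype/ecode attributes set in these two TERM paths are picked up by post_terminate() in qp.c (see the hunk below): when they name an MPA-layer error (LAYER_MPA | DDP_LLP), the TERM message carries them verbatim instead of codes derived from the error CQE.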
@@ -973,6 +1178,7 @@ out: | |||
973 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | 1178 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) |
974 | { | 1179 | { |
975 | struct mpa_message *mpa; | 1180 | struct mpa_message *mpa; |
1181 | struct mpa_v2_conn_params *mpa_v2_params; | ||
976 | u16 plen; | 1182 | u16 plen; |
977 | 1183 | ||
978 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1184 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
@@ -1013,7 +1219,9 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1013 | /* | 1219 | /* |
1014 | * Validate MPA Header. | 1220 | * Validate MPA Header. |
1015 | */ | 1221 | */ |
1016 | if (mpa->revision != mpa_rev) { | 1222 | if (mpa->revision > mpa_rev) { |
1223 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | ||
1224 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | ||
1017 | abort_connection(ep, skb, GFP_KERNEL); | 1225 | abort_connection(ep, skb, GFP_KERNEL); |
1018 | return; | 1226 | return; |
1019 | } | 1227 | } |
@@ -1056,9 +1264,37 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1056 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | 1264 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; |
1057 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | 1265 | ep->mpa_attr.recv_marker_enabled = markers_enabled; |
1058 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | 1266 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; |
1059 | ep->mpa_attr.version = mpa_rev; | 1267 | ep->mpa_attr.version = mpa->revision; |
1060 | ep->mpa_attr.p2p_type = peer2peer ? p2p_type : | 1268 | if (mpa->revision == 1) |
1061 | FW_RI_INIT_P2PTYPE_DISABLED; | 1269 | ep->tried_with_mpa_v1 = 1; |
1270 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | ||
1271 | |||
1272 | if (mpa->revision == 2) { | ||
1273 | ep->mpa_attr.enhanced_rdma_conn = | ||
1274 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | ||
1275 | if (ep->mpa_attr.enhanced_rdma_conn) { | ||
1276 | mpa_v2_params = (struct mpa_v2_conn_params *) | ||
1277 | (ep->mpa_pkt + sizeof(*mpa)); | ||
1278 | ep->ird = ntohs(mpa_v2_params->ird) & | ||
1279 | MPA_V2_IRD_ORD_MASK; | ||
1280 | ep->ord = ntohs(mpa_v2_params->ord) & | ||
1281 | MPA_V2_IRD_ORD_MASK; | ||
1282 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) | ||
1283 | if (peer2peer) { | ||
1284 | if (ntohs(mpa_v2_params->ord) & | ||
1285 | MPA_V2_RDMA_WRITE_RTR) | ||
1286 | ep->mpa_attr.p2p_type = | ||
1287 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | ||
1288 | else if (ntohs(mpa_v2_params->ord) & | ||
1289 | MPA_V2_RDMA_READ_RTR) | ||
1290 | ep->mpa_attr.p2p_type = | ||
1291 | FW_RI_INIT_P2PTYPE_READ_REQ; | ||
1292 | } | ||
1293 | } | ||
1294 | } else if (mpa->revision == 1) | ||
1295 | if (peer2peer) | ||
1296 | ep->mpa_attr.p2p_type = p2p_type; | ||
1297 | |||
1062 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " | 1298 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
1063 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, | 1299 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, |
1064 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | 1300 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, |
@@ -1550,6 +1786,112 @@ static int is_neg_adv_abort(unsigned int status) | |||
1550 | status == CPL_ERR_PERSIST_NEG_ADVICE; | 1786 | status == CPL_ERR_PERSIST_NEG_ADVICE; |
1551 | } | 1787 | } |
1552 | 1788 | ||
1789 | static int c4iw_reconnect(struct c4iw_ep *ep) | ||
1790 | { | ||
1791 | int err = 0; | ||
1792 | struct rtable *rt; | ||
1793 | struct net_device *pdev; | ||
1794 | struct neighbour *neigh; | ||
1795 | int step; | ||
1796 | |||
1797 | PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); | ||
1798 | init_timer(&ep->timer); | ||
1799 | |||
1800 | /* | ||
1801 | * Allocate an active TID to initiate a TCP connection. | ||
1802 | */ | ||
1803 | ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); | ||
1804 | if (ep->atid == -1) { | ||
1805 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | ||
1806 | err = -ENOMEM; | ||
1807 | goto fail2; | ||
1808 | } | ||
1809 | |||
1810 | /* find a route */ | ||
1811 | rt = find_route(ep->com.dev, | ||
1812 | ep->com.cm_id->local_addr.sin_addr.s_addr, | ||
1813 | ep->com.cm_id->remote_addr.sin_addr.s_addr, | ||
1814 | ep->com.cm_id->local_addr.sin_port, | ||
1815 | ep->com.cm_id->remote_addr.sin_port, 0); | ||
1816 | if (!rt) { | ||
1817 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | ||
1818 | err = -EHOSTUNREACH; | ||
1819 | goto fail3; | ||
1820 | } | ||
1821 | ep->dst = &rt->dst; | ||
1822 | |||
1823 | neigh = dst_get_neighbour(ep->dst); | ||
1824 | |||
1825 | /* get a l2t entry */ | ||
1826 | if (neigh->dev->flags & IFF_LOOPBACK) { | ||
1827 | PDBG("%s LOOPBACK\n", __func__); | ||
1828 | pdev = ip_dev_find(&init_net, | ||
1829 | ep->com.cm_id->remote_addr.sin_addr.s_addr); | ||
1830 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | ||
1831 | neigh, pdev, 0); | ||
1832 | ep->mtu = pdev->mtu; | ||
1833 | ep->tx_chan = cxgb4_port_chan(pdev); | ||
1834 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; | ||
1835 | step = ep->com.dev->rdev.lldi.ntxq / | ||
1836 | ep->com.dev->rdev.lldi.nchan; | ||
1837 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | ||
1838 | step = ep->com.dev->rdev.lldi.nrxq / | ||
1839 | ep->com.dev->rdev.lldi.nchan; | ||
1840 | ep->ctrlq_idx = cxgb4_port_idx(pdev); | ||
1841 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | ||
1842 | cxgb4_port_idx(pdev) * step]; | ||
1843 | dev_put(pdev); | ||
1844 | } else { | ||
1845 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | ||
1846 | neigh, neigh->dev, 0); | ||
1847 | ep->mtu = dst_mtu(ep->dst); | ||
1848 | ep->tx_chan = cxgb4_port_chan(neigh->dev); | ||
1849 | ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; | ||
1850 | step = ep->com.dev->rdev.lldi.ntxq / | ||
1851 | ep->com.dev->rdev.lldi.nchan; | ||
1852 | ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; | ||
1853 | ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); | ||
1854 | step = ep->com.dev->rdev.lldi.nrxq / | ||
1855 | ep->com.dev->rdev.lldi.nchan; | ||
1856 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | ||
1857 | cxgb4_port_idx(neigh->dev) * step]; | ||
1858 | } | ||
1859 | if (!ep->l2t) { | ||
1860 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | ||
1861 | err = -ENOMEM; | ||
1862 | goto fail4; | ||
1863 | } | ||
1864 | |||
1865 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | ||
1866 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | ||
1867 | ep->l2t->idx); | ||
1868 | |||
1869 | state_set(&ep->com, CONNECTING); | ||
1870 | ep->tos = 0; | ||
1871 | |||
1872 | /* send connect request to rnic */ | ||
1873 | err = send_connect(ep); | ||
1874 | if (!err) | ||
1875 | goto out; | ||
1876 | |||
1877 | cxgb4_l2t_release(ep->l2t); | ||
1878 | fail4: | ||
1879 | dst_release(ep->dst); | ||
1880 | fail3: | ||
1881 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | ||
1882 | fail2: | ||
1883 | /* | ||
1884 | * Send a notification to the upper layer. We are in here, so | ||
1885 | * the upper layer is not aware that this is a re-connect | ||
1886 | * attempt; it is therefore still waiting for the response to | ||
1887 | * its first connect request. | ||
1888 | */ | ||
1889 | connect_reply_upcall(ep, -ECONNRESET); | ||
1890 | c4iw_put_ep(&ep->com); | ||
1891 | out: | ||
1892 | return err; | ||
1893 | } | ||
1894 | |||
1553 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | 1895 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) |
1554 | { | 1896 | { |
1555 | struct cpl_abort_req_rss *req = cplhdr(skb); | 1897 | struct cpl_abort_req_rss *req = cplhdr(skb); |
@@ -1573,8 +1915,11 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1573 | 1915 | ||
1574 | /* | 1916 | /* |
1575 | * Wake up any threads in rdma_init() or rdma_fini(). | 1917 | * Wake up any threads in rdma_init() or rdma_fini(). |
1918 | * However, this is not needed if com state is just | ||
1919 | * MPA_REQ_SENT | ||
1576 | */ | 1920 | */ |
1577 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | 1921 | if (ep->com.state != MPA_REQ_SENT) |
1922 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | ||
1578 | 1923 | ||
1579 | mutex_lock(&ep->com.mutex); | 1924 | mutex_lock(&ep->com.mutex); |
1580 | switch (ep->com.state) { | 1925 | switch (ep->com.state) { |
@@ -1585,7 +1930,21 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1585 | break; | 1930 | break; |
1586 | case MPA_REQ_SENT: | 1931 | case MPA_REQ_SENT: |
1587 | stop_ep_timer(ep); | 1932 | stop_ep_timer(ep); |
1588 | connect_reply_upcall(ep, -ECONNRESET); | 1933 | if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) | ||
1934 | connect_reply_upcall(ep, -ECONNRESET); | ||
1935 | else { | ||
1936 | /* | ||
1937 | * Don't send a notification upwards: we want to retry | ||
1938 | * with MPA v1 without the upper layers even knowing | ||
1939 | * about it. | ||
1940 | * | ||
1941 | * Do some housekeeping so that the connection can be | ||
1942 | * re-initiated. | ||
1943 | */ | ||
1944 | PDBG("%s: mpa_rev=%d. Retrying with mpa_v1\n", __func__, | ||
1945 | mpa_rev); | ||
1946 | ep->retry_with_mpa_v1 = 1; | ||
1947 | } | ||
1589 | break; | 1948 | break; |
1590 | case MPA_REP_SENT: | 1949 | case MPA_REP_SENT: |
1591 | break; | 1950 | break; |
@@ -1621,7 +1980,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1621 | dst_confirm(ep->dst); | 1980 | dst_confirm(ep->dst); |
1622 | if (ep->com.state != ABORTING) { | 1981 | if (ep->com.state != ABORTING) { |
1623 | __state_set(&ep->com, DEAD); | 1982 | __state_set(&ep->com, DEAD); |
1624 | release = 1; | 1983 | /* we don't release if we want to retry with mpa_v1 */ |
1984 | if (!ep->retry_with_mpa_v1) | ||
1985 | release = 1; | ||
1625 | } | 1986 | } |
1626 | mutex_unlock(&ep->com.mutex); | 1987 | mutex_unlock(&ep->com.mutex); |
1627 | 1988 | ||
@@ -1641,6 +2002,15 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1641 | out: | 2002 | out: |
1642 | if (release) | 2003 | if (release) |
1643 | release_ep_resources(ep); | 2004 | release_ep_resources(ep); |
2005 | |||
2006 | /* retry with mpa-v1 */ | ||
2007 | if (ep && ep->retry_with_mpa_v1) { | ||
2008 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | ||
2009 | dst_release(ep->dst); | ||
2010 | cxgb4_l2t_release(ep->l2t); | ||
2011 | c4iw_reconnect(ep); | ||
2012 | } | ||
2013 | |||
1644 | return 0; | 2014 | return 0; |
1645 | } | 2015 | } |
1646 | 2016 | ||
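Putting the retry pieces together: when an MPA v2 request is aborted before any v1 fallback has been tried, peer_abort() records retry_with_mpa_v1 instead of notifying the upper layer, the block above releases the hardware TID, the route, and the L2T entry of the failed attempt, and c4iw_reconnect() dials the same destination again; act_establish() then sees retry_with_mpa_v1 and resends the MPA request with revision 1.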
@@ -1792,18 +2162,40 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1792 | goto err; | 2162 | goto err; |
1793 | } | 2163 | } |
1794 | 2164 | ||
1795 | cm_id->add_ref(cm_id); | 2165 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
1796 | ep->com.cm_id = cm_id; | 2166 | if (conn_param->ord > ep->ird) { |
1797 | ep->com.qp = qp; | 2167 | ep->ird = conn_param->ird; |
2168 | ep->ord = conn_param->ord; | ||
2169 | send_mpa_reject(ep, conn_param->private_data, | ||
2170 | conn_param->private_data_len); | ||
2171 | abort_connection(ep, NULL, GFP_KERNEL); | ||
2172 | err = -ENOMEM; | ||
2173 | goto err; | ||
2174 | } | ||
2175 | if (conn_param->ird > ep->ord) { | ||
2176 | if (!ep->ord) | ||
2177 | conn_param->ird = 1; | ||
2178 | else { | ||
2179 | abort_connection(ep, NULL, GFP_KERNEL); | ||
2180 | err = -ENOMEM; | ||
2181 | goto err; | ||
2182 | } | ||
2183 | } | ||
1798 | 2184 | ||
2185 | } | ||
1799 | ep->ird = conn_param->ird; | 2186 | ep->ird = conn_param->ird; |
1800 | ep->ord = conn_param->ord; | 2187 | ep->ord = conn_param->ord; |
1801 | 2188 | ||
1802 | if (peer2peer && ep->ird == 0) | 2189 | if (ep->mpa_attr.version != 2) |
1803 | ep->ird = 1; | 2190 | if (peer2peer && ep->ird == 0) |
2191 | ep->ird = 1; | ||
1804 | 2192 | ||
1805 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | 2193 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); |
1806 | 2194 | ||
2195 | cm_id->add_ref(cm_id); | ||
2196 | ep->com.cm_id = cm_id; | ||
2197 | ep->com.qp = qp; | ||
2198 | |||
1807 | /* bind QP to EP and move to RTS */ | 2199 | /* bind QP to EP and move to RTS */ |
1808 | attrs.mpa_attr = ep->mpa_attr; | 2200 | attrs.mpa_attr = ep->mpa_attr; |
1809 | attrs.max_ird = ep->ird; | 2201 | attrs.max_ird = ep->ird; |
@@ -1944,6 +2336,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1944 | ep->com.dev->rdev.lldi.nchan; | 2336 | ep->com.dev->rdev.lldi.nchan; |
1945 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | 2337 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ |
1946 | cxgb4_port_idx(neigh->dev) * step]; | 2338 | cxgb4_port_idx(neigh->dev) * step]; |
2339 | ep->retry_with_mpa_v1 = 0; | ||
2340 | ep->tried_with_mpa_v1 = 0; | ||
1947 | } | 2341 | } |
1948 | if (!ep->l2t) { | 2342 | if (!ep->l2t) { |
1949 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | 2343 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
@@ -2323,8 +2717,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2323 | 2717 | ||
2324 | /* | 2718 | /* |
2325 | * Wake up any threads in rdma_init() or rdma_fini(). | 2719 | * Wake up any threads in rdma_init() or rdma_fini(). |
2720 | * However, this is not needed if com state is just | ||
2721 | * MPA_REQ_SENT | ||
2326 | */ | 2722 | */ |
2327 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | 2723 | if (ep->com.state != MPA_REQ_SENT) |
2724 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | ||
2328 | sched(dev, skb); | 2725 | sched(dev, skb); |
2329 | return 0; | 2726 | return 0; |
2330 | } | 2727 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 1720dc790d13..f35a935267e7 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -185,7 +185,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) | |||
185 | V_CQE_OPCODE(FW_RI_SEND) | | 185 | V_CQE_OPCODE(FW_RI_SEND) | |
186 | V_CQE_TYPE(0) | | 186 | V_CQE_TYPE(0) | |
187 | V_CQE_SWCQE(1) | | 187 | V_CQE_SWCQE(1) | |
188 | V_CQE_QPID(wq->rq.qid)); | 188 | V_CQE_QPID(wq->sq.qid)); |
189 | cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); | 189 | cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); |
190 | cq->sw_queue[cq->sw_pidx] = cqe; | 190 | cq->sw_queue[cq->sw_pidx] = cqe; |
191 | t4_swcq_produce(cq); | 191 | t4_swcq_produce(cq); |
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
818 | chp->cq.size--; /* status page */ | 818 | chp->cq.size--; /* status page */ |
819 | chp->ibcq.cqe = entries - 2; | 819 | chp->ibcq.cqe = entries - 2; |
820 | spin_lock_init(&chp->lock); | 820 | spin_lock_init(&chp->lock); |
821 | spin_lock_init(&chp->comp_handler_lock); | ||
821 | atomic_set(&chp->refcnt, 1); | 822 | atomic_set(&chp->refcnt, 1); |
822 | init_waitqueue_head(&chp->wait); | 823 | init_waitqueue_head(&chp->wait); |
823 | ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); | 824 | ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 40a13cc633a3..6d0df6ec161b 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -376,10 +376,8 @@ struct uld_ctx { | |||
376 | struct c4iw_dev *dev; | 376 | struct c4iw_dev *dev; |
377 | }; | 377 | }; |
378 | 378 | ||
379 | static void c4iw_remove(struct uld_ctx *ctx) | 379 | static void c4iw_dealloc(struct uld_ctx *ctx) |
380 | { | 380 | { |
381 | PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); | ||
382 | c4iw_unregister_device(ctx->dev); | ||
383 | c4iw_rdev_close(&ctx->dev->rdev); | 381 | c4iw_rdev_close(&ctx->dev->rdev); |
384 | idr_destroy(&ctx->dev->cqidr); | 382 | idr_destroy(&ctx->dev->cqidr); |
385 | idr_destroy(&ctx->dev->qpidr); | 383 | idr_destroy(&ctx->dev->qpidr); |
@@ -389,11 +387,30 @@ static void c4iw_remove(struct uld_ctx *ctx) | |||
389 | ctx->dev = NULL; | 387 | ctx->dev = NULL; |
390 | } | 388 | } |
391 | 389 | ||
390 | static void c4iw_remove(struct uld_ctx *ctx) | ||
391 | { | ||
392 | PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); | ||
393 | c4iw_unregister_device(ctx->dev); | ||
394 | c4iw_dealloc(ctx); | ||
395 | } | ||
396 | |||
397 | static int rdma_supported(const struct cxgb4_lld_info *infop) | ||
398 | { | ||
399 | return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && | ||
400 | infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && | ||
401 | infop->vr->cq.size > 0 && infop->vr->ocq.size > 0; | ||
402 | } | ||
403 | |||
392 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | 404 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) |
393 | { | 405 | { |
394 | struct c4iw_dev *devp; | 406 | struct c4iw_dev *devp; |
395 | int ret; | 407 | int ret; |
396 | 408 | ||
409 | if (!rdma_supported(infop)) { | ||
410 | printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n", | ||
411 | pci_name(infop->pdev)); | ||
412 | return ERR_PTR(-ENOSYS); | ||
413 | } | ||
397 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); | 414 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); |
398 | if (!devp) { | 415 | if (!devp) { |
399 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); | 416 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); |
@@ -414,7 +431,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
414 | 431 | ||
415 | ret = c4iw_rdev_open(&devp->rdev); | 432 | ret = c4iw_rdev_open(&devp->rdev); |
416 | if (ret) { | 433 | if (ret) { |
417 | mutex_unlock(&dev_mutex); | ||
418 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); | 434 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); |
419 | ib_dealloc_device(&devp->ibdev); | 435 | ib_dealloc_device(&devp->ibdev); |
420 | return ERR_PTR(ret); | 436 | return ERR_PTR(ret); |
@@ -519,15 +535,24 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) | |||
519 | case CXGB4_STATE_UP: | 535 | case CXGB4_STATE_UP: |
520 | printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); | 536 | printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); |
521 | if (!ctx->dev) { | 537 | if (!ctx->dev) { |
522 | int ret = 0; | 538 | int ret; |
523 | 539 | ||
524 | ctx->dev = c4iw_alloc(&ctx->lldi); | 540 | ctx->dev = c4iw_alloc(&ctx->lldi); |
525 | if (!IS_ERR(ctx->dev)) | 541 | if (IS_ERR(ctx->dev)) { |
526 | ret = c4iw_register_device(ctx->dev); | 542 | printk(KERN_ERR MOD |
527 | if (IS_ERR(ctx->dev) || ret) | 543 | "%s: initialization failed: %ld\n", |
544 | pci_name(ctx->lldi.pdev), | ||
545 | PTR_ERR(ctx->dev)); | ||
546 | ctx->dev = NULL; | ||
547 | break; | ||
548 | } | ||
549 | ret = c4iw_register_device(ctx->dev); | ||
550 | if (ret) { | ||
528 | printk(KERN_ERR MOD | 551 | printk(KERN_ERR MOD |
529 | "%s: RDMA registration failed: %d\n", | 552 | "%s: RDMA registration failed: %d\n", |
530 | pci_name(ctx->lldi.pdev), ret); | 553 | pci_name(ctx->lldi.pdev), ret); |
554 | c4iw_dealloc(ctx); | ||
555 | } | ||
531 | } | 556 | } |
532 | break; | 557 | break; |
533 | case CXGB4_STATE_DOWN: | 558 | case CXGB4_STATE_DOWN: |
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index c13041a0aeba..397cb36cf103 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c | |||
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, | |||
42 | { | 42 | { |
43 | struct ib_event event; | 43 | struct ib_event event; |
44 | struct c4iw_qp_attributes attrs; | 44 | struct c4iw_qp_attributes attrs; |
45 | unsigned long flag; | ||
45 | 46 | ||
46 | if ((qhp->attr.state == C4IW_QP_STATE_ERROR) || | 47 | if ((qhp->attr.state == C4IW_QP_STATE_ERROR) || |
47 | (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { | 48 | (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { |
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, | |||
72 | if (qhp->ibqp.event_handler) | 73 | if (qhp->ibqp.event_handler) |
73 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); | 74 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); |
74 | 75 | ||
76 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
75 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 77 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
78 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | ||
76 | } | 79 | } |
77 | 80 | ||
78 | void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) | 81 | void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) |
@@ -183,11 +186,14 @@ out: | |||
183 | int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) | 186 | int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) |
184 | { | 187 | { |
185 | struct c4iw_cq *chp; | 188 | struct c4iw_cq *chp; |
189 | unsigned long flag; | ||
186 | 190 | ||
187 | chp = get_chp(dev, qid); | 191 | chp = get_chp(dev, qid); |
188 | if (chp) | 192 | if (chp) { |
193 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | ||
189 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 194 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
190 | else | 195 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); |
196 | } else | ||
191 | PDBG("%s unknown cqid 0x%x\n", __func__, qid); | 197 | PDBG("%s unknown cqid 0x%x\n", __func__, qid); |
192 | return 0; | 198 | return 0; |
193 | } | 199 | } |
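The lock-around-upcall pattern introduced here recurs in qp.c below. Factored out, it would look like the following sketch (hypothetical helper; comp_handler_lock is the field this series adds to struct c4iw_cq):

static void c4iw_comp_upcall(struct c4iw_cq *chp)
{
        unsigned long flag;

        /* Serialize completion upcalls so a consumer's handler cannot
         * race with CQ teardown or with another dispatch path. */
        spin_lock_irqsave(&chp->comp_handler_lock, flag);
        (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
        spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}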
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 4f045375c8e2..1357c5bf209b 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -309,6 +309,7 @@ struct c4iw_cq { | |||
309 | struct c4iw_dev *rhp; | 309 | struct c4iw_dev *rhp; |
310 | struct t4_cq cq; | 310 | struct t4_cq cq; |
311 | spinlock_t lock; | 311 | spinlock_t lock; |
312 | spinlock_t comp_handler_lock; | ||
312 | atomic_t refcnt; | 313 | atomic_t refcnt; |
313 | wait_queue_head_t wait; | 314 | wait_queue_head_t wait; |
314 | }; | 315 | }; |
@@ -323,6 +324,7 @@ struct c4iw_mpa_attributes { | |||
323 | u8 recv_marker_enabled; | 324 | u8 recv_marker_enabled; |
324 | u8 xmit_marker_enabled; | 325 | u8 xmit_marker_enabled; |
325 | u8 crc_enabled; | 326 | u8 crc_enabled; |
327 | u8 enhanced_rdma_conn; | ||
326 | u8 version; | 328 | u8 version; |
327 | u8 p2p_type; | 329 | u8 p2p_type; |
328 | }; | 330 | }; |
@@ -349,6 +351,8 @@ struct c4iw_qp_attributes { | |||
349 | u8 is_terminate_local; | 351 | u8 is_terminate_local; |
350 | struct c4iw_mpa_attributes mpa_attr; | 352 | struct c4iw_mpa_attributes mpa_attr; |
351 | struct c4iw_ep *llp_stream_handle; | 353 | struct c4iw_ep *llp_stream_handle; |
354 | u8 layer_etype; | ||
355 | u8 ecode; | ||
352 | }; | 356 | }; |
353 | 357 | ||
354 | struct c4iw_qp { | 358 | struct c4iw_qp { |
@@ -501,11 +505,18 @@ enum c4iw_mmid_state { | |||
501 | #define MPA_KEY_REP "MPA ID Rep Frame" | 505 | #define MPA_KEY_REP "MPA ID Rep Frame" |
502 | 506 | ||
503 | #define MPA_MAX_PRIVATE_DATA 256 | 507 | #define MPA_MAX_PRIVATE_DATA 256 |
508 | #define MPA_ENHANCED_RDMA_CONN 0x10 | ||
504 | #define MPA_REJECT 0x20 | 509 | #define MPA_REJECT 0x20 |
505 | #define MPA_CRC 0x40 | 510 | #define MPA_CRC 0x40 |
506 | #define MPA_MARKERS 0x80 | 511 | #define MPA_MARKERS 0x80 |
507 | #define MPA_FLAGS_MASK 0xE0 | 512 | #define MPA_FLAGS_MASK 0xE0 |
508 | 513 | ||
514 | #define MPA_V2_PEER2PEER_MODEL 0x8000 | ||
515 | #define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000 | ||
516 | #define MPA_V2_RDMA_WRITE_RTR 0x8000 | ||
517 | #define MPA_V2_RDMA_READ_RTR 0x4000 | ||
518 | #define MPA_V2_IRD_ORD_MASK 0x3FFF | ||
519 | |||
509 | #define c4iw_put_ep(ep) { \ | 520 | #define c4iw_put_ep(ep) { \ |
510 | PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ | 521 | PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ |
511 | ep, atomic_read(&((ep)->kref.refcount))); \ | 522 | ep, atomic_read(&((ep)->kref.refcount))); \ |
@@ -528,6 +539,11 @@ struct mpa_message { | |||
528 | u8 private_data[0]; | 539 | u8 private_data[0]; |
529 | }; | 540 | }; |
530 | 541 | ||
542 | struct mpa_v2_conn_params { | ||
543 | __be16 ird; | ||
544 | __be16 ord; | ||
545 | }; | ||
546 | |||
531 | struct terminate_message { | 547 | struct terminate_message { |
532 | u8 layer_etype; | 548 | u8 layer_etype; |
533 | u8 ecode; | 549 | u8 ecode; |
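The ird and ord words pack a 14-bit count in the low bits plus control flags in the top two bits, per the masks above. A small decode sketch (hypothetical helper, using only the constants defined in this header):

static void mpa_v2_decode(const struct mpa_v2_conn_params *p, u16 *ird,
                          u16 *ord, u8 *p2p, u8 *write_rtr, u8 *read_rtr)
{
        u16 raw_ird = ntohs(p->ird);
        u16 raw_ord = ntohs(p->ord);

        *ird = raw_ird & MPA_V2_IRD_ORD_MASK;        /* low 14 bits */
        *ord = raw_ord & MPA_V2_IRD_ORD_MASK;
        *p2p = !!(raw_ird & MPA_V2_PEER2PEER_MODEL); /* bit 15 of ird */
        *write_rtr = !!(raw_ord & MPA_V2_RDMA_WRITE_RTR);
        *read_rtr = !!(raw_ord & MPA_V2_RDMA_READ_RTR);
}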
@@ -580,7 +596,10 @@ enum c4iw_ddp_ecodes { | |||
580 | 596 | ||
581 | enum c4iw_mpa_ecodes { | 597 | enum c4iw_mpa_ecodes { |
582 | MPA_CRC_ERR = 0x02, | 598 | MPA_CRC_ERR = 0x02, |
583 | MPA_MARKER_ERR = 0x03 | 599 | MPA_MARKER_ERR = 0x03, |
600 | MPA_LOCAL_CATA = 0x05, | ||
601 | MPA_INSUFF_IRD = 0x06, | ||
602 | MPA_NOMATCH_RTR = 0x07, | ||
584 | }; | 603 | }; |
585 | 604 | ||
586 | enum c4iw_ep_state { | 605 | enum c4iw_ep_state { |
@@ -651,6 +670,8 @@ struct c4iw_ep { | |||
651 | u16 txq_idx; | 670 | u16 txq_idx; |
652 | u16 ctrlq_idx; | 671 | u16 ctrlq_idx; |
653 | u8 tos; | 672 | u8 tos; |
673 | u8 retry_with_mpa_v1; | ||
674 | u8 tried_with_mpa_v1; | ||
654 | }; | 675 | }; |
655 | 676 | ||
656 | static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) | 677 | static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 60056e2b8d99..5f940aeaab1e 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -920,7 +920,11 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, | |||
920 | wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; | 920 | wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; |
921 | wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); | 921 | wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); |
922 | term = (struct terminate_message *)wqe->u.terminate.termmsg; | 922 | term = (struct terminate_message *)wqe->u.terminate.termmsg; |
923 | build_term_codes(err_cqe, &term->layer_etype, &term->ecode); | 923 | if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { |
924 | term->layer_etype = qhp->attr.layer_etype; | ||
925 | term->ecode = qhp->attr.ecode; | ||
926 | } else | ||
927 | build_term_codes(err_cqe, &term->layer_etype, &term->ecode); | ||
924 | c4iw_ofld_send(&qhp->rhp->rdev, skb); | 928 | c4iw_ofld_send(&qhp->rhp->rdev, skb); |
925 | } | 929 | } |
926 | 930 | ||
@@ -944,8 +948,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
944 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); | 948 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); |
945 | spin_unlock(&qhp->lock); | 949 | spin_unlock(&qhp->lock); |
946 | spin_unlock_irqrestore(&rchp->lock, flag); | 950 | spin_unlock_irqrestore(&rchp->lock, flag); |
947 | if (flushed) | 951 | if (flushed) { |
952 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | ||
948 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 953 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
954 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
955 | } | ||
949 | 956 | ||
950 | /* locking hierarchy: cq lock first, then qp lock. */ | 957 | /* locking hierarchy: cq lock first, then qp lock. */ |
951 | spin_lock_irqsave(&schp->lock, flag); | 958 | spin_lock_irqsave(&schp->lock, flag); |
@@ -955,13 +962,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
955 | flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); | 962 | flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); |
956 | spin_unlock(&qhp->lock); | 963 | spin_unlock(&qhp->lock); |
957 | spin_unlock_irqrestore(&schp->lock, flag); | 964 | spin_unlock_irqrestore(&schp->lock, flag); |
958 | if (flushed) | 965 | if (flushed) { |
966 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | ||
959 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 967 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); |
968 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | ||
969 | } | ||
960 | } | 970 | } |
961 | 971 | ||
962 | static void flush_qp(struct c4iw_qp *qhp) | 972 | static void flush_qp(struct c4iw_qp *qhp) |
963 | { | 973 | { |
964 | struct c4iw_cq *rchp, *schp; | 974 | struct c4iw_cq *rchp, *schp; |
975 | unsigned long flag; | ||
965 | 976 | ||
966 | rchp = get_chp(qhp->rhp, qhp->attr.rcq); | 977 | rchp = get_chp(qhp->rhp, qhp->attr.rcq); |
967 | schp = get_chp(qhp->rhp, qhp->attr.scq); | 978 | schp = get_chp(qhp->rhp, qhp->attr.scq); |
@@ -969,8 +980,16 @@ static void flush_qp(struct c4iw_qp *qhp) | |||
969 | if (qhp->ibqp.uobject) { | 980 | if (qhp->ibqp.uobject) { |
970 | t4_set_wq_in_error(&qhp->wq); | 981 | t4_set_wq_in_error(&qhp->wq); |
971 | t4_set_cq_in_error(&rchp->cq); | 982 | t4_set_cq_in_error(&rchp->cq); |
972 | if (schp != rchp) | 983 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
984 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | ||
985 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
986 | if (schp != rchp) { | ||
973 | t4_set_cq_in_error(&schp->cq); | 987 | t4_set_cq_in_error(&schp->cq); |
988 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | ||
989 | (*schp->ibcq.comp_handler)(&schp->ibcq, | ||
990 | schp->ibcq.cq_context); | ||
991 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | ||
992 | } | ||
974 | return; | 993 | return; |
975 | } | 994 | } |
976 | __flush_qp(qhp, rchp, schp); | 995 | __flush_qp(qhp, rchp, schp); |
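Note the ordering discipline here: CQ lock first, then QP lock, and the completion upcall is issued only after both have been dropped, under comp_handler_lock alone. This lets a consumer's completion handler re-enter the CQ (for instance to poll it, which takes chp->lock) without deadlocking against the flush path.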
@@ -1015,6 +1034,7 @@ out: | |||
1015 | 1034 | ||
1016 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) | 1035 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) |
1017 | { | 1036 | { |
1037 | PDBG("%s p2p_type = %d\n", __func__, p2p_type); | ||
1018 | memset(&init->u, 0, sizeof init->u); | 1038 | memset(&init->u, 0, sizeof init->u); |
1019 | switch (p2p_type) { | 1039 | switch (p2p_type) { |
1020 | case FW_RI_INIT_P2PTYPE_RDMA_WRITE: | 1040 | case FW_RI_INIT_P2PTYPE_RDMA_WRITE: |
@@ -1209,12 +1229,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1209 | disconnect = 1; | 1229 | disconnect = 1; |
1210 | c4iw_get_ep(&qhp->ep->com); | 1230 | c4iw_get_ep(&qhp->ep->com); |
1211 | } | 1231 | } |
1232 | if (qhp->ibqp.uobject) | ||
1233 | t4_set_wq_in_error(&qhp->wq); | ||
1212 | ret = rdma_fini(rhp, qhp, ep); | 1234 | ret = rdma_fini(rhp, qhp, ep); |
1213 | if (ret) | 1235 | if (ret) |
1214 | goto err; | 1236 | goto err; |
1215 | break; | 1237 | break; |
1216 | case C4IW_QP_STATE_TERMINATE: | 1238 | case C4IW_QP_STATE_TERMINATE: |
1217 | set_state(qhp, C4IW_QP_STATE_TERMINATE); | 1239 | set_state(qhp, C4IW_QP_STATE_TERMINATE); |
1240 | qhp->attr.layer_etype = attrs->layer_etype; | ||
1241 | qhp->attr.ecode = attrs->ecode; | ||
1218 | if (qhp->ibqp.uobject) | 1242 | if (qhp->ibqp.uobject) |
1219 | t4_set_wq_in_error(&qhp->wq); | 1243 | t4_set_wq_in_error(&qhp->wq); |
1220 | ep = qhp->ep; | 1244 | ep = qhp->ep; |
@@ -1225,6 +1249,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1225 | break; | 1249 | break; |
1226 | case C4IW_QP_STATE_ERROR: | 1250 | case C4IW_QP_STATE_ERROR: |
1227 | set_state(qhp, C4IW_QP_STATE_ERROR); | 1251 | set_state(qhp, C4IW_QP_STATE_ERROR); |
1252 | if (qhp->ibqp.uobject) | ||
1253 | t4_set_wq_in_error(&qhp->wq); | ||
1228 | if (!internal) { | 1254 | if (!internal) { |
1229 | abort = 1; | 1255 | abort = 1; |
1230 | disconnect = 1; | 1256 | disconnect = 1; |
@@ -1337,7 +1363,10 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) | |||
1337 | rhp = qhp->rhp; | 1363 | rhp = qhp->rhp; |
1338 | 1364 | ||
1339 | attrs.next_state = C4IW_QP_STATE_ERROR; | 1365 | attrs.next_state = C4IW_QP_STATE_ERROR; |
1340 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1366 | if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) |
1367 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1368 | else | ||
1369 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | ||
1341 | wait_event(qhp->wait, !qhp->ep); | 1370 | wait_event(qhp->wait, !qhp->ep); |
1342 | 1371 | ||
1343 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); | 1372 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); |
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index d9b1bb40f480..818d721fc448 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c | |||
@@ -125,7 +125,7 @@ int ehca_create_eq(struct ehca_shca *shca, | |||
125 | tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); | 125 | tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); |
126 | 126 | ||
127 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, | 127 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, |
128 | IRQF_DISABLED, "ehca_eq", | 128 | 0, "ehca_eq", |
129 | (void *)shca); | 129 | (void *)shca); |
130 | if (ret < 0) | 130 | if (ret < 0) |
131 | ehca_err(ib_dev, "Can't map interrupt handler."); | 131 | ehca_err(ib_dev, "Can't map interrupt handler."); |
@@ -133,7 +133,7 @@ int ehca_create_eq(struct ehca_shca *shca, | |||
133 | tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); | 133 | tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); |
134 | 134 | ||
135 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, | 135 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, |
136 | IRQF_DISABLED, "ehca_neq", | 136 | 0, "ehca_neq", |
137 | (void *)shca); | 137 | (void *)shca); |
138 | if (ret < 0) | 138 | if (ret < 0) |
139 | ehca_err(ib_dev, "Can't map interrupt handler."); | 139 | ehca_err(ib_dev, "Can't map interrupt handler."); |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 32fb34201aba..964f85520798 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -977,6 +977,9 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd, | |||
977 | struct hcp_modify_qp_control_block *mqpcb; | 977 | struct hcp_modify_qp_control_block *mqpcb; |
978 | u64 hret, update_mask; | 978 | u64 hret, update_mask; |
979 | 979 | ||
980 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) | ||
981 | return ERR_PTR(-ENOSYS); | ||
982 | |||
980 | /* For common attributes, internal_create_qp() takes its info | 983 | /* For common attributes, internal_create_qp() takes its info |
981 | * out of qp_init_attr, so copy all common attrs there. | 984 | * out of qp_init_attr, so copy all common attrs there. |
982 | */ | 985 | */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index 386e2c717c53..26271984b717 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c | |||
@@ -107,6 +107,11 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | |||
107 | u32 sz; | 107 | u32 sz; |
108 | struct ib_srq *ret; | 108 | struct ib_srq *ret; |
109 | 109 | ||
110 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { | ||
111 | ret = ERR_PTR(-ENOSYS); | ||
112 | goto done; | ||
113 | } | ||
114 | |||
110 | if (srq_init_attr->attr.max_wr == 0) { | 115 | if (srq_init_attr->attr.max_wr == 0) { |
111 | ret = ERR_PTR(-EINVAL); | 116 | ret = ERR_PTR(-EINVAL); |
112 | goto done; | 117 | goto done; |
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index cfed5399f074..dc66c4506916 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -79,7 +79,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, | |||
79 | goto bail_release; | 79 | goto bail_release; |
80 | } | 80 | } |
81 | 81 | ||
82 | current->mm->locked_vm += num_pages; | 82 | current->mm->pinned_vm += num_pages; |
83 | 83 | ||
84 | ret = 0; | 84 | ret = 0; |
85 | goto bail; | 85 | goto bail; |
@@ -178,7 +178,7 @@ void ipath_release_user_pages(struct page **p, size_t num_pages) | |||
178 | 178 | ||
179 | __ipath_release_user_pages(p, num_pages, 1); | 179 | __ipath_release_user_pages(p, num_pages, 1); |
180 | 180 | ||
181 | current->mm->locked_vm -= num_pages; | 181 | current->mm->pinned_vm -= num_pages; |
182 | 182 | ||
183 | up_write(¤t->mm->mmap_sem); | 183 | up_write(¤t->mm->mmap_sem); |
184 | } | 184 | } |
@@ -195,7 +195,7 @@ static void user_pages_account(struct work_struct *_work) | |||
195 | container_of(_work, struct ipath_user_pages_work, work); | 195 | container_of(_work, struct ipath_user_pages_work, work); |
196 | 196 | ||
197 | down_write(&work->mm->mmap_sem); | 197 | down_write(&work->mm->mmap_sem); |
198 | work->mm->locked_vm -= work->num_pages; | 198 | work->mm->pinned_vm -= work->num_pages; |
199 | up_write(&work->mm->mmap_sem); | 199 | up_write(&work->mm->mmap_sem); |
200 | mmput(work->mm); | 200 | mmput(work->mm); |
201 | kfree(work); | 201 | kfree(work); |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index fa643f4f4e28..77f3dbc0aaa1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -128,6 +128,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
128 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && | 128 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && |
129 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) | 129 | (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) |
130 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | 130 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
131 | if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) | ||
132 | props->device_cap_flags |= IB_DEVICE_XRC; | ||
131 | 133 | ||
132 | props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & | 134 | props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & |
133 | 0xffffff; | 135 | 0xffffff; |
@@ -181,8 +183,12 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) | |||
181 | 183 | ||
182 | static int ib_link_query_port(struct ib_device *ibdev, u8 port, | 184 | static int ib_link_query_port(struct ib_device *ibdev, u8 port, |
183 | struct ib_port_attr *props, | 185 | struct ib_port_attr *props, |
186 | struct ib_smp *in_mad, | ||
184 | struct ib_smp *out_mad) | 187 | struct ib_smp *out_mad) |
185 | { | 188 | { |
189 | int ext_active_speed; | ||
190 | int err; | ||
191 | |||
186 | props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); | 192 | props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); |
187 | props->lmc = out_mad->data[34] & 0x7; | 193 | props->lmc = out_mad->data[34] & 0x7; |
188 | props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); | 194 | props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); |
@@ -203,6 +209,39 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, | |||
203 | props->max_vl_num = out_mad->data[37] >> 4; | 209 | props->max_vl_num = out_mad->data[37] >> 4; |
204 | props->init_type_reply = out_mad->data[41] >> 4; | 210 | props->init_type_reply = out_mad->data[41] >> 4; |
205 | 211 | ||
212 | /* Check if extended speeds (EDR/FDR/...) are supported */ | ||
213 | if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { | ||
214 | ext_active_speed = out_mad->data[62] >> 4; | ||
215 | |||
216 | switch (ext_active_speed) { | ||
217 | case 1: | ||
218 | props->active_speed = 16; /* FDR */ | ||
219 | break; | ||
220 | case 2: | ||
221 | props->active_speed = 32; /* EDR */ | ||
222 | break; | ||
223 | } | ||
224 | } | ||
225 | |||
226 | /* If reported active speed is QDR, check if is FDR-10 */ | ||
227 | if (props->active_speed == 4) { | ||
228 | if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] & | ||
229 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { | ||
230 | init_query_mad(in_mad); | ||
231 | in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; | ||
232 | in_mad->attr_mod = cpu_to_be32(port); | ||
233 | |||
234 | err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, | ||
235 | NULL, NULL, in_mad, out_mad); | ||
236 | if (err) | ||
237 | return err; | ||
238 | |||
239 | /* Checking LinkSpeedActive for FDR-10 */ | ||
240 | if (out_mad->data[15] & 0x1) | ||
241 | props->active_speed = 8; | ||
242 | } | ||
243 | } | ||
244 | |||
206 | return 0; | 245 | return 0; |
207 | } | 246 | } |
208 | 247 | ||
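[Editor's note: the active_speed values assigned above follow the verbs convention of encoding the per-lane signalling rate: 1 = SDR, 2 = DDR, 4 = QDR, 8 = FDR-10, 16 = FDR, 32 = EDR. A small decoding helper, as a sketch:

/* Decode ib_port_attr.active_speed as used in ib_link_query_port()
 * above (sketch; rates are per lane). */
static const char *ib_speed_str(int active_speed)
{
	switch (active_speed) {
	case 1:	 return "SDR (2.5 Gb/s)";
	case 2:	 return "DDR (5 Gb/s)";
	case 4:	 return "QDR (10 Gb/s)";
	case 8:	 return "FDR-10 (10.3125 Gb/s)";
	case 16: return "FDR (14.0625 Gb/s)";
	case 32: return "EDR (25.78125 Gb/s)";
	default: return "unknown";
	}
}
]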
@@ -227,7 +266,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
227 | props->pkey_tbl_len = 1; | 266 | props->pkey_tbl_len = 1; |
228 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); | 267 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); |
229 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); | 268 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); |
230 | props->max_mtu = IB_MTU_2048; | 269 | props->max_mtu = IB_MTU_4096; |
231 | props->subnet_timeout = 0; | 270 | props->subnet_timeout = 0; |
232 | props->max_vl_num = out_mad->data[37] >> 4; | 271 | props->max_vl_num = out_mad->data[37] >> 4; |
233 | props->init_type_reply = 0; | 272 | props->init_type_reply = 0; |
@@ -274,7 +313,7 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | |||
274 | goto out; | 313 | goto out; |
275 | 314 | ||
276 | err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? | 315 | err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? |
277 | ib_link_query_port(ibdev, port, props, out_mad) : | 316 | ib_link_query_port(ibdev, port, props, in_mad, out_mad) : |
278 | eth_link_query_port(ibdev, port, props, out_mad); | 317 | eth_link_query_port(ibdev, port, props, out_mad); |
279 | 318 | ||
280 | out: | 319 | out: |
@@ -566,6 +605,57 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd) | |||
566 | return 0; | 605 | return 0; |
567 | } | 606 | } |
568 | 607 | ||
608 | static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, | ||
609 | struct ib_ucontext *context, | ||
610 | struct ib_udata *udata) | ||
611 | { | ||
612 | struct mlx4_ib_xrcd *xrcd; | ||
613 | int err; | ||
614 | |||
615 | if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) | ||
616 | return ERR_PTR(-ENOSYS); | ||
617 | |||
618 | xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL); | ||
619 | if (!xrcd) | ||
620 | return ERR_PTR(-ENOMEM); | ||
621 | |||
622 | err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn); | ||
623 | if (err) | ||
624 | goto err1; | ||
625 | |||
626 | xrcd->pd = ib_alloc_pd(ibdev); | ||
627 | if (IS_ERR(xrcd->pd)) { | ||
628 | err = PTR_ERR(xrcd->pd); | ||
629 | goto err2; | ||
630 | } | ||
631 | |||
632 | xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0); | ||
633 | if (IS_ERR(xrcd->cq)) { | ||
634 | err = PTR_ERR(xrcd->cq); | ||
635 | goto err3; | ||
636 | } | ||
637 | |||
638 | return &xrcd->ibxrcd; | ||
639 | |||
640 | err3: | ||
641 | ib_dealloc_pd(xrcd->pd); | ||
642 | err2: | ||
643 | mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn); | ||
644 | err1: | ||
645 | kfree(xrcd); | ||
646 | return ERR_PTR(err); | ||
647 | } | ||
648 | |||
649 | static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd) | ||
650 | { | ||
651 | ib_destroy_cq(to_mxrcd(xrcd)->cq); | ||
652 | ib_dealloc_pd(to_mxrcd(xrcd)->pd); | ||
653 | mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn); | ||
654 | kfree(xrcd); | ||
655 | |||
656 | return 0; | ||
657 | } | ||
658 | |||
569 | static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) | 659 | static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) |
570 | { | 660 | { |
571 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); | 661 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); |
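[Editor's note: mlx4_ib_alloc_xrcd() backs every XRC domain with a kernel-owned PD and a one-entry CQ; XRC target QPs created against the XRCD later borrow both (see get_pd()/get_cqs() in qp.c below), since such QPs carry no PD or CQs of their own. The err1/err2/err3 ladder unwinds in exact reverse order of allocation. A hedged sketch of opening and closing a domain through the device methods registered further down — in practice consumers go through the uverbs layer or ib_core wrappers rather than calling the methods directly:

struct ib_xrcd *xrcd = ibdev->alloc_xrcd(ibdev, NULL, NULL);

if (IS_ERR(xrcd))
	return PTR_ERR(xrcd);
/* ... create XRC SRQs and XRC TGT QPs against xrcd ... */
ibdev->dealloc_xrcd(xrcd);
]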
@@ -1044,7 +1134,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1044 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | | 1134 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | |
1045 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | | 1135 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | |
1046 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | 1136 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | |
1047 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); | 1137 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | |
1138 | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | | ||
1139 | (1ull << IB_USER_VERBS_CMD_OPEN_QP); | ||
1048 | 1140 | ||
1049 | ibdev->ib_dev.query_device = mlx4_ib_query_device; | 1141 | ibdev->ib_dev.query_device = mlx4_ib_query_device; |
1050 | ibdev->ib_dev.query_port = mlx4_ib_query_port; | 1142 | ibdev->ib_dev.query_port = mlx4_ib_query_port; |
@@ -1093,6 +1185,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1093 | ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; | 1185 | ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; |
1094 | ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; | 1186 | ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; |
1095 | 1187 | ||
1188 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) { | ||
1189 | ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd; | ||
1190 | ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd; | ||
1191 | ibdev->ib_dev.uverbs_cmd_mask |= | ||
1192 | (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | | ||
1193 | (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); | ||
1194 | } | ||
1195 | |||
1096 | spin_lock_init(&iboe->lock); | 1196 | spin_lock_init(&iboe->lock); |
1097 | 1197 | ||
1098 | if (init_node_data(ibdev)) | 1198 | if (init_node_data(ibdev)) |
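[Editor's note: registering alloc_xrcd/dealloc_xrcd and setting the OPEN_XRCD/CLOSE_XRCD bits only when the HCA supports XRC keeps uverbs_cmd_mask an accurate capability advertisement, since userspace probes the mask before issuing a command. A sketch of the corresponding test:

/* Sketch: does this device advertise the XRCD commands? */
static bool xrcd_cmds_supported(struct ib_device *ibdev)
{
	u64 need = (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
		   (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);

	return (ibdev->uverbs_cmd_mask & need) == need;
}
]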
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e4bf2cff8662..ed80345c99ae 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -56,6 +56,13 @@ struct mlx4_ib_pd { | |||
56 | u32 pdn; | 56 | u32 pdn; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct mlx4_ib_xrcd { | ||
60 | struct ib_xrcd ibxrcd; | ||
61 | u32 xrcdn; | ||
62 | struct ib_pd *pd; | ||
63 | struct ib_cq *cq; | ||
64 | }; | ||
65 | |||
59 | struct mlx4_ib_cq_buf { | 66 | struct mlx4_ib_cq_buf { |
60 | struct mlx4_buf buf; | 67 | struct mlx4_buf buf; |
61 | struct mlx4_mtt mtt; | 68 | struct mlx4_mtt mtt; |
@@ -138,6 +145,7 @@ struct mlx4_ib_qp { | |||
138 | struct mlx4_mtt mtt; | 145 | struct mlx4_mtt mtt; |
139 | int buf_size; | 146 | int buf_size; |
140 | struct mutex mutex; | 147 | struct mutex mutex; |
148 | u16 xrcdn; | ||
141 | u32 flags; | 149 | u32 flags; |
142 | u8 port; | 150 | u8 port; |
143 | u8 alt_port; | 151 | u8 alt_port; |
@@ -211,6 +219,11 @@ static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd) | |||
211 | return container_of(ibpd, struct mlx4_ib_pd, ibpd); | 219 | return container_of(ibpd, struct mlx4_ib_pd, ibpd); |
212 | } | 220 | } |
213 | 221 | ||
222 | static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) | ||
223 | { | ||
224 | return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd); | ||
225 | } | ||
226 | |||
214 | static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) | 227 | static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) |
215 | { | 228 | { |
216 | return container_of(ibcq, struct mlx4_ib_cq, ibcq); | 229 | return container_of(ibcq, struct mlx4_ib_cq, ibcq); |
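[Editor's note: to_mxrcd() extends the driver's usual embedding idiom — the generic ib_xrcd lives inside mlx4_ib_xrcd, the IB core only ever sees a pointer to the embedded member, and container_of() recovers the enclosing private struct at zero runtime cost. In miniature (sketch):

struct mlx4_ib_xrcd *mxrcd = kzalloc(sizeof(*mxrcd), GFP_KERNEL);
struct ib_xrcd *handle;

if (!mxrcd)
	return -ENOMEM;
handle = &mxrcd->ibxrcd;		/* pointer handed to the IB core */
WARN_ON(to_mxrcd(handle) != mxrcd);	/* container_of() recovers it */
]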
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 3a91d9d8dc51..a16f0c8e6f3f 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -302,15 +302,14 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags) | |||
302 | } | 302 | } |
303 | 303 | ||
304 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | 304 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, |
305 | int is_user, int has_srq, struct mlx4_ib_qp *qp) | 305 | int is_user, int has_rq, struct mlx4_ib_qp *qp) |
306 | { | 306 | { |
307 | /* Sanity check RQ size before proceeding */ | 307 | /* Sanity check RQ size before proceeding */ |
308 | if (cap->max_recv_wr > dev->dev->caps.max_wqes || | 308 | if (cap->max_recv_wr > dev->dev->caps.max_wqes || |
309 | cap->max_recv_sge > dev->dev->caps.max_rq_sg) | 309 | cap->max_recv_sge > dev->dev->caps.max_rq_sg) |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | 311 | ||
312 | if (has_srq) { | 312 | if (!has_rq) { |
313 | /* QPs attached to an SRQ should have no RQ */ | ||
314 | if (cap->max_recv_wr) | 313 | if (cap->max_recv_wr) |
315 | return -EINVAL; | 314 | return -EINVAL; |
316 | 315 | ||
@@ -463,6 +462,14 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev, | |||
463 | return 0; | 462 | return 0; |
464 | } | 463 | } |
465 | 464 | ||
465 | static int qp_has_rq(struct ib_qp_init_attr *attr) | ||
466 | { | ||
467 | if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) | ||
468 | return 0; | ||
469 | |||
470 | return !attr->srq; | ||
471 | } | ||
472 | |||
466 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | 473 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, |
467 | struct ib_qp_init_attr *init_attr, | 474 | struct ib_qp_init_attr *init_attr, |
468 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) | 475 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) |
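[Editor's note: inverting has_srq into has_rq is more than a rename. XRC initiator QPs are send-only and XRC target QPs receive exclusively through an XRC SRQ, so neither owns a receive queue regardless of whether init_attr->srq is set. qp_has_rq() centralizes the test; its behavior in table form:

/*
 * qp_has_rq() truth table (per the helper above):
 *
 *   qp_type          attr->srq    has RQ?
 *   IB_QPT_XRC_INI   any          no
 *   IB_QPT_XRC_TGT   any          no
 *   other            set          no   (receive WQEs live in the SRQ)
 *   other            NULL         yes
 */
]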
@@ -479,7 +486,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
479 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | 486 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
480 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | 487 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); |
481 | 488 | ||
482 | err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp); | 489 | err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); |
483 | if (err) | 490 | if (err) |
484 | goto err; | 491 | goto err; |
485 | 492 | ||
@@ -513,7 +520,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
513 | if (err) | 520 | if (err) |
514 | goto err_mtt; | 521 | goto err_mtt; |
515 | 522 | ||
516 | if (!init_attr->srq) { | 523 | if (qp_has_rq(init_attr)) { |
517 | err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), | 524 | err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), |
518 | ucmd.db_addr, &qp->db); | 525 | ucmd.db_addr, &qp->db); |
519 | if (err) | 526 | if (err) |
@@ -532,7 +539,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
532 | if (err) | 539 | if (err) |
533 | goto err; | 540 | goto err; |
534 | 541 | ||
535 | if (!init_attr->srq) { | 542 | if (qp_has_rq(init_attr)) { |
536 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); | 543 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); |
537 | if (err) | 544 | if (err) |
538 | goto err; | 545 | goto err; |
@@ -575,6 +582,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
575 | if (err) | 582 | if (err) |
576 | goto err_qpn; | 583 | goto err_qpn; |
577 | 584 | ||
585 | if (init_attr->qp_type == IB_QPT_XRC_TGT) | ||
586 | qp->mqp.qpn |= (1 << 23); | ||
587 | |||
578 | /* | 588 | /* |
579 | * Hardware wants QPN written in big-endian order (after | 589 | * Hardware wants QPN written in big-endian order (after |
580 | * shifting) for send doorbell. Precompute this value to save | 590 | * shifting) for send doorbell. Precompute this value to save |
@@ -592,9 +602,8 @@ err_qpn: | |||
592 | 602 | ||
593 | err_wrid: | 603 | err_wrid: |
594 | if (pd->uobject) { | 604 | if (pd->uobject) { |
595 | if (!init_attr->srq) | 605 | if (qp_has_rq(init_attr)) |
596 | mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), | 606 | mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); |
597 | &qp->db); | ||
598 | } else { | 607 | } else { |
599 | kfree(qp->sq.wrid); | 608 | kfree(qp->sq.wrid); |
600 | kfree(qp->rq.wrid); | 609 | kfree(qp->rq.wrid); |
@@ -610,7 +619,7 @@ err_buf: | |||
610 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); | 619 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
611 | 620 | ||
612 | err_db: | 621 | err_db: |
613 | if (!pd->uobject && !init_attr->srq) | 622 | if (!pd->uobject && qp_has_rq(init_attr)) |
614 | mlx4_db_free(dev->dev, &qp->db); | 623 | mlx4_db_free(dev->dev, &qp->db); |
615 | 624 | ||
616 | err: | 625 | err: |
@@ -671,6 +680,33 @@ static void del_gid_entries(struct mlx4_ib_qp *qp) | |||
671 | } | 680 | } |
672 | } | 681 | } |
673 | 682 | ||
683 | static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) | ||
684 | { | ||
685 | if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) | ||
686 | return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); | ||
687 | else | ||
688 | return to_mpd(qp->ibqp.pd); | ||
689 | } | ||
690 | |||
691 | static void get_cqs(struct mlx4_ib_qp *qp, | ||
692 | struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) | ||
693 | { | ||
694 | switch (qp->ibqp.qp_type) { | ||
695 | case IB_QPT_XRC_TGT: | ||
696 | *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); | ||
697 | *recv_cq = *send_cq; | ||
698 | break; | ||
699 | case IB_QPT_XRC_INI: | ||
700 | *send_cq = to_mcq(qp->ibqp.send_cq); | ||
701 | *recv_cq = *send_cq; | ||
702 | break; | ||
703 | default: | ||
704 | *send_cq = to_mcq(qp->ibqp.send_cq); | ||
705 | *recv_cq = to_mcq(qp->ibqp.recv_cq); | ||
706 | break; | ||
707 | } | ||
708 | } | ||
709 | |||
674 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | 710 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, |
675 | int is_user) | 711 | int is_user) |
676 | { | 712 | { |
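[Editor's note: get_pd() and get_cqs() hide the XRC ownership rules from the rest of the driver — an XRC target QP borrows the PD and CQ embedded in its XRCD, and an XRC initiator QP has only a send CQ, which doubles as its receive CQ. Callers must therefore tolerate send_cq == recv_cq; the driver's CQ lock helpers already do. A usage sketch:

/* Sketch: pairing get_cqs() with the CQ locking helpers, which take a
 * single lock when the two CQs alias (the XRC cases above). */
struct mlx4_ib_cq *send_cq, *recv_cq;

get_cqs(qp, &send_cq, &recv_cq);
mlx4_ib_lock_cqs(send_cq, recv_cq);
/* ... remove the QP from both CQs ... */
mlx4_ib_unlock_cqs(send_cq, recv_cq);
]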
@@ -682,8 +718,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
682 | printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", | 718 | printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", |
683 | qp->mqp.qpn); | 719 | qp->mqp.qpn); |
684 | 720 | ||
685 | send_cq = to_mcq(qp->ibqp.send_cq); | 721 | get_cqs(qp, &send_cq, &recv_cq); |
686 | recv_cq = to_mcq(qp->ibqp.recv_cq); | ||
687 | 722 | ||
688 | mlx4_ib_lock_cqs(send_cq, recv_cq); | 723 | mlx4_ib_lock_cqs(send_cq, recv_cq); |
689 | 724 | ||
@@ -706,7 +741,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
706 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); | 741 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); |
707 | 742 | ||
708 | if (is_user) { | 743 | if (is_user) { |
709 | if (!qp->ibqp.srq) | 744 | if (qp->rq.wqe_cnt) |
710 | mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), | 745 | mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), |
711 | &qp->db); | 746 | &qp->db); |
712 | ib_umem_release(qp->umem); | 747 | ib_umem_release(qp->umem); |
@@ -714,7 +749,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
714 | kfree(qp->sq.wrid); | 749 | kfree(qp->sq.wrid); |
715 | kfree(qp->rq.wrid); | 750 | kfree(qp->rq.wrid); |
716 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); | 751 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
717 | if (!qp->ibqp.srq) | 752 | if (qp->rq.wqe_cnt) |
718 | mlx4_db_free(dev->dev, &qp->db); | 753 | mlx4_db_free(dev->dev, &qp->db); |
719 | } | 754 | } |
720 | 755 | ||
@@ -725,10 +760,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
725 | struct ib_qp_init_attr *init_attr, | 760 | struct ib_qp_init_attr *init_attr, |
726 | struct ib_udata *udata) | 761 | struct ib_udata *udata) |
727 | { | 762 | { |
728 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | ||
729 | struct mlx4_ib_sqp *sqp; | 763 | struct mlx4_ib_sqp *sqp; |
730 | struct mlx4_ib_qp *qp; | 764 | struct mlx4_ib_qp *qp; |
731 | int err; | 765 | int err; |
766 | u16 xrcdn = 0; | ||
732 | 767 | ||
733 | /* | 768 | /* |
734 | * We only support LSO and multicast loopback blocking, and | 769 | * We only support LSO and multicast loopback blocking, and |
@@ -739,10 +774,20 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
739 | return ERR_PTR(-EINVAL); | 774 | return ERR_PTR(-EINVAL); |
740 | 775 | ||
741 | if (init_attr->create_flags && | 776 | if (init_attr->create_flags && |
742 | (pd->uobject || init_attr->qp_type != IB_QPT_UD)) | 777 | (udata || init_attr->qp_type != IB_QPT_UD)) |
743 | return ERR_PTR(-EINVAL); | 778 | return ERR_PTR(-EINVAL); |
744 | 779 | ||
745 | switch (init_attr->qp_type) { | 780 | switch (init_attr->qp_type) { |
781 | case IB_QPT_XRC_TGT: | ||
782 | pd = to_mxrcd(init_attr->xrcd)->pd; | ||
783 | xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; | ||
784 | init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; | ||
785 | /* fall through */ | ||
786 | case IB_QPT_XRC_INI: | ||
787 | if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) | ||
788 | return ERR_PTR(-ENOSYS); | ||
789 | init_attr->recv_cq = init_attr->send_cq; | ||
790 | /* fall through */ | ||
746 | case IB_QPT_RC: | 791 | case IB_QPT_RC: |
747 | case IB_QPT_UC: | 792 | case IB_QPT_UC: |
748 | case IB_QPT_UD: | 793 | case IB_QPT_UD: |
@@ -751,13 +796,14 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
751 | if (!qp) | 796 | if (!qp) |
752 | return ERR_PTR(-ENOMEM); | 797 | return ERR_PTR(-ENOMEM); |
753 | 798 | ||
754 | err = create_qp_common(dev, pd, init_attr, udata, 0, qp); | 799 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, qp); |
755 | if (err) { | 800 | if (err) { |
756 | kfree(qp); | 801 | kfree(qp); |
757 | return ERR_PTR(err); | 802 | return ERR_PTR(err); |
758 | } | 803 | } |
759 | 804 | ||
760 | qp->ibqp.qp_num = qp->mqp.qpn; | 805 | qp->ibqp.qp_num = qp->mqp.qpn; |
806 | qp->xrcdn = xrcdn; | ||
761 | 807 | ||
762 | break; | 808 | break; |
763 | } | 809 | } |
@@ -765,7 +811,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
765 | case IB_QPT_GSI: | 811 | case IB_QPT_GSI: |
766 | { | 812 | { |
767 | /* Userspace is not allowed to create special QPs: */ | 813 | /* Userspace is not allowed to create special QPs: */ |
768 | if (pd->uobject) | 814 | if (udata) |
769 | return ERR_PTR(-EINVAL); | 815 | return ERR_PTR(-EINVAL); |
770 | 816 | ||
771 | sqp = kzalloc(sizeof *sqp, GFP_KERNEL); | 817 | sqp = kzalloc(sizeof *sqp, GFP_KERNEL); |
@@ -774,8 +820,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
774 | 820 | ||
775 | qp = &sqp->qp; | 821 | qp = &sqp->qp; |
776 | 822 | ||
777 | err = create_qp_common(dev, pd, init_attr, udata, | 823 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, |
778 | dev->dev->caps.sqp_start + | 824 | to_mdev(pd->device)->dev->caps.sqp_start + |
779 | (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) + | 825 | (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) + |
780 | init_attr->port_num - 1, | 826 | init_attr->port_num - 1, |
781 | qp); | 827 | qp); |
@@ -801,11 +847,13 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) | |||
801 | { | 847 | { |
802 | struct mlx4_ib_dev *dev = to_mdev(qp->device); | 848 | struct mlx4_ib_dev *dev = to_mdev(qp->device); |
803 | struct mlx4_ib_qp *mqp = to_mqp(qp); | 849 | struct mlx4_ib_qp *mqp = to_mqp(qp); |
850 | struct mlx4_ib_pd *pd; | ||
804 | 851 | ||
805 | if (is_qp0(dev, mqp)) | 852 | if (is_qp0(dev, mqp)) |
806 | mlx4_CLOSE_PORT(dev->dev, mqp->port); | 853 | mlx4_CLOSE_PORT(dev->dev, mqp->port); |
807 | 854 | ||
808 | destroy_qp_common(dev, mqp, !!qp->pd->uobject); | 855 | pd = get_pd(mqp); |
856 | destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); | ||
809 | 857 | ||
810 | if (is_sqp(dev, mqp)) | 858 | if (is_sqp(dev, mqp)) |
811 | kfree(to_msqp(mqp)); | 859 | kfree(to_msqp(mqp)); |
@@ -821,6 +869,8 @@ static int to_mlx4_st(enum ib_qp_type type) | |||
821 | case IB_QPT_RC: return MLX4_QP_ST_RC; | 869 | case IB_QPT_RC: return MLX4_QP_ST_RC; |
822 | case IB_QPT_UC: return MLX4_QP_ST_UC; | 870 | case IB_QPT_UC: return MLX4_QP_ST_UC; |
823 | case IB_QPT_UD: return MLX4_QP_ST_UD; | 871 | case IB_QPT_UD: return MLX4_QP_ST_UD; |
872 | case IB_QPT_XRC_INI: | ||
873 | case IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; | ||
824 | case IB_QPT_SMI: | 874 | case IB_QPT_SMI: |
825 | case IB_QPT_GSI: return MLX4_QP_ST_MLX; | 875 | case IB_QPT_GSI: return MLX4_QP_ST_MLX; |
826 | default: return -1; | 876 | default: return -1; |
@@ -959,6 +1009,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
959 | { | 1009 | { |
960 | struct mlx4_ib_dev *dev = to_mdev(ibqp->device); | 1010 | struct mlx4_ib_dev *dev = to_mdev(ibqp->device); |
961 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | 1011 | struct mlx4_ib_qp *qp = to_mqp(ibqp); |
1012 | struct mlx4_ib_pd *pd; | ||
1013 | struct mlx4_ib_cq *send_cq, *recv_cq; | ||
962 | struct mlx4_qp_context *context; | 1014 | struct mlx4_qp_context *context; |
963 | enum mlx4_qp_optpar optpar = 0; | 1015 | enum mlx4_qp_optpar optpar = 0; |
964 | int sqd_event; | 1016 | int sqd_event; |
@@ -1014,8 +1066,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1014 | context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; | 1066 | context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; |
1015 | context->sq_size_stride |= qp->sq.wqe_shift - 4; | 1067 | context->sq_size_stride |= qp->sq.wqe_shift - 4; |
1016 | 1068 | ||
1017 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) | 1069 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
1018 | context->sq_size_stride |= !!qp->sq_no_prefetch << 7; | 1070 | context->sq_size_stride |= !!qp->sq_no_prefetch << 7; |
1071 | context->xrcd = cpu_to_be32((u32) qp->xrcdn); | ||
1072 | } | ||
1019 | 1073 | ||
1020 | if (qp->ibqp.uobject) | 1074 | if (qp->ibqp.uobject) |
1021 | context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); | 1075 | context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); |
@@ -1079,8 +1133,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1079 | optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; | 1133 | optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; |
1080 | } | 1134 | } |
1081 | 1135 | ||
1082 | context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn); | 1136 | pd = get_pd(qp); |
1083 | context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); | 1137 | get_cqs(qp, &send_cq, &recv_cq); |
1138 | context->pd = cpu_to_be32(pd->pdn); | ||
1139 | context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); | ||
1140 | context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); | ||
1141 | context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); | ||
1084 | 1142 | ||
1085 | /* Set "fast registration enabled" for all kernel QPs */ | 1143 | /* Set "fast registration enabled" for all kernel QPs */ |
1086 | if (!qp->ibqp.uobject) | 1144 | if (!qp->ibqp.uobject) |
@@ -1106,8 +1164,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1106 | if (attr_mask & IB_QP_SQ_PSN) | 1164 | if (attr_mask & IB_QP_SQ_PSN) |
1107 | context->next_send_psn = cpu_to_be32(attr->sq_psn); | 1165 | context->next_send_psn = cpu_to_be32(attr->sq_psn); |
1108 | 1166 | ||
1109 | context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn); | ||
1110 | |||
1111 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { | 1167 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
1112 | if (attr->max_dest_rd_atomic) | 1168 | if (attr->max_dest_rd_atomic) |
1113 | context->params2 |= | 1169 | context->params2 |= |
@@ -1130,8 +1186,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1130 | if (attr_mask & IB_QP_RQ_PSN) | 1186 | if (attr_mask & IB_QP_RQ_PSN) |
1131 | context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); | 1187 | context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); |
1132 | 1188 | ||
1133 | context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn); | ||
1134 | |||
1135 | if (attr_mask & IB_QP_QKEY) { | 1189 | if (attr_mask & IB_QP_QKEY) { |
1136 | context->qkey = cpu_to_be32(attr->qkey); | 1190 | context->qkey = cpu_to_be32(attr->qkey); |
1137 | optpar |= MLX4_QP_OPTPAR_Q_KEY; | 1191 | optpar |= MLX4_QP_OPTPAR_Q_KEY; |
@@ -1140,7 +1194,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1140 | if (ibqp->srq) | 1194 | if (ibqp->srq) |
1141 | context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); | 1195 | context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); |
1142 | 1196 | ||
1143 | if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) | 1197 | if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) |
1144 | context->db_rec_addr = cpu_to_be64(qp->db.dma); | 1198 | context->db_rec_addr = cpu_to_be64(qp->db.dma); |
1145 | 1199 | ||
1146 | if (cur_state == IB_QPS_INIT && | 1200 | if (cur_state == IB_QPS_INIT && |
@@ -1225,17 +1279,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1225 | * entries and reinitialize the QP. | 1279 | * entries and reinitialize the QP. |
1226 | */ | 1280 | */ |
1227 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { | 1281 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { |
1228 | mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn, | 1282 | mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, |
1229 | ibqp->srq ? to_msrq(ibqp->srq): NULL); | 1283 | ibqp->srq ? to_msrq(ibqp->srq): NULL); |
1230 | if (ibqp->send_cq != ibqp->recv_cq) | 1284 | if (send_cq != recv_cq) |
1231 | mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL); | 1285 | mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); |
1232 | 1286 | ||
1233 | qp->rq.head = 0; | 1287 | qp->rq.head = 0; |
1234 | qp->rq.tail = 0; | 1288 | qp->rq.tail = 0; |
1235 | qp->sq.head = 0; | 1289 | qp->sq.head = 0; |
1236 | qp->sq.tail = 0; | 1290 | qp->sq.tail = 0; |
1237 | qp->sq_next_wqe = 0; | 1291 | qp->sq_next_wqe = 0; |
1238 | if (!ibqp->srq) | 1292 | if (qp->rq.wqe_cnt) |
1239 | *qp->db.db = 0; | 1293 | *qp->db.db = 0; |
1240 | } | 1294 | } |
1241 | 1295 | ||
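[Editor's note: throughout the reset and teardown paths, "!ibqp->srq" is replaced by "qp->rq.wqe_cnt" because "no SRQ" no longer implies "has an RQ" once XRC QPs exist. The doorbell record is owned precisely by QPs that allocated receive WQEs, so the WQE count is the authoritative test:

/* Equivalent predicate, spelled out (sketch): a QP owns a receive
 * doorbell record iff it allocated receive WQEs at create time; this
 * is false for SRQ-attached QPs and for both XRC QP types. */
static inline bool mlx4_qp_owns_db(struct mlx4_ib_qp *qp)
{
	return qp->rq.wqe_cnt != 0;
}
]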
@@ -1547,14 +1601,13 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, | |||
1547 | } | 1601 | } |
1548 | 1602 | ||
1549 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | 1603 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, |
1550 | struct ib_send_wr *wr, __be16 *vlan) | 1604 | struct ib_send_wr *wr) |
1551 | { | 1605 | { |
1552 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); | 1606 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); |
1553 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 1607 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); |
1554 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1608 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); |
1555 | dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; | 1609 | dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; |
1556 | memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); | 1610 | memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); |
1557 | *vlan = dseg->vlan; | ||
1558 | } | 1611 | } |
1559 | 1612 | ||
1560 | static void set_mlx_icrc_seg(void *dseg) | 1613 | static void set_mlx_icrc_seg(void *dseg) |
@@ -1657,7 +1710,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1657 | __be32 uninitialized_var(lso_hdr_sz); | 1710 | __be32 uninitialized_var(lso_hdr_sz); |
1658 | __be32 blh; | 1711 | __be32 blh; |
1659 | int i; | 1712 | int i; |
1660 | __be16 vlan = cpu_to_be16(0xffff); | ||
1661 | 1713 | ||
1662 | spin_lock_irqsave(&qp->sq.lock, flags); | 1714 | spin_lock_irqsave(&qp->sq.lock, flags); |
1663 | 1715 | ||
@@ -1761,7 +1813,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1761 | break; | 1813 | break; |
1762 | 1814 | ||
1763 | case IB_QPT_UD: | 1815 | case IB_QPT_UD: |
1764 | set_datagram_seg(wqe, wr, &vlan); | 1816 | set_datagram_seg(wqe, wr); |
1765 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 1817 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
1766 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 1818 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
1767 | 1819 | ||
@@ -1824,11 +1876,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1824 | ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? | 1876 | ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? |
1825 | MLX4_WQE_CTRL_FENCE : 0) | size; | 1877 | MLX4_WQE_CTRL_FENCE : 0) | size; |
1826 | 1878 | ||
1827 | if (be16_to_cpu(vlan) < 0x1000) { | ||
1828 | ctrl->ins_vlan = 1 << 6; | ||
1829 | ctrl->vlan_tag = vlan; | ||
1830 | } | ||
1831 | |||
1832 | /* | 1879 | /* |
1833 | * Make sure descriptor is fully written before | 1880 | * Make sure descriptor is fully written before |
1834 | * setting ownership bit (because HW can start | 1881 | * setting ownership bit (because HW can start |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 818b7ecace5e..39542f3703b8 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -76,6 +76,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
76 | struct mlx4_ib_srq *srq; | 76 | struct mlx4_ib_srq *srq; |
77 | struct mlx4_wqe_srq_next_seg *next; | 77 | struct mlx4_wqe_srq_next_seg *next; |
78 | struct mlx4_wqe_data_seg *scatter; | 78 | struct mlx4_wqe_data_seg *scatter; |
79 | u32 cqn; | ||
80 | u16 xrcdn; | ||
79 | int desc_size; | 81 | int desc_size; |
80 | int buf_size; | 82 | int buf_size; |
81 | int err; | 83 | int err; |
@@ -174,12 +176,18 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
174 | } | 176 | } |
175 | } | 177 | } |
176 | 178 | ||
177 | err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt, | 179 | cqn = (init_attr->srq_type == IB_SRQT_XRC) ? |
180 | to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0; | ||
181 | xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ? | ||
182 | to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn : | ||
183 | (u16) dev->dev->caps.reserved_xrcds; | ||
184 | err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt, | ||
178 | srq->db.dma, &srq->msrq); | 185 | srq->db.dma, &srq->msrq); |
179 | if (err) | 186 | if (err) |
180 | goto err_wrid; | 187 | goto err_wrid; |
181 | 188 | ||
182 | srq->msrq.event = mlx4_ib_srq_event; | 189 | srq->msrq.event = mlx4_ib_srq_event; |
190 | srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; | ||
183 | 191 | ||
184 | if (pd->uobject) | 192 | if (pd->uobject) |
185 | if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { | 193 | if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { |
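[Editor's note: mlx4_srq_alloc() now takes the CQ number and XRC domain at creation time — XRC SRQs are bound to the CQ and XRCD supplied in init_attr->ext.xrc, while plain SRQs pass cqn 0 and the device's reserved XRCD so firmware handles both uniformly. A hedged sketch of filling the extended init attributes from the verbs side (assumes valid pd, cq and xrcd; error handling trimmed):

struct ib_srq_init_attr init_attr = {
	.srq_type = IB_SRQT_XRC,
	.attr	  = { .max_wr = 128, .max_sge = 1 },
};
struct ib_srq *srq;

init_attr.ext.xrc.cq   = cq;	/* completions for XRC targets */
init_attr.ext.xrc.xrcd = xrcd;	/* domain this SRQ serves */
srq = ib_create_srq(pd, &init_attr);
if (IS_ERR(srq))
	return PTR_ERR(srq);
]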
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ab876f928a1b..ed9a989e501b 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -146,7 +146,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) | |||
146 | 146 | ||
147 | buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), | 147 | buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), |
148 | GFP_KERNEL); | 148 | GFP_KERNEL); |
149 | buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), | 149 | buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, |
150 | GFP_KERNEL); | 150 | GFP_KERNEL); |
151 | if (!buddy->bits || !buddy->num_free) | 151 | if (!buddy->bits || !buddy->num_free) |
152 | goto err_out; | 152 | goto err_out; |
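[Editor's note: the mthca change fixes a classic sizeof slip — num_free is an array of int, but the old code sized it with sizeof(int *), over-allocating on 64-bit and under-describing the intent. kcalloc with "sizeof *buddy->num_free" ties the element size to the pointee type and adds overflow checking on the multiplication. The pattern in miniature (sketch contrasting the two calls, not a leak-free sequence):

int *num_free;

num_free = kzalloc(n * sizeof(int *), GFP_KERNEL);	/* old: pointer size */
num_free = kcalloc(n, sizeof *num_free, GFP_KERNEL);	/* new: element size,
							   overflow-checked */
]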
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 4f8adca11e94..5b71d43bd89c 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -440,6 +440,9 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd, | |||
440 | struct mthca_srq *srq; | 440 | struct mthca_srq *srq; |
441 | int err; | 441 | int err; |
442 | 442 | ||
443 | if (init_attr->srq_type != IB_SRQT_BASIC) | ||
444 | return ERR_PTR(-ENOSYS); | ||
445 | |||
443 | srq = kmalloc(sizeof *srq, GFP_KERNEL); | 446 | srq = kmalloc(sizeof *srq, GFP_KERNEL); |
444 | if (!srq) | 447 | if (!srq) |
445 | return ERR_PTR(-ENOMEM); | 448 | return ERR_PTR(-ENOMEM); |
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile index 35148513c47e..97820c23ecef 100644 --- a/drivers/infiniband/hw/nes/Makefile +++ b/drivers/infiniband/hw/nes/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o | 1 | obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o |
2 | 2 | ||
3 | iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o | 3 | iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o nes_mgt.o |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 2d668c69f6d9..5965b3df8f2f 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -84,7 +84,7 @@ module_param(send_first, int, 0644); | |||
84 | MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection"); | 84 | MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection"); |
85 | 85 | ||
86 | 86 | ||
87 | unsigned int nes_drv_opt = 0; | 87 | unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU; |
88 | module_param(nes_drv_opt, int, 0644); | 88 | module_param(nes_drv_opt, int, 0644); |
89 | MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters"); | 89 | MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters"); |
90 | 90 | ||
@@ -130,9 +130,6 @@ static struct notifier_block nes_net_notifier = { | |||
130 | .notifier_call = nes_net_event | 130 | .notifier_call = nes_net_event |
131 | }; | 131 | }; |
132 | 132 | ||
133 | |||
134 | |||
135 | |||
136 | /** | 133 | /** |
137 | * nes_inetaddr_event | 134 | * nes_inetaddr_event |
138 | */ | 135 | */ |
@@ -321,6 +318,9 @@ void nes_rem_ref(struct ib_qp *ibqp) | |||
321 | } | 318 | } |
322 | 319 | ||
323 | if (atomic_dec_and_test(&nesqp->refcount)) { | 320 | if (atomic_dec_and_test(&nesqp->refcount)) { |
321 | if (nesqp->pau_mode) | ||
322 | nes_destroy_pau_qp(nesdev, nesqp); | ||
323 | |||
324 | /* Destroy the QP */ | 324 | /* Destroy the QP */ |
325 | cqp_request = nes_get_cqp_request(nesdev); | 325 | cqp_request = nes_get_cqp_request(nesdev); |
326 | if (cqp_request == NULL) { | 326 | if (cqp_request == NULL) { |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 6fe79876009e..568b4f11380a 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -102,6 +102,7 @@ | |||
102 | #define NES_DRV_OPT_NO_INLINE_DATA 0x00000080 | 102 | #define NES_DRV_OPT_NO_INLINE_DATA 0x00000080 |
103 | #define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100 | 103 | #define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100 |
104 | #define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200 | 104 | #define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200 |
105 | #define NES_DRV_OPT_ENABLE_PAU 0x00000400 | ||
105 | 106 | ||
106 | #define NES_AEQ_EVENT_TIMEOUT 2500 | 107 | #define NES_AEQ_EVENT_TIMEOUT 2500 |
107 | #define NES_DISCONNECT_EVENT_TIMEOUT 2000 | 108 | #define NES_DISCONNECT_EVENT_TIMEOUT 2000 |
@@ -128,6 +129,7 @@ | |||
128 | #define NES_DBG_IW_RX 0x00020000 | 129 | #define NES_DBG_IW_RX 0x00020000 |
129 | #define NES_DBG_IW_TX 0x00040000 | 130 | #define NES_DBG_IW_TX 0x00040000 |
130 | #define NES_DBG_SHUTDOWN 0x00080000 | 131 | #define NES_DBG_SHUTDOWN 0x00080000 |
132 | #define NES_DBG_PAU 0x00100000 | ||
131 | #define NES_DBG_RSVD1 0x10000000 | 133 | #define NES_DBG_RSVD1 0x10000000 |
132 | #define NES_DBG_RSVD2 0x20000000 | 134 | #define NES_DBG_RSVD2 0x20000000 |
133 | #define NES_DBG_RSVD3 0x40000000 | 135 | #define NES_DBG_RSVD3 0x40000000 |
@@ -162,6 +164,7 @@ do { \ | |||
162 | #include "nes_context.h" | 164 | #include "nes_context.h" |
163 | #include "nes_user.h" | 165 | #include "nes_user.h" |
164 | #include "nes_cm.h" | 166 | #include "nes_cm.h" |
167 | #include "nes_mgt.h" | ||
165 | 168 | ||
166 | extern int max_mtu; | 169 | extern int max_mtu; |
167 | #define max_frame_len (max_mtu+ETH_HLEN) | 170 | #define max_frame_len (max_mtu+ETH_HLEN) |
@@ -202,6 +205,8 @@ extern atomic_t cm_nodes_created; | |||
202 | extern atomic_t cm_nodes_destroyed; | 205 | extern atomic_t cm_nodes_destroyed; |
203 | extern atomic_t cm_accel_dropped_pkts; | 206 | extern atomic_t cm_accel_dropped_pkts; |
204 | extern atomic_t cm_resets_recvd; | 207 | extern atomic_t cm_resets_recvd; |
208 | extern atomic_t pau_qps_created; | ||
209 | extern atomic_t pau_qps_destroyed; | ||
205 | 210 | ||
206 | extern u32 int_mod_timer_init; | 211 | extern u32 int_mod_timer_init; |
207 | extern u32 int_mod_cq_depth_256; | 212 | extern u32 int_mod_cq_depth_256; |
@@ -273,6 +278,14 @@ struct nes_device { | |||
273 | u8 link_recheck; | 278 | u8 link_recheck; |
274 | }; | 279 | }; |
275 | 280 | ||
281 | /* Receive skb private area - must fit in skb->cb area */ | ||
282 | struct nes_rskb_cb { | ||
283 | u64 busaddr; | ||
284 | u32 maplen; | ||
285 | u32 seqnum; | ||
286 | u8 *data_start; | ||
287 | struct nes_qp *nesqp; | ||
288 | }; | ||
276 | 289 | ||
277 | static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) | 290 | static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) |
278 | { | 291 | { |
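[Editor's note: nes_rskb_cb stashes per-packet PAU state in the skb's control buffer, a fixed 48-byte scratch area; the "must fit" constraint in the comment is conventionally enforced at compile time. A sketch of the usual safety net plus a typical accessor — the helper name is hypothetical:

static inline struct nes_rskb_cb *nes_get_rskb_cb(struct sk_buff *skb)
{
	/* Fail the build if the private area outgrows skb->cb. */
	BUILD_BUG_ON(sizeof(struct nes_rskb_cb) > sizeof(skb->cb));
	return (struct nes_rskb_cb *)skb->cb;
}
]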
@@ -305,8 +318,8 @@ set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value) | |||
305 | static inline void | 318 | static inline void |
306 | nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev) | 319 | nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev) |
307 | { | 320 | { |
308 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX, | 321 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = 0; |
309 | (u64)((unsigned long) &nesdev->cqp)); | 322 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0; |
310 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0; | 323 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0; |
311 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0; | 324 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0; |
312 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0; | 325 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0; |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index c118663e4437..dfce9ea98a39 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -77,26 +77,19 @@ atomic_t cm_nodes_destroyed; | |||
77 | atomic_t cm_accel_dropped_pkts; | 77 | atomic_t cm_accel_dropped_pkts; |
78 | atomic_t cm_resets_recvd; | 78 | atomic_t cm_resets_recvd; |
79 | 79 | ||
80 | static inline int mini_cm_accelerated(struct nes_cm_core *, | 80 | static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); |
81 | struct nes_cm_node *); | 81 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *); |
82 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, | ||
83 | struct nes_vnic *, struct nes_cm_info *); | ||
84 | static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); | 82 | static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); |
85 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, | 83 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, struct nes_vnic *, u16, void *, struct nes_cm_info *); |
86 | struct nes_vnic *, u16, void *, struct nes_cm_info *); | ||
87 | static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *); | 84 | static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *); |
88 | static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, | 85 | static int mini_cm_accept(struct nes_cm_core *, struct nes_cm_node *); |
89 | struct nes_cm_node *); | 86 | static int mini_cm_reject(struct nes_cm_core *, struct nes_cm_node *); |
90 | static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, | 87 | static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *); |
91 | struct nes_cm_node *); | ||
92 | static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, | ||
93 | struct sk_buff *); | ||
94 | static int mini_cm_dealloc_core(struct nes_cm_core *); | 88 | static int mini_cm_dealloc_core(struct nes_cm_core *); |
95 | static int mini_cm_get(struct nes_cm_core *); | 89 | static int mini_cm_get(struct nes_cm_core *); |
96 | static int mini_cm_set(struct nes_cm_core *, u32, u32); | 90 | static int mini_cm_set(struct nes_cm_core *, u32, u32); |
97 | 91 | ||
98 | static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, | 92 | static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, void *, u32, void *, u32, u8); |
99 | void *, u32, void *, u32, u8); | ||
100 | static int add_ref_cm_node(struct nes_cm_node *); | 93 | static int add_ref_cm_node(struct nes_cm_node *); |
101 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); | 94 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); |
102 | 95 | ||
@@ -111,16 +104,14 @@ static int send_syn(struct nes_cm_node *, u32, struct sk_buff *); | |||
111 | static int send_reset(struct nes_cm_node *, struct sk_buff *); | 104 | static int send_reset(struct nes_cm_node *, struct sk_buff *); |
112 | static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); | 105 | static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); |
113 | static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb); | 106 | static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb); |
114 | static void process_packet(struct nes_cm_node *, struct sk_buff *, | 107 | static void process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *); |
115 | struct nes_cm_core *); | ||
116 | 108 | ||
117 | static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); | 109 | static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); |
118 | static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); | 110 | static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); |
119 | static void cleanup_retrans_entry(struct nes_cm_node *); | 111 | static void cleanup_retrans_entry(struct nes_cm_node *); |
120 | static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *); | 112 | static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *); |
121 | static void free_retrans_entry(struct nes_cm_node *cm_node); | 113 | static void free_retrans_entry(struct nes_cm_node *cm_node); |
122 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 114 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, struct sk_buff *skb, int optionsize, int passive); |
123 | struct sk_buff *skb, int optionsize, int passive); | ||
124 | 115 | ||
125 | /* CM event handler functions */ | 116 | /* CM event handler functions */ |
126 | static void cm_event_connected(struct nes_cm_event *); | 117 | static void cm_event_connected(struct nes_cm_event *); |
@@ -130,6 +121,12 @@ static void cm_event_mpa_req(struct nes_cm_event *); | |||
130 | static void cm_event_mpa_reject(struct nes_cm_event *); | 121 | static void cm_event_mpa_reject(struct nes_cm_event *); |
131 | static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node); | 122 | static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node); |
132 | 123 | ||
124 | /* MPA build functions */ | ||
125 | static int cm_build_mpa_frame(struct nes_cm_node *, u8 **, u16 *, u8 *, u8); | ||
126 | static void build_mpa_v2(struct nes_cm_node *, void *, u8); | ||
127 | static void build_mpa_v1(struct nes_cm_node *, void *, u8); | ||
128 | static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **); | ||
129 | |||
133 | static void print_core(struct nes_cm_core *core); | 130 | static void print_core(struct nes_cm_core *core); |
134 | 131 | ||
135 | /* External CM API Interface */ | 132 | /* External CM API Interface */ |
@@ -159,12 +156,21 @@ atomic_t cm_connecteds; | |||
159 | atomic_t cm_connect_reqs; | 156 | atomic_t cm_connect_reqs; |
160 | atomic_t cm_rejects; | 157 | atomic_t cm_rejects; |
161 | 158 | ||
159 | int nes_add_ref_cm_node(struct nes_cm_node *cm_node) | ||
160 | { | ||
161 | return add_ref_cm_node(cm_node); | ||
162 | } | ||
163 | |||
164 | int nes_rem_ref_cm_node(struct nes_cm_node *cm_node) | ||
165 | { | ||
166 | return rem_ref_cm_node(cm_node->cm_core, cm_node); | ||
167 | } | ||
162 | 168 | ||
163 | /** | 169 | /** |
164 | * create_event | 170 | * create_event |
165 | */ | 171 | */ |
166 | static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | 172 | static struct nes_cm_event *create_event(struct nes_cm_node * cm_node, |
167 | enum nes_cm_event_type type) | 173 | enum nes_cm_event_type type) |
168 | { | 174 | { |
169 | struct nes_cm_event *event; | 175 | struct nes_cm_event *event; |
170 | 176 | ||
@@ -186,10 +192,10 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | |||
186 | event->cm_info.cm_id = cm_node->cm_id; | 192 | event->cm_info.cm_id = cm_node->cm_id; |
187 | 193 | ||
188 | nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, " | 194 | nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, " |
189 | "dst_addr=%08x[%x], src_addr=%08x[%x]\n", | 195 | "dst_addr=%08x[%x], src_addr=%08x[%x]\n", |
190 | cm_node, event, type, event->cm_info.loc_addr, | 196 | cm_node, event, type, event->cm_info.loc_addr, |
191 | event->cm_info.loc_port, event->cm_info.rem_addr, | 197 | event->cm_info.loc_port, event->cm_info.rem_addr, |
192 | event->cm_info.rem_port); | 198 | event->cm_info.rem_port); |
193 | 199 | ||
194 | nes_cm_post_event(event); | 200 | nes_cm_post_event(event); |
195 | return event; | 201 | return event; |
@@ -201,14 +207,19 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | |||
201 | */ | 207 | */ |
202 | static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) | 208 | static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) |
203 | { | 209 | { |
210 | u8 start_addr = 0; | ||
211 | u8 *start_ptr = &start_addr; | ||
212 | u8 **start_buff = &start_ptr; | ||
213 | u16 buff_len = 0; | ||
214 | |||
204 | if (!skb) { | 215 | if (!skb) { |
205 | nes_debug(NES_DBG_CM, "skb set to NULL\n"); | 216 | nes_debug(NES_DBG_CM, "skb set to NULL\n"); |
206 | return -1; | 217 | return -1; |
207 | } | 218 | } |
208 | 219 | ||
209 | /* send an MPA Request frame */ | 220 | /* send an MPA Request frame */ |
210 | form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, | 221 | cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REQUEST); |
211 | cm_node->mpa_frame_size, SET_ACK); | 222 | form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK); |
212 | 223 | ||
213 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | 224 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); |
214 | } | 225 | } |
@@ -217,7 +228,11 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
217 | 228 | ||
218 | static int send_mpa_reject(struct nes_cm_node *cm_node) | 229 | static int send_mpa_reject(struct nes_cm_node *cm_node) |
219 | { | 230 | { |
220 | struct sk_buff *skb = NULL; | 231 | struct sk_buff *skb = NULL; |
232 | u8 start_addr = 0; | ||
233 | u8 *start_ptr = &start_addr; | ||
234 | u8 **start_buff = &start_ptr; | ||
235 | u16 buff_len = 0; | ||
221 | 236 | ||
222 | skb = dev_alloc_skb(MAX_CM_BUFFER); | 237 | skb = dev_alloc_skb(MAX_CM_BUFFER); |
223 | if (!skb) { | 238 | if (!skb) { |
@@ -226,8 +241,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node) | |||
226 | } | 241 | } |
227 | 242 | ||
228 | /* send an MPA reject frame */ | 243 | /* send an MPA reject frame */ |
229 | form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, | 244 | cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY); |
230 | cm_node->mpa_frame_size, SET_ACK | SET_FIN); | 245 | form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN); |
231 | 246 | ||
232 | cm_node->state = NES_CM_STATE_FIN_WAIT1; | 247 | cm_node->state = NES_CM_STATE_FIN_WAIT1; |
233 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | 248 | return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); |
@@ -239,24 +254,31 @@ static int send_mpa_reject(struct nes_cm_node *cm_node) | |||
239 | * IETF MPA frame | 254 | * IETF MPA frame |
240 | */ | 255 | */ |
241 | static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | 256 | static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, |
242 | u32 len) | 257 | u32 len) |
243 | { | 258 | { |
244 | struct ietf_mpa_frame *mpa_frame; | 259 | struct ietf_mpa_v1 *mpa_frame; |
260 | struct ietf_mpa_v2 *mpa_v2_frame; | ||
261 | struct ietf_rtr_msg *rtr_msg; | ||
262 | int mpa_hdr_len; | ||
263 | int priv_data_len; | ||
245 | 264 | ||
246 | *type = NES_MPA_REQUEST_ACCEPT; | 265 | *type = NES_MPA_REQUEST_ACCEPT; |
247 | 266 | ||
248 | /* assume req frame is in tcp data payload */ | 267 | /* assume req frame is in tcp data payload */ |
249 | if (len < sizeof(struct ietf_mpa_frame)) { | 268 | if (len < sizeof(struct ietf_mpa_v1)) { |
250 | nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); | 269 | nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); |
251 | return -EINVAL; | 270 | return -EINVAL; |
252 | } | 271 | } |
253 | 272 | ||
254 | mpa_frame = (struct ietf_mpa_frame *)buffer; | 273 | /* points to the beginning of the frame, which could be MPA V1 or V2 */ |
255 | cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len); | 274 | mpa_frame = (struct ietf_mpa_v1 *)buffer; |
275 | mpa_hdr_len = sizeof(struct ietf_mpa_v1); | ||
276 | priv_data_len = ntohs(mpa_frame->priv_data_len); | ||
277 | |||
256 | /* make sure mpa private data len is less than 512 bytes */ | 278 | /* make sure mpa private data len is less than 512 bytes */ |
257 | if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) { | 279 | if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) { |
258 | nes_debug(NES_DBG_CM, "The received Length of Private" | 280 | nes_debug(NES_DBG_CM, "The received Length of Private" |
259 | " Data field exceeds 512 octets\n"); | 281 | " Data field exceeds 512 octets\n"); |
260 | return -EINVAL; | 282 | return -EINVAL; |
261 | } | 283 | } |
262 | /* | 284 | /* |
@@ -264,11 +286,22 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
264 | * received MPA version and MPA key information | 286 | * received MPA version and MPA key information |
265 | * | 287 | * |
266 | */ | 288 | */ |
267 | if (mpa_frame->rev != mpa_version) { | 289 | if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) { |
290 | nes_debug(NES_DBG_CM, "The received mpa version" | ||
291 | " is not supported\n"); | ||
292 | return -EINVAL; | ||
293 | } | ||
294 | /* | ||
295 | * backwards compatibility only | ||
296 | */ | ||
297 | if (mpa_frame->rev > cm_node->mpa_frame_rev) { | ||
268 | nes_debug(NES_DBG_CM, "The received mpa version" | 298 | nes_debug(NES_DBG_CM, "The received mpa version" |
269 | " can not be interoperated\n"); | 299 | " can not be interoperated\n"); |
270 | return -EINVAL; | 300 | return -EINVAL; |
301 | } else { | ||
302 | cm_node->mpa_frame_rev = mpa_frame->rev; | ||
271 | } | 303 | } |
304 | |||
272 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { | 305 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { |
273 | if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { | 306 | if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { |
274 | nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n"); | 307 | nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n"); |
@@ -281,25 +314,75 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
281 | } | 314 | } |
282 | } | 315 | } |
283 | 316 | ||
284 | if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) { | 317 | |
318 | if (priv_data_len + mpa_hdr_len != len) { | ||
285 | nes_debug(NES_DBG_CM, "The received ietf buffer was not right" | 319 | nes_debug(NES_DBG_CM, "The received ietf buffer was not right" |
286 | " complete (%x + %x != %x)\n", | 320 | " complete (%x + %x != %x)\n", |
287 | cm_node->mpa_frame_size, | 321 | priv_data_len, mpa_hdr_len, len); |
288 | (u32)sizeof(struct ietf_mpa_frame), len); | ||
289 | return -EINVAL; | 322 | return -EINVAL; |
290 | } | 323 | } |
291 | /* make sure it does not exceed the max size */ | 324 | /* make sure it does not exceed the max size */ |
292 | if (len > MAX_CM_BUFFER) { | 325 | if (len > MAX_CM_BUFFER) { |
293 | nes_debug(NES_DBG_CM, "The received ietf buffer was too large" | 326 | nes_debug(NES_DBG_CM, "The received ietf buffer was too large" |
294 | " (%x + %x != %x)\n", | 327 | " (%x + %x != %x)\n", |
295 | cm_node->mpa_frame_size, | 328 | priv_data_len, mpa_hdr_len, len); |
296 | (u32)sizeof(struct ietf_mpa_frame), len); | ||
297 | return -EINVAL; | 329 | return -EINVAL; |
298 | } | 330 | } |
299 | 331 | ||
332 | cm_node->mpa_frame_size = priv_data_len; | ||
333 | |||
334 | switch (mpa_frame->rev) { | ||
335 | case IETF_MPA_V2: { | ||
336 | u16 ird_size; | ||
337 | u16 ord_size; | ||
338 | mpa_v2_frame = (struct ietf_mpa_v2 *)buffer; | ||
339 | mpa_hdr_len += IETF_RTR_MSG_SIZE; | ||
340 | cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE; | ||
341 | rtr_msg = &mpa_v2_frame->rtr_msg; | ||
342 | |||
343 | /* parse rtr message */ | ||
344 | rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird); | ||
345 | rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord); | ||
346 | ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD; | ||
347 | ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD; | ||
348 | |||
349 | if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) { | ||
350 | /* send reset */ | ||
351 | return -EINVAL; | ||
352 | } | ||
353 | |||
354 | if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) { | ||
355 | /* responder */ | ||
356 | if (cm_node->ord_size > ird_size) | ||
357 | cm_node->ord_size = ird_size; | ||
358 | } else { | ||
359 | /* initiator */ | ||
360 | if (cm_node->ord_size > ird_size) | ||
361 | cm_node->ord_size = ird_size; | ||
362 | |||
363 | if (cm_node->ird_size < ord_size) { | ||
364 | /* no resources available */ | ||
365 | /* send terminate message */ | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) { | ||
371 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | ||
372 | } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) { | ||
373 | cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; | ||
374 | } else { /* Not supported RDMA0 operation */ | ||
375 | return -EINVAL; | ||
376 | } | ||
377 | break; | ||
378 | } | ||
379 | case IETF_MPA_V1: | ||
380 | default: | ||
381 | break; | ||
382 | } | ||
383 | |||
300 | /* copy entire MPA frame to our cm_node's frame */ | 384 | /* copy entire MPA frame to our cm_node's frame */ |
301 | memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame), | 385 | memcpy(cm_node->mpa_frame_buf, buffer + mpa_hdr_len, cm_node->mpa_frame_size); |
302 | cm_node->mpa_frame_size); | ||
303 | 386 | ||
304 | if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) | 387 | if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) |
305 | *type = NES_MPA_REQUEST_REJECT; | 388 | *type = NES_MPA_REQUEST_REJECT; |
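[Editor's note: parse_mpa() now accepts both MPA revisions. For v2 it peels an IETF_RTR_MSG_SIZE-byte RTR message off the private data, negotiates IRD/ORD against the peer's values, and records which zero-length RDMA op (read or write) will open the connection. The control words pack a 14-bit IRD/ORD value under flag bits; a sketch of the layout as used above, with mask values as defined in the driver's nes_cm.h:

#define IETF_NO_IRD_ORD		0x3fff	/* low 14 bits carry IRD/ORD */
#define IETF_PEER_TO_PEER	0x8000	/* ctrl_ird: peer-to-peer required */
#define IETF_RDMA0_WRITE	0x8000	/* ctrl_ord: rdma0 is a 0-length write */
#define IETF_RDMA0_READ		0x4000	/* ctrl_ord: rdma0 is a 0-length read */

u16 ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;	/* after ntohs() */
u16 ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
]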
@@ -312,8 +395,8 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, | |||
312 | * node info to build. | 395 | * node info to build. |
313 | */ | 396 | */ |
314 | static void form_cm_frame(struct sk_buff *skb, | 397 | static void form_cm_frame(struct sk_buff *skb, |
315 | struct nes_cm_node *cm_node, void *options, u32 optionsize, | 398 | struct nes_cm_node *cm_node, void *options, u32 optionsize, |
316 | void *data, u32 datasize, u8 flags) | 399 | void *data, u32 datasize, u8 flags) |
317 | { | 400 | { |
318 | struct tcphdr *tcph; | 401 | struct tcphdr *tcph; |
319 | struct iphdr *iph; | 402 | struct iphdr *iph; |
@@ -322,14 +405,14 @@ static void form_cm_frame(struct sk_buff *skb, | |||
322 | u16 packetsize = sizeof(*iph); | 405 | u16 packetsize = sizeof(*iph); |
323 | 406 | ||
324 | packetsize += sizeof(*tcph); | 407 | packetsize += sizeof(*tcph); |
325 | packetsize += optionsize + datasize; | 408 | packetsize += optionsize + datasize; |
326 | 409 | ||
410 | skb_trim(skb, 0); | ||
327 | memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph)); | 411 | memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph)); |
328 | 412 | ||
329 | skb->len = 0; | ||
330 | buf = skb_put(skb, packetsize + ETH_HLEN); | 413 | buf = skb_put(skb, packetsize + ETH_HLEN); |
331 | 414 | ||
332 | ethh = (struct ethhdr *) buf; | 415 | ethh = (struct ethhdr *)buf; |
333 | buf += ETH_HLEN; | 416 | buf += ETH_HLEN; |
334 | 417 | ||
335 | iph = (struct iphdr *)buf; | 418 | iph = (struct iphdr *)buf; |
@@ -337,7 +420,7 @@ static void form_cm_frame(struct sk_buff *skb, | |||
337 | tcph = (struct tcphdr *)buf; | 420 | tcph = (struct tcphdr *)buf; |
338 | skb_reset_mac_header(skb); | 421 | skb_reset_mac_header(skb); |
339 | skb_set_network_header(skb, ETH_HLEN); | 422 | skb_set_network_header(skb, ETH_HLEN); |
340 | skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph)); | 423 | skb_set_transport_header(skb, ETH_HLEN + sizeof(*iph)); |
341 | buf += sizeof(*tcph); | 424 | buf += sizeof(*tcph); |
342 | 425 | ||
343 | skb->ip_summed = CHECKSUM_PARTIAL; | 426 | skb->ip_summed = CHECKSUM_PARTIAL; |
@@ -350,14 +433,14 @@ static void form_cm_frame(struct sk_buff *skb, | |||
350 | ethh->h_proto = htons(0x0800); | 433 | ethh->h_proto = htons(0x0800); |
351 | 434 | ||
352 | iph->version = IPVERSION; | 435 | iph->version = IPVERSION; |
353 | iph->ihl = 5; /* 5 * 4-byte words, IP header len */ | 436 | iph->ihl = 5; /* 5 * 4-byte words, IP header len */ |
354 | iph->tos = 0; | 437 | iph->tos = 0; |
355 | iph->tot_len = htons(packetsize); | 438 | iph->tot_len = htons(packetsize); |
356 | iph->id = htons(++cm_node->tcp_cntxt.loc_id); | 439 | iph->id = htons(++cm_node->tcp_cntxt.loc_id); |
357 | 440 | ||
358 | iph->frag_off = htons(0x4000); | 441 | iph->frag_off = htons(0x4000); |
359 | iph->ttl = 0x40; | 442 | iph->ttl = 0x40; |
360 | iph->protocol = 0x06; /* IPPROTO_TCP */ | 443 | iph->protocol = 0x06; /* IPPROTO_TCP */ |
361 | 444 | ||
362 | iph->saddr = htonl(cm_node->loc_addr); | 445 | iph->saddr = htonl(cm_node->loc_addr); |
363 | iph->daddr = htonl(cm_node->rem_addr); | 446 | iph->daddr = htonl(cm_node->rem_addr); |
@@ -370,14 +453,16 @@ static void form_cm_frame(struct sk_buff *skb, | |||
370 | cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; | 453 | cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; |
371 | tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); | 454 | tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); |
372 | tcph->ack = 1; | 455 | tcph->ack = 1; |
373 | } else | 456 | } else { |
374 | tcph->ack_seq = 0; | 457 | tcph->ack_seq = 0; |
458 | } | ||
375 | 459 | ||
376 | if (flags & SET_SYN) { | 460 | if (flags & SET_SYN) { |
377 | cm_node->tcp_cntxt.loc_seq_num++; | 461 | cm_node->tcp_cntxt.loc_seq_num++; |
378 | tcph->syn = 1; | 462 | tcph->syn = 1; |
379 | } else | 463 | } else { |
380 | cm_node->tcp_cntxt.loc_seq_num += datasize; | 464 | cm_node->tcp_cntxt.loc_seq_num += datasize; |
465 | } | ||
381 | 466 | ||
382 | if (flags & SET_FIN) { | 467 | if (flags & SET_FIN) { |
383 | cm_node->tcp_cntxt.loc_seq_num++; | 468 | cm_node->tcp_cntxt.loc_seq_num++; |
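
The SYN/FIN/data sequence bookkeeping in form_cm_frame() follows standard TCP sequence-space rules: SYN and FIN each consume one sequence number, plain data consumes its length, and ACK snapshots rcv_nxt. A self-contained sketch (flag values are stand-ins for the driver's SET_* defines):

    #include <stdint.h>

    #define SET_ACK 0x1
    #define SET_SYN 0x2
    #define SET_FIN 0x4

    static void advance_tcp_state(uint32_t *loc_seq_num, uint32_t *ack_seq,
                                  uint32_t rcv_nxt, uint32_t datasize,
                                  uint8_t flags)
    {
            /* ACK snapshots rcv_nxt; otherwise the ack field stays zero */
            *ack_seq = (flags & SET_ACK) ? rcv_nxt : 0;

            /* SYN consumes one sequence number, data consumes its length */
            if (flags & SET_SYN)
                    (*loc_seq_num)++;
            else
                    *loc_seq_num += datasize;

            /* FIN consumes one more */
            if (flags & SET_FIN)
                    (*loc_seq_num)++;
    }
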
@@ -398,10 +483,8 @@ static void form_cm_frame(struct sk_buff *skb, | |||
398 | 483 | ||
399 | skb_shinfo(skb)->nr_frags = 0; | 484 | skb_shinfo(skb)->nr_frags = 0; |
400 | cm_packets_created++; | 485 | cm_packets_created++; |
401 | |||
402 | } | 486 | } |
403 | 487 | ||
404 | |||
405 | /** | 488 | /** |
406 | * print_core - dump a cm core | 489 | * print_core - dump a cm core |
407 | */ | 490 | */ |
@@ -413,7 +496,7 @@ static void print_core(struct nes_cm_core *core) | |||
413 | return; | 496 | return; |
414 | nes_debug(NES_DBG_CM, "---------------------------------------------\n"); | 497 | nes_debug(NES_DBG_CM, "---------------------------------------------\n"); |
415 | 498 | ||
416 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); | 499 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); |
417 | 500 | ||
418 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); | 501 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); |
419 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); | 502 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); |
@@ -423,6 +506,147 @@ static void print_core(struct nes_cm_core *core) | |||
423 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); | 506 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); |
424 | } | 507 | } |
425 | 508 | ||
509 | /** | ||
510 | * cm_build_mpa_frame - build an MPA V1 or MPA V2 frame | ||
511 | */ | ||
512 | static int cm_build_mpa_frame(struct nes_cm_node *cm_node, u8 **start_buff, | ||
513 | u16 *buff_len, u8 *pci_mem, u8 mpa_key) | ||
514 | { | ||
515 | int ret = 0; | ||
516 | |||
517 | *start_buff = (pci_mem) ? pci_mem : &cm_node->mpa_frame_buf[0]; | ||
518 | |||
519 | switch (cm_node->mpa_frame_rev) { | ||
520 | case IETF_MPA_V1: | ||
521 | *start_buff = (u8 *)*start_buff + sizeof(struct ietf_rtr_msg); | ||
522 | *buff_len = sizeof(struct ietf_mpa_v1) + cm_node->mpa_frame_size; | ||
523 | build_mpa_v1(cm_node, *start_buff, mpa_key); | ||
524 | break; | ||
525 | case IETF_MPA_V2: | ||
526 | *buff_len = sizeof(struct ietf_mpa_v2) + cm_node->mpa_frame_size; | ||
527 | build_mpa_v2(cm_node, *start_buff, mpa_key); | ||
528 | break; | ||
529 | default: | ||
530 | ret = -EINVAL; | ||
531 | } | ||
532 | return ret; | ||
533 | } | ||
534 | |||
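
The buffer arithmetic in cm_build_mpa_frame() above deserves a note: the frame buffer is sized for the larger MPA v2 layout, and a v1 frame is built offset past the unused RTR slot so it ends flush with the private data. A sketch of that layout decision (header sizes are stand-ins; the real structs live in nes_cm.h):

    #include <stddef.h>
    #include <stdint.h>

    #define MPA_V1_HDR_LEN 20   /* key[16] + flags + rev + priv_data_len */
    #define RTR_MSG_LEN     4   /* ctrl_ird + ctrl_ord */

    static uint8_t *mpa_frame_start(uint8_t *buf, int is_v2,
                                    size_t priv_len, size_t *frame_len)
    {
            if (is_v2) {
                    *frame_len = MPA_V1_HDR_LEN + RTR_MSG_LEN + priv_len;
                    return buf;
            }
            /* v1: skip the unused RTR slot so the shorter frame ends
             * flush with the private data that follows the header */
            *frame_len = MPA_V1_HDR_LEN + priv_len;
            return buf + RTR_MSG_LEN;
    }
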
535 | /** | ||
536 | * build_mpa_v2 - build an MPA V2 frame | ||
537 | */ | ||
538 | static void build_mpa_v2(struct nes_cm_node *cm_node, | ||
539 | void *start_addr, u8 mpa_key) | ||
540 | { | ||
541 | struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr; | ||
542 | struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg; | ||
543 | |||
544 | /* initialize the upper 5 bytes of the frame */ | ||
545 | build_mpa_v1(cm_node, start_addr, mpa_key); | ||
546 | mpa_frame->flags |= IETF_MPA_V2_FLAG; /* set a bit to indicate MPA V2 */ | ||
547 | mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); | ||
548 | |||
549 | /* initialize RTR msg */ | ||
550 | rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? | ||
551 | IETF_NO_IRD_ORD : cm_node->ird_size; | ||
552 | rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? | ||
553 | IETF_NO_IRD_ORD : cm_node->ord_size; | ||
554 | |||
555 | rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER; | ||
556 | rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN; | ||
557 | |||
558 | switch (mpa_key) { | ||
559 | case MPA_KEY_REQUEST: | ||
560 | rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; | ||
561 | rtr_msg->ctrl_ord |= IETF_RDMA0_READ; | ||
562 | break; | ||
563 | case MPA_KEY_REPLY: | ||
564 | switch (cm_node->send_rdma0_op) { | ||
565 | case SEND_RDMA_WRITE_ZERO: | ||
566 | rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; | ||
567 | break; | ||
568 | case SEND_RDMA_READ_ZERO: | ||
569 | rtr_msg->ctrl_ord |= IETF_RDMA0_READ; | ||
570 | break; | ||
571 | } | ||
572 | } | ||
573 | rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird); | ||
574 | rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord); | ||
575 | } | ||
576 | |||
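
build_mpa_v2() packs the IRD/ORD counts and control flags into one 16-bit field before byte-swapping. A sketch of the encoding (bit values are assumptions matching the IETF MPA v2 layout: the low 14 bits carry the count, the top bits are control flags):

    #include <stdint.h>
    #include <arpa/inet.h>

    #define IETF_NO_IRD_ORD     0x3fff
    #define IETF_PEER_TO_PEER   0x8000
    #define IETF_FLPDU_ZERO_LEN 0x4000

    static uint16_t encode_ctrl_ird(uint16_t ird_size)
    {
            uint16_t v = (ird_size > IETF_NO_IRD_ORD) ? IETF_NO_IRD_ORD
                                                      : ird_size;
            v |= IETF_PEER_TO_PEER | IETF_FLPDU_ZERO_LEN;
            return htons(v);        /* wire format, as in build_mpa_v2() */
    }
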
577 | /** | ||
578 | * build_mpa_v1 - build an MPA V1 frame | ||
579 | */ | ||
580 | static void build_mpa_v1(struct nes_cm_node *cm_node, void *start_addr, u8 mpa_key) | ||
581 | { | ||
582 | struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr; | ||
583 | |||
584 | switch (mpa_key) { | ||
585 | case MPA_KEY_REQUEST: | ||
586 | memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); | ||
587 | break; | ||
588 | case MPA_KEY_REPLY: | ||
589 | memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
590 | break; | ||
591 | } | ||
592 | mpa_frame->flags = IETF_MPA_FLAGS_CRC; | ||
593 | mpa_frame->rev = cm_node->mpa_frame_rev; | ||
594 | mpa_frame->priv_data_len = htons(cm_node->mpa_frame_size); | ||
595 | } | ||
596 | |||
597 | static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_addr) | ||
598 | { | ||
599 | u64 u64temp; | ||
600 | struct nes_qp *nesqp = *nesqp_addr; | ||
601 | struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0]; | ||
602 | |||
603 | u64temp = (unsigned long)nesqp; | ||
604 | u64temp |= NES_SW_CONTEXT_ALIGN >> 1; | ||
605 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); | ||
606 | |||
607 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; | ||
608 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; | ||
609 | |||
610 | switch (cm_node->send_rdma0_op) { | ||
611 | case SEND_RDMA_WRITE_ZERO: | ||
612 | nes_debug(NES_DBG_CM, "Sending first write.\n"); | ||
613 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
614 | cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); | ||
615 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; | ||
616 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; | ||
617 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; | ||
618 | break; | ||
619 | |||
620 | case SEND_RDMA_READ_ZERO: | ||
621 | default: | ||
622 | if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) { | ||
623 | printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n", | ||
624 | __func__, __LINE__, cm_node->send_rdma0_op); | ||
625 | WARN_ON(1); | ||
626 | } | ||
627 | nes_debug(NES_DBG_CM, "Sending first rdma operation.\n"); | ||
628 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
629 | cpu_to_le32(NES_IWARP_SQ_OP_RDMAR); | ||
630 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1; | ||
631 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = 0; | ||
632 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = 0; | ||
633 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = 1; | ||
634 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 1; | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | if (nesqp->sq_kmapped) { | ||
639 | nesqp->sq_kmapped = 0; | ||
640 | kunmap(nesqp->page); | ||
641 | } | ||
642 | |||
643 | /* use the reserved spot on the WQ for the extra first WQE */ | ||
644 | nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | ||
645 | NES_QPCONTEXT_ORDIRD_WRPDU | | ||
646 | NES_QPCONTEXT_ORDIRD_ALSMM)); | ||
647 | nesqp->skip_lsmm = 1; | ||
648 | nesqp->hwqp.sq_tail = 0; | ||
649 | } | ||
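
The u64temp computation at the top of build_rdma0_msg() is a pointer-tagging trick: the qp pointer is aligned, so a marker fits in its low bits. A minimal round-trip sketch (the alignment constant is assumed to match nes_hw.h):

    #include <stdint.h>

    #define NES_SW_CONTEXT_ALIGN 1024   /* assumed to match nes_hw.h */

    /* Tag: stash a marker in the low bits of an aligned pointer. */
    static uint64_t tag_comp_ctx(void *qp)
    {
            return (uint64_t)(uintptr_t)qp | (NES_SW_CONTEXT_ALIGN >> 1);
    }

    /* Untag: mask the low bits back off to recover the pointer. */
    static void *untag_comp_ctx(uint64_t ctx)
    {
            return (void *)(uintptr_t)(ctx &
                            ~(uint64_t)(NES_SW_CONTEXT_ALIGN - 1));
    }
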
426 | 650 | ||
427 | /** | 651 | /** |
428 | * schedule_nes_timer | 652 | * schedule_nes_timer |
@@ -430,10 +654,10 @@ static void print_core(struct nes_cm_core *core) | |||
430 | * rem_ref_cm_node(cm_core, cm_node);add_ref_cm_node(cm_node); | 654 | * rem_ref_cm_node(cm_core, cm_node);add_ref_cm_node(cm_node); |
431 | */ | 655 | */ |
432 | int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | 656 | int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, |
433 | enum nes_timer_type type, int send_retrans, | 657 | enum nes_timer_type type, int send_retrans, |
434 | int close_when_complete) | 658 | int close_when_complete) |
435 | { | 659 | { |
436 | unsigned long flags; | 660 | unsigned long flags; |
437 | struct nes_cm_core *cm_core = cm_node->cm_core; | 661 | struct nes_cm_core *cm_core = cm_node->cm_core; |
438 | struct nes_timer_entry *new_send; | 662 | struct nes_timer_entry *new_send; |
439 | int ret = 0; | 663 | int ret = 0; |
@@ -454,7 +678,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
454 | new_send->close_when_complete = close_when_complete; | 678 | new_send->close_when_complete = close_when_complete; |
455 | 679 | ||
456 | if (type == NES_TIMER_TYPE_CLOSE) { | 680 | if (type == NES_TIMER_TYPE_CLOSE) { |
457 | new_send->timetosend += (HZ/10); | 681 | new_send->timetosend += (HZ / 10); |
458 | if (cm_node->recv_entry) { | 682 | if (cm_node->recv_entry) { |
459 | kfree(new_send); | 683 | kfree(new_send); |
460 | WARN_ON(1); | 684 | WARN_ON(1); |
@@ -475,7 +699,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
475 | ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); | 699 | ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); |
476 | if (ret != NETDEV_TX_OK) { | 700 | if (ret != NETDEV_TX_OK) { |
477 | nes_debug(NES_DBG_CM, "Error sending packet %p " | 701 | nes_debug(NES_DBG_CM, "Error sending packet %p " |
478 | "(jiffies = %lu)\n", new_send, jiffies); | 702 | "(jiffies = %lu)\n", new_send, jiffies); |
479 | new_send->timetosend = jiffies; | 703 | new_send->timetosend = jiffies; |
480 | ret = NETDEV_TX_OK; | 704 | ret = NETDEV_TX_OK; |
481 | } else { | 705 | } else { |
@@ -504,6 +728,7 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node) | |||
504 | struct iw_cm_id *cm_id = cm_node->cm_id; | 728 | struct iw_cm_id *cm_id = cm_node->cm_id; |
505 | enum nes_cm_node_state state = cm_node->state; | 729 | enum nes_cm_node_state state = cm_node->state; |
506 | cm_node->state = NES_CM_STATE_CLOSED; | 730 | cm_node->state = NES_CM_STATE_CLOSED; |
731 | |||
507 | switch (state) { | 732 | switch (state) { |
508 | case NES_CM_STATE_SYN_RCVD: | 733 | case NES_CM_STATE_SYN_RCVD: |
509 | case NES_CM_STATE_CLOSING: | 734 | case NES_CM_STATE_CLOSING: |
@@ -536,10 +761,10 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node) | |||
536 | spin_lock_irqsave(&nesqp->lock, qplockflags); | 761 | spin_lock_irqsave(&nesqp->lock, qplockflags); |
537 | if (nesqp->cm_id) { | 762 | if (nesqp->cm_id) { |
538 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " | 763 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " |
539 | "refcount = %d: HIT A " | 764 | "refcount = %d: HIT A " |
540 | "NES_TIMER_TYPE_CLOSE with something " | 765 | "NES_TIMER_TYPE_CLOSE with something " |
541 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, | 766 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, |
542 | atomic_read(&nesqp->refcount)); | 767 | atomic_read(&nesqp->refcount)); |
543 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | 768 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; |
544 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; | 769 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; |
545 | nesqp->ibqp_state = IB_QPS_ERR; | 770 | nesqp->ibqp_state = IB_QPS_ERR; |
@@ -548,10 +773,10 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node) | |||
548 | } else { | 773 | } else { |
549 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | 774 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); |
550 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " | 775 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " |
551 | "refcount = %d: HIT A " | 776 | "refcount = %d: HIT A " |
552 | "NES_TIMER_TYPE_CLOSE with nothing " | 777 | "NES_TIMER_TYPE_CLOSE with nothing " |
553 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, | 778 | "to do!!!\n", nesqp->hwqp.qp_id, cm_id, |
554 | atomic_read(&nesqp->refcount)); | 779 | atomic_read(&nesqp->refcount)); |
555 | } | 780 | } |
556 | } else if (rem_node) { | 781 | } else if (rem_node) { |
557 | /* TIME_WAIT state */ | 782 | /* TIME_WAIT state */ |
@@ -580,11 +805,12 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
580 | int ret = NETDEV_TX_OK; | 805 | int ret = NETDEV_TX_OK; |
581 | 806 | ||
582 | struct list_head timer_list; | 807 | struct list_head timer_list; |
808 | |||
583 | INIT_LIST_HEAD(&timer_list); | 809 | INIT_LIST_HEAD(&timer_list); |
584 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 810 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
585 | 811 | ||
586 | list_for_each_safe(list_node, list_core_temp, | 812 | list_for_each_safe(list_node, list_core_temp, |
587 | &cm_core->connected_nodes) { | 813 | &cm_core->connected_nodes) { |
588 | cm_node = container_of(list_node, struct nes_cm_node, list); | 814 | cm_node = container_of(list_node, struct nes_cm_node, list); |
589 | if ((cm_node->recv_entry) || (cm_node->send_entry)) { | 815 | if ((cm_node->recv_entry) || (cm_node->send_entry)) { |
590 | add_ref_cm_node(cm_node); | 816 | add_ref_cm_node(cm_node); |
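
The loop above uses a classic pattern: collect nodes with pending work onto a private list while holding ht_lock, taking a reference on each, then process them with the lock dropped. A generic sketch of the pattern (types and locking stubs are illustrative, not driver API):

    #include <stddef.h>

    struct work_node {
            struct work_node *next;       /* shared hash-table chain */
            struct work_node *timer_next; /* private work list */
            int has_pending_entry;
            int refcount;
    };

    static struct work_node *collect_pending(struct work_node *head)
    {
            struct work_node *work = NULL;

            /* spin_lock_irqsave(&ht_lock, flags); */
            for (struct work_node *n = head; n; n = n->next) {
                    if (!n->has_pending_entry)
                            continue;
                    n->refcount++;          /* cf. add_ref_cm_node() */
                    n->timer_next = work;
                    work = n;
            }
            /* spin_unlock_irqrestore(&ht_lock, flags); */
            return work;
    }
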
@@ -595,18 +821,19 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
595 | 821 | ||
596 | list_for_each_safe(list_node, list_core_temp, &timer_list) { | 822 | list_for_each_safe(list_node, list_core_temp, &timer_list) { |
597 | cm_node = container_of(list_node, struct nes_cm_node, | 823 | cm_node = container_of(list_node, struct nes_cm_node, |
598 | timer_entry); | 824 | timer_entry); |
599 | recv_entry = cm_node->recv_entry; | 825 | recv_entry = cm_node->recv_entry; |
600 | 826 | ||
601 | if (recv_entry) { | 827 | if (recv_entry) { |
602 | if (time_after(recv_entry->timetosend, jiffies)) { | 828 | if (time_after(recv_entry->timetosend, jiffies)) { |
603 | if (nexttimeout > recv_entry->timetosend || | 829 | if (nexttimeout > recv_entry->timetosend || |
604 | !settimer) { | 830 | !settimer) { |
605 | nexttimeout = recv_entry->timetosend; | 831 | nexttimeout = recv_entry->timetosend; |
606 | settimer = 1; | 832 | settimer = 1; |
607 | } | 833 | } |
608 | } else | 834 | } else { |
609 | handle_recv_entry(cm_node, 1); | 835 | handle_recv_entry(cm_node, 1); |
836 | } | ||
610 | } | 837 | } |
611 | 838 | ||
612 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | 839 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); |
@@ -617,8 +844,8 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
617 | if (time_after(send_entry->timetosend, jiffies)) { | 844 | if (time_after(send_entry->timetosend, jiffies)) { |
618 | if (cm_node->state != NES_CM_STATE_TSA) { | 845 | if (cm_node->state != NES_CM_STATE_TSA) { |
619 | if ((nexttimeout > | 846 | if ((nexttimeout > |
620 | send_entry->timetosend) || | 847 | send_entry->timetosend) || |
621 | !settimer) { | 848 | !settimer) { |
622 | nexttimeout = | 849 | nexttimeout = |
623 | send_entry->timetosend; | 850 | send_entry->timetosend; |
624 | settimer = 1; | 851 | settimer = 1; |
@@ -630,13 +857,13 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
630 | } | 857 | } |
631 | 858 | ||
632 | if ((cm_node->state == NES_CM_STATE_TSA) || | 859 | if ((cm_node->state == NES_CM_STATE_TSA) || |
633 | (cm_node->state == NES_CM_STATE_CLOSED)) { | 860 | (cm_node->state == NES_CM_STATE_CLOSED)) { |
634 | free_retrans_entry(cm_node); | 861 | free_retrans_entry(cm_node); |
635 | break; | 862 | break; |
636 | } | 863 | } |
637 | 864 | ||
638 | if (!send_entry->retranscount || | 865 | if (!send_entry->retranscount || |
639 | !send_entry->retrycount) { | 866 | !send_entry->retrycount) { |
640 | cm_packets_dropped++; | 867 | cm_packets_dropped++; |
641 | free_retrans_entry(cm_node); | 868 | free_retrans_entry(cm_node); |
642 | 869 | ||
@@ -645,28 +872,28 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
645 | nes_retrans_expired(cm_node); | 872 | nes_retrans_expired(cm_node); |
646 | cm_node->state = NES_CM_STATE_CLOSED; | 873 | cm_node->state = NES_CM_STATE_CLOSED; |
647 | spin_lock_irqsave(&cm_node->retrans_list_lock, | 874 | spin_lock_irqsave(&cm_node->retrans_list_lock, |
648 | flags); | 875 | flags); |
649 | break; | 876 | break; |
650 | } | 877 | } |
651 | atomic_inc(&send_entry->skb->users); | 878 | atomic_inc(&send_entry->skb->users); |
652 | cm_packets_retrans++; | 879 | cm_packets_retrans++; |
653 | nes_debug(NES_DBG_CM, "Retransmitting send_entry %p " | 880 | nes_debug(NES_DBG_CM, "Retransmitting send_entry %p " |
654 | "for node %p, jiffies = %lu, time to send = " | 881 | "for node %p, jiffies = %lu, time to send = " |
655 | "%lu, retranscount = %u, send_entry->seq_num = " | 882 | "%lu, retranscount = %u, send_entry->seq_num = " |
656 | "0x%08X, cm_node->tcp_cntxt.rem_ack_num = " | 883 | "0x%08X, cm_node->tcp_cntxt.rem_ack_num = " |
657 | "0x%08X\n", send_entry, cm_node, jiffies, | 884 | "0x%08X\n", send_entry, cm_node, jiffies, |
658 | send_entry->timetosend, | 885 | send_entry->timetosend, |
659 | send_entry->retranscount, | 886 | send_entry->retranscount, |
660 | send_entry->seq_num, | 887 | send_entry->seq_num, |
661 | cm_node->tcp_cntxt.rem_ack_num); | 888 | cm_node->tcp_cntxt.rem_ack_num); |
662 | 889 | ||
663 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, | 890 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, |
664 | flags); | 891 | flags); |
665 | ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev); | 892 | ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev); |
666 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | 893 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); |
667 | if (ret != NETDEV_TX_OK) { | 894 | if (ret != NETDEV_TX_OK) { |
668 | nes_debug(NES_DBG_CM, "rexmit failed for " | 895 | nes_debug(NES_DBG_CM, "rexmit failed for " |
669 | "node=%p\n", cm_node); | 896 | "node=%p\n", cm_node); |
670 | cm_packets_bounced++; | 897 | cm_packets_bounced++; |
671 | send_entry->retrycount--; | 898 | send_entry->retrycount--; |
672 | nexttimeout = jiffies + NES_SHORT_TIME; | 899 | nexttimeout = jiffies + NES_SHORT_TIME; |
@@ -676,18 +903,18 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
676 | cm_packets_sent++; | 903 | cm_packets_sent++; |
677 | } | 904 | } |
678 | nes_debug(NES_DBG_CM, "Packet Sent: retrans count = " | 905 | nes_debug(NES_DBG_CM, "Packet Sent: retrans count = " |
679 | "%u, retry count = %u.\n", | 906 | "%u, retry count = %u.\n", |
680 | send_entry->retranscount, | 907 | send_entry->retranscount, |
681 | send_entry->retrycount); | 908 | send_entry->retrycount); |
682 | if (send_entry->send_retrans) { | 909 | if (send_entry->send_retrans) { |
683 | send_entry->retranscount--; | 910 | send_entry->retranscount--; |
684 | timetosend = (NES_RETRY_TIMEOUT << | 911 | timetosend = (NES_RETRY_TIMEOUT << |
685 | (NES_DEFAULT_RETRANS - send_entry->retranscount)); | 912 | (NES_DEFAULT_RETRANS - send_entry->retranscount)); |
686 | 913 | ||
687 | send_entry->timetosend = jiffies + | 914 | send_entry->timetosend = jiffies + |
688 | min(timetosend, NES_MAX_TIMEOUT); | 915 | min(timetosend, NES_MAX_TIMEOUT); |
689 | if (nexttimeout > send_entry->timetosend || | 916 | if (nexttimeout > send_entry->timetosend || |
690 | !settimer) { | 917 | !settimer) { |
691 | nexttimeout = send_entry->timetosend; | 918 | nexttimeout = send_entry->timetosend; |
692 | settimer = 1; | 919 | settimer = 1; |
693 | } | 920 | } |
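
The retransmit delay computed above doubles on every send and is capped at NES_MAX_TIMEOUT. A standalone model of that backoff (constants are illustrative stand-ins for the nes_cm.h values; jiffies modeled as plain integers):

    #include <stdint.h>

    #define NES_RETRY_TIMEOUT   25U        /* assumed base, in jiffies */
    #define NES_DEFAULT_RETRANS  8U
    #define NES_MAX_TIMEOUT     (25U << 4) /* assumed cap */

    static uint64_t next_retransmit(uint64_t now, unsigned int retranscount)
    {
            /* each spent retransmit doubles the delay */
            uint64_t t = (uint64_t)NES_RETRY_TIMEOUT <<
                         (NES_DEFAULT_RETRANS - retranscount);

            if (t > NES_MAX_TIMEOUT)
                    t = NES_MAX_TIMEOUT;
            return now + t;
    }
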
@@ -696,11 +923,11 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
696 | close_when_complete = | 923 | close_when_complete = |
697 | send_entry->close_when_complete; | 924 | send_entry->close_when_complete; |
698 | nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n", | 925 | nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n", |
699 | cm_node, cm_node->state); | 926 | cm_node, cm_node->state); |
700 | free_retrans_entry(cm_node); | 927 | free_retrans_entry(cm_node); |
701 | if (close_when_complete) | 928 | if (close_when_complete) |
702 | rem_ref_cm_node(cm_node->cm_core, | 929 | rem_ref_cm_node(cm_node->cm_core, |
703 | cm_node); | 930 | cm_node); |
704 | } | 931 | } |
705 | } while (0); | 932 | } while (0); |
706 | 933 | ||
@@ -710,7 +937,7 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
710 | 937 | ||
711 | if (settimer) { | 938 | if (settimer) { |
712 | if (!timer_pending(&cm_core->tcp_timer)) { | 939 | if (!timer_pending(&cm_core->tcp_timer)) { |
713 | cm_core->tcp_timer.expires = nexttimeout; | 940 | cm_core->tcp_timer.expires = nexttimeout; |
714 | add_timer(&cm_core->tcp_timer); | 941 | add_timer(&cm_core->tcp_timer); |
715 | } | 942 | } |
716 | } | 943 | } |
@@ -721,13 +948,13 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
721 | * send_syn | 948 | * send_syn |
722 | */ | 949 | */ |
723 | static int send_syn(struct nes_cm_node *cm_node, u32 sendack, | 950 | static int send_syn(struct nes_cm_node *cm_node, u32 sendack, |
724 | struct sk_buff *skb) | 951 | struct sk_buff *skb) |
725 | { | 952 | { |
726 | int ret; | 953 | int ret; |
727 | int flags = SET_SYN; | 954 | int flags = SET_SYN; |
728 | char optionsbuffer[sizeof(struct option_mss) + | 955 | char optionsbuffer[sizeof(struct option_mss) + |
729 | sizeof(struct option_windowscale) + sizeof(struct option_base) + | 956 | sizeof(struct option_windowscale) + sizeof(struct option_base) + |
730 | TCP_OPTIONS_PADDING]; | 957 | TCP_OPTIONS_PADDING]; |
731 | 958 | ||
732 | int optionssize = 0; | 959 | int optionssize = 0; |
733 | /* Sending MSS option */ | 960 | /* Sending MSS option */ |
@@ -854,7 +1081,7 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
854 | * find_node - find a cm node that matches the reference cm node | 1081 | * find_node - find a cm node that matches the reference cm node |
855 | */ | 1082 | */ |
856 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | 1083 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, |
857 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) | 1084 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) |
858 | { | 1085 | { |
859 | unsigned long flags; | 1086 | unsigned long flags; |
860 | struct list_head *hte; | 1087 | struct list_head *hte; |
@@ -868,12 +1095,12 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
868 | list_for_each_entry(cm_node, hte, list) { | 1095 | list_for_each_entry(cm_node, hte, list) { |
869 | /* compare quad, return node handle if a match */ | 1096 | /* compare quad, return node handle if a match */ |
870 | nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", | 1097 | nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", |
871 | cm_node->loc_addr, cm_node->loc_port, | 1098 | cm_node->loc_addr, cm_node->loc_port, |
872 | loc_addr, loc_port, | 1099 | loc_addr, loc_port, |
873 | cm_node->rem_addr, cm_node->rem_port, | 1100 | cm_node->rem_addr, cm_node->rem_port, |
874 | rem_addr, rem_port); | 1101 | rem_addr, rem_port); |
875 | if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) && | 1102 | if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) && |
876 | (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) { | 1103 | (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) { |
877 | add_ref_cm_node(cm_node); | 1104 | add_ref_cm_node(cm_node); |
878 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | 1105 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); |
879 | return cm_node; | 1106 | return cm_node; |
@@ -890,7 +1117,7 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
890 | * find_listener - find a cm node listening on this addr-port pair | 1117 | * find_listener - find a cm node listening on this addr-port pair |
891 | */ | 1118 | */ |
892 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | 1119 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, |
893 | nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) | 1120 | nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) |
894 | { | 1121 | { |
895 | unsigned long flags; | 1122 | unsigned long flags; |
896 | struct nes_cm_listener *listen_node; | 1123 | struct nes_cm_listener *listen_node; |
@@ -900,9 +1127,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | |||
900 | list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { | 1127 | list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { |
901 | /* compare node pair, return node handle if a match */ | 1128 | /* compare node pair, return node handle if a match */ |
902 | if (((listen_node->loc_addr == dst_addr) || | 1129 | if (((listen_node->loc_addr == dst_addr) || |
903 | listen_node->loc_addr == 0x00000000) && | 1130 | listen_node->loc_addr == 0x00000000) && |
904 | (listen_node->loc_port == dst_port) && | 1131 | (listen_node->loc_port == dst_port) && |
905 | (listener_state & listen_node->listener_state)) { | 1132 | (listener_state & listen_node->listener_state)) { |
906 | atomic_inc(&listen_node->ref_count); | 1133 | atomic_inc(&listen_node->ref_count); |
907 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1134 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
908 | return listen_node; | 1135 | return listen_node; |
@@ -927,7 +1154,7 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node | |||
927 | return -EINVAL; | 1154 | return -EINVAL; |
928 | 1155 | ||
929 | nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", | 1156 | nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", |
930 | cm_node); | 1157 | cm_node); |
931 | 1158 | ||
932 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 1159 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
933 | 1160 | ||
@@ -946,7 +1173,7 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node | |||
946 | * mini_cm_dec_refcnt_listen | 1173 | * mini_cm_dec_refcnt_listen |
947 | */ | 1174 | */ |
948 | static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | 1175 | static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, |
949 | struct nes_cm_listener *listener, int free_hanging_nodes) | 1176 | struct nes_cm_listener *listener, int free_hanging_nodes) |
950 | { | 1177 | { |
951 | int ret = -EINVAL; | 1178 | int ret = -EINVAL; |
952 | int err = 0; | 1179 | int err = 0; |
@@ -957,8 +1184,8 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
957 | struct list_head reset_list; | 1184 | struct list_head reset_list; |
958 | 1185 | ||
959 | nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, " | 1186 | nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, " |
960 | "refcnt=%d\n", listener, free_hanging_nodes, | 1187 | "refcnt=%d\n", listener, free_hanging_nodes, |
961 | atomic_read(&listener->ref_count)); | 1188 | atomic_read(&listener->ref_count)); |
962 | /* free non-accelerated child nodes for this listener */ | 1189 | /* free non-accelerated child nodes for this listener */ |
963 | INIT_LIST_HEAD(&reset_list); | 1190 | INIT_LIST_HEAD(&reset_list); |
964 | if (free_hanging_nodes) { | 1191 | if (free_hanging_nodes) { |
@@ -966,7 +1193,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
966 | list_for_each_safe(list_pos, list_temp, | 1193 | list_for_each_safe(list_pos, list_temp, |
967 | &g_cm_core->connected_nodes) { | 1194 | &g_cm_core->connected_nodes) { |
968 | cm_node = container_of(list_pos, struct nes_cm_node, | 1195 | cm_node = container_of(list_pos, struct nes_cm_node, |
969 | list); | 1196 | list); |
970 | if ((cm_node->listener == listener) && | 1197 | if ((cm_node->listener == listener) && |
971 | (!cm_node->accelerated)) { | 1198 | (!cm_node->accelerated)) { |
972 | add_ref_cm_node(cm_node); | 1199 | add_ref_cm_node(cm_node); |
@@ -978,7 +1205,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
978 | 1205 | ||
979 | list_for_each_safe(list_pos, list_temp, &reset_list) { | 1206 | list_for_each_safe(list_pos, list_temp, &reset_list) { |
980 | cm_node = container_of(list_pos, struct nes_cm_node, | 1207 | cm_node = container_of(list_pos, struct nes_cm_node, |
981 | reset_entry); | 1208 | reset_entry); |
982 | { | 1209 | { |
983 | struct nes_cm_node *loopback = cm_node->loopbackpartner; | 1210 | struct nes_cm_node *loopback = cm_node->loopbackpartner; |
984 | enum nes_cm_node_state old_state; | 1211 | enum nes_cm_node_state old_state; |
@@ -990,7 +1217,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
990 | err = send_reset(cm_node, NULL); | 1217 | err = send_reset(cm_node, NULL); |
991 | if (err) { | 1218 | if (err) { |
992 | cm_node->state = | 1219 | cm_node->state = |
993 | NES_CM_STATE_CLOSED; | 1220 | NES_CM_STATE_CLOSED; |
994 | WARN_ON(1); | 1221 | WARN_ON(1); |
995 | } else { | 1222 | } else { |
996 | old_state = cm_node->state; | 1223 | old_state = cm_node->state; |
@@ -1035,10 +1262,9 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1035 | 1262 | ||
1036 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1263 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
1037 | 1264 | ||
1038 | if (listener->nesvnic) { | 1265 | if (listener->nesvnic) |
1039 | nes_manage_apbvt(listener->nesvnic, listener->loc_port, | 1266 | nes_manage_apbvt(listener->nesvnic, listener->loc_port, |
1040 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); | 1267 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); |
1041 | } | ||
1042 | 1268 | ||
1043 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 1269 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
1044 | 1270 | ||
@@ -1052,8 +1278,8 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1052 | if (listener) { | 1278 | if (listener) { |
1053 | if (atomic_read(&listener->pend_accepts_cnt) > 0) | 1279 | if (atomic_read(&listener->pend_accepts_cnt) > 0) |
1054 | nes_debug(NES_DBG_CM, "destroying listener (%p)" | 1280 | nes_debug(NES_DBG_CM, "destroying listener (%p)" |
1055 | " with non-zero pending accepts=%u\n", | 1281 | " with non-zero pending accepts=%u\n", |
1056 | listener, atomic_read(&listener->pend_accepts_cnt)); | 1282 | listener, atomic_read(&listener->pend_accepts_cnt)); |
1057 | } | 1283 | } |
1058 | 1284 | ||
1059 | return ret; | 1285 | return ret; |
@@ -1064,7 +1290,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1064 | * mini_cm_del_listen | 1290 | * mini_cm_del_listen |
1065 | */ | 1291 | */ |
1066 | static int mini_cm_del_listen(struct nes_cm_core *cm_core, | 1292 | static int mini_cm_del_listen(struct nes_cm_core *cm_core, |
1067 | struct nes_cm_listener *listener) | 1293 | struct nes_cm_listener *listener) |
1068 | { | 1294 | { |
1069 | listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE; | 1295 | listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE; |
1070 | listener->cm_id = NULL; /* going to be destroyed pretty soon */ | 1296 | listener->cm_id = NULL; /* going to be destroyed pretty soon */ |
@@ -1076,9 +1302,10 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core, | |||
1076 | * mini_cm_accelerated | 1302 | * mini_cm_accelerated |
1077 | */ | 1303 | */ |
1078 | static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, | 1304 | static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, |
1079 | struct nes_cm_node *cm_node) | 1305 | struct nes_cm_node *cm_node) |
1080 | { | 1306 | { |
1081 | u32 was_timer_set; | 1307 | u32 was_timer_set; |
1308 | |||
1082 | cm_node->accelerated = 1; | 1309 | cm_node->accelerated = 1; |
1083 | 1310 | ||
1084 | if (cm_node->accept_pend) { | 1311 | if (cm_node->accept_pend) { |
@@ -1112,7 +1339,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1112 | rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); | 1339 | rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); |
1113 | if (IS_ERR(rt)) { | 1340 | if (IS_ERR(rt)) { |
1114 | printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", | 1341 | printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", |
1115 | __func__, dst_ip); | 1342 | __func__, dst_ip); |
1116 | return rc; | 1343 | return rc; |
1117 | } | 1344 | } |
1118 | 1345 | ||
@@ -1130,7 +1357,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1130 | 1357 | ||
1131 | if (arpindex >= 0) { | 1358 | if (arpindex >= 0) { |
1132 | if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, | 1359 | if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, |
1133 | neigh->ha, ETH_ALEN)){ | 1360 | neigh->ha, ETH_ALEN)) { |
1134 | /* Mac address same as in nes_arp_table */ | 1361 | /* Mac address same as in nes_arp_table */ |
1135 | neigh_release(neigh); | 1362 | neigh_release(neigh); |
1136 | ip_rt_put(rt); | 1363 | ip_rt_put(rt); |
@@ -1138,8 +1365,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1138 | } | 1365 | } |
1139 | 1366 | ||
1140 | nes_manage_arp_cache(nesvnic->netdev, | 1367 | nes_manage_arp_cache(nesvnic->netdev, |
1141 | nesadapter->arp_table[arpindex].mac_addr, | 1368 | nesadapter->arp_table[arpindex].mac_addr, |
1142 | dst_ip, NES_ARP_DELETE); | 1369 | dst_ip, NES_ARP_DELETE); |
1143 | } | 1370 | } |
1144 | 1371 | ||
1145 | nes_manage_arp_cache(nesvnic->netdev, neigh->ha, | 1372 | nes_manage_arp_cache(nesvnic->netdev, neigh->ha, |
@@ -1161,8 +1388,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1161 | * make_cm_node - create a new instance of a cm node | 1388 | * make_cm_node - create a new instance of a cm node |
1162 | */ | 1389 | */ |
1163 | static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | 1390 | static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, |
1164 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info, | 1391 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info, |
1165 | struct nes_cm_listener *listener) | 1392 | struct nes_cm_listener *listener) |
1166 | { | 1393 | { |
1167 | struct nes_cm_node *cm_node; | 1394 | struct nes_cm_node *cm_node; |
1168 | struct timespec ts; | 1395 | struct timespec ts; |
@@ -1181,7 +1408,12 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1181 | cm_node->rem_addr = cm_info->rem_addr; | 1408 | cm_node->rem_addr = cm_info->rem_addr; |
1182 | cm_node->loc_port = cm_info->loc_port; | 1409 | cm_node->loc_port = cm_info->loc_port; |
1183 | cm_node->rem_port = cm_info->rem_port; | 1410 | cm_node->rem_port = cm_info->rem_port; |
1184 | cm_node->send_write0 = send_first; | 1411 | |
1412 | cm_node->mpa_frame_rev = mpa_version; | ||
1413 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | ||
1414 | cm_node->ird_size = IETF_NO_IRD_ORD; | ||
1415 | cm_node->ord_size = IETF_NO_IRD_ORD; | ||
1416 | |||
1185 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n", | 1417 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n", |
1186 | &cm_node->loc_addr, cm_node->loc_port, | 1418 | &cm_node->loc_addr, cm_node->loc_port, |
1187 | &cm_node->rem_addr, cm_node->rem_port); | 1419 | &cm_node->rem_addr, cm_node->rem_port); |
@@ -1191,7 +1423,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1191 | memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN); | 1423 | memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN); |
1192 | 1424 | ||
1193 | nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener, | 1425 | nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener, |
1194 | cm_node->cm_id); | 1426 | cm_node->cm_id); |
1195 | 1427 | ||
1196 | spin_lock_init(&cm_node->retrans_list_lock); | 1428 | spin_lock_init(&cm_node->retrans_list_lock); |
1197 | 1429 | ||
@@ -1202,11 +1434,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1202 | cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID; | 1434 | cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID; |
1203 | cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; | 1435 | cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; |
1204 | cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >> | 1436 | cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >> |
1205 | NES_CM_DEFAULT_RCV_WND_SCALE; | 1437 | NES_CM_DEFAULT_RCV_WND_SCALE; |
1206 | ts = current_kernel_time(); | 1438 | ts = current_kernel_time(); |
1207 | cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec); | 1439 | cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec); |
1208 | cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) - | 1440 | cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) - |
1209 | sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN; | 1441 | sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN; |
1210 | cm_node->tcp_cntxt.rcv_nxt = 0; | 1442 | cm_node->tcp_cntxt.rcv_nxt = 0; |
1211 | /* get a unique session ID, add thread_id to an upcounter to handle race */ | 1443 | /* get a unique session ID, add thread_id to an upcounter to handle race */ |
1212 | atomic_inc(&cm_core->node_cnt); | 1444 | atomic_inc(&cm_core->node_cnt); |
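
The MSS computation a few lines up subtracts every header that shares the frame with the TCP payload. A worked sketch with a concrete number (header sizes assume no IP or TCP options):

    #include <stdint.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN    4
    #define IP_HDR_LEN  20   /* sizeof(struct iphdr), no options */
    #define TCP_HDR_LEN 20   /* sizeof(struct tcphdr), no options */

    static uint16_t nes_cm_mss(uint16_t max_frame_size)
    {
            /* e.g. 1518-byte frames: 1518 - 20 - 20 - 14 - 4 = 1460 */
            return max_frame_size - IP_HDR_LEN - TCP_HDR_LEN
                                  - ETH_HLEN - VLAN_HLEN;
    }
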
@@ -1222,12 +1454,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1222 | cm_node->loopbackpartner = NULL; | 1454 | cm_node->loopbackpartner = NULL; |
1223 | 1455 | ||
1224 | /* get the mac addr for the remote node */ | 1456 | /* get the mac addr for the remote node */ |
1225 | if (ipv4_is_loopback(htonl(cm_node->rem_addr))) | 1457 | if (ipv4_is_loopback(htonl(cm_node->rem_addr))) { |
1226 | arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); | 1458 | arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); |
1227 | else { | 1459 | } else { |
1228 | oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); | 1460 | oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); |
1229 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex); | 1461 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex); |
1230 | |||
1231 | } | 1462 | } |
1232 | if (arpindex < 0) { | 1463 | if (arpindex < 0) { |
1233 | kfree(cm_node); | 1464 | kfree(cm_node); |
@@ -1260,7 +1491,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node) | |||
1260 | * rem_ref_cm_node - destroy an instance of a cm node | 1491 | * rem_ref_cm_node - destroy an instance of a cm node |
1261 | */ | 1492 | */ |
1262 | static int rem_ref_cm_node(struct nes_cm_core *cm_core, | 1493 | static int rem_ref_cm_node(struct nes_cm_core *cm_core, |
1263 | struct nes_cm_node *cm_node) | 1494 | struct nes_cm_node *cm_node) |
1264 | { | 1495 | { |
1265 | unsigned long flags; | 1496 | unsigned long flags; |
1266 | struct nes_qp *nesqp; | 1497 | struct nes_qp *nesqp; |
@@ -1291,9 +1522,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, | |||
1291 | } else { | 1522 | } else { |
1292 | if (cm_node->apbvt_set && cm_node->nesvnic) { | 1523 | if (cm_node->apbvt_set && cm_node->nesvnic) { |
1293 | nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, | 1524 | nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, |
1294 | PCI_FUNC( | 1525 | PCI_FUNC( |
1295 | cm_node->nesvnic->nesdev->pcidev->devfn), | 1526 | cm_node->nesvnic->nesdev->pcidev->devfn), |
1296 | NES_MANAGE_APBVT_DEL); | 1527 | NES_MANAGE_APBVT_DEL); |
1297 | } | 1528 | } |
1298 | } | 1529 | } |
1299 | 1530 | ||
@@ -1314,7 +1545,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, | |||
1314 | * process_options | 1545 | * process_options |
1315 | */ | 1546 | */ |
1316 | static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, | 1547 | static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, |
1317 | u32 optionsize, u32 syn_packet) | 1548 | u32 optionsize, u32 syn_packet) |
1318 | { | 1549 | { |
1319 | u32 tmp; | 1550 | u32 tmp; |
1320 | u32 offset = 0; | 1551 | u32 offset = 0; |
@@ -1332,15 +1563,15 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, | |||
1332 | continue; | 1563 | continue; |
1333 | case OPTION_NUMBER_MSS: | 1564 | case OPTION_NUMBER_MSS: |
1334 | nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d " | 1565 | nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d " |
1335 | "Size: %d\n", __func__, | 1566 | "Size: %d\n", __func__, |
1336 | all_options->as_mss.length, offset, optionsize); | 1567 | all_options->as_mss.length, offset, optionsize); |
1337 | got_mss_option = 1; | 1568 | got_mss_option = 1; |
1338 | if (all_options->as_mss.length != 4) { | 1569 | if (all_options->as_mss.length != 4) { |
1339 | return 1; | 1570 | return 1; |
1340 | } else { | 1571 | } else { |
1341 | tmp = ntohs(all_options->as_mss.mss); | 1572 | tmp = ntohs(all_options->as_mss.mss); |
1342 | if (tmp > 0 && tmp < | 1573 | if (tmp > 0 && tmp < |
1343 | cm_node->tcp_cntxt.mss) | 1574 | cm_node->tcp_cntxt.mss) |
1344 | cm_node->tcp_cntxt.mss = tmp; | 1575 | cm_node->tcp_cntxt.mss = tmp; |
1345 | } | 1576 | } |
1346 | break; | 1577 | break; |
@@ -1348,12 +1579,9 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, | |||
1348 | cm_node->tcp_cntxt.snd_wscale = | 1579 | cm_node->tcp_cntxt.snd_wscale = |
1349 | all_options->as_windowscale.shiftcount; | 1580 | all_options->as_windowscale.shiftcount; |
1350 | break; | 1581 | break; |
1351 | case OPTION_NUMBER_WRITE0: | ||
1352 | cm_node->send_write0 = 1; | ||
1353 | break; | ||
1354 | default: | 1582 | default: |
1355 | nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n", | 1583 | nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n", |
1356 | all_options->as_base.optionnum); | 1584 | all_options->as_base.optionnum); |
1357 | break; | 1585 | break; |
1358 | } | 1586 | } |
1359 | offset += all_options->as_base.length; | 1587 | offset += all_options->as_base.length; |
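
process_options() above is a standard TCP option TLV walk: END stops, NOP advances one byte, and every other option advances by its length field. A hedged standalone sketch of that loop shape (option-number names are stand-ins; the driver's bounds handling differs in detail):

    #include <stdint.h>

    #define OPTION_NUMBER_END  0
    #define OPTION_NUMBER_NONE 1   /* one-byte NOP padding */

    static int walk_tcp_options(const uint8_t *opts, uint32_t optionsize)
    {
            uint32_t offset = 0;

            while (offset < optionsize) {
                    uint8_t num = opts[offset];

                    if (num == OPTION_NUMBER_END)
                            break;
                    if (num == OPTION_NUMBER_NONE) {
                            offset++;
                            continue;
                    }
                    uint8_t len = opts[offset + 1];
                    if (len < 2 || offset + len > optionsize)
                            return 1;   /* malformed: caller sends RESET */
                    /* option payload, e.g. big-endian MSS, at offset + 2 */
                    offset += len;
            }
            return 0;
    }
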
@@ -1372,8 +1600,8 @@ static void drop_packet(struct sk_buff *skb) | |||
1372 | static void handle_fin_pkt(struct nes_cm_node *cm_node) | 1600 | static void handle_fin_pkt(struct nes_cm_node *cm_node) |
1373 | { | 1601 | { |
1374 | nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " | 1602 | nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " |
1375 | "refcnt=%d\n", cm_node, cm_node->state, | 1603 | "refcnt=%d\n", cm_node, cm_node->state, |
1376 | atomic_read(&cm_node->ref_count)); | 1604 | atomic_read(&cm_node->ref_count)); |
1377 | switch (cm_node->state) { | 1605 | switch (cm_node->state) { |
1378 | case NES_CM_STATE_SYN_RCVD: | 1606 | case NES_CM_STATE_SYN_RCVD: |
1379 | case NES_CM_STATE_SYN_SENT: | 1607 | case NES_CM_STATE_SYN_SENT: |
@@ -1439,7 +1667,20 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1439 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " | 1667 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " |
1440 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, | 1668 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, |
1441 | cm_node->listener, cm_node->state); | 1669 | cm_node->listener, cm_node->state); |
1442 | active_open_err(cm_node, skb, reset); | 1670 | switch (cm_node->mpa_frame_rev) { |
1671 | case IETF_MPA_V2: | ||
1672 | cm_node->mpa_frame_rev = IETF_MPA_V1; | ||
1673 | /* send a syn and goto syn sent state */ | ||
1674 | cm_node->state = NES_CM_STATE_SYN_SENT; | ||
1675 | if (send_syn(cm_node, 0, NULL)) { | ||
1676 | active_open_err(cm_node, skb, reset); | ||
1677 | } | ||
1678 | break; | ||
1679 | case IETF_MPA_V1: | ||
1680 | default: | ||
1681 | active_open_err(cm_node, skb, reset); | ||
1682 | break; | ||
1683 | } | ||
1443 | break; | 1684 | break; |
1444 | case NES_CM_STATE_MPAREQ_RCVD: | 1685 | case NES_CM_STATE_MPAREQ_RCVD: |
1445 | atomic_inc(&cm_node->passive_state); | 1686 | atomic_inc(&cm_node->passive_state); |
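
The RST handling added above gives MPA v2 connects one downgrade attempt: on reset during SYN_SENT, the node falls back to MPA v1 and re-runs the handshake before surfacing an error. The decision in isolation (enum values mirror the driver's revision constants):

    enum mpa_rev { MPA_V1 = 1, MPA_V2 = 2 };

    /* Returns nonzero when the caller should resend SYN and stay in
     * SYN_SENT; otherwise the reset is surfaced as a connect error. */
    static int downgrade_on_rst(enum mpa_rev *rev)
    {
            if (*rev == MPA_V2) {
                    *rev = MPA_V1;  /* retry the handshake as MPA v1 */
                    return 1;
            }
            return 0;               /* already v1: report the failure */
    }
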
@@ -1475,21 +1716,21 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1475 | 1716 | ||
1476 | static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) | 1717 | static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) |
1477 | { | 1718 | { |
1478 | 1719 | int ret = 0; | |
1479 | int ret = 0; | ||
1480 | int datasize = skb->len; | 1720 | int datasize = skb->len; |
1481 | u8 *dataloc = skb->data; | 1721 | u8 *dataloc = skb->data; |
1482 | 1722 | ||
1483 | enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN; | 1723 | enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN; |
1484 | u32 res_type; | 1724 | u32 res_type; |
1725 | |||
1485 | ret = parse_mpa(cm_node, dataloc, &res_type, datasize); | 1726 | ret = parse_mpa(cm_node, dataloc, &res_type, datasize); |
1486 | if (ret) { | 1727 | if (ret) { |
1487 | nes_debug(NES_DBG_CM, "didn't like MPA Request\n"); | 1728 | nes_debug(NES_DBG_CM, "didn't like MPA Request\n"); |
1488 | if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) { | 1729 | if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) { |
1489 | nes_debug(NES_DBG_CM, "%s[%u] create abort for " | 1730 | nes_debug(NES_DBG_CM, "%s[%u] create abort for " |
1490 | "cm_node=%p listener=%p state=%d\n", __func__, | 1731 | "cm_node=%p listener=%p state=%d\n", __func__, |
1491 | __LINE__, cm_node, cm_node->listener, | 1732 | __LINE__, cm_node, cm_node->listener, |
1492 | cm_node->state); | 1733 | cm_node->state); |
1493 | active_open_err(cm_node, skb, 1); | 1734 | active_open_err(cm_node, skb, 1); |
1494 | } else { | 1735 | } else { |
1495 | passive_open_err(cm_node, skb, 1); | 1736 | passive_open_err(cm_node, skb, 1); |
@@ -1499,16 +1740,15 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1499 | 1740 | ||
1500 | switch (cm_node->state) { | 1741 | switch (cm_node->state) { |
1501 | case NES_CM_STATE_ESTABLISHED: | 1742 | case NES_CM_STATE_ESTABLISHED: |
1502 | if (res_type == NES_MPA_REQUEST_REJECT) { | 1743 | if (res_type == NES_MPA_REQUEST_REJECT) |
1503 | /* Big problem: we are receiving the MPA, so this | 1744 | /* Big problem: we are receiving the MPA, so this |
1504 | * should not be a REJECT. This is a passive open; a | 1745 | * should not be a REJECT. This is a passive open; a |
1505 | * reject can only arrive on an active open. */ | 1746 | * reject can only arrive on an active open. */ |
1506 | WARN_ON(1); | 1747 | WARN_ON(1); |
1507 | } | ||
1508 | cm_node->state = NES_CM_STATE_MPAREQ_RCVD; | 1748 | cm_node->state = NES_CM_STATE_MPAREQ_RCVD; |
1509 | type = NES_CM_EVENT_MPA_REQ; | 1749 | type = NES_CM_EVENT_MPA_REQ; |
1510 | atomic_set(&cm_node->passive_state, | 1750 | atomic_set(&cm_node->passive_state, |
1511 | NES_PASSIVE_STATE_INDICATED); | 1751 | NES_PASSIVE_STATE_INDICATED); |
1512 | break; | 1752 | break; |
1513 | case NES_CM_STATE_MPAREQ_SENT: | 1753 | case NES_CM_STATE_MPAREQ_SENT: |
1514 | cleanup_retrans_entry(cm_node); | 1754 | cleanup_retrans_entry(cm_node); |
@@ -1535,8 +1775,8 @@ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1535 | case NES_CM_STATE_SYN_SENT: | 1775 | case NES_CM_STATE_SYN_SENT: |
1536 | case NES_CM_STATE_MPAREQ_SENT: | 1776 | case NES_CM_STATE_MPAREQ_SENT: |
1537 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " | 1777 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " |
1538 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, | 1778 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, |
1539 | cm_node->listener, cm_node->state); | 1779 | cm_node->listener, cm_node->state); |
1540 | active_open_err(cm_node, skb, 1); | 1780 | active_open_err(cm_node, skb, 1); |
1541 | break; | 1781 | break; |
1542 | case NES_CM_STATE_ESTABLISHED: | 1782 | case NES_CM_STATE_ESTABLISHED: |
@@ -1550,11 +1790,11 @@ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1550 | } | 1790 | } |
1551 | 1791 | ||
1552 | static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 1792 | static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph, |
1553 | struct sk_buff *skb) | 1793 | struct sk_buff *skb) |
1554 | { | 1794 | { |
1555 | int err; | 1795 | int err; |
1556 | 1796 | ||
1557 | err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num))? 0 : 1; | 1797 | err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num)) ? 0 : 1; |
1558 | if (err) | 1798 | if (err) |
1559 | active_open_err(cm_node, skb, 1); | 1799 | active_open_err(cm_node, skb, 1); |
1560 | 1800 | ||
@@ -1562,7 +1802,7 @@ static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1562 | } | 1802 | } |
1563 | 1803 | ||
1564 | static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 1804 | static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, |
1565 | struct sk_buff *skb) | 1805 | struct sk_buff *skb) |
1566 | { | 1806 | { |
1567 | int err = 0; | 1807 | int err = 0; |
1568 | u32 seq; | 1808 | u32 seq; |
@@ -1570,21 +1810,22 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1570 | u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; | 1810 | u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; |
1571 | u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; | 1811 | u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; |
1572 | u32 rcv_wnd; | 1812 | u32 rcv_wnd; |
1813 | |||
1573 | seq = ntohl(tcph->seq); | 1814 | seq = ntohl(tcph->seq); |
1574 | ack_seq = ntohl(tcph->ack_seq); | 1815 | ack_seq = ntohl(tcph->ack_seq); |
1575 | rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; | 1816 | rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; |
1576 | if (ack_seq != loc_seq_num) | 1817 | if (ack_seq != loc_seq_num) |
1577 | err = 1; | 1818 | err = 1; |
1578 | else if (!between(seq, rcv_nxt, (rcv_nxt+rcv_wnd))) | 1819 | else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd))) |
1579 | err = 1; | 1820 | err = 1; |
1580 | if (err) { | 1821 | if (err) { |
1581 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " | 1822 | nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p " |
1582 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, | 1823 | "listener=%p state=%d\n", __func__, __LINE__, cm_node, |
1583 | cm_node->listener, cm_node->state); | 1824 | cm_node->listener, cm_node->state); |
1584 | indicate_pkt_err(cm_node, skb); | 1825 | indicate_pkt_err(cm_node, skb); |
1585 | nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X " | 1826 | nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X " |
1586 | "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt, | 1827 | "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt, |
1587 | rcv_wnd); | 1828 | rcv_wnd); |
1588 | } | 1829 | } |
1589 | return err; | 1830 | return err; |
1590 | } | 1831 | } |
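
check_seq() above accepts a segment only if the peer acks exactly our next sequence number and its own sequence lands inside the receive window. A self-contained sketch, with between() reproduced as the usual mod-2^32 comparison:

    #include <stdint.h>

    static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
    {
            /* true when seq2 <= seq1 <= seq3 in wraparound arithmetic */
            return (uint32_t)(seq3 - seq2) >= (uint32_t)(seq1 - seq2);
    }

    static int seq_ok(uint32_t seq, uint32_t ack_seq, uint32_t loc_seq_num,
                      uint32_t rcv_nxt, uint32_t rcv_wnd)
    {
            return ack_seq == loc_seq_num &&
                   between(seq, rcv_nxt, rcv_nxt + rcv_wnd);
    }
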
@@ -1594,9 +1835,8 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1594 | * is created with a listener, or it may come in as a retransmitted | 1835 | * is created with a listener, or it may come in as a retransmitted |
1595 | * packet, in which case it will just be dropped. | 1836 | * packet, in which case it will just be dropped. |
1596 | */ | 1837 | */ |
1597 | |||
1598 | static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1838 | static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1599 | struct tcphdr *tcph) | 1839 | struct tcphdr *tcph) |
1600 | { | 1840 | { |
1601 | int ret; | 1841 | int ret; |
1602 | u32 inc_sequence; | 1842 | u32 inc_sequence; |
@@ -1615,15 +1855,15 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1615 | case NES_CM_STATE_LISTENING: | 1855 | case NES_CM_STATE_LISTENING: |
1616 | /* Passive OPEN */ | 1856 | /* Passive OPEN */ |
1617 | if (atomic_read(&cm_node->listener->pend_accepts_cnt) > | 1857 | if (atomic_read(&cm_node->listener->pend_accepts_cnt) > |
1618 | cm_node->listener->backlog) { | 1858 | cm_node->listener->backlog) { |
1619 | nes_debug(NES_DBG_CM, "drop syn due to backlog " | 1859 | nes_debug(NES_DBG_CM, "drop syn due to backlog " |
1620 | "pressure \n"); | 1860 | "pressure \n"); |
1621 | cm_backlog_drops++; | 1861 | cm_backlog_drops++; |
1622 | passive_open_err(cm_node, skb, 0); | 1862 | passive_open_err(cm_node, skb, 0); |
1623 | break; | 1863 | break; |
1624 | } | 1864 | } |
1625 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, | 1865 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, |
1626 | 1); | 1866 | 1); |
1627 | if (ret) { | 1867 | if (ret) { |
1628 | passive_open_err(cm_node, skb, 0); | 1868 | passive_open_err(cm_node, skb, 0); |
1629 | /* drop pkt */ | 1869 | /* drop pkt */ |
@@ -1657,9 +1897,8 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1657 | } | 1897 | } |
1658 | 1898 | ||
1659 | static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1899 | static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1660 | struct tcphdr *tcph) | 1900 | struct tcphdr *tcph) |
1661 | { | 1901 | { |
1662 | |||
1663 | int ret; | 1902 | int ret; |
1664 | u32 inc_sequence; | 1903 | u32 inc_sequence; |
1665 | int optionsize; | 1904 | int optionsize; |
@@ -1678,7 +1917,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1678 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0); | 1917 | ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0); |
1679 | if (ret) { | 1918 | if (ret) { |
1680 | nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n", | 1919 | nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n", |
1681 | cm_node); | 1920 | cm_node); |
1682 | break; | 1921 | break; |
1683 | } | 1922 | } |
1684 | cleanup_retrans_entry(cm_node); | 1923 | cleanup_retrans_entry(cm_node); |
@@ -1717,12 +1956,13 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1717 | } | 1956 | } |
1718 | 1957 | ||
1719 | static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | 1958 | static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1720 | struct tcphdr *tcph) | 1959 | struct tcphdr *tcph) |
1721 | { | 1960 | { |
1722 | int datasize = 0; | 1961 | int datasize = 0; |
1723 | u32 inc_sequence; | 1962 | u32 inc_sequence; |
1724 | int ret = 0; | 1963 | int ret = 0; |
1725 | int optionsize; | 1964 | int optionsize; |
1965 | |||
1726 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | 1966 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); |
1727 | 1967 | ||
1728 | if (check_seq(cm_node, tcph, skb)) | 1968 | if (check_seq(cm_node, tcph, skb)) |
@@ -1743,8 +1983,9 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1743 | if (datasize) { | 1983 | if (datasize) { |
1744 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 1984 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
1745 | handle_rcv_mpa(cm_node, skb); | 1985 | handle_rcv_mpa(cm_node, skb); |
1746 | } else /* rcvd ACK only */ | 1986 | } else { /* rcvd ACK only */ |
1747 | dev_kfree_skb_any(skb); | 1987 | dev_kfree_skb_any(skb); |
1988 | } | ||
1748 | break; | 1989 | break; |
1749 | case NES_CM_STATE_ESTABLISHED: | 1990 | case NES_CM_STATE_ESTABLISHED: |
1750 | /* Passive OPEN */ | 1991 | /* Passive OPEN */ |
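[Editor's note] The brace changes in this and the following hunks apply the kernel CodingStyle rule that when one branch of a conditional is a multi-line or commented block, every branch gets braces, even a single-statement one. A runnable illustration of the shape being enforced:

	#include <stdio.h>

	static void handle(int datasize)
	{
		if (datasize) {
			printf("consume %d payload bytes\n", datasize);
			printf("advance rcv_nxt\n");
		} else {			/* rcvd ACK only */
			printf("free the skb\n");
		}
	}

	int main(void)
	{
		handle(0);
		handle(128);
		return 0;
	}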
@@ -1752,16 +1993,18 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1752 | if (datasize) { | 1993 | if (datasize) { |
1753 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 1994 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
1754 | handle_rcv_mpa(cm_node, skb); | 1995 | handle_rcv_mpa(cm_node, skb); |
1755 | } else | 1996 | } else { |
1756 | drop_packet(skb); | 1997 | drop_packet(skb); |
1998 | } | ||
1757 | break; | 1999 | break; |
1758 | case NES_CM_STATE_MPAREQ_SENT: | 2000 | case NES_CM_STATE_MPAREQ_SENT: |
1759 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); | 2001 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); |
1760 | if (datasize) { | 2002 | if (datasize) { |
1761 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 2003 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
1762 | handle_rcv_mpa(cm_node, skb); | 2004 | handle_rcv_mpa(cm_node, skb); |
1763 | } else /* Could be just an ack pkt.. */ | 2005 | } else { /* Could be just an ack pkt.. */ |
1764 | dev_kfree_skb_any(skb); | 2006 | dev_kfree_skb_any(skb); |
2007 | } | ||
1765 | break; | 2008 | break; |
1766 | case NES_CM_STATE_LISTENING: | 2009 | case NES_CM_STATE_LISTENING: |
1767 | cleanup_retrans_entry(cm_node); | 2010 | cleanup_retrans_entry(cm_node); |
@@ -1802,14 +2045,15 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1802 | 2045 | ||
1803 | 2046 | ||
1804 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | 2047 | static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, |
1805 | struct sk_buff *skb, int optionsize, int passive) | 2048 | struct sk_buff *skb, int optionsize, int passive) |
1806 | { | 2049 | { |
1807 | u8 *optionsloc = (u8 *)&tcph[1]; | 2050 | u8 *optionsloc = (u8 *)&tcph[1]; |
2051 | |||
1808 | if (optionsize) { | 2052 | if (optionsize) { |
1809 | if (process_options(cm_node, optionsloc, optionsize, | 2053 | if (process_options(cm_node, optionsloc, optionsize, |
1810 | (u32)tcph->syn)) { | 2054 | (u32)tcph->syn)) { |
1811 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", | 2055 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", |
1812 | __func__, cm_node); | 2056 | __func__, cm_node); |
1813 | if (passive) | 2057 | if (passive) |
1814 | passive_open_err(cm_node, skb, 1); | 2058 | passive_open_err(cm_node, skb, 1); |
1815 | else | 2059 | else |
@@ -1819,7 +2063,7 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1819 | } | 2063 | } |
1820 | 2064 | ||
1821 | cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << | 2065 | cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << |
1822 | cm_node->tcp_cntxt.snd_wscale; | 2066 | cm_node->tcp_cntxt.snd_wscale; |
1823 | 2067 | ||
1824 | if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) | 2068 | if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) |
1825 | cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; | 2069 | cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; |
@@ -1830,18 +2074,18 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, | |||
1830 | * active_open_err() will send reset() if flag set.. | 2074 | * active_open_err() will send reset() if flag set.. |
1831 | * It will also send ABORT event. | 2075 | * It will also send ABORT event. |
1832 | */ | 2076 | */ |
1833 | |||
1834 | static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | 2077 | static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1835 | int reset) | 2078 | int reset) |
1836 | { | 2079 | { |
1837 | cleanup_retrans_entry(cm_node); | 2080 | cleanup_retrans_entry(cm_node); |
1838 | if (reset) { | 2081 | if (reset) { |
1839 | nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, " | 2082 | nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, " |
1840 | "state=%d\n", cm_node, cm_node->state); | 2083 | "state=%d\n", cm_node, cm_node->state); |
1841 | add_ref_cm_node(cm_node); | 2084 | add_ref_cm_node(cm_node); |
1842 | send_reset(cm_node, skb); | 2085 | send_reset(cm_node, skb); |
1843 | } else | 2086 | } else { |
1844 | dev_kfree_skb_any(skb); | 2087 | dev_kfree_skb_any(skb); |
2088 | } | ||
1845 | 2089 | ||
1846 | cm_node->state = NES_CM_STATE_CLOSED; | 2090 | cm_node->state = NES_CM_STATE_CLOSED; |
1847 | create_event(cm_node, NES_CM_EVENT_ABORTED); | 2091 | create_event(cm_node, NES_CM_EVENT_ABORTED); |
@@ -1851,15 +2095,14 @@ static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1851 | * passive_open_err() will either do a reset() or will free up the skb and | 2095 | * passive_open_err() will either do a reset() or will free up the skb and |
1852 | * remove the cm_node. | 2096 | * remove the cm_node. |
1853 | */ | 2097 | */ |
1854 | |||
1855 | static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | 2098 | static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1856 | int reset) | 2099 | int reset) |
1857 | { | 2100 | { |
1858 | cleanup_retrans_entry(cm_node); | 2101 | cleanup_retrans_entry(cm_node); |
1859 | cm_node->state = NES_CM_STATE_CLOSED; | 2102 | cm_node->state = NES_CM_STATE_CLOSED; |
1860 | if (reset) { | 2103 | if (reset) { |
1861 | nes_debug(NES_DBG_CM, "passive_open_err sending RST for " | 2104 | nes_debug(NES_DBG_CM, "passive_open_err sending RST for " |
1862 | "cm_node=%p state =%d\n", cm_node, cm_node->state); | 2105 | "cm_node=%p state =%d\n", cm_node, cm_node->state); |
1863 | send_reset(cm_node, skb); | 2106 | send_reset(cm_node, skb); |
1864 | } else { | 2107 | } else { |
1865 | dev_kfree_skb_any(skb); | 2108 | dev_kfree_skb_any(skb); |
@@ -1874,6 +2117,7 @@ static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1874 | static void free_retrans_entry(struct nes_cm_node *cm_node) | 2117 | static void free_retrans_entry(struct nes_cm_node *cm_node) |
1875 | { | 2118 | { |
1876 | struct nes_timer_entry *send_entry; | 2119 | struct nes_timer_entry *send_entry; |
2120 | |||
1877 | send_entry = cm_node->send_entry; | 2121 | send_entry = cm_node->send_entry; |
1878 | if (send_entry) { | 2122 | if (send_entry) { |
1879 | cm_node->send_entry = NULL; | 2123 | cm_node->send_entry = NULL; |
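[Editor's note] The blank line added here (and in several later hunks, e.g. in handle_ack_pkt and nes_cm_recv) satisfies checkpatch's warning about a missing blank line between local declarations and the first statement:

	/* checkpatch-clean layout: declarations, one blank line, then code */
	static void example(void)
	{
		int x = 0;

		x++;
		(void)x;
	}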
@@ -1897,26 +2141,28 @@ static void cleanup_retrans_entry(struct nes_cm_node *cm_node) | |||
1897 | * Returns skb if to be freed, else it will return NULL if already used.. | 2141 | * Returns skb if to be freed, else it will return NULL if already used.. |
1898 | */ | 2142 | */ |
1899 | static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, | 2143 | static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, |
1900 | struct nes_cm_core *cm_core) | 2144 | struct nes_cm_core *cm_core) |
1901 | { | 2145 | { |
1902 | enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN; | 2146 | enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN; |
1903 | struct tcphdr *tcph = tcp_hdr(skb); | 2147 | struct tcphdr *tcph = tcp_hdr(skb); |
1904 | u32 fin_set = 0; | 2148 | u32 fin_set = 0; |
1905 | int ret = 0; | 2149 | int ret = 0; |
2150 | |||
1906 | skb_pull(skb, ip_hdr(skb)->ihl << 2); | 2151 | skb_pull(skb, ip_hdr(skb)->ihl << 2); |
1907 | 2152 | ||
1908 | nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d " | 2153 | nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d " |
1909 | "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn, | 2154 | "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn, |
1910 | tcph->ack, tcph->rst, tcph->fin); | 2155 | tcph->ack, tcph->rst, tcph->fin); |
1911 | 2156 | ||
1912 | if (tcph->rst) | 2157 | if (tcph->rst) { |
1913 | pkt_type = NES_PKT_TYPE_RST; | 2158 | pkt_type = NES_PKT_TYPE_RST; |
1914 | else if (tcph->syn) { | 2159 | } else if (tcph->syn) { |
1915 | pkt_type = NES_PKT_TYPE_SYN; | 2160 | pkt_type = NES_PKT_TYPE_SYN; |
1916 | if (tcph->ack) | 2161 | if (tcph->ack) |
1917 | pkt_type = NES_PKT_TYPE_SYNACK; | 2162 | pkt_type = NES_PKT_TYPE_SYNACK; |
1918 | } else if (tcph->ack) | 2163 | } else if (tcph->ack) { |
1919 | pkt_type = NES_PKT_TYPE_ACK; | 2164 | pkt_type = NES_PKT_TYPE_ACK; |
2165 | } | ||
1920 | if (tcph->fin) | 2166 | if (tcph->fin) |
1921 | fin_set = 1; | 2167 | fin_set = 1; |
1922 | 2168 | ||
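[Editor's note] The rewritten classification in process_packet() gives every TCP-flag branch braces and keeps the original precedence: RST wins outright, SYN becomes SYNACK when ACK rides along, a bare ACK comes last, and FIN is tracked separately because it can accompany any of them. A self-contained version of that decision table:

	#include <stdio.h>

	enum pkt_type { PKT_UNKNOWN, PKT_RST, PKT_SYN, PKT_SYNACK, PKT_ACK };

	static enum pkt_type classify(int syn, int ack, int rst)
	{
		if (rst)
			return PKT_RST;		/* reset overrides the rest */
		if (syn)
			return ack ? PKT_SYNACK : PKT_SYN;
		if (ack)
			return PKT_ACK;
		return PKT_UNKNOWN;
	}

	int main(void)
	{
		printf("syn+ack -> %d\n", classify(1, 1, 0));	/* PKT_SYNACK */
		printf("ack     -> %d\n", classify(0, 1, 0));	/* PKT_ACK */
		return 0;
	}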
@@ -1947,17 +2193,17 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1947 | * mini_cm_listen - create a listen node with params | 2193 | * mini_cm_listen - create a listen node with params |
1948 | */ | 2194 | */ |
1949 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | 2195 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, |
1950 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) | 2196 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) |
1951 | { | 2197 | { |
1952 | struct nes_cm_listener *listener; | 2198 | struct nes_cm_listener *listener; |
1953 | unsigned long flags; | 2199 | unsigned long flags; |
1954 | 2200 | ||
1955 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", | 2201 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", |
1956 | cm_info->loc_addr, cm_info->loc_port); | 2202 | cm_info->loc_addr, cm_info->loc_port); |
1957 | 2203 | ||
1958 | /* cannot have multiple matching listeners */ | 2204 | /* cannot have multiple matching listeners */ |
1959 | listener = find_listener(cm_core, htonl(cm_info->loc_addr), | 2205 | listener = find_listener(cm_core, htonl(cm_info->loc_addr), |
1960 | htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE); | 2206 | htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE); |
1961 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { | 2207 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { |
1962 | /* find automatically incs ref count ??? */ | 2208 | /* find automatically incs ref count ??? */ |
1963 | atomic_dec(&listener->ref_count); | 2209 | atomic_dec(&listener->ref_count); |
@@ -2003,9 +2249,9 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | |||
2003 | } | 2249 | } |
2004 | 2250 | ||
2005 | nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x," | 2251 | nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x," |
2006 | " listener = %p, backlog = %d, cm_id = %p.\n", | 2252 | " listener = %p, backlog = %d, cm_id = %p.\n", |
2007 | cm_info->loc_addr, cm_info->loc_port, | 2253 | cm_info->loc_addr, cm_info->loc_port, |
2008 | listener, listener->backlog, listener->cm_id); | 2254 | listener, listener->backlog, listener->cm_id); |
2009 | 2255 | ||
2010 | return listener; | 2256 | return listener; |
2011 | } | 2257 | } |
@@ -2015,26 +2261,20 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | |||
2015 | * mini_cm_connect - make a connection node with params | 2261 | * mini_cm_connect - make a connection node with params |
2016 | */ | 2262 | */ |
2017 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | 2263 | static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, |
2018 | struct nes_vnic *nesvnic, u16 private_data_len, | 2264 | struct nes_vnic *nesvnic, u16 private_data_len, |
2019 | void *private_data, struct nes_cm_info *cm_info) | 2265 | void *private_data, struct nes_cm_info *cm_info) |
2020 | { | 2266 | { |
2021 | int ret = 0; | 2267 | int ret = 0; |
2022 | struct nes_cm_node *cm_node; | 2268 | struct nes_cm_node *cm_node; |
2023 | struct nes_cm_listener *loopbackremotelistener; | 2269 | struct nes_cm_listener *loopbackremotelistener; |
2024 | struct nes_cm_node *loopbackremotenode; | 2270 | struct nes_cm_node *loopbackremotenode; |
2025 | struct nes_cm_info loopback_cm_info; | 2271 | struct nes_cm_info loopback_cm_info; |
2026 | u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + private_data_len; | 2272 | u8 *start_buff; |
2027 | struct ietf_mpa_frame *mpa_frame = NULL; | ||
2028 | 2273 | ||
2029 | /* create a CM connection node */ | 2274 | /* create a CM connection node */ |
2030 | cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL); | 2275 | cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL); |
2031 | if (!cm_node) | 2276 | if (!cm_node) |
2032 | return NULL; | 2277 | return NULL; |
2033 | mpa_frame = &cm_node->mpa_frame; | ||
2034 | memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); | ||
2035 | mpa_frame->flags = IETF_MPA_FLAGS_CRC; | ||
2036 | mpa_frame->rev = IETF_MPA_VERSION; | ||
2037 | mpa_frame->priv_data_len = htons(private_data_len); | ||
2038 | 2278 | ||
2039 | /* set our node side to client (active) side */ | 2279 | /* set our node side to client (active) side */ |
2040 | cm_node->tcp_cntxt.client = 1; | 2280 | cm_node->tcp_cntxt.client = 1; |
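[Editor's note] The deleted block used to fill the MPA request header (key, CRC flag, revision, private-data length) at connect time; with MPA v2 support the header is now produced later by a common frame builder. For orientation, a hedged sketch of the v1 request header those removed lines populated; the 16-byte key and the 0x40 CRC bit follow RFC 5044, but treat the exact layout as an assumption rather than the driver's definition:

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	struct mpa_v1_hdr {
		char	 key[16];	/* "MPA ID Req Frame" on the request side */
		uint8_t	 flags;		/* marker / CRC / reject bits */
		uint8_t	 rev;		/* MPA revision */
		uint16_t priv_data_len;	/* big-endian on the wire */
	};

	static void fill_mpa_req(struct mpa_v1_hdr *h, uint16_t priv_len)
	{
		memcpy(h->key, "MPA ID Req Frame", sizeof(h->key));
		h->flags = 0x40;		/* assumed: request CRC */
		h->rev = 1;
		h->priv_data_len = htons(priv_len);
	}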
@@ -2042,8 +2282,8 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2042 | 2282 | ||
2043 | if (cm_info->loc_addr == cm_info->rem_addr) { | 2283 | if (cm_info->loc_addr == cm_info->rem_addr) { |
2044 | loopbackremotelistener = find_listener(cm_core, | 2284 | loopbackremotelistener = find_listener(cm_core, |
2045 | ntohl(nesvnic->local_ipaddr), cm_node->rem_port, | 2285 | ntohl(nesvnic->local_ipaddr), cm_node->rem_port, |
2046 | NES_CM_LISTENER_ACTIVE_STATE); | 2286 | NES_CM_LISTENER_ACTIVE_STATE); |
2047 | if (loopbackremotelistener == NULL) { | 2287 | if (loopbackremotelistener == NULL) { |
2048 | create_event(cm_node, NES_CM_EVENT_ABORTED); | 2288 | create_event(cm_node, NES_CM_EVENT_ABORTED); |
2049 | } else { | 2289 | } else { |
@@ -2052,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2052 | loopback_cm_info.rem_port = cm_info->loc_port; | 2292 | loopback_cm_info.rem_port = cm_info->loc_port; |
2053 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; | 2293 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; |
2054 | loopbackremotenode = make_cm_node(cm_core, nesvnic, | 2294 | loopbackremotenode = make_cm_node(cm_core, nesvnic, |
2055 | &loopback_cm_info, loopbackremotelistener); | 2295 | &loopback_cm_info, loopbackremotelistener); |
2056 | if (!loopbackremotenode) { | 2296 | if (!loopbackremotenode) { |
2057 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 2297 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
2058 | return NULL; | 2298 | return NULL; |
@@ -2063,7 +2303,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2063 | NES_CM_DEFAULT_RCV_WND_SCALE; | 2303 | NES_CM_DEFAULT_RCV_WND_SCALE; |
2064 | cm_node->loopbackpartner = loopbackremotenode; | 2304 | cm_node->loopbackpartner = loopbackremotenode; |
2065 | memcpy(loopbackremotenode->mpa_frame_buf, private_data, | 2305 | memcpy(loopbackremotenode->mpa_frame_buf, private_data, |
2066 | private_data_len); | 2306 | private_data_len); |
2067 | loopbackremotenode->mpa_frame_size = private_data_len; | 2307 | loopbackremotenode->mpa_frame_size = private_data_len; |
2068 | 2308 | ||
2069 | /* we are done handling this state. */ | 2309 | /* we are done handling this state. */ |
@@ -2091,12 +2331,10 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2091 | return cm_node; | 2331 | return cm_node; |
2092 | } | 2332 | } |
2093 | 2333 | ||
2094 | /* set our node side to client (active) side */ | 2334 | start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2); |
2095 | cm_node->tcp_cntxt.client = 1; | 2335 | cm_node->mpa_frame_size = private_data_len; |
2096 | /* init our MPA frame ptr */ | ||
2097 | memcpy(mpa_frame->priv_data, private_data, private_data_len); | ||
2098 | 2336 | ||
2099 | cm_node->mpa_frame_size = mpa_frame_size; | 2337 | memcpy(start_buff, private_data, private_data_len); |
2100 | 2338 | ||
2101 | /* send a syn and goto syn sent state */ | 2339 | /* send a syn and goto syn sent state */ |
2102 | cm_node->state = NES_CM_STATE_SYN_SENT; | 2340 | cm_node->state = NES_CM_STATE_SYN_SENT; |
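[Editor's note] Instead of embedding private data inside a preformatted struct ietf_mpa_frame, the connect path now copies it into mpa_frame_buf at an offset of sizeof(struct ietf_mpa_v2), leaving room for whichever MPA header is chosen once the revision is negotiated. The pointer arithmetic, reduced to a runnable sketch (HDR_ROOM stands in for the real header size):

	#include <stdio.h>
	#include <string.h>

	#define HDR_ROOM 24	/* stand-in for sizeof(struct ietf_mpa_v2) */

	int main(void)
	{
		char frame_buf[128];
		const char priv[] = "private-data";

		/* reserve header room, then place the payload behind it */
		char *start_buff = &frame_buf[0] + HDR_ROOM;
		memcpy(start_buff, priv, sizeof(priv));

		printf("payload at offset %ld\n", (long)(start_buff - frame_buf));
		return 0;
	}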
@@ -2105,18 +2343,19 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2105 | if (ret) { | 2343 | if (ret) { |
2106 | /* error in sending the syn free up the cm_node struct */ | 2344 | /* error in sending the syn free up the cm_node struct */ |
2107 | nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest " | 2345 | nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest " |
2108 | "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n", | 2346 | "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n", |
2109 | cm_node->rem_addr, cm_node->rem_port, cm_node, | 2347 | cm_node->rem_addr, cm_node->rem_port, cm_node, |
2110 | cm_node->cm_id); | 2348 | cm_node->cm_id); |
2111 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 2349 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
2112 | cm_node = NULL; | 2350 | cm_node = NULL; |
2113 | } | 2351 | } |
2114 | 2352 | ||
2115 | if (cm_node) | 2353 | if (cm_node) { |
2116 | nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X," | 2354 | nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X," |
2117 | "port=0x%04x, cm_node=%p, cm_id = %p.\n", | 2355 | "port=0x%04x, cm_node=%p, cm_id = %p.\n", |
2118 | cm_node->rem_addr, cm_node->rem_port, cm_node, | 2356 | cm_node->rem_addr, cm_node->rem_port, cm_node, |
2119 | cm_node->cm_id); | 2357 | cm_node->cm_id); |
2358 | } | ||
2120 | 2359 | ||
2121 | return cm_node; | 2360 | return cm_node; |
2122 | } | 2361 | } |
@@ -2126,8 +2365,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2126 | * mini_cm_accept - accept a connection | 2365 | * mini_cm_accept - accept a connection |
2127 | * This function is never called | 2366 | * This function is never called |
2128 | */ | 2367 | */ |
2129 | static int mini_cm_accept(struct nes_cm_core *cm_core, | 2368 | static int mini_cm_accept(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) |
2130 | struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) | ||
2131 | { | 2369 | { |
2132 | return 0; | 2370 | return 0; |
2133 | } | 2371 | } |
@@ -2136,8 +2374,7 @@ static int mini_cm_accept(struct nes_cm_core *cm_core, | |||
2136 | /** | 2374 | /** |
2137 | * mini_cm_reject - reject and teardown a connection | 2375 | * mini_cm_reject - reject and teardown a connection |
2138 | */ | 2376 | */ |
2139 | static int mini_cm_reject(struct nes_cm_core *cm_core, | 2377 | static int mini_cm_reject(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) |
2140 | struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) | ||
2141 | { | 2378 | { |
2142 | int ret = 0; | 2379 | int ret = 0; |
2143 | int err = 0; | 2380 | int err = 0; |
@@ -2147,7 +2384,7 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, | |||
2147 | struct nes_cm_node *loopback = cm_node->loopbackpartner; | 2384 | struct nes_cm_node *loopback = cm_node->loopbackpartner; |
2148 | 2385 | ||
2149 | nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", | 2386 | nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", |
2150 | __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); | 2387 | __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); |
2151 | 2388 | ||
2152 | if (cm_node->tcp_cntxt.client) | 2389 | if (cm_node->tcp_cntxt.client) |
2153 | return ret; | 2390 | return ret; |
@@ -2168,8 +2405,9 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, | |||
2168 | err = send_reset(cm_node, NULL); | 2405 | err = send_reset(cm_node, NULL); |
2169 | if (err) | 2406 | if (err) |
2170 | WARN_ON(1); | 2407 | WARN_ON(1); |
2171 | } else | 2408 | } else { |
2172 | cm_id->add_ref(cm_id); | 2409 | cm_id->add_ref(cm_id); |
2410 | } | ||
2173 | } | 2411 | } |
2174 | } | 2412 | } |
2175 | } else { | 2413 | } else { |
@@ -2244,7 +2482,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod | |||
2244 | case NES_CM_STATE_TSA: | 2482 | case NES_CM_STATE_TSA: |
2245 | if (cm_node->send_entry) | 2483 | if (cm_node->send_entry) |
2246 | printk(KERN_ERR "ERROR Close got called from STATE_TSA " | 2484 | printk(KERN_ERR "ERROR Close got called from STATE_TSA " |
2247 | "send_entry=%p\n", cm_node->send_entry); | 2485 | "send_entry=%p\n", cm_node->send_entry); |
2248 | ret = rem_ref_cm_node(cm_core, cm_node); | 2486 | ret = rem_ref_cm_node(cm_core, cm_node); |
2249 | break; | 2487 | break; |
2250 | } | 2488 | } |
@@ -2257,7 +2495,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod | |||
2257 | * node state machine | 2495 | * node state machine |
2258 | */ | 2496 | */ |
2259 | static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | 2497 | static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, |
2260 | struct nes_vnic *nesvnic, struct sk_buff *skb) | 2498 | struct nes_vnic *nesvnic, struct sk_buff *skb) |
2261 | { | 2499 | { |
2262 | struct nes_cm_node *cm_node = NULL; | 2500 | struct nes_cm_node *cm_node = NULL; |
2263 | struct nes_cm_listener *listener = NULL; | 2501 | struct nes_cm_listener *listener = NULL; |
@@ -2269,9 +2507,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2269 | 2507 | ||
2270 | if (!skb) | 2508 | if (!skb) |
2271 | return 0; | 2509 | return 0; |
2272 | if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { | 2510 | if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) |
2273 | return 0; | 2511 | return 0; |
2274 | } | ||
2275 | 2512 | ||
2276 | iph = (struct iphdr *)skb->data; | 2513 | iph = (struct iphdr *)skb->data; |
2277 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); | 2514 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); |
@@ -2289,8 +2526,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2289 | 2526 | ||
2290 | do { | 2527 | do { |
2291 | cm_node = find_node(cm_core, | 2528 | cm_node = find_node(cm_core, |
2292 | nfo.rem_port, nfo.rem_addr, | 2529 | nfo.rem_port, nfo.rem_addr, |
2293 | nfo.loc_port, nfo.loc_addr); | 2530 | nfo.loc_port, nfo.loc_addr); |
2294 | 2531 | ||
2295 | if (!cm_node) { | 2532 | if (!cm_node) { |
2296 | /* Only type of packet accepted is for */ | 2533 | /* Only type of packet accepted is for */ |
@@ -2300,8 +2537,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2300 | break; | 2537 | break; |
2301 | } | 2538 | } |
2302 | listener = find_listener(cm_core, nfo.loc_addr, | 2539 | listener = find_listener(cm_core, nfo.loc_addr, |
2303 | nfo.loc_port, | 2540 | nfo.loc_port, |
2304 | NES_CM_LISTENER_ACTIVE_STATE); | 2541 | NES_CM_LISTENER_ACTIVE_STATE); |
2305 | if (!listener) { | 2542 | if (!listener) { |
2306 | nfo.cm_id = NULL; | 2543 | nfo.cm_id = NULL; |
2307 | nfo.conn_type = 0; | 2544 | nfo.conn_type = 0; |
@@ -2312,10 +2549,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2312 | nfo.cm_id = listener->cm_id; | 2549 | nfo.cm_id = listener->cm_id; |
2313 | nfo.conn_type = listener->conn_type; | 2550 | nfo.conn_type = listener->conn_type; |
2314 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, | 2551 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, |
2315 | listener); | 2552 | listener); |
2316 | if (!cm_node) { | 2553 | if (!cm_node) { |
2317 | nes_debug(NES_DBG_CM, "Unable to allocate " | 2554 | nes_debug(NES_DBG_CM, "Unable to allocate " |
2318 | "node\n"); | 2555 | "node\n"); |
2319 | cm_packets_dropped++; | 2556 | cm_packets_dropped++; |
2320 | atomic_dec(&listener->ref_count); | 2557 | atomic_dec(&listener->ref_count); |
2321 | dev_kfree_skb_any(skb); | 2558 | dev_kfree_skb_any(skb); |
@@ -2331,9 +2568,13 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2331 | } | 2568 | } |
2332 | add_ref_cm_node(cm_node); | 2569 | add_ref_cm_node(cm_node); |
2333 | } else if (cm_node->state == NES_CM_STATE_TSA) { | 2570 | } else if (cm_node->state == NES_CM_STATE_TSA) { |
2334 | rem_ref_cm_node(cm_core, cm_node); | 2571 | if (cm_node->nesqp->pau_mode) |
2335 | atomic_inc(&cm_accel_dropped_pkts); | 2572 | nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp); |
2336 | dev_kfree_skb_any(skb); | 2573 | else { |
2574 | rem_ref_cm_node(cm_core, cm_node); | ||
2575 | atomic_inc(&cm_accel_dropped_pkts); | ||
2576 | dev_kfree_skb_any(skb); | ||
2577 | } | ||
2337 | break; | 2578 | break; |
2338 | } | 2579 | } |
2339 | skb_reset_network_header(skb); | 2580 | skb_reset_network_header(skb); |
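[Editor's note] New behavior in mini_cm_recv_pkt(): a packet that lands on a connection already in TSA (accelerated) state is no longer dropped unconditionally. If the QP is in PAU mode it is handed to nes_queue_mgt_skbs() for the management path introduced elsewhere in this series; otherwise the old drop-and-count path runs. The dispatch, as a hedged sketch with stand-in types:

	struct qp { int pau_mode; };
	struct pkt { int len; };

	static int accel_dropped;

	static void queue_mgt(struct pkt *p) { (void)p; /* hand off for fixup */ }
	static void drop(struct pkt *p) { (void)p; accel_dropped++; }

	static void tsa_rx(struct qp *qp, struct pkt *p)
	{
		if (qp->pau_mode)
			queue_mgt(p);	/* keep the packet, process out of line */
		else
			drop(p);	/* accelerated connection: CM discards it */
	}

	int main(void)
	{
		struct qp q = { .pau_mode = 1 };
		struct pkt p = { .len = 64 };

		tsa_rx(&q, &p);		/* goes to the management queue */
		return 0;
	}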
@@ -2363,7 +2604,7 @@ static struct nes_cm_core *nes_cm_alloc_core(void) | |||
2363 | init_timer(&cm_core->tcp_timer); | 2604 | init_timer(&cm_core->tcp_timer); |
2364 | cm_core->tcp_timer.function = nes_cm_timer_tick; | 2605 | cm_core->tcp_timer.function = nes_cm_timer_tick; |
2365 | 2606 | ||
2366 | cm_core->mtu = NES_CM_DEFAULT_MTU; | 2607 | cm_core->mtu = NES_CM_DEFAULT_MTU; |
2367 | cm_core->state = NES_CM_STATE_INITED; | 2608 | cm_core->state = NES_CM_STATE_INITED; |
2368 | cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS; | 2609 | cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS; |
2369 | 2610 | ||
@@ -2401,9 +2642,8 @@ static int mini_cm_dealloc_core(struct nes_cm_core *cm_core) | |||
2401 | 2642 | ||
2402 | barrier(); | 2643 | barrier(); |
2403 | 2644 | ||
2404 | if (timer_pending(&cm_core->tcp_timer)) { | 2645 | if (timer_pending(&cm_core->tcp_timer)) |
2405 | del_timer(&cm_core->tcp_timer); | 2646 | del_timer(&cm_core->tcp_timer); |
2406 | } | ||
2407 | 2647 | ||
2408 | destroy_workqueue(cm_core->event_wq); | 2648 | destroy_workqueue(cm_core->event_wq); |
2409 | destroy_workqueue(cm_core->disconn_wq); | 2649 | destroy_workqueue(cm_core->disconn_wq); |
@@ -2458,8 +2698,8 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2458 | return -EINVAL; | 2698 | return -EINVAL; |
2459 | 2699 | ||
2460 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 | | 2700 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 | |
2461 | NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG | | 2701 | NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG | |
2462 | NES_QPCONTEXT_MISC_DROS); | 2702 | NES_QPCONTEXT_MISC_DROS); |
2463 | 2703 | ||
2464 | if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale) | 2704 | if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale) |
2465 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE); | 2705 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE); |
@@ -2469,15 +2709,15 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2469 | nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16); | 2709 | nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16); |
2470 | 2710 | ||
2471 | nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32( | 2711 | nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32( |
2472 | (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT); | 2712 | (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT); |
2473 | 2713 | ||
2474 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( | 2714 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( |
2475 | (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) & | 2715 | (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) & |
2476 | NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK); | 2716 | NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK); |
2477 | 2717 | ||
2478 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( | 2718 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( |
2479 | (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) & | 2719 | (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) & |
2480 | NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK); | 2720 | NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK); |
2481 | 2721 | ||
2482 | nesqp->nesqp_context->keepalive = cpu_to_le32(0x80); | 2722 | nesqp->nesqp_context->keepalive = cpu_to_le32(0x80); |
2483 | nesqp->nesqp_context->ts_recent = 0; | 2723 | nesqp->nesqp_context->ts_recent = 0; |
@@ -2486,24 +2726,24 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod | |||
2486 | nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); | 2726 | nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); |
2487 | nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); | 2727 | nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); |
2488 | nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << | 2728 | nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << |
2489 | cm_node->tcp_cntxt.rcv_wscale); | 2729 | cm_node->tcp_cntxt.rcv_wscale); |
2490 | nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | 2730 | nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); |
2491 | nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | 2731 | nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); |
2492 | nesqp->nesqp_context->srtt = 0; | 2732 | nesqp->nesqp_context->srtt = 0; |
2493 | nesqp->nesqp_context->rttvar = cpu_to_le32(0x6); | 2733 | nesqp->nesqp_context->rttvar = cpu_to_le32(0x6); |
2494 | nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000); | 2734 | nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000); |
2495 | nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss); | 2735 | nesqp->nesqp_context->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss); |
2496 | nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); | 2736 | nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); |
2497 | nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | 2737 | nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); |
2498 | nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); | 2738 | nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); |
2499 | 2739 | ||
2500 | nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X," | 2740 | nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X," |
2501 | " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n", | 2741 | " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n", |
2502 | nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt), | 2742 | nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt), |
2503 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), | 2743 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), |
2504 | cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale), | 2744 | cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale), |
2505 | le32_to_cpu(nesqp->nesqp_context->rcv_wnd), | 2745 | le32_to_cpu(nesqp->nesqp_context->rcv_wnd), |
2506 | le32_to_cpu(nesqp->nesqp_context->misc)); | 2746 | le32_to_cpu(nesqp->nesqp_context->misc)); |
2507 | nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd)); | 2747 | nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd)); |
2508 | nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd)); | 2748 | nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd)); |
2509 | nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd)); | 2749 | nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd)); |
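[Editor's note] nes_cm_init_tsa_conn() programs the hardware TCP context with a pre-scaled receive window and a classic two-segment initial congestion window. The arithmetic behind the rcv_wnd and cwnd context words, with illustrative values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t rcv_wnd = 65535;	/* illustrative, not driver defaults */
		uint8_t rcv_wscale = 2;
		uint16_t mss = 1460;

		printf("effective rcv_wnd = %u\n", rcv_wnd << rcv_wscale);
		printf("initial cwnd      = %u\n", 2u * mss);
		return 0;
	}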
@@ -2524,7 +2764,7 @@ int nes_cm_disconn(struct nes_qp *nesqp) | |||
2524 | 2764 | ||
2525 | work = kzalloc(sizeof *work, GFP_ATOMIC); | 2765 | work = kzalloc(sizeof *work, GFP_ATOMIC); |
2526 | if (!work) | 2766 | if (!work) |
2527 | return -ENOMEM; /* Timer will clean up */ | 2767 | return -ENOMEM; /* Timer will clean up */ |
2528 | 2768 | ||
2529 | nes_add_ref(&nesqp->ibqp); | 2769 | nes_add_ref(&nesqp->ibqp); |
2530 | work->nesqp = nesqp; | 2770 | work->nesqp = nesqp; |
@@ -2544,7 +2784,7 @@ static void nes_disconnect_worker(struct work_struct *work) | |||
2544 | 2784 | ||
2545 | kfree(dwork); | 2785 | kfree(dwork); |
2546 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", | 2786 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", |
2547 | nesqp->last_aeq, nesqp->hwqp.qp_id); | 2787 | nesqp->last_aeq, nesqp->hwqp.qp_id); |
2548 | nes_cm_disconn_true(nesqp); | 2788 | nes_cm_disconn_true(nesqp); |
2549 | nes_rem_ref(&nesqp->ibqp); | 2789 | nes_rem_ref(&nesqp->ibqp); |
2550 | } | 2790 | } |
@@ -2580,7 +2820,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2580 | /* make sure we haven't already closed this connection */ | 2820 | /* make sure we haven't already closed this connection */ |
2581 | if (!cm_id) { | 2821 | if (!cm_id) { |
2582 | nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n", | 2822 | nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n", |
2583 | nesqp->hwqp.qp_id); | 2823 | nesqp->hwqp.qp_id); |
2584 | spin_unlock_irqrestore(&nesqp->lock, flags); | 2824 | spin_unlock_irqrestore(&nesqp->lock, flags); |
2585 | return -1; | 2825 | return -1; |
2586 | } | 2826 | } |
@@ -2589,7 +2829,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2589 | nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id); | 2829 | nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id); |
2590 | 2830 | ||
2591 | original_hw_tcp_state = nesqp->hw_tcp_state; | 2831 | original_hw_tcp_state = nesqp->hw_tcp_state; |
2592 | original_ibqp_state = nesqp->ibqp_state; | 2832 | original_ibqp_state = nesqp->ibqp_state; |
2593 | last_ae = nesqp->last_aeq; | 2833 | last_ae = nesqp->last_aeq; |
2594 | 2834 | ||
2595 | if (nesqp->term_flags) { | 2835 | if (nesqp->term_flags) { |
@@ -2647,16 +2887,16 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2647 | cm_event.private_data_len = 0; | 2887 | cm_event.private_data_len = 0; |
2648 | 2888 | ||
2649 | nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event" | 2889 | nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event" |
2650 | " for QP%u, SQ Head = %u, SQ Tail = %u. " | 2890 | " for QP%u, SQ Head = %u, SQ Tail = %u. " |
2651 | "cm_id = %p, refcount = %u.\n", | 2891 | "cm_id = %p, refcount = %u.\n", |
2652 | nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, | 2892 | nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, |
2653 | nesqp->hwqp.sq_tail, cm_id, | 2893 | nesqp->hwqp.sq_tail, cm_id, |
2654 | atomic_read(&nesqp->refcount)); | 2894 | atomic_read(&nesqp->refcount)); |
2655 | 2895 | ||
2656 | ret = cm_id->event_handler(cm_id, &cm_event); | 2896 | ret = cm_id->event_handler(cm_id, &cm_event); |
2657 | if (ret) | 2897 | if (ret) |
2658 | nes_debug(NES_DBG_CM, "OFA CM event_handler " | 2898 | nes_debug(NES_DBG_CM, "OFA CM event_handler " |
2659 | "returned, ret=%d\n", ret); | 2899 | "returned, ret=%d\n", ret); |
2660 | } | 2900 | } |
2661 | 2901 | ||
2662 | if (issue_close) { | 2902 | if (issue_close) { |
@@ -2674,9 +2914,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2674 | cm_event.private_data_len = 0; | 2914 | cm_event.private_data_len = 0; |
2675 | 2915 | ||
2676 | ret = cm_id->event_handler(cm_id, &cm_event); | 2916 | ret = cm_id->event_handler(cm_id, &cm_event); |
2677 | if (ret) { | 2917 | if (ret) |
2678 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | 2918 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); |
2679 | } | ||
2680 | 2919 | ||
2681 | cm_id->rem_ref(cm_id); | 2920 | cm_id->rem_ref(cm_id); |
2682 | } | 2921 | } |
@@ -2716,8 +2955,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) | |||
2716 | if (nesqp->lsmm_mr) | 2955 | if (nesqp->lsmm_mr) |
2717 | nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); | 2956 | nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); |
2718 | pci_free_consistent(nesdev->pcidev, | 2957 | pci_free_consistent(nesdev->pcidev, |
2719 | nesqp->private_data_len+sizeof(struct ietf_mpa_frame), | 2958 | nesqp->private_data_len + nesqp->ietf_frame_size, |
2720 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); | 2959 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); |
2721 | } | 2960 | } |
2722 | } | 2961 | } |
2723 | 2962 | ||
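[Editor's note] The free in nes_disconnect() now uses nesqp->ietf_frame_size, recorded when the frame was allocated, instead of hard-coding sizeof(struct ietf_mpa_frame); consistent-memory APIs require the free size to match the allocation, so the size is kept as state rather than recomputed. The pairing, as a userspace analogy (free() itself takes no size; the comment marks where DMA APIs would need it):

	#include <stdlib.h>

	struct conn {
		size_t ietf_frame_size;		/* recorded once, at allocation */
		size_t private_data_len;
		void *ietf_frame;
	};

	static int conn_alloc_frame(struct conn *c, size_t hdr, size_t priv)
	{
		c->ietf_frame_size = hdr;
		c->private_data_len = priv;
		c->ietf_frame = malloc(hdr + priv);
		return c->ietf_frame ? 0 : -1;
	}

	static void conn_free_frame(struct conn *c)
	{
		/* same hdr + priv total that was allocated; a DMA free
		 * would pass c->private_data_len + c->ietf_frame_size */
		free(c->ietf_frame);
		c->ietf_frame = NULL;
	}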
@@ -2756,6 +2995,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2756 | struct ib_phys_buf ibphysbuf; | 2995 | struct ib_phys_buf ibphysbuf; |
2757 | struct nes_pd *nespd; | 2996 | struct nes_pd *nespd; |
2758 | u64 tagged_offset; | 2997 | u64 tagged_offset; |
2998 | u8 mpa_frame_offset = 0; | ||
2999 | struct ietf_mpa_v2 *mpa_v2_frame; | ||
3000 | u8 start_addr = 0; | ||
3001 | u8 *start_ptr = &start_addr; | ||
3002 | u8 **start_buff = &start_ptr; | ||
3003 | u16 buff_len = 0; | ||
2759 | 3004 | ||
2760 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 3005 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
2761 | if (!ibqp) | 3006 | if (!ibqp) |
@@ -2796,53 +3041,49 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2796 | nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", | 3041 | nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", |
2797 | netdev_refcnt_read(nesvnic->netdev)); | 3042 | netdev_refcnt_read(nesvnic->netdev)); |
2798 | 3043 | ||
3044 | nesqp->ietf_frame_size = sizeof(struct ietf_mpa_v2); | ||
2799 | /* allocate the ietf frame and space for private data */ | 3045 | /* allocate the ietf frame and space for private data */ |
2800 | nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, | 3046 | nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, |
2801 | sizeof(struct ietf_mpa_frame) + conn_param->private_data_len, | 3047 | nesqp->ietf_frame_size + conn_param->private_data_len, |
2802 | &nesqp->ietf_frame_pbase); | 3048 | &nesqp->ietf_frame_pbase); |
2803 | 3049 | ||
2804 | if (!nesqp->ietf_frame) { | 3050 | if (!nesqp->ietf_frame) { |
2805 | nes_debug(NES_DBG_CM, "Unable to allocate memory for private " | 3051 | nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n"); |
2806 | "data\n"); | ||
2807 | return -ENOMEM; | 3052 | return -ENOMEM; |
2808 | } | 3053 | } |
3054 | mpa_v2_frame = (struct ietf_mpa_v2 *)nesqp->ietf_frame; | ||
2809 | 3055 | ||
3056 | if (cm_node->mpa_frame_rev == IETF_MPA_V1) | ||
3057 | mpa_frame_offset = 4; | ||
2810 | 3058 | ||
2811 | /* setup the MPA frame */ | 3059 | memcpy(mpa_v2_frame->priv_data, conn_param->private_data, |
2812 | nesqp->private_data_len = conn_param->private_data_len; | 3060 | conn_param->private_data_len); |
2813 | memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
2814 | |||
2815 | memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data, | ||
2816 | conn_param->private_data_len); | ||
2817 | 3061 | ||
2818 | nesqp->ietf_frame->priv_data_len = | 3062 | cm_build_mpa_frame(cm_node, start_buff, &buff_len, nesqp->ietf_frame, MPA_KEY_REPLY); |
2819 | cpu_to_be16(conn_param->private_data_len); | 3063 | nesqp->private_data_len = conn_param->private_data_len; |
2820 | nesqp->ietf_frame->rev = mpa_version; | ||
2821 | nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC; | ||
2822 | 3064 | ||
2823 | /* setup our first outgoing iWarp send WQE (the IETF frame response) */ | 3065 | /* setup our first outgoing iWarp send WQE (the IETF frame response) */ |
2824 | wqe = &nesqp->hwqp.sq_vbase[0]; | 3066 | wqe = &nesqp->hwqp.sq_vbase[0]; |
2825 | 3067 | ||
2826 | if (cm_id->remote_addr.sin_addr.s_addr != | 3068 | if (cm_id->remote_addr.sin_addr.s_addr != |
2827 | cm_id->local_addr.sin_addr.s_addr) { | 3069 | cm_id->local_addr.sin_addr.s_addr) { |
2828 | u64temp = (unsigned long)nesqp; | 3070 | u64temp = (unsigned long)nesqp; |
2829 | nesibdev = nesvnic->nesibdev; | 3071 | nesibdev = nesvnic->nesibdev; |
2830 | nespd = nesqp->nespd; | 3072 | nespd = nesqp->nespd; |
2831 | ibphysbuf.addr = nesqp->ietf_frame_pbase; | 3073 | ibphysbuf.addr = nesqp->ietf_frame_pbase + mpa_frame_offset; |
2832 | ibphysbuf.size = conn_param->private_data_len + | 3074 | ibphysbuf.size = buff_len; |
2833 | sizeof(struct ietf_mpa_frame); | 3075 | tagged_offset = (u64)(unsigned long)*start_buff; |
2834 | tagged_offset = (u64)(unsigned long)nesqp->ietf_frame; | ||
2835 | ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, | 3076 | ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, |
2836 | &ibphysbuf, 1, | 3077 | &ibphysbuf, 1, |
2837 | IB_ACCESS_LOCAL_WRITE, | 3078 | IB_ACCESS_LOCAL_WRITE, |
2838 | &tagged_offset); | 3079 | &tagged_offset); |
2839 | if (!ibmr) { | 3080 | if (!ibmr) { |
2840 | nes_debug(NES_DBG_CM, "Unable to register memory region" | 3081 | nes_debug(NES_DBG_CM, "Unable to register memory region" |
2841 | "for lSMM for cm_node = %p \n", | 3082 | "for lSMM for cm_node = %p \n", |
2842 | cm_node); | 3083 | cm_node); |
2843 | pci_free_consistent(nesdev->pcidev, | 3084 | pci_free_consistent(nesdev->pcidev, |
2844 | nesqp->private_data_len+sizeof(struct ietf_mpa_frame), | 3085 | nesqp->private_data_len + nesqp->ietf_frame_size, |
2845 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); | 3086 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); |
2846 | return -ENOMEM; | 3087 | return -ENOMEM; |
2847 | } | 3088 | } |
2848 | 3089 | ||
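[Editor's note] The mpa_frame_offset = 4 in this hunk reflects the frame layouts: by this driver's accounting an MPA v2 header is four bytes larger than v1 (the extra word carries the IRD/ORD negotiation data of enhanced connection setup), so when the peer only speaks v1 the reply is built four bytes into the v2-sized allocation. The offset math, assuming a 20-byte v1 header:

	#include <stdio.h>

	#define MPA_V1_HDR 20			/* 16-byte key + flags + rev + len */
	#define MPA_V2_HDR (MPA_V1_HDR + 4)	/* + assumed IRD/ORD control word */

	int main(void)
	{
		int rev = 1;	/* negotiated MPA revision, 1 or 2 */
		int off = (rev == 1) ? MPA_V2_HDR - MPA_V1_HDR : 0;

		printf("reply frame starts at ietf_frame_pbase + %d\n", off);
		return 0;
	}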
@@ -2850,22 +3091,20 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2850 | ibmr->device = nespd->ibpd.device; | 3091 | ibmr->device = nespd->ibpd.device; |
2851 | nesqp->lsmm_mr = ibmr; | 3092 | nesqp->lsmm_mr = ibmr; |
2852 | 3093 | ||
2853 | u64temp |= NES_SW_CONTEXT_ALIGN>>1; | 3094 | u64temp |= NES_SW_CONTEXT_ALIGN >> 1; |
2854 | set_wqe_64bit_value(wqe->wqe_words, | 3095 | set_wqe_64bit_value(wqe->wqe_words, |
2855 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, | 3096 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, |
2856 | u64temp); | 3097 | u64temp); |
2857 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | 3098 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = |
2858 | cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | | 3099 | cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | |
2859 | NES_IWARP_SQ_WQE_WRPDU); | 3100 | NES_IWARP_SQ_WQE_WRPDU); |
2860 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = | 3101 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = |
2861 | cpu_to_le32(conn_param->private_data_len + | 3102 | cpu_to_le32(buff_len); |
2862 | sizeof(struct ietf_mpa_frame)); | ||
2863 | set_wqe_64bit_value(wqe->wqe_words, | 3103 | set_wqe_64bit_value(wqe->wqe_words, |
2864 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, | 3104 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, |
2865 | (u64)(unsigned long)nesqp->ietf_frame); | 3105 | (u64)(unsigned long)(*start_buff)); |
2866 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = | 3106 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = |
2867 | cpu_to_le32(conn_param->private_data_len + | 3107 | cpu_to_le32(buff_len); |
2868 | sizeof(struct ietf_mpa_frame)); | ||
2869 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; | 3108 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; |
2870 | if (nesqp->sq_kmapped) { | 3109 | if (nesqp->sq_kmapped) { |
2871 | nesqp->sq_kmapped = 0; | 3110 | nesqp->sq_kmapped = 0; |
@@ -2874,7 +3113,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2874 | 3113 | ||
2875 | nesqp->nesqp_context->ird_ord_sizes |= | 3114 | nesqp->nesqp_context->ird_ord_sizes |= |
2876 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | 3115 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | |
2877 | NES_QPCONTEXT_ORDIRD_WRPDU); | 3116 | NES_QPCONTEXT_ORDIRD_WRPDU); |
2878 | } else { | 3117 | } else { |
2879 | nesqp->nesqp_context->ird_ord_sizes |= | 3118 | nesqp->nesqp_context->ird_ord_sizes |= |
2880 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU); | 3119 | cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU); |
@@ -2888,11 +3127,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2888 | 3127 | ||
2889 | /* nesqp->cm_node = (void *)cm_id->provider_data; */ | 3128 | /* nesqp->cm_node = (void *)cm_id->provider_data; */ |
2890 | cm_id->provider_data = nesqp; | 3129 | cm_id->provider_data = nesqp; |
2891 | nesqp->active_conn = 0; | 3130 | nesqp->active_conn = 0; |
2892 | 3131 | ||
2893 | if (cm_node->state == NES_CM_STATE_TSA) | 3132 | if (cm_node->state == NES_CM_STATE_TSA) |
2894 | nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n", | 3133 | nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n", |
2895 | cm_node); | 3134 | cm_node); |
2896 | 3135 | ||
2897 | nes_cm_init_tsa_conn(nesqp, cm_node); | 3136 | nes_cm_init_tsa_conn(nesqp, cm_node); |
2898 | 3137 | ||
@@ -2909,13 +3148,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2909 | cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); | 3148 | cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); |
2910 | 3149 | ||
2911 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | 3150 | nesqp->nesqp_context->misc2 |= cpu_to_le32( |
2912 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << | 3151 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << |
2913 | NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); | 3152 | NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); |
2914 | 3153 | ||
2915 | nesqp->nesqp_context->arp_index_vlan |= | 3154 | nesqp->nesqp_context->arp_index_vlan |= |
2916 | cpu_to_le32(nes_arp_table(nesdev, | 3155 | cpu_to_le32(nes_arp_table(nesdev, |
2917 | le32_to_cpu(nesqp->nesqp_context->ip0), NULL, | 3156 | le32_to_cpu(nesqp->nesqp_context->ip0), NULL, |
2918 | NES_ARP_RESOLVE) << 16); | 3157 | NES_ARP_RESOLVE) << 16); |
2919 | 3158 | ||
2920 | nesqp->nesqp_context->ts_val_delta = cpu_to_le32( | 3159 | nesqp->nesqp_context->ts_val_delta = cpu_to_le32( |
2921 | jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); | 3160 | jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); |
@@ -2941,7 +3180,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2941 | crc_value = get_crc_value(&nes_quad); | 3180 | crc_value = get_crc_value(&nes_quad); |
2942 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); | 3181 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2943 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | 3182 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", |
2944 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | 3183 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); |
2945 | 3184 | ||
2946 | nesqp->hte_index &= adapter->hte_index_mask; | 3185 | nesqp->hte_index &= adapter->hte_index_mask; |
2947 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | 3186 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); |
@@ -2949,17 +3188,15 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2949 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); | 3188 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); |
2950 | 3189 | ||
2951 | nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = " | 3190 | nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = " |
2952 | "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + " | 3191 | "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + " |
2953 | "private data length=%zu.\n", nesqp->hwqp.qp_id, | 3192 | "private data length=%u.\n", nesqp->hwqp.qp_id, |
2954 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | 3193 | ntohl(cm_id->remote_addr.sin_addr.s_addr), |
2955 | ntohs(cm_id->remote_addr.sin_port), | 3194 | ntohs(cm_id->remote_addr.sin_port), |
2956 | ntohl(cm_id->local_addr.sin_addr.s_addr), | 3195 | ntohl(cm_id->local_addr.sin_addr.s_addr), |
2957 | ntohs(cm_id->local_addr.sin_port), | 3196 | ntohs(cm_id->local_addr.sin_port), |
2958 | le32_to_cpu(nesqp->nesqp_context->rcv_nxt), | 3197 | le32_to_cpu(nesqp->nesqp_context->rcv_nxt), |
2959 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), | 3198 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), |
2960 | conn_param->private_data_len + | 3199 | buff_len); |
2961 | sizeof(struct ietf_mpa_frame)); | ||
2962 | |||
2963 | 3200 | ||
2964 | /* notify OF layer that accept event was successful */ | 3201 | /* notify OF layer that accept event was successful */ |
2965 | cm_id->add_ref(cm_id); | 3202 | cm_id->add_ref(cm_id); |
@@ -2980,12 +3217,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2980 | nesqp->private_data_len; | 3217 | nesqp->private_data_len; |
2981 | /* copy entire MPA frame to our cm_node's frame */ | 3218 | /* copy entire MPA frame to our cm_node's frame */ |
2982 | memcpy(cm_node->loopbackpartner->mpa_frame_buf, | 3219 | memcpy(cm_node->loopbackpartner->mpa_frame_buf, |
2983 | nesqp->ietf_frame->priv_data, nesqp->private_data_len); | 3220 | conn_param->private_data, conn_param->private_data_len); |
2984 | create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED); | 3221 | create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED); |
2985 | } | 3222 | } |
2986 | if (ret) | 3223 | if (ret) |
2987 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 3224 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
2988 | "ret=%d\n", __func__, __LINE__, ret); | 3225 | "ret=%d\n", __func__, __LINE__, ret); |
2989 | 3226 | ||
2990 | return 0; | 3227 | return 0; |
2991 | } | 3228 | } |
@@ -2998,34 +3235,28 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | |||
2998 | { | 3235 | { |
2999 | struct nes_cm_node *cm_node; | 3236 | struct nes_cm_node *cm_node; |
3000 | struct nes_cm_node *loopback; | 3237 | struct nes_cm_node *loopback; |
3001 | |||
3002 | struct nes_cm_core *cm_core; | 3238 | struct nes_cm_core *cm_core; |
3239 | u8 *start_buff; | ||
3003 | 3240 | ||
3004 | atomic_inc(&cm_rejects); | 3241 | atomic_inc(&cm_rejects); |
3005 | cm_node = (struct nes_cm_node *) cm_id->provider_data; | 3242 | cm_node = (struct nes_cm_node *)cm_id->provider_data; |
3006 | loopback = cm_node->loopbackpartner; | 3243 | loopback = cm_node->loopbackpartner; |
3007 | cm_core = cm_node->cm_core; | 3244 | cm_core = cm_node->cm_core; |
3008 | cm_node->cm_id = cm_id; | 3245 | cm_node->cm_id = cm_id; |
3009 | cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len; | ||
3010 | 3246 | ||
3011 | if (cm_node->mpa_frame_size > MAX_CM_BUFFER) | 3247 | if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER) |
3012 | return -EINVAL; | 3248 | return -EINVAL; |
3013 | 3249 | ||
3014 | memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
3015 | if (loopback) { | 3250 | if (loopback) { |
3016 | memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len); | 3251 | memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len); |
3017 | loopback->mpa_frame.priv_data_len = pdata_len; | 3252 | loopback->mpa_frame.priv_data_len = pdata_len; |
3018 | loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) + | 3253 | loopback->mpa_frame_size = pdata_len; |
3019 | pdata_len; | ||
3020 | } else { | 3254 | } else { |
3021 | memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len); | 3255 | start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2); |
3022 | cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len); | 3256 | cm_node->mpa_frame_size = pdata_len; |
3257 | memcpy(start_buff, pdata, pdata_len); | ||
3023 | } | 3258 | } |
3024 | 3259 | return cm_core->api->reject(cm_core, cm_node); | |
3025 | cm_node->mpa_frame.rev = mpa_version; | ||
3026 | cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT; | ||
3027 | |||
3028 | return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node); | ||
3029 | } | 3260 | } |
3030 | 3261 | ||
3031 | 3262 | ||
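[Editor's note] nes_reject() now validates pdata_len against the space left behind an MPA v2 header before staging the private data, and stages it at the reserved offset just as the connect path does. The guard and copy, reduced to a runnable form (HDR_ROOM and MAX_CM_BUFFER are illustrative stand-ins):

	#include <stdio.h>
	#include <string.h>

	#define HDR_ROOM 24		/* stand-in for sizeof(struct ietf_mpa_v2) */
	#define MAX_CM_BUFFER 128	/* illustrative cap */

	static int stage_reject(char *frame_buf, const void *pdata, size_t len)
	{
		if (len + HDR_ROOM > MAX_CM_BUFFER)
			return -1;	/* the driver returns -EINVAL here */
		memcpy(frame_buf + HDR_ROOM, pdata, len);
		return 0;
	}

	int main(void)
	{
		char buf[MAX_CM_BUFFER];

		printf("short pdata: %d\n", stage_reject(buf, "nope", 4));
		return 0;
	}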
@@ -3052,7 +3283,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3052 | nesvnic = to_nesvnic(nesqp->ibqp.device); | 3283 | nesvnic = to_nesvnic(nesqp->ibqp.device); |
3053 | if (!nesvnic) | 3284 | if (!nesvnic) |
3054 | return -EINVAL; | 3285 | return -EINVAL; |
3055 | nesdev = nesvnic->nesdev; | 3286 | nesdev = nesvnic->nesdev; |
3056 | if (!nesdev) | 3287 | if (!nesdev) |
3057 | return -EINVAL; | 3288 | return -EINVAL; |
3058 | 3289 | ||
@@ -3060,12 +3291,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3060 | return -EINVAL; | 3291 | return -EINVAL; |
3061 | 3292 | ||
3062 | nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = " | 3293 | nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = " |
3063 | "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id, | 3294 | "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id, |
3064 | ntohl(nesvnic->local_ipaddr), | 3295 | ntohl(nesvnic->local_ipaddr), |
3065 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | 3296 | ntohl(cm_id->remote_addr.sin_addr.s_addr), |
3066 | ntohs(cm_id->remote_addr.sin_port), | 3297 | ntohs(cm_id->remote_addr.sin_port), |
3067 | ntohl(cm_id->local_addr.sin_addr.s_addr), | 3298 | ntohl(cm_id->local_addr.sin_addr.s_addr), |
3068 | ntohs(cm_id->local_addr.sin_port)); | 3299 | ntohs(cm_id->local_addr.sin_port)); |
3069 | 3300 | ||
3070 | atomic_inc(&cm_connects); | 3301 | atomic_inc(&cm_connects); |
3071 | nesqp->active_conn = 1; | 3302 | nesqp->active_conn = 1; |
@@ -3079,12 +3310,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3079 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); | 3310 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); |
3080 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); | 3311 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); |
3081 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", | 3312 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", |
3082 | conn_param->private_data_len); | 3313 | conn_param->private_data_len); |
3083 | 3314 | ||
3084 | if (cm_id->local_addr.sin_addr.s_addr != | 3315 | if (cm_id->local_addr.sin_addr.s_addr != |
3085 | cm_id->remote_addr.sin_addr.s_addr) { | 3316 | cm_id->remote_addr.sin_addr.s_addr) { |
3086 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | 3317 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), |
3087 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); | 3318 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); |
3088 | apbvt_set = 1; | 3319 | apbvt_set = 1; |
3089 | } | 3320 | } |
3090 | 3321 | ||
@@ -3100,13 +3331,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3100 | 3331 | ||
3101 | /* create a connect CM node connection */ | 3332 | /* create a connect CM node connection */ |
3102 | cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, | 3333 | cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, |
3103 | conn_param->private_data_len, (void *)conn_param->private_data, | 3334 | conn_param->private_data_len, (void *)conn_param->private_data, |
3104 | &cm_info); | 3335 | &cm_info); |
3105 | if (!cm_node) { | 3336 | if (!cm_node) { |
3106 | if (apbvt_set) | 3337 | if (apbvt_set) |
3107 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | 3338 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), |
3108 | PCI_FUNC(nesdev->pcidev->devfn), | 3339 | PCI_FUNC(nesdev->pcidev->devfn), |
3109 | NES_MANAGE_APBVT_DEL); | 3340 | NES_MANAGE_APBVT_DEL); |
3110 | 3341 | ||
3111 | cm_id->rem_ref(cm_id); | 3342 | cm_id->rem_ref(cm_id); |
3112 | return -ENOMEM; | 3343 | return -ENOMEM; |
@@ -3156,7 +3387,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3156 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); | 3387 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); |
3157 | if (!cm_node) { | 3388 | if (!cm_node) { |
3158 | printk(KERN_ERR "%s[%u] Error returned from listen API call\n", | 3389 | printk(KERN_ERR "%s[%u] Error returned from listen API call\n", |
3159 | __func__, __LINE__); | 3390 | __func__, __LINE__); |
3160 | return -ENOMEM; | 3391 | return -ENOMEM; |
3161 | } | 3392 | } |
3162 | 3393 | ||
@@ -3164,12 +3395,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3164 | 3395 | ||
3165 | if (!cm_node->reused_node) { | 3396 | if (!cm_node->reused_node) { |
3166 | err = nes_manage_apbvt(nesvnic, | 3397 | err = nes_manage_apbvt(nesvnic, |
3167 | ntohs(cm_id->local_addr.sin_port), | 3398 | ntohs(cm_id->local_addr.sin_port), |
3168 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), | 3399 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), |
3169 | NES_MANAGE_APBVT_ADD); | 3400 | NES_MANAGE_APBVT_ADD); |
3170 | if (err) { | 3401 | if (err) { |
3171 | printk(KERN_ERR "nes_manage_apbvt call returned %d.\n", | 3402 | printk(KERN_ERR "nes_manage_apbvt call returned %d.\n", |
3172 | err); | 3403 | err); |
3173 | g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); | 3404 | g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); |
3174 | return err; | 3405 | return err; |
3175 | } | 3406 | } |
@@ -3206,13 +3437,13 @@ int nes_destroy_listen(struct iw_cm_id *cm_id) | |||
3206 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) | 3437 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) |
3207 | { | 3438 | { |
3208 | int rc = 0; | 3439 | int rc = 0; |
3440 | |||
3209 | cm_packets_received++; | 3441 | cm_packets_received++; |
3210 | if ((g_cm_core) && (g_cm_core->api)) { | 3442 | if ((g_cm_core) && (g_cm_core->api)) |
3211 | rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); | 3443 | rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); |
3212 | } else { | 3444 | else |
3213 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," | 3445 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," |
3214 | " cm is not setup properly.\n"); | 3446 | " cm is not setup properly.\n"); |
3215 | } | ||
3216 | 3447 | ||
3217 | return rc; | 3448 | return rc; |
3218 | } | 3449 | } |
@@ -3227,11 +3458,10 @@ int nes_cm_start(void) | |||
3227 | nes_debug(NES_DBG_CM, "\n"); | 3458 | nes_debug(NES_DBG_CM, "\n"); |
3228 | /* create the primary CM core, pass this handle to subsequent core inits */ | 3459 | /* create the primary CM core, pass this handle to subsequent core inits */ |
3229 | g_cm_core = nes_cm_alloc_core(); | 3460 | g_cm_core = nes_cm_alloc_core(); |
3230 | if (g_cm_core) { | 3461 | if (g_cm_core) |
3231 | return 0; | 3462 | return 0; |
3232 | } else { | 3463 | else |
3233 | return -ENOMEM; | 3464 | return -ENOMEM; |
3234 | } | ||
3235 | } | 3465 | } |
3236 | 3466 | ||
3237 | 3467 | ||
@@ -3252,7 +3482,6 @@ int nes_cm_stop(void) | |||
3252 | */ | 3482 | */ |
3253 | static void cm_event_connected(struct nes_cm_event *event) | 3483 | static void cm_event_connected(struct nes_cm_event *event) |
3254 | { | 3484 | { |
3255 | u64 u64temp; | ||
3256 | struct nes_qp *nesqp; | 3485 | struct nes_qp *nesqp; |
3257 | struct nes_vnic *nesvnic; | 3486 | struct nes_vnic *nesvnic; |
3258 | struct nes_device *nesdev; | 3487 | struct nes_device *nesdev; |
@@ -3261,7 +3490,6 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3261 | struct ib_qp_attr attr; | 3490 | struct ib_qp_attr attr; |
3262 | struct iw_cm_id *cm_id; | 3491 | struct iw_cm_id *cm_id; |
3263 | struct iw_cm_event cm_event; | 3492 | struct iw_cm_event cm_event; |
3264 | struct nes_hw_qp_wqe *wqe; | ||
3265 | struct nes_v4_quad nes_quad; | 3493 | struct nes_v4_quad nes_quad; |
3266 | u32 crc_value; | 3494 | u32 crc_value; |
3267 | int ret; | 3495 | int ret; |
@@ -3275,17 +3503,16 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3275 | nesdev = nesvnic->nesdev; | 3503 | nesdev = nesvnic->nesdev; |
3276 | nesadapter = nesdev->nesadapter; | 3504 | nesadapter = nesdev->nesadapter; |
3277 | 3505 | ||
3278 | if (nesqp->destroyed) { | 3506 | if (nesqp->destroyed) |
3279 | return; | 3507 | return; |
3280 | } | ||
3281 | atomic_inc(&cm_connecteds); | 3508 | atomic_inc(&cm_connecteds); |
3282 | nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" | 3509 | nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" |
3283 | " local port 0x%04X. jiffies = %lu.\n", | 3510 | " local port 0x%04X. jiffies = %lu.\n", |
3284 | nesqp->hwqp.qp_id, | 3511 | nesqp->hwqp.qp_id, |
3285 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | 3512 | ntohl(cm_id->remote_addr.sin_addr.s_addr), |
3286 | ntohs(cm_id->remote_addr.sin_port), | 3513 | ntohs(cm_id->remote_addr.sin_port), |
3287 | ntohs(cm_id->local_addr.sin_port), | 3514 | ntohs(cm_id->local_addr.sin_port), |
3288 | jiffies); | 3515 | jiffies); |
3289 | 3516 | ||
3290 | nes_cm_init_tsa_conn(nesqp, cm_node); | 3517 | nes_cm_init_tsa_conn(nesqp, cm_node); |
3291 | 3518 | ||
@@ -3316,40 +3543,12 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3316 | NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); | 3543 | NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); |
3317 | 3544 | ||
3318 | /* Adjust tail for not having a LSMM */ | 3545 | /* Adjust tail for not having a LSMM */ |
3319 | nesqp->hwqp.sq_tail = 1; | 3546 | /*nesqp->hwqp.sq_tail = 1;*/ |
3320 | 3547 | ||
3321 | #if defined(NES_SEND_FIRST_WRITE) | 3548 | build_rdma0_msg(cm_node, &nesqp); |
3322 | if (cm_node->send_write0) { | ||
3323 | nes_debug(NES_DBG_CM, "Sending first write.\n"); | ||
3324 | wqe = &nesqp->hwqp.sq_vbase[0]; | ||
3325 | u64temp = (unsigned long)nesqp; | ||
3326 | u64temp |= NES_SW_CONTEXT_ALIGN>>1; | ||
3327 | set_wqe_64bit_value(wqe->wqe_words, | ||
3328 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); | ||
3329 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
3330 | cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); | ||
3331 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; | ||
3332 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; | ||
3333 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; | ||
3334 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; | ||
3335 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; | ||
3336 | 3549 | ||
3337 | if (nesqp->sq_kmapped) { | 3550 | nes_write32(nesdev->regs + NES_WQE_ALLOC, |
3338 | nesqp->sq_kmapped = 0; | 3551 | (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); |
3339 | kunmap(nesqp->page); | ||
3340 | } | ||
3341 | |||
3342 | /* use the reserved spot on the WQ for the extra first WQE */ | ||
3343 | nesqp->nesqp_context->ird_ord_sizes &= | ||
3344 | cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | ||
3345 | NES_QPCONTEXT_ORDIRD_WRPDU | | ||
3346 | NES_QPCONTEXT_ORDIRD_ALSMM)); | ||
3347 | nesqp->skip_lsmm = 1; | ||
3348 | nesqp->hwqp.sq_tail = 0; | ||
3349 | nes_write32(nesdev->regs + NES_WQE_ALLOC, | ||
3350 | (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); | ||
3351 | } | ||
3352 | #endif | ||
3353 | 3552 | ||
3354 | memset(&nes_quad, 0, sizeof(nes_quad)); | 3553 | memset(&nes_quad, 0, sizeof(nes_quad)); |
3355 | 3554 | ||
@@ -3366,13 +3565,13 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3366 | crc_value = get_crc_value(&nes_quad); | 3565 | crc_value = get_crc_value(&nes_quad); |
3367 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); | 3566 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
3368 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | 3567 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", |
3369 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | 3568 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); |
3370 | 3569 | ||
3371 | nesqp->hte_index &= nesadapter->hte_index_mask; | 3570 | nesqp->hte_index &= nesadapter->hte_index_mask; |
3372 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | 3571 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); |
3373 | 3572 | ||
3374 | nesqp->ietf_frame = &cm_node->mpa_frame; | 3573 | nesqp->ietf_frame = &cm_node->mpa_frame; |
3375 | nesqp->private_data_len = (u8) cm_node->mpa_frame_size; | 3574 | nesqp->private_data_len = (u8)cm_node->mpa_frame_size; |
3376 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); | 3575 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); |
3377 | 3576 | ||
3378 | /* notify OF layer we successfully created the requested connection */ | 3577 | /* notify OF layer we successfully created the requested connection */ |
@@ -3384,7 +3583,9 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3384 | cm_event.remote_addr = cm_id->remote_addr; | 3583 | cm_event.remote_addr = cm_id->remote_addr; |
3385 | 3584 | ||
3386 | cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; | 3585 | cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; |
3387 | cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size; | 3586 | cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size; |
3587 | cm_event.ird = cm_node->ird_size; | ||
3588 | cm_event.ord = cm_node->ord_size; | ||
3388 | 3589 | ||
3389 | cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; | 3590 | cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; |
3390 | ret = cm_id->event_handler(cm_id, &cm_event); | 3591 | ret = cm_id->event_handler(cm_id, &cm_event); |
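The two new assignments above hand the IRD/ORD depths negotiated through the MPA v2 RTR exchange up to the OFA layer. A hypothetical consumer-side handler reading them; only the .ird/.ord accesses are new, the rest is ordinary iw_cm usage:

    static int example_event_handler(struct iw_cm_id *cm_id,
                                     struct iw_cm_event *event)
    {
            /* ird/ord now carry the peer's advertised RDMA depths */
            pr_info("iw_cm event %d: ird=%u ord=%u\n",
                    event->event, event->ird, event->ord);
            return 0;
    }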
@@ -3392,12 +3593,12 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3392 | 3593 | ||
3393 | if (ret) | 3594 | if (ret) |
3394 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 3595 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
3395 | "ret=%d\n", __func__, __LINE__, ret); | 3596 | "ret=%d\n", __func__, __LINE__, ret); |
3396 | attr.qp_state = IB_QPS_RTS; | 3597 | attr.qp_state = IB_QPS_RTS; |
3397 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | 3598 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); |
3398 | 3599 | ||
3399 | nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = " | 3600 | nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = " |
3400 | "%lu\n", nesqp->hwqp.qp_id, jiffies); | 3601 | "%lu\n", nesqp->hwqp.qp_id, jiffies); |
3401 | 3602 | ||
3402 | return; | 3603 | return; |
3403 | } | 3604 | } |
@@ -3418,16 +3619,14 @@ static void cm_event_connect_error(struct nes_cm_event *event) | |||
3418 | return; | 3619 | return; |
3419 | 3620 | ||
3420 | cm_id = event->cm_node->cm_id; | 3621 | cm_id = event->cm_node->cm_id; |
3421 | if (!cm_id) { | 3622 | if (!cm_id) |
3422 | return; | 3623 | return; |
3423 | } | ||
3424 | 3624 | ||
3425 | nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id); | 3625 | nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id); |
3426 | nesqp = cm_id->provider_data; | 3626 | nesqp = cm_id->provider_data; |
3427 | 3627 | ||
3428 | if (!nesqp) { | 3628 | if (!nesqp) |
3429 | return; | 3629 | return; |
3430 | } | ||
3431 | 3630 | ||
3432 | /* notify OF layer about this connection error event */ | 3631 | /* notify OF layer about this connection error event */ |
3433 | /* cm_id->rem_ref(cm_id); */ | 3632 | /* cm_id->rem_ref(cm_id); */ |
@@ -3442,14 +3641,14 @@ static void cm_event_connect_error(struct nes_cm_event *event) | |||
3442 | cm_event.private_data_len = 0; | 3641 | cm_event.private_data_len = 0; |
3443 | 3642 | ||
3444 | nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, " | 3643 | nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, " |
3445 | "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr, | 3644 | "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr, |
3446 | cm_event.remote_addr.sin_addr.s_addr); | 3645 | cm_event.remote_addr.sin_addr.s_addr); |
3447 | 3646 | ||
3448 | ret = cm_id->event_handler(cm_id, &cm_event); | 3647 | ret = cm_id->event_handler(cm_id, &cm_event); |
3449 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | 3648 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); |
3450 | if (ret) | 3649 | if (ret) |
3451 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " | 3650 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " |
3452 | "ret=%d\n", __func__, __LINE__, ret); | 3651 | "ret=%d\n", __func__, __LINE__, ret); |
3453 | cm_id->rem_ref(cm_id); | 3652 | cm_id->rem_ref(cm_id); |
3454 | 3653 | ||
3455 | rem_ref_cm_node(event->cm_node->cm_core, event->cm_node); | 3654 | rem_ref_cm_node(event->cm_node->cm_core, event->cm_node); |
@@ -3519,7 +3718,7 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3519 | */ | 3718 | */ |
3520 | static void cm_event_mpa_req(struct nes_cm_event *event) | 3719 | static void cm_event_mpa_req(struct nes_cm_event *event) |
3521 | { | 3720 | { |
3522 | struct iw_cm_id *cm_id; | 3721 | struct iw_cm_id *cm_id; |
3523 | struct iw_cm_event cm_event; | 3722 | struct iw_cm_event cm_event; |
3524 | int ret; | 3723 | int ret; |
3525 | struct nes_cm_node *cm_node; | 3724 | struct nes_cm_node *cm_node; |
@@ -3531,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
3531 | 3730 | ||
3532 | atomic_inc(&cm_connect_reqs); | 3731 | atomic_inc(&cm_connect_reqs); |
3533 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", | 3732 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", |
3534 | cm_node, cm_id, jiffies); | 3733 | cm_node, cm_id, jiffies); |
3535 | 3734 | ||
3536 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; | 3735 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; |
3537 | cm_event.status = 0; | 3736 | cm_event.status = 0; |
@@ -3545,19 +3744,21 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
3545 | cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); | 3744 | cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); |
3546 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); | 3745 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); |
3547 | cm_event.private_data = cm_node->mpa_frame_buf; | 3746 | cm_event.private_data = cm_node->mpa_frame_buf; |
3548 | cm_event.private_data_len = (u8) cm_node->mpa_frame_size; | 3747 | cm_event.private_data_len = (u8)cm_node->mpa_frame_size; |
3748 | cm_event.ird = cm_node->ird_size; | ||
3749 | cm_event.ord = cm_node->ord_size; | ||
3549 | 3750 | ||
3550 | ret = cm_id->event_handler(cm_id, &cm_event); | 3751 | ret = cm_id->event_handler(cm_id, &cm_event); |
3551 | if (ret) | 3752 | if (ret) |
3552 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | 3753 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", |
3553 | __func__, __LINE__, ret); | 3754 | __func__, __LINE__, ret); |
3554 | return; | 3755 | return; |
3555 | } | 3756 | } |
3556 | 3757 | ||
3557 | 3758 | ||
3558 | static void cm_event_mpa_reject(struct nes_cm_event *event) | 3759 | static void cm_event_mpa_reject(struct nes_cm_event *event) |
3559 | { | 3760 | { |
3560 | struct iw_cm_id *cm_id; | 3761 | struct iw_cm_id *cm_id; |
3561 | struct iw_cm_event cm_event; | 3762 | struct iw_cm_event cm_event; |
3562 | struct nes_cm_node *cm_node; | 3763 | struct nes_cm_node *cm_node; |
3563 | int ret; | 3764 | int ret; |
@@ -3569,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) | |||
3569 | 3770 | ||
3570 | atomic_inc(&cm_connect_reqs); | 3771 | atomic_inc(&cm_connect_reqs); |
3571 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", | 3772 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", |
3572 | cm_node, cm_id, jiffies); | 3773 | cm_node, cm_id, jiffies); |
3573 | 3774 | ||
3574 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | 3775 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; |
3575 | cm_event.status = -ECONNREFUSED; | 3776 | cm_event.status = -ECONNREFUSED; |
@@ -3584,17 +3785,17 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) | |||
3584 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); | 3785 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); |
3585 | 3786 | ||
3586 | cm_event.private_data = cm_node->mpa_frame_buf; | 3787 | cm_event.private_data = cm_node->mpa_frame_buf; |
3587 | cm_event.private_data_len = (u8) cm_node->mpa_frame_size; | 3788 | cm_event.private_data_len = (u8)cm_node->mpa_frame_size; |
3588 | 3789 | ||
3589 | nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, " | 3790 | nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, " |
3590 | "remove_addr=%08x\n", | 3791 | "remove_addr=%08x\n", |
3591 | cm_event.local_addr.sin_addr.s_addr, | 3792 | cm_event.local_addr.sin_addr.s_addr, |
3592 | cm_event.remote_addr.sin_addr.s_addr); | 3793 | cm_event.remote_addr.sin_addr.s_addr); |
3593 | 3794 | ||
3594 | ret = cm_id->event_handler(cm_id, &cm_event); | 3795 | ret = cm_id->event_handler(cm_id, &cm_event); |
3595 | if (ret) | 3796 | if (ret) |
3596 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | 3797 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", |
3597 | __func__, __LINE__, ret); | 3798 | __func__, __LINE__, ret); |
3598 | 3799 | ||
3599 | return; | 3800 | return; |
3600 | } | 3801 | } |
@@ -3613,7 +3814,7 @@ static int nes_cm_post_event(struct nes_cm_event *event) | |||
3613 | event->cm_info.cm_id->add_ref(event->cm_info.cm_id); | 3814 | event->cm_info.cm_id->add_ref(event->cm_info.cm_id); |
3614 | INIT_WORK(&event->event_work, nes_cm_event_handler); | 3815 | INIT_WORK(&event->event_work, nes_cm_event_handler); |
3615 | nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n", | 3816 | nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n", |
3616 | event->cm_node, event); | 3817 | event->cm_node, event); |
3617 | 3818 | ||
3618 | queue_work(event->cm_node->cm_core->event_wq, &event->event_work); | 3819 | queue_work(event->cm_node->cm_core->event_wq, &event->event_work); |
3619 | 3820 | ||
@@ -3630,7 +3831,7 @@ static int nes_cm_post_event(struct nes_cm_event *event) | |||
3630 | static void nes_cm_event_handler(struct work_struct *work) | 3831 | static void nes_cm_event_handler(struct work_struct *work) |
3631 | { | 3832 | { |
3632 | struct nes_cm_event *event = container_of(work, struct nes_cm_event, | 3833 | struct nes_cm_event *event = container_of(work, struct nes_cm_event, |
3633 | event_work); | 3834 | event_work); |
3634 | struct nes_cm_core *cm_core; | 3835 | struct nes_cm_core *cm_core; |
3635 | 3836 | ||
3636 | if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) | 3837 | if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) |
@@ -3638,29 +3839,29 @@ static void nes_cm_event_handler(struct work_struct *work) | |||
3638 | 3839 | ||
3639 | cm_core = event->cm_node->cm_core; | 3840 | cm_core = event->cm_node->cm_core; |
3640 | nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", | 3841 | nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", |
3641 | event, event->type, atomic_read(&cm_core->events_posted)); | 3842 | event, event->type, atomic_read(&cm_core->events_posted)); |
3642 | 3843 | ||
3643 | switch (event->type) { | 3844 | switch (event->type) { |
3644 | case NES_CM_EVENT_MPA_REQ: | 3845 | case NES_CM_EVENT_MPA_REQ: |
3645 | cm_event_mpa_req(event); | 3846 | cm_event_mpa_req(event); |
3646 | nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n", | 3847 | nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n", |
3647 | event->cm_node); | 3848 | event->cm_node); |
3648 | break; | 3849 | break; |
3649 | case NES_CM_EVENT_RESET: | 3850 | case NES_CM_EVENT_RESET: |
3650 | nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n", | 3851 | nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n", |
3651 | event->cm_node); | 3852 | event->cm_node); |
3652 | cm_event_reset(event); | 3853 | cm_event_reset(event); |
3653 | break; | 3854 | break; |
3654 | case NES_CM_EVENT_CONNECTED: | 3855 | case NES_CM_EVENT_CONNECTED: |
3655 | if ((!event->cm_node->cm_id) || | 3856 | if ((!event->cm_node->cm_id) || |
3656 | (event->cm_node->state != NES_CM_STATE_TSA)) | 3857 | (event->cm_node->state != NES_CM_STATE_TSA)) |
3657 | break; | 3858 | break; |
3658 | cm_event_connected(event); | 3859 | cm_event_connected(event); |
3659 | nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); | 3860 | nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); |
3660 | break; | 3861 | break; |
3661 | case NES_CM_EVENT_MPA_REJECT: | 3862 | case NES_CM_EVENT_MPA_REJECT: |
3662 | if ((!event->cm_node->cm_id) || | 3863 | if ((!event->cm_node->cm_id) || |
3663 | (event->cm_node->state == NES_CM_STATE_TSA)) | 3864 | (event->cm_node->state == NES_CM_STATE_TSA)) |
3664 | break; | 3865 | break; |
3665 | cm_event_mpa_reject(event); | 3866 | cm_event_mpa_reject(event); |
3666 | nes_debug(NES_DBG_CM, "CM Event: REJECT\n"); | 3867 | nes_debug(NES_DBG_CM, "CM Event: REJECT\n"); |
@@ -3668,7 +3869,7 @@ static void nes_cm_event_handler(struct work_struct *work) | |||
3668 | 3869 | ||
3669 | case NES_CM_EVENT_ABORTED: | 3870 | case NES_CM_EVENT_ABORTED: |
3670 | if ((!event->cm_node->cm_id) || | 3871 | if ((!event->cm_node->cm_id) || |
3671 | (event->cm_node->state == NES_CM_STATE_TSA)) | 3872 | (event->cm_node->state == NES_CM_STATE_TSA)) |
3672 | break; | 3873 | break; |
3673 | cm_event_connect_error(event); | 3874 | cm_event_connect_error(event); |
3674 | nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); | 3875 | nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); |
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index d9825fda70a1..bdfa1fbb35fc 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -48,7 +48,16 @@ | |||
48 | #define IETF_MPA_KEY_SIZE 16 | 48 | #define IETF_MPA_KEY_SIZE 16 |
49 | #define IETF_MPA_VERSION 1 | 49 | #define IETF_MPA_VERSION 1 |
50 | #define IETF_MAX_PRIV_DATA_LEN 512 | 50 | #define IETF_MAX_PRIV_DATA_LEN 512 |
51 | #define IETF_MPA_FRAME_SIZE 20 | 51 | #define IETF_MPA_FRAME_SIZE 20 |
52 | #define IETF_RTR_MSG_SIZE 4 | ||
53 | #define IETF_MPA_V2_FLAG 0x10 | ||
54 | |||
55 | /* IETF RTR MSG Fields */ | ||
56 | #define IETF_PEER_TO_PEER 0x8000 | ||
57 | #define IETF_FLPDU_ZERO_LEN 0x4000 | ||
58 | #define IETF_RDMA0_WRITE 0x8000 | ||
59 | #define IETF_RDMA0_READ 0x4000 | ||
60 | #define IETF_NO_IRD_ORD 0x3FFF | ||
52 | 61 | ||
53 | enum ietf_mpa_flags { | 62 | enum ietf_mpa_flags { |
54 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ | 63 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ |
@@ -56,7 +65,7 @@ enum ietf_mpa_flags { | |||
56 | IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ | 65 | IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ |
57 | }; | 66 | }; |
58 | 67 | ||
59 | struct ietf_mpa_frame { | 68 | struct ietf_mpa_v1 { |
60 | u8 key[IETF_MPA_KEY_SIZE]; | 69 | u8 key[IETF_MPA_KEY_SIZE]; |
61 | u8 flags; | 70 | u8 flags; |
62 | u8 rev; | 71 | u8 rev; |
@@ -66,6 +75,20 @@ struct ietf_mpa_frame { | |||
66 | 75 | ||
67 | #define ietf_mpa_req_resp_frame ietf_mpa_frame | 76 | #define ietf_mpa_req_resp_frame ietf_mpa_frame |
68 | 77 | ||
78 | struct ietf_rtr_msg { | ||
79 | __be16 ctrl_ird; | ||
80 | __be16 ctrl_ord; | ||
81 | }; | ||
82 | |||
83 | struct ietf_mpa_v2 { | ||
84 | u8 key[IETF_MPA_KEY_SIZE]; | ||
85 | u8 flags; | ||
86 | u8 rev; | ||
87 | __be16 priv_data_len; | ||
88 | struct ietf_rtr_msg rtr_msg; | ||
89 | u8 priv_data[0]; | ||
90 | }; | ||
91 | |||
69 | struct nes_v4_quad { | 92 | struct nes_v4_quad { |
70 | u32 rsvd0; | 93 | u32 rsvd0; |
71 | __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */ | 94 | __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */ |
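The RTR control words pack flags and a 14-bit depth each: ctrl_ird carries IETF_PEER_TO_PEER and IETF_FLPDU_ZERO_LEN plus the IRD, ctrl_ord carries the RDMA0 operation selector plus the ORD, and IETF_NO_IRD_ORD (0x3FFF) marks a depth as unspecified. An illustrative encoder; the helper name and exact flag policy are assumptions, only the bit layout comes from the defines above:

    static void format_rtr_msg(struct ietf_rtr_msg *rtr, u16 ird, u16 ord,
                               bool p2p, enum send_rdma0 rdma0_op)
    {
            u16 ctrl_ird = min_t(u16, ird, IETF_NO_IRD_ORD);
            u16 ctrl_ord = min_t(u16, ord, IETF_NO_IRD_ORD);

            if (p2p) {
                    ctrl_ird |= IETF_PEER_TO_PEER;
                    /* pick which zero-length RDMA0 op opens the connection */
                    ctrl_ord |= (rdma0_op == SEND_RDMA_WRITE_ZERO) ?
                                IETF_RDMA0_WRITE : IETF_RDMA0_READ;
            }
            rtr->ctrl_ird = cpu_to_be16(ctrl_ird);
            rtr->ctrl_ord = cpu_to_be16(ctrl_ord);
    }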
@@ -171,8 +194,7 @@ struct nes_timer_entry { | |||
171 | 194 | ||
172 | #define NES_CM_DEF_SEQ2 0x18ed5740 | 195 | #define NES_CM_DEF_SEQ2 0x18ed5740 |
173 | #define NES_CM_DEF_LOCAL_ID2 0xb807 | 196 | #define NES_CM_DEF_LOCAL_ID2 0xb807 |
174 | #define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN) | 197 | #define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_RTR_MSG_SIZE + IETF_MAX_PRIV_DATA_LEN) |
175 | |||
176 | 198 | ||
177 | typedef u32 nes_addr_t; | 199 | typedef u32 nes_addr_t; |
178 | 200 | ||
@@ -204,6 +226,21 @@ enum nes_cm_node_state { | |||
204 | NES_CM_STATE_CLOSED | 226 | NES_CM_STATE_CLOSED |
205 | }; | 227 | }; |
206 | 228 | ||
229 | enum mpa_frame_version { | ||
230 | IETF_MPA_V1 = 1, | ||
231 | IETF_MPA_V2 = 2 | ||
232 | }; | ||
233 | |||
234 | enum mpa_frame_key { | ||
235 | MPA_KEY_REQUEST, | ||
236 | MPA_KEY_REPLY | ||
237 | }; | ||
238 | |||
239 | enum send_rdma0 { | ||
240 | SEND_RDMA_READ_ZERO = 1, | ||
241 | SEND_RDMA_WRITE_ZERO = 2 | ||
242 | }; | ||
243 | |||
207 | enum nes_tcpip_pkt_type { | 244 | enum nes_tcpip_pkt_type { |
208 | NES_PKT_TYPE_UNKNOWN, | 245 | NES_PKT_TYPE_UNKNOWN, |
209 | NES_PKT_TYPE_SYN, | 246 | NES_PKT_TYPE_SYN, |
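mpa_frame_version and mpa_frame_key separate the protocol revision from the frame direction; under RFC 5044 the direction picks the 16-byte key, "MPA ID Req Frame" for requests and "MPA ID Rep Frame" for replies. A minimal sketch (the helper itself is assumed; ietf_mpa_v1 and ietf_mpa_v2 share this key/flags/rev prefix):

    static void format_mpa_key(struct ietf_mpa_v1 *mpa, enum mpa_frame_key key)
    {
            /* RFC 5044 key values; exactly IETF_MPA_KEY_SIZE bytes, no NUL */
            static const char req_key[IETF_MPA_KEY_SIZE] = "MPA ID Req Frame";
            static const char rep_key[IETF_MPA_KEY_SIZE] = "MPA ID Rep Frame";

            memcpy(mpa->key, key == MPA_KEY_REQUEST ? req_key : rep_key,
                   IETF_MPA_KEY_SIZE);
    }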
@@ -245,9 +282,9 @@ struct nes_cm_tcp_context { | |||
245 | 282 | ||
246 | 283 | ||
247 | enum nes_cm_listener_state { | 284 | enum nes_cm_listener_state { |
248 | NES_CM_LISTENER_PASSIVE_STATE=1, | 285 | NES_CM_LISTENER_PASSIVE_STATE = 1, |
249 | NES_CM_LISTENER_ACTIVE_STATE=2, | 286 | NES_CM_LISTENER_ACTIVE_STATE = 2, |
250 | NES_CM_LISTENER_EITHER_STATE=3 | 287 | NES_CM_LISTENER_EITHER_STATE = 3 |
251 | }; | 288 | }; |
252 | 289 | ||
253 | struct nes_cm_listener { | 290 | struct nes_cm_listener { |
@@ -283,16 +320,20 @@ struct nes_cm_node { | |||
283 | 320 | ||
284 | struct nes_cm_node *loopbackpartner; | 321 | struct nes_cm_node *loopbackpartner; |
285 | 322 | ||
286 | struct nes_timer_entry *send_entry; | 323 | struct nes_timer_entry *send_entry; |
287 | | 324 | struct nes_timer_entry *recv_entry; |
288 | spinlock_t retrans_list_lock; | 325 | spinlock_t retrans_list_lock; |
289 | struct nes_timer_entry *recv_entry; | 326 | enum send_rdma0 send_rdma0_op; |
290 | 327 | ||
291 | int send_write0; | ||
292 | union { | 328 | union { |
293 | struct ietf_mpa_frame mpa_frame; | 329 | struct ietf_mpa_v1 mpa_frame; |
294 | u8 mpa_frame_buf[MAX_CM_BUFFER]; | 330 | struct ietf_mpa_v2 mpa_v2_frame; |
331 | u8 mpa_frame_buf[MAX_CM_BUFFER]; | ||
295 | }; | 332 | }; |
333 | enum mpa_frame_version mpa_frame_rev; | ||
334 | u16 ird_size; | ||
335 | u16 ord_size; | ||
336 | |||
296 | u16 mpa_frame_size; | 337 | u16 mpa_frame_size; |
297 | struct iw_cm_id *cm_id; | 338 | struct iw_cm_id *cm_id; |
298 | struct list_head list; | 339 | struct list_head list; |
@@ -399,10 +440,8 @@ struct nes_cm_ops { | |||
399 | struct nes_vnic *, u16, void *, | 440 | struct nes_vnic *, u16, void *, |
400 | struct nes_cm_info *); | 441 | struct nes_cm_info *); |
401 | int (*close)(struct nes_cm_core *, struct nes_cm_node *); | 442 | int (*close)(struct nes_cm_core *, struct nes_cm_node *); |
402 | int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *, | 443 | int (*accept)(struct nes_cm_core *, struct nes_cm_node *); |
403 | struct nes_cm_node *); | 444 | int (*reject)(struct nes_cm_core *, struct nes_cm_node *); |
404 | int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, | ||
405 | struct nes_cm_node *); | ||
406 | int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, | 445 | int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, |
407 | struct sk_buff *); | 446 | struct sk_buff *); |
408 | int (*destroy_cm_core)(struct nes_cm_core *); | 447 | int (*destroy_cm_core)(struct nes_cm_core *); |
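The accept and reject ops drop their ietf_mpa_frame argument because the cm_node now owns its frame through the mpa_frame/mpa_v2_frame union above, so call sites shrink accordingly. For illustration:

    /* Call-site shape implied by the signature change:
     *
     *   old: ret = g_cm_core->api->accept(g_cm_core, &mpa_frame, cm_node);
     *   new: ret = g_cm_core->api->accept(g_cm_core, cm_node);
     */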
@@ -422,5 +461,7 @@ int nes_destroy_listen(struct iw_cm_id *); | |||
422 | int nes_cm_recv(struct sk_buff *, struct net_device *); | 461 | int nes_cm_recv(struct sk_buff *, struct net_device *); |
423 | int nes_cm_start(void); | 462 | int nes_cm_start(void); |
424 | int nes_cm_stop(void); | 463 | int nes_cm_stop(void); |
464 | int nes_add_ref_cm_node(struct nes_cm_node *cm_node); | ||
465 | int nes_rem_ref_cm_node(struct nes_cm_node *cm_node); | ||
425 | 466 | ||
426 | #endif /* NES_CM_H */ | 467 | #endif /* NES_CM_H */ |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index be36cbeae630..7c0ff19ce382 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -110,6 +110,14 @@ static unsigned char *nes_tcp_state_str[] = { | |||
110 | }; | 110 | }; |
111 | #endif | 111 | #endif |
112 | 112 | ||
113 | static inline void print_ip(struct nes_cm_node *cm_node) | ||
114 | { | ||
115 | unsigned char *rem_addr; | ||
116 | if (cm_node) { | ||
117 | rem_addr = (unsigned char *)&cm_node->rem_addr; | ||
118 | printk(KERN_ERR PFX "Remote IP addr: %pI4\n", rem_addr); | ||
119 | } | ||
120 | } | ||
113 | 121 | ||
114 | /** | 122 | /** |
115 | * nes_nic_init_timer_defaults | 123 | * nes_nic_init_timer_defaults |
@@ -1555,6 +1563,7 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic) | |||
1555 | struct nes_hw_nic_rq_wqe *nic_rqe; | 1563 | struct nes_hw_nic_rq_wqe *nic_rqe; |
1556 | struct nes_hw_nic *nesnic; | 1564 | struct nes_hw_nic *nesnic; |
1557 | struct nes_device *nesdev; | 1565 | struct nes_device *nesdev; |
1566 | struct nes_rskb_cb *cb; | ||
1558 | u32 rx_wqes_posted = 0; | 1567 | u32 rx_wqes_posted = 0; |
1559 | 1568 | ||
1560 | nesnic = &nesvnic->nic; | 1569 | nesnic = &nesvnic->nic; |
@@ -1580,6 +1589,9 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic) | |||
1580 | 1589 | ||
1581 | bus_address = pci_map_single(nesdev->pcidev, | 1590 | bus_address = pci_map_single(nesdev->pcidev, |
1582 | skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | 1591 | skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); |
1592 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
1593 | cb->busaddr = bus_address; | ||
1594 | cb->maplen = nesvnic->max_frame_size; | ||
1583 | 1595 | ||
1584 | nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head]; | 1596 | nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head]; |
1585 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = | 1597 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = |
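The new cb writes stash the DMA mapping in the skb's control block so later unmap sites (see the nes_destroy_nic_qp hunk below, and nes_mgt.c) no longer re-derive the bus address from the RQ WQE fragment words. A plausible shape for the structure, which is defined in nes.h and not shown in this diff; only fields the patch actually references are listed, and the whole thing must fit in the skb->cb[] scratch area:

    struct nes_rskb_cb {
            u64 busaddr;      /* recorded at pci_map_single() time */
            u32 maplen;       /* length for the matching pci_unmap_single() */
            u8 *data_start;   /* frame start, used by nes_mgt.c header parsing */
            /* further bookkeeping fields elided */
    };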
@@ -1669,6 +1681,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) | |||
1669 | u32 cqp_head; | 1681 | u32 cqp_head; |
1670 | u32 counter; | 1682 | u32 counter; |
1671 | u32 wqe_count; | 1683 | u32 wqe_count; |
1684 | struct nes_rskb_cb *cb; | ||
1672 | u8 jumbomode=0; | 1685 | u8 jumbomode=0; |
1673 | 1686 | ||
1674 | /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */ | 1687 | /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */ |
@@ -1845,6 +1858,9 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) | |||
1845 | 1858 | ||
1846 | pmem = pci_map_single(nesdev->pcidev, skb->data, | 1859 | pmem = pci_map_single(nesdev->pcidev, skb->data, |
1847 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | 1860 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); |
1861 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
1862 | cb->busaddr = pmem; | ||
1863 | cb->maplen = nesvnic->max_frame_size; | ||
1848 | 1864 | ||
1849 | nic_rqe = &nesvnic->nic.rq_vbase[counter]; | 1865 | nic_rqe = &nesvnic->nic.rq_vbase[counter]; |
1850 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size); | 1866 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size); |
@@ -1873,6 +1889,13 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) | |||
1873 | jumbomode = 1; | 1889 | jumbomode = 1; |
1874 | nes_nic_init_timer_defaults(nesdev, jumbomode); | 1890 | nes_nic_init_timer_defaults(nesdev, jumbomode); |
1875 | } | 1891 | } |
1892 | if ((nesdev->nesadapter->allow_unaligned_fpdus) && | ||
1893 | (nes_init_mgt_qp(nesdev, netdev, nesvnic))) { | ||
1894 | nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n", netdev->name); | ||
1895 | nes_destroy_nic_qp(nesvnic); | ||
1896 | return -ENOMEM; | ||
1897 | } | ||
1898 | |||
1876 | nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr; | 1899 | nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr; |
1877 | nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS; | 1900 | nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS; |
1878 | nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc; | 1901 | nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc; |
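When the adapter advertises allow_unaligned_fpdus, NIC QP bring-up now also creates the PAU management QPs from the new nes_mgt.c, and a failure unwinds through nes_destroy_nic_qp(); teardown releases them first. The pairing, as implied by this hunk and the destroy hunk below:

    /*
     * nes_init_nic_qp()
     *         ...
     *         if (nesadapter->allow_unaligned_fpdus)
     *                 nes_init_mgt_qp()   - on failure: destroy NIC QP, -ENOMEM
     *
     * nes_destroy_nic_qp()
     *         if (nesadapter->allow_unaligned_fpdus)
     *                 nes_destroy_mgt()   - before the NIC QP itself goes away
     */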
@@ -1895,28 +1918,29 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic) | |||
1895 | struct nes_device *nesdev = nesvnic->nesdev; | 1918 | struct nes_device *nesdev = nesvnic->nesdev; |
1896 | struct nes_hw_cqp_wqe *cqp_wqe; | 1919 | struct nes_hw_cqp_wqe *cqp_wqe; |
1897 | struct nes_hw_nic_sq_wqe *nic_sqe; | 1920 | struct nes_hw_nic_sq_wqe *nic_sqe; |
1898 | struct nes_hw_nic_rq_wqe *nic_rqe; | ||
1899 | __le16 *wqe_fragment_length; | 1921 | __le16 *wqe_fragment_length; |
1900 | u16 wqe_fragment_index; | 1922 | u16 wqe_fragment_index; |
1901 | u64 wqe_frag; | ||
1902 | u32 cqp_head; | 1923 | u32 cqp_head; |
1903 | u32 wqm_cfg0; | 1924 | u32 wqm_cfg0; |
1904 | unsigned long flags; | 1925 | unsigned long flags; |
1926 | struct sk_buff *rx_skb; | ||
1927 | struct nes_rskb_cb *cb; | ||
1905 | int ret; | 1928 | int ret; |
1906 | 1929 | ||
1930 | if (nesdev->nesadapter->allow_unaligned_fpdus) | ||
1931 | nes_destroy_mgt(nesvnic); | ||
1932 | |||
1907 | /* clear wqe stall before destroying NIC QP */ | 1933 | /* clear wqe stall before destroying NIC QP */ |
1908 | wqm_cfg0 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG0); | 1934 | wqm_cfg0 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG0); |
1909 | nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0 & 0xFFFF7FFF); | 1935 | nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0 & 0xFFFF7FFF); |
1910 | 1936 | ||
1911 | /* Free remaining NIC receive buffers */ | 1937 | /* Free remaining NIC receive buffers */ |
1912 | while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { | 1938 | while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { |
1913 | nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail]; | 1939 | rx_skb = nesvnic->nic.rx_skb[nesvnic->nic.rq_tail]; |
1914 | wqe_frag = (u64)le32_to_cpu( | 1940 | cb = (struct nes_rskb_cb *)&rx_skb->cb[0]; |
1915 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); | 1941 | pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, |
1916 | wqe_frag |= ((u64)le32_to_cpu( | 1942 | PCI_DMA_FROMDEVICE); |
1917 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX]))<<32; | 1943 | |
1918 | pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag, | ||
1919 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
1920 | dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]); | 1944 | dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]); |
1921 | nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1); | 1945 | nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1); |
1922 | } | 1946 | } |
@@ -2775,6 +2799,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | |||
2775 | struct nes_hw_nic_sq_wqe *nic_sqe; | 2799 | struct nes_hw_nic_sq_wqe *nic_sqe; |
2776 | struct sk_buff *skb; | 2800 | struct sk_buff *skb; |
2777 | struct sk_buff *rx_skb; | 2801 | struct sk_buff *rx_skb; |
2802 | struct nes_rskb_cb *cb; | ||
2778 | __le16 *wqe_fragment_length; | 2803 | __le16 *wqe_fragment_length; |
2779 | u32 head; | 2804 | u32 head; |
2780 | u32 cq_size; | 2805 | u32 cq_size; |
@@ -2859,6 +2884,8 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | |||
2859 | bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; | 2884 | bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; |
2860 | pci_unmap_single(nesdev->pcidev, bus_address, | 2885 | pci_unmap_single(nesdev->pcidev, bus_address, |
2861 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | 2886 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); |
2887 | cb = (struct nes_rskb_cb *)&rx_skb->cb[0]; | ||
2888 | cb->busaddr = 0; | ||
2862 | /* rx_skb->tail = rx_skb->data + rx_pkt_size; */ | 2889 | /* rx_skb->tail = rx_skb->data + rx_pkt_size; */ |
2863 | /* rx_skb->len = rx_pkt_size; */ | 2890 | /* rx_skb->len = rx_pkt_size; */ |
2864 | rx_skb->len = 0; /* TODO: see if this is necessary */ | 2891 | rx_skb->len = 0; /* TODO: see if this is necessary */ |
@@ -2983,6 +3010,7 @@ skip_rx_indicate0: | |||
2983 | } | 3010 | } |
2984 | 3011 | ||
2985 | 3012 | ||
3013 | |||
2986 | /** | 3014 | /** |
2987 | * nes_cqp_ce_handler | 3015 | * nes_cqp_ce_handler |
2988 | */ | 3016 | */ |
@@ -2997,6 +3025,8 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
2997 | u32 cq_size; | 3025 | u32 cq_size; |
2998 | u32 cqe_count=0; | 3026 | u32 cqe_count=0; |
2999 | u32 error_code; | 3027 | u32 error_code; |
3028 | u32 opcode; | ||
3029 | u32 ctx_index; | ||
3000 | /* u32 counter; */ | 3030 | /* u32 counter; */ |
3001 | 3031 | ||
3002 | head = cq->cq_head; | 3032 | head = cq->cq_head; |
@@ -3007,12 +3037,9 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
3007 | /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head, | 3037 | /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head, |
3008 | le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */ | 3038 | le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */ |
3009 | 3039 | ||
3010 | if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) { | 3040 | opcode = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]); |
3011 | u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head]. | 3041 | if (opcode & NES_CQE_VALID) { |
3012 | cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) | | 3042 | cqp = &nesdev->cqp; |
3013 | ((u64)(le32_to_cpu(cq->cq_vbase[head]. | ||
3014 | cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))); | ||
3015 | cqp = *((struct nes_hw_cqp **)&u64temp); | ||
3016 | 3043 | ||
3017 | error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]); | 3044 | error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]); |
3018 | if (error_code) { | 3045 | if (error_code) { |
@@ -3021,15 +3048,14 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
3021 | le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f, | 3048 | le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f, |
3022 | (u16)(error_code >> 16), | 3049 | (u16)(error_code >> 16), |
3023 | (u16)error_code); | 3050 | (u16)error_code); |
3024 | nes_debug(NES_DBG_CQP, "cqp: qp_id=%u, sq_head=%u, sq_tail=%u\n", | ||
3025 | cqp->qp_id, cqp->sq_head, cqp->sq_tail); | ||
3026 | } | 3051 | } |
3027 | 3052 | ||
3028 | u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail]. | 3053 | u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head]. |
3029 | wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | | 3054 | cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) | |
3030 | ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail]. | 3055 | ((u64)(le32_to_cpu(cq->cq_vbase[head]. |
3031 | wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX]))); | 3056 | cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))); |
3032 | cqp_request = *((struct nes_cqp_request **)&u64temp); | 3057 | |
3058 | cqp_request = (struct nes_cqp_request *)(unsigned long)u64temp; | ||
3033 | if (cqp_request) { | 3059 | if (cqp_request) { |
3034 | if (cqp_request->waiting) { | 3060 | if (cqp_request->waiting) { |
3035 | /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */ | 3061 | /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */ |
@@ -3075,9 +3101,15 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
3075 | cqp_wqe = &nesdev->cqp.sq_vbase[head]; | 3101 | cqp_wqe = &nesdev->cqp.sq_vbase[head]; |
3076 | memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); | 3102 | memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); |
3077 | barrier(); | 3103 | barrier(); |
3078 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = | 3104 | |
3105 | opcode = cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]; | ||
3106 | if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT) | ||
3107 | ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX; | ||
3108 | else | ||
3109 | ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX; | ||
3110 | cqp_wqe->wqe_words[ctx_index] = | ||
3079 | cpu_to_le32((u32)((unsigned long)cqp_request)); | 3111 | cpu_to_le32((u32)((unsigned long)cqp_request)); |
3080 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = | 3112 | cqp_wqe->wqe_words[ctx_index + 1] = |
3081 | cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request))); | 3113 | cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request))); |
3082 | nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n", | 3114 | nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n", |
3083 | cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head); | 3115 | cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head); |
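NES_CQP_DOWNLOAD_SEGMENT WQEs use an alternate word layout (enum nes_cqp_wqe_word_download_idx, added to nes_hw.h below), so the requeue path must store the completion context at the download-specific index. The selection could equally be written as a helper; this sketch adds the le32_to_cpu() that the open-coded comparison above skips, which is harmless only on the little-endian hosts this hardware targets:

    static inline u32 nes_cqp_ctx_index(struct nes_hw_cqp_wqe *cqp_wqe)
    {
            u32 opcode = le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]);

            if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT)
                    return NES_CQP_WQE_DL_COMP_CTX_LOW_IDX;
            return NES_CQP_WQE_COMP_CTX_LOW_IDX;
    }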
@@ -3093,7 +3125,6 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
3093 | nes_read32(nesdev->regs+NES_CQE_ALLOC); | 3125 | nes_read32(nesdev->regs+NES_CQE_ALLOC); |
3094 | } | 3126 | } |
3095 | 3127 | ||
3096 | |||
3097 | static u8 *locate_mpa(u8 *pkt, u32 aeq_info) | 3128 | static u8 *locate_mpa(u8 *pkt, u32 aeq_info) |
3098 | { | 3129 | { |
3099 | if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { | 3130 | if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { |
@@ -3553,9 +3584,9 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3553 | 3584 | ||
3554 | aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); | 3585 | aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); |
3555 | if (aeq_info & NES_AEQE_QP) { | 3586 | if (aeq_info & NES_AEQE_QP) { |
3556 | if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps, | 3587 | if (!nes_is_resource_allocated(nesadapter, |
3557 | aeqe_cq_id)) || | 3588 | nesadapter->allocated_qps, |
3558 | (atomic_read(&nesqp->close_timer_started))) | 3589 | aeqe_cq_id)) |
3559 | return; | 3590 | return; |
3560 | } | 3591 | } |
3561 | 3592 | ||
@@ -3566,8 +3597,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3566 | 3597 | ||
3567 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { | 3598 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { |
3568 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) && | 3599 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) && |
3569 | (nesqp->ibqp_state == IB_QPS_RTS) && | 3600 | (nesqp->ibqp_state == IB_QPS_RTS)) { |
3570 | ((nesadapter->eeprom_version >> 16) != NES_A0)) { | ||
3571 | spin_lock_irqsave(&nesqp->lock, flags); | 3601 | spin_lock_irqsave(&nesqp->lock, flags); |
3572 | nesqp->hw_iwarp_state = iwarp_state; | 3602 | nesqp->hw_iwarp_state = iwarp_state; |
3573 | nesqp->hw_tcp_state = tcp_state; | 3603 | nesqp->hw_tcp_state = tcp_state; |
@@ -3594,9 +3624,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3594 | return; | 3624 | return; |
3595 | } | 3625 | } |
3596 | spin_lock_irqsave(&nesqp->lock, flags); | 3626 | spin_lock_irqsave(&nesqp->lock, flags); |
3597 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; | 3627 | nesqp->hw_iwarp_state = iwarp_state; |
3628 | nesqp->hw_tcp_state = tcp_state; | ||
3629 | nesqp->last_aeq = async_event_id; | ||
3598 | spin_unlock_irqrestore(&nesqp->lock, flags); | 3630 | spin_unlock_irqrestore(&nesqp->lock, flags); |
3599 | nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0); | ||
3600 | nes_cm_disconn(nesqp); | 3631 | nes_cm_disconn(nesqp); |
3601 | break; | 3632 | break; |
3602 | 3633 | ||
@@ -3694,7 +3725,9 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3694 | case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: | 3725 | case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: |
3695 | printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n", | 3726 | printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n", |
3696 | nesqp->hwqp.qp_id, async_event_id); | 3727 | nesqp->hwqp.qp_id, async_event_id); |
3697 | nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); | 3728 | print_ip(nesqp->cm_node); |
3729 | if (!atomic_read(&nesqp->close_timer_started)) | ||
3730 | nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); | ||
3698 | break; | 3731 | break; |
3699 | 3732 | ||
3700 | case NES_AEQE_AEID_CQ_OPERATION_ERROR: | 3733 | case NES_AEQE_AEID_CQ_OPERATION_ERROR: |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index c3241479ec0e..0b590e152c6a 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -47,6 +47,11 @@ | |||
47 | #define NES_MULTICAST_PF_MAX 8 | 47 | #define NES_MULTICAST_PF_MAX 8 |
48 | #define NES_A0 3 | 48 | #define NES_A0 3 |
49 | 49 | ||
50 | #define NES_ENABLE_PAU 0x07000001 | ||
51 | #define NES_DISABLE_PAU 0x07000000 | ||
52 | #define NES_PAU_COUNTER 10 | ||
53 | #define NES_CQP_OPCODE_MASK 0x3f | ||
54 | |||
50 | enum pci_regs { | 55 | enum pci_regs { |
51 | NES_INT_STAT = 0x0000, | 56 | NES_INT_STAT = 0x0000, |
52 | NES_INT_MASK = 0x0004, | 57 | NES_INT_MASK = 0x0004, |
@@ -73,8 +78,10 @@ enum indexed_regs { | |||
73 | NES_IDX_QP_CONTROL = 0x0040, | 78 | NES_IDX_QP_CONTROL = 0x0040, |
74 | NES_IDX_FLM_CONTROL = 0x0080, | 79 | NES_IDX_FLM_CONTROL = 0x0080, |
75 | NES_IDX_INT_CPU_STATUS = 0x00a0, | 80 | NES_IDX_INT_CPU_STATUS = 0x00a0, |
81 | NES_IDX_GPR_TRIGGER = 0x00bc, | ||
76 | NES_IDX_GPIO_CONTROL = 0x00f0, | 82 | NES_IDX_GPIO_CONTROL = 0x00f0, |
77 | NES_IDX_GPIO_DATA = 0x00f4, | 83 | NES_IDX_GPIO_DATA = 0x00f4, |
84 | NES_IDX_GPR2 = 0x010c, | ||
78 | NES_IDX_TCP_CONFIG0 = 0x01e4, | 85 | NES_IDX_TCP_CONFIG0 = 0x01e4, |
79 | NES_IDX_TCP_TIMER_CONFIG = 0x01ec, | 86 | NES_IDX_TCP_TIMER_CONFIG = 0x01ec, |
80 | NES_IDX_TCP_NOW = 0x01f0, | 87 | NES_IDX_TCP_NOW = 0x01f0, |
@@ -202,6 +209,7 @@ enum nes_cqp_opcodes { | |||
202 | NES_CQP_REGISTER_SHARED_STAG = 0x0c, | 209 | NES_CQP_REGISTER_SHARED_STAG = 0x0c, |
203 | NES_CQP_DEALLOCATE_STAG = 0x0d, | 210 | NES_CQP_DEALLOCATE_STAG = 0x0d, |
204 | NES_CQP_MANAGE_ARP_CACHE = 0x0f, | 211 | NES_CQP_MANAGE_ARP_CACHE = 0x0f, |
212 | NES_CQP_DOWNLOAD_SEGMENT = 0x10, | ||
205 | NES_CQP_SUSPEND_QPS = 0x11, | 213 | NES_CQP_SUSPEND_QPS = 0x11, |
206 | NES_CQP_UPLOAD_CONTEXT = 0x13, | 214 | NES_CQP_UPLOAD_CONTEXT = 0x13, |
207 | NES_CQP_CREATE_CEQ = 0x16, | 215 | NES_CQP_CREATE_CEQ = 0x16, |
@@ -210,7 +218,8 @@ enum nes_cqp_opcodes { | |||
210 | NES_CQP_DESTROY_AEQ = 0x1b, | 218 | NES_CQP_DESTROY_AEQ = 0x1b, |
211 | NES_CQP_LMI_ACCESS = 0x20, | 219 | NES_CQP_LMI_ACCESS = 0x20, |
212 | NES_CQP_FLUSH_WQES = 0x22, | 220 | NES_CQP_FLUSH_WQES = 0x22, |
213 | NES_CQP_MANAGE_APBVT = 0x23 | 221 | NES_CQP_MANAGE_APBVT = 0x23, |
222 | NES_CQP_MANAGE_QUAD_HASH = 0x25 | ||
214 | }; | 223 | }; |
215 | 224 | ||
216 | enum nes_cqp_wqe_word_idx { | 225 | enum nes_cqp_wqe_word_idx { |
@@ -222,6 +231,14 @@ enum nes_cqp_wqe_word_idx { | |||
222 | NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5, | 231 | NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5, |
223 | }; | 232 | }; |
224 | 233 | ||
234 | enum nes_cqp_wqe_word_download_idx { /* format differs from other cqp ops */ | ||
235 | NES_CQP_WQE_DL_OPCODE_IDX = 0, | ||
236 | NES_CQP_WQE_DL_COMP_CTX_LOW_IDX = 1, | ||
237 | NES_CQP_WQE_DL_COMP_CTX_HIGH_IDX = 2, | ||
238 | NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX = 3 | ||
239 | /* For index values 4-15 use NES_NIC_SQ_WQE_ values */ | ||
240 | }; | ||
241 | |||
225 | enum nes_cqp_cq_wqeword_idx { | 242 | enum nes_cqp_cq_wqeword_idx { |
226 | NES_CQP_CQ_WQE_PBL_LOW_IDX = 6, | 243 | NES_CQP_CQ_WQE_PBL_LOW_IDX = 6, |
227 | NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7, | 244 | NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7, |
@@ -242,6 +259,7 @@ enum nes_cqp_stag_wqeword_idx { | |||
242 | NES_CQP_STAG_WQE_PBL_LEN_IDX = 14 | 259 | NES_CQP_STAG_WQE_PBL_LEN_IDX = 14 |
243 | }; | 260 | }; |
244 | 261 | ||
262 | #define NES_CQP_OP_LOGICAL_PORT_SHIFT 26 | ||
245 | #define NES_CQP_OP_IWARP_STATE_SHIFT 28 | 263 | #define NES_CQP_OP_IWARP_STATE_SHIFT 28 |
246 | #define NES_CQP_OP_TERMLEN_SHIFT 28 | 264 | #define NES_CQP_OP_TERMLEN_SHIFT 28 |
247 | 265 | ||
@@ -599,6 +617,7 @@ enum nes_nic_sq_wqe_bits { | |||
599 | 617 | ||
600 | enum nes_nic_cqe_word_idx { | 618 | enum nes_nic_cqe_word_idx { |
601 | NES_NIC_CQE_ACCQP_ID_IDX = 0, | 619 | NES_NIC_CQE_ACCQP_ID_IDX = 0, |
620 | NES_NIC_CQE_HASH_RCVNXT = 1, | ||
602 | NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2, | 621 | NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2, |
603 | NES_NIC_CQE_MISC_IDX = 3, | 622 | NES_NIC_CQE_MISC_IDX = 3, |
604 | }; | 623 | }; |
@@ -1005,6 +1024,11 @@ struct nes_arp_entry { | |||
1005 | #define NES_NIC_CQ_DOWNWARD_TREND 16 | 1024 | #define NES_NIC_CQ_DOWNWARD_TREND 16 |
1006 | #define NES_PFT_SIZE 48 | 1025 | #define NES_PFT_SIZE 48 |
1007 | 1026 | ||
1027 | #define NES_MGT_WQ_COUNT 32 | ||
1028 | #define NES_MGT_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_32) | (NES_NIC_CTX_SQ_SIZE_32)) | ||
1029 | #define NES_MGT_QP_OFFSET 36 | ||
1030 | #define NES_MGT_QP_COUNT 4 | ||
1031 | |||
1008 | struct nes_hw_tune_timer { | 1032 | struct nes_hw_tune_timer { |
1009 | /* u16 cq_count; */ | 1033 | /* u16 cq_count; */ |
1010 | u16 threshold_low; | 1034 | u16 threshold_low; |
@@ -1118,6 +1142,7 @@ struct nes_adapter { | |||
1118 | u32 et_rate_sample_interval; | 1142 | u32 et_rate_sample_interval; |
1119 | u32 timer_int_limit; | 1143 | u32 timer_int_limit; |
1120 | u32 wqm_quanta; | 1144 | u32 wqm_quanta; |
1145 | u8 allow_unaligned_fpdus; | ||
1121 | 1146 | ||
1122 | /* Adapter base MAC address */ | 1147 | /* Adapter base MAC address */ |
1123 | u32 mac_addr_low; | 1148 | u32 mac_addr_low; |
@@ -1251,6 +1276,14 @@ struct nes_vnic { | |||
1251 | enum ib_event_type delayed_event; | 1276 | enum ib_event_type delayed_event; |
1252 | enum ib_event_type last_dispatched_event; | 1277 | enum ib_event_type last_dispatched_event; |
1253 | spinlock_t port_ibevent_lock; | 1278 | spinlock_t port_ibevent_lock; |
1279 | u32 mgt_mem_size; | ||
1280 | void *mgt_vbase; | ||
1281 | dma_addr_t mgt_pbase; | ||
1282 | struct nes_vnic_mgt *mgtvnic[NES_MGT_QP_COUNT]; | ||
1283 | struct task_struct *mgt_thread; | ||
1284 | wait_queue_head_t mgt_wait_queue; | ||
1285 | struct sk_buff_head mgt_skb_list; | ||
1286 | |||
1254 | }; | 1287 | }; |
1255 | 1288 | ||
1256 | struct nes_ib_device { | 1289 | struct nes_ib_device { |
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c new file mode 100644 index 000000000000..b3b2a240c6e9 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_mgt.c | |||
@@ -0,0 +1,1162 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/etherdevice.h> | ||
36 | #include <linux/kthread.h> | ||
37 | #include <linux/ip.h> | ||
38 | #include <linux/tcp.h> | ||
39 | #include <net/tcp.h> | ||
40 | #include "nes.h" | ||
41 | #include "nes_mgt.h" | ||
42 | |||
43 | atomic_t pau_qps_created; | ||
44 | atomic_t pau_qps_destroyed; | ||
45 | |||
46 | static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic) | ||
47 | { | ||
48 | unsigned long flags; | ||
49 | dma_addr_t bus_address; | ||
50 | struct sk_buff *skb; | ||
51 | struct nes_hw_nic_rq_wqe *nic_rqe; | ||
52 | struct nes_hw_mgt *nesmgt; | ||
53 | struct nes_device *nesdev; | ||
54 | struct nes_rskb_cb *cb; | ||
55 | u32 rx_wqes_posted = 0; | ||
56 | |||
57 | nesmgt = &mgtvnic->mgt; | ||
58 | nesdev = mgtvnic->nesvnic->nesdev; | ||
59 | spin_lock_irqsave(&nesmgt->rq_lock, flags); | ||
60 | if (nesmgt->replenishing_rq != 0) { | ||
61 | if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) && | ||
62 | (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) { | ||
63 | atomic_set(&mgtvnic->rx_skb_timer_running, 1); | ||
64 | spin_unlock_irqrestore(&nesmgt->rq_lock, flags); | ||
65 | mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */ | ||
66 | add_timer(&mgtvnic->rq_wqes_timer); | ||
67 | } else { | ||
68 | spin_unlock_irqrestore(&nesmgt->rq_lock, flags); | ||
69 | } | ||
70 | return; | ||
71 | } | ||
72 | nesmgt->replenishing_rq = 1; | ||
73 | spin_unlock_irqrestore(&nesmgt->rq_lock, flags); | ||
74 | do { | ||
75 | skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size); | ||
76 | if (skb) { | ||
77 | skb->dev = mgtvnic->nesvnic->netdev; | ||
78 | |||
79 | bus_address = pci_map_single(nesdev->pcidev, | ||
80 | skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
81 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
82 | cb->busaddr = bus_address; | ||
83 | cb->maplen = mgtvnic->nesvnic->max_frame_size; | ||
84 | |||
85 | nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head]; | ||
86 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = | ||
87 | cpu_to_le32(mgtvnic->nesvnic->max_frame_size); | ||
88 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; | ||
89 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = | ||
90 | cpu_to_le32((u32)bus_address); | ||
91 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = | ||
92 | cpu_to_le32((u32)((u64)bus_address >> 32)); | ||
93 | nesmgt->rx_skb[nesmgt->rq_head] = skb; | ||
94 | nesmgt->rq_head++; | ||
95 | nesmgt->rq_head &= nesmgt->rq_size - 1; | ||
96 | atomic_dec(&mgtvnic->rx_skbs_needed); | ||
97 | barrier(); | ||
98 | if (++rx_wqes_posted == 255) { | ||
99 | nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id); | ||
100 | rx_wqes_posted = 0; | ||
101 | } | ||
102 | } else { | ||
103 | spin_lock_irqsave(&nesmgt->rq_lock, flags); | ||
104 | if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) && | ||
105 | (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) { | ||
106 | atomic_set(&mgtvnic->rx_skb_timer_running, 1); | ||
107 | spin_unlock_irqrestore(&nesmgt->rq_lock, flags); | ||
108 | mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */ | ||
109 | add_timer(&mgtvnic->rq_wqes_timer); | ||
110 | } else { | ||
111 | spin_unlock_irqrestore(&nesmgt->rq_lock, flags); | ||
112 | } | ||
113 | break; | ||
114 | } | ||
115 | } while (atomic_read(&mgtvnic->rx_skbs_needed)); | ||
116 | barrier(); | ||
117 | if (rx_wqes_posted) | ||
118 | nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id); | ||
119 | nesmgt->replenishing_rq = 0; | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * nes_mgt_rq_wqes_timeout | ||
124 | */ | ||
125 | static void nes_mgt_rq_wqes_timeout(unsigned long parm) | ||
126 | { | ||
127 | struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm; | ||
128 | |||
129 | atomic_set(&mgtvnic->rx_skb_timer_running, 0); | ||
130 | if (atomic_read(&mgtvnic->rx_skbs_needed)) | ||
131 | nes_replenish_mgt_rq(mgtvnic); | ||
132 | } | ||
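nes_replenish_mgt_rq() and this timeout cooperate: when skb allocation fails, or a replenish is already in flight with the RQ fully drained, the half-second rq_wqes_timer is armed and its expiry re-enters the replenish path. The timer is presumably wired up during management-QP setup along these lines (assumed; the init code is outside this excerpt):

    init_timer(&mgtvnic->rq_wqes_timer);
    mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout;
    mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic;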
133 | |||
134 | /** | ||
135 | * nes_mgt_free_skb - unmap and free skb | ||
136 | */ | ||
137 | static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir) | ||
138 | { | ||
139 | struct nes_rskb_cb *cb; | ||
140 | |||
141 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
142 | pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir); | ||
143 | cb->busaddr = 0; | ||
144 | dev_kfree_skb_any(skb); | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * nes_download_callback - handle download completions | ||
149 | */ | ||
150 | static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) | ||
151 | { | ||
152 | struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer; | ||
153 | struct nes_qp *nesqp = fpdu_info->nesqp; | ||
154 | struct sk_buff *skb; | ||
155 | int i; | ||
156 | |||
157 | for (i = 0; i < fpdu_info->frag_cnt; i++) { | ||
158 | skb = fpdu_info->frags[i].skb; | ||
159 | if (fpdu_info->frags[i].cmplt) { | ||
160 | nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE); | ||
161 | nes_rem_ref_cm_node(nesqp->cm_node); | ||
162 | } | ||
163 | } | ||
164 | |||
165 | if (fpdu_info->hdr_vbase) | ||
166 | pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len, | ||
167 | fpdu_info->hdr_vbase, fpdu_info->hdr_pbase); | ||
168 | kfree(fpdu_info); | ||
169 | } | ||
170 | |||
171 | /** | ||
172 | * nes_get_seq - Get the seq, ack_seq, window, fin and rst flags from the packet | ||
173 | */ | ||
174 | static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd) | ||
175 | { | ||
176 | struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
177 | struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN); | ||
178 | struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); | ||
179 | |||
180 | *ack = be32_to_cpu(tcph->ack_seq); | ||
181 | *wnd = be16_to_cpu(tcph->window); | ||
182 | *fin_rcvd = tcph->fin; | ||
183 | *rst_rcvd = tcph->rst; | ||
184 | return be32_to_cpu(tcph->seq); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * nes_get_next_skb - Get the next skb based on where current skb is in the queue | ||
189 | */ | ||
190 | static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp, | ||
191 | struct sk_buff *skb, u32 nextseq, u32 *ack, | ||
192 | u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd) | ||
193 | { | ||
194 | u32 seq; | ||
195 | bool processacks; | ||
196 | struct sk_buff *old_skb; | ||
197 | |||
198 | if (skb) { | ||
199 | /* Continue processing fpdu */ | ||
200 | if (skb->next == (struct sk_buff *)&nesqp->pau_list) | ||
201 | goto out; | ||
202 | skb = skb->next; | ||
203 | processacks = false; | ||
204 | } else { | ||
205 | /* Starting a new one */ | ||
206 | if (skb_queue_empty(&nesqp->pau_list)) | ||
207 | goto out; | ||
208 | skb = skb_peek(&nesqp->pau_list); | ||
209 | processacks = true; | ||
210 | } | ||
211 | |||
212 | while (1) { | ||
213 | seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd); | ||
214 | if (seq == nextseq) { | ||
215 | if (skb->len || processacks) | ||
216 | break; | ||
217 | } else if (after(seq, nextseq)) { | ||
218 | goto out; | ||
219 | } | ||
220 | |||
221 | if (skb->next == (struct sk_buff *)&nesqp->pau_list) | ||
222 | goto out; | ||
223 | |||
224 | old_skb = skb; | ||
225 | skb = skb->next; | ||
226 | skb_unlink(old_skb, &nesqp->pau_list); | ||
227 | nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE); | ||
228 | nes_rem_ref_cm_node(nesqp->cm_node); | ||
229 | } | ||
230 | return skb; | ||
231 | |||
232 | out: | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
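Editor's note: the queue walk above depends on wraparound-safe TCP sequence comparison. A minimal user-space model of after()/before(), mirroring the signed-difference trick the kernel uses in net/tcp.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Signed 32-bit subtraction makes 0x00000005 count as "after"
     * 0xfffffffb even though it is numerically smaller. */
    static int seq_before(uint32_t a, uint32_t b)
    {
    	return (int32_t)(a - b) < 0;
    }

    static int seq_after(uint32_t a, uint32_t b)
    {
    	return seq_before(b, a);
    }

    int main(void)
    {
    	printf("%d\n", seq_after(0x00000005u, 0xfffffffbu));  /* 1: wrapped */
    	printf("%d\n", seq_before(0xfffffffbu, 0x00000005u)); /* 1 */
    	return 0;
    }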
236 | /** | ||
237 | * get_fpdu_info - Find the next complete fpdu and return its fragments. | ||
238 | */ | ||
239 | static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp, | ||
240 | struct pau_fpdu_info **pau_fpdu_info) | ||
241 | { | ||
242 | struct sk_buff *skb; | ||
243 | struct iphdr *iph; | ||
244 | struct tcphdr *tcph; | ||
245 | struct nes_rskb_cb *cb; | ||
246 | struct pau_fpdu_info *fpdu_info = NULL; | ||
247 | struct pau_fpdu_frag frags[MAX_FPDU_FRAGS]; | ||
248 | unsigned long flags; | ||
249 | u32 fpdu_len = 0; | ||
250 | u32 tmp_len; | ||
251 | int frag_cnt = 0; | ||
252 | u32 tot_len; | ||
253 | u32 frag_tot; | ||
254 | u32 ack; | ||
255 | u32 fin_rcvd; | ||
256 | u32 rst_rcvd; | ||
257 | u16 wnd; | ||
258 | int i; | ||
259 | int rc = 0; | ||
260 | |||
261 | *pau_fpdu_info = NULL; | ||
262 | |||
263 | spin_lock_irqsave(&nesqp->pau_lock, flags); | ||
264 | skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd); | ||
265 | if (!skb) { | ||
266 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
267 | goto out; | ||
268 | } | ||
269 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
270 | if (skb->len) { | ||
271 | fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING; | ||
272 | fpdu_len = (fpdu_len + 3) & 0xfffffffc; | ||
273 | tmp_len = fpdu_len; | ||
274 | |||
275 | /* See if we have all of the fpdu */ | ||
276 | frag_tot = 0; | ||
277 | memset(&frags, 0, sizeof frags); | ||
278 | for (i = 0; i < MAX_FPDU_FRAGS; i++) { | ||
279 | frags[i].physaddr = cb->busaddr; | ||
280 | frags[i].physaddr += skb->data - cb->data_start; | ||
281 | frags[i].frag_len = min(tmp_len, skb->len); | ||
282 | frags[i].skb = skb; | ||
283 | frags[i].cmplt = (skb->len == frags[i].frag_len); | ||
284 | frag_tot += frags[i].frag_len; | ||
285 | frag_cnt++; | ||
286 | |||
287 | tmp_len -= frags[i].frag_len; | ||
288 | if (tmp_len == 0) | ||
289 | break; | ||
290 | |||
291 | skb = nes_get_next_skb(nesdev, nesqp, skb, | ||
292 | nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd); | ||
293 | if (!skb) { | ||
294 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
295 | goto out; | ||
296 | } else if (rst_rcvd) { | ||
297 | /* rst received in the middle of fpdu */ | ||
298 | for (; i >= 0; i--) { | ||
299 | skb_unlink(frags[i].skb, &nesqp->pau_list); | ||
300 | nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE); | ||
301 | } | ||
302 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
303 | frags[0].physaddr = cb->busaddr; | ||
304 | frags[0].physaddr += skb->data - cb->data_start; | ||
305 | frags[0].frag_len = skb->len; | ||
306 | frags[0].skb = skb; | ||
307 | frags[0].cmplt = true; | ||
308 | frag_cnt = 1; | ||
309 | break; | ||
310 | } | ||
311 | |||
312 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
313 | } | ||
314 | } else { | ||
315 | /* no data */ | ||
316 | frags[0].physaddr = cb->busaddr; | ||
317 | frags[0].frag_len = 0; | ||
318 | frags[0].skb = skb; | ||
319 | frags[0].cmplt = true; | ||
320 | frag_cnt = 1; | ||
321 | } | ||
322 | |||
323 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
324 | |||
325 | /* Found one */ | ||
326 | fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC); | ||
327 | if (fpdu_info == NULL) { | ||
328 | nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n"); | ||
329 | rc = -ENOMEM; | ||
330 | goto out; | ||
331 | } | ||
332 | |||
333 | fpdu_info->cqp_request = nes_get_cqp_request(nesdev); | ||
334 | if (fpdu_info->cqp_request == NULL) { | ||
335 | nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n"); | ||
336 | rc = -ENOMEM; | ||
337 | goto out; | ||
338 | } | ||
339 | |||
340 | cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0]; | ||
341 | iph = (struct iphdr *)(cb->data_start + ETH_HLEN); | ||
342 | tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); | ||
343 | fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start; | ||
344 | fpdu_info->data_len = fpdu_len; | ||
345 | tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN; | ||
346 | |||
347 | if (frags[0].cmplt) { | ||
348 | fpdu_info->hdr_pbase = cb->busaddr; | ||
349 | fpdu_info->hdr_vbase = NULL; | ||
350 | } else { | ||
351 | fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev, | ||
352 | fpdu_info->hdr_len, &fpdu_info->hdr_pbase); | ||
353 | if (!fpdu_info->hdr_vbase) { | ||
354 | nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n"); | ||
355 | rc = -ENOMEM; | ||
356 | goto out; | ||
357 | } | ||
358 | |||
359 | /* Copy hdrs, adjusting len and seqnum */ | ||
360 | memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len); | ||
361 | iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN); | ||
362 | tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); | ||
363 | } | ||
364 | |||
365 | iph->tot_len = cpu_to_be16(tot_len); | ||
366 | iph->saddr = cpu_to_be32(0x7f000001); | ||
367 | |||
368 | tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt); | ||
369 | tcph->ack_seq = cpu_to_be32(ack); | ||
370 | tcph->window = cpu_to_be16(wnd); | ||
371 | |||
372 | nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd; | ||
373 | |||
374 | memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags)); | ||
375 | fpdu_info->frag_cnt = frag_cnt; | ||
376 | fpdu_info->nesqp = nesqp; | ||
377 | *pau_fpdu_info = fpdu_info; | ||
378 | |||
379 | /* Update skb's for next pass */ | ||
380 | for (i = 0; i < frag_cnt; i++) { | ||
381 | cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0]; | ||
382 | skb_pull(frags[i].skb, frags[i].frag_len); | ||
383 | |||
384 | if (frags[i].skb->len == 0) { | ||
385 | /* Pull skb off the list - it will be freed in the callback */ | ||
386 | spin_lock_irqsave(&nesqp->pau_lock, flags); | ||
387 | skb_unlink(frags[i].skb, &nesqp->pau_list); | ||
388 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
389 | } else { | ||
390 | /* Last skb still has data so update the seq */ | ||
391 | iph = (struct iphdr *)(cb->data_start + ETH_HLEN); | ||
392 | tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); | ||
393 | tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | out: | ||
398 | if (rc) { | ||
399 | if (fpdu_info) { | ||
400 | if (fpdu_info->cqp_request) | ||
401 | nes_put_cqp_request(nesdev, fpdu_info->cqp_request); | ||
402 | kfree(fpdu_info); | ||
403 | } | ||
404 | } | ||
405 | return rc; | ||
406 | } | ||
407 | |||
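Editor's note: the fpdu_len arithmetic in get_fpdu_info() adds MPA_FRAMING (the 2-byte length prefix plus the 4-byte CRC, per nes_mgt.h below) to the advertised MPA length and pads to a 4-byte boundary. A runnable sketch of that math only:

    #include <stdint.h>
    #include <stdio.h>

    #define MPA_FRAMING 6 /* 2-byte length field + 4-byte CRC */

    static uint32_t fpdu_wire_len(uint16_t mpa_len)
    {
    	uint32_t len = (uint32_t)mpa_len + MPA_FRAMING;

    	return (len + 3) & ~3u; /* same as (len + 3) & 0xfffffffc */
    }

    int main(void)
    {
    	printf("%u\n", fpdu_wire_len(1));  /* 8: 7 rounded up */
    	printf("%u\n", fpdu_wire_len(10)); /* 16: already aligned */
    	return 0;
    }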
408 | /** | ||
409 | * forward_fpdus - send complete fpdus, one at a time | ||
410 | */ | ||
411 | static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp) | ||
412 | { | ||
413 | struct nes_device *nesdev = nesvnic->nesdev; | ||
414 | struct pau_fpdu_info *fpdu_info; | ||
415 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
416 | struct nes_cqp_request *cqp_request; | ||
417 | u64 u64tmp; | ||
418 | u32 u32tmp; | ||
419 | int rc; | ||
420 | |||
421 | while (1) { | ||
422 | rc = get_fpdu_info(nesdev, nesqp, &fpdu_info); | ||
423 | if (fpdu_info == NULL) | ||
424 | return rc; | ||
425 | |||
426 | cqp_request = fpdu_info->cqp_request; | ||
427 | cqp_wqe = &cqp_request->cqp_wqe; | ||
428 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
429 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX, | ||
430 | NES_CQP_DOWNLOAD_SEGMENT | | ||
431 | (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT)); | ||
432 | |||
433 | u32tmp = fpdu_info->hdr_len << 16; | ||
434 | u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len; | ||
435 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX, | ||
436 | u32tmp); | ||
437 | |||
438 | u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len; | ||
439 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX, | ||
440 | u32tmp); | ||
441 | |||
442 | u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len; | ||
443 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX, | ||
444 | u32tmp); | ||
445 | |||
446 | u64tmp = (u64)fpdu_info->hdr_pbase; | ||
447 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX, | ||
448 | lower_32_bits(u64tmp)); | ||
449 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX, | ||
450 | upper_32_bits(u64tmp)); | ||
451 | |||
452 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, | ||
453 | lower_32_bits(fpdu_info->frags[0].physaddr)); | ||
454 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX, | ||
455 | upper_32_bits(fpdu_info->frags[0].physaddr)); | ||
456 | |||
457 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX, | ||
458 | lower_32_bits(fpdu_info->frags[1].physaddr)); | ||
459 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX, | ||
460 | upper_32_bits(fpdu_info->frags[1].physaddr)); | ||
461 | |||
462 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX, | ||
463 | lower_32_bits(fpdu_info->frags[2].physaddr)); | ||
464 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX, | ||
465 | upper_32_bits(fpdu_info->frags[2].physaddr)); | ||
466 | |||
467 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX, | ||
468 | lower_32_bits(fpdu_info->frags[3].physaddr)); | ||
469 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX, | ||
470 | upper_32_bits(fpdu_info->frags[3].physaddr)); | ||
471 | |||
472 | cqp_request->cqp_callback_pointer = fpdu_info; | ||
473 | cqp_request->callback = 1; | ||
474 | cqp_request->cqp_callback = nes_download_callback; | ||
475 | |||
476 | atomic_set(&cqp_request->refcount, 1); | ||
477 | nes_post_cqp_request(nesdev, cqp_request); | ||
478 | } | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
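Editor's note: the address split above relies on upper_32_bits() doing the shift itself, which is why it must be handed the unshifted address (passing an already shifted value would always yield zero for the high word). A user-space model of the two helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Same contracts as the kernel's lower_32_bits()/upper_32_bits(). */
    #define lower_32_bits(n) ((uint32_t)(n))
    #define upper_32_bits(n) ((uint32_t)((n) >> 32))

    int main(void)
    {
    	uint64_t pbase = 0x123456789abcdef0ull;

    	printf("low  = 0x%08x\n", lower_32_bits(pbase)); /* 0x9abcdef0 */
    	printf("high = 0x%08x\n", upper_32_bits(pbase)); /* 0x12345678 */
    	return 0;
    }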
483 | static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp) | ||
484 | { | ||
485 | int again = 1; | ||
486 | unsigned long flags; | ||
487 | |||
488 | do { | ||
489 | /* Ignore rc - if it failed, tcp retries will cause it to try again */ | ||
490 | forward_fpdus(nesvnic, nesqp); | ||
491 | |||
492 | spin_lock_irqsave(&nesqp->pau_lock, flags); | ||
493 | if (nesqp->pau_pending) { | ||
494 | nesqp->pau_pending = 0; | ||
495 | } else { | ||
496 | nesqp->pau_busy = 0; | ||
497 | again = 0; | ||
498 | } | ||
499 | |||
500 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
501 | } while (again); | ||
502 | } | ||
503 | |||
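Editor's note: process_fpdus() and queue_fpdus() cooperate through the pau_busy/pau_pending flags so that packets arriving while a pass is in flight trigger exactly one more pass. A single-threaded sketch of that flag protocol (in the driver the flags are guarded by nesqp->pau_lock):

    #include <stdio.h>

    static int busy, pending;

    static void submit(void)
    {
    	if (busy)
    		pending = 1;	/* a worker is active; it will loop again */
    	else
    		busy = 1;	/* caller becomes the worker */
    }

    static void worker(void)
    {
    	int again;

    	do {
    		printf("forwarding fpdus\n");
    		if (pending) {
    			pending = 0;
    			again = 1;
    		} else {
    			busy = 0;
    			again = 0;
    		}
    	} while (again);
    }

    int main(void)
    {
    	submit();	/* first packet arrives; we become the worker */
    	pending = 1;	/* pretend a second packet raced in meanwhile */
    	worker();	/* runs two passes, then drops busy */
    	return 0;
    }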
504 | /** | ||
505 | * queue_fpdus - Handle FPDUs that hw passed up to sw | ||
506 | */ | ||
507 | static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp) | ||
508 | { | ||
509 | struct sk_buff *tmpskb; | ||
510 | struct nes_rskb_cb *cb; | ||
511 | struct iphdr *iph; | ||
512 | struct tcphdr *tcph; | ||
513 | unsigned char *tcph_end; | ||
514 | u32 rcv_nxt; | ||
515 | u32 rcv_wnd; | ||
516 | u32 seqnum; | ||
517 | u32 len; | ||
518 | bool process_it = false; | ||
519 | unsigned long flags; | ||
520 | |||
521 | /* Move data ptr to after tcp header */ | ||
522 | iph = (struct iphdr *)skb->data; | ||
523 | tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); | ||
524 | seqnum = be32_to_cpu(tcph->seq); | ||
525 | tcph_end = (((char *)tcph) + (4 * tcph->doff)); | ||
526 | |||
527 | len = be16_to_cpu(iph->tot_len); | ||
528 | if (skb->len > len) | ||
529 | skb_trim(skb, len); | ||
530 | skb_pull(skb, tcph_end - skb->data); | ||
531 | |||
532 | /* Initialize tracking values */ | ||
533 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
534 | cb->seqnum = seqnum; | ||
535 | |||
536 | /* Make sure data is in the receive window */ | ||
537 | rcv_nxt = nesqp->pau_rcv_nxt; | ||
538 | rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd); | ||
539 | if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) { | ||
540 | nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE); | ||
541 | nes_rem_ref_cm_node(nesqp->cm_node); | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | spin_lock_irqsave(&nesqp->pau_lock, flags); | ||
546 | |||
547 | if (nesqp->pau_busy) | ||
548 | nesqp->pau_pending = 1; | ||
549 | else | ||
550 | nesqp->pau_busy = 1; | ||
551 | |||
552 | /* Queue skb by sequence number */ | ||
553 | if (skb_queue_len(&nesqp->pau_list) == 0) { | ||
554 | skb_queue_head(&nesqp->pau_list, skb); | ||
555 | } else { | ||
556 | tmpskb = nesqp->pau_list.next; | ||
557 | while (tmpskb != (struct sk_buff *)&nesqp->pau_list) { | ||
558 | cb = (struct nes_rskb_cb *)&tmpskb->cb[0]; | ||
559 | if (before(seqnum, cb->seqnum)) | ||
560 | break; | ||
561 | tmpskb = tmpskb->next; | ||
562 | } | ||
563 | skb_insert(tmpskb, skb, &nesqp->pau_list); | ||
564 | } | ||
565 | if (nesqp->pau_state == PAU_READY) | ||
566 | process_it = true; | ||
567 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
568 | |||
569 | if (process_it) | ||
570 | process_fpdus(nesvnic, nesqp); | ||
571 | |||
572 | return; | ||
573 | } | ||
574 | |||
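Editor's note: the insertion loop above keeps pau_list sorted by TCP sequence number so get_fpdu_info() can detect holes by walking forward. A sketch of the same ordered insert on a plain singly linked list, using the wraparound-safe compare shown earlier:

    #include <stdint.h>
    #include <stdio.h>

    struct node {
    	uint32_t seq;
    	struct node *next;
    };

    static int before(uint32_t a, uint32_t b)
    {
    	return (int32_t)(a - b) < 0;
    }

    /* Walk until an element with a later seq is found, insert before it. */
    static void insert_sorted(struct node **head, struct node *n)
    {
    	struct node **pp = head;

    	while (*pp && !before(n->seq, (*pp)->seq))
    		pp = &(*pp)->next;
    	n->next = *pp;
    	*pp = n;
    }

    int main(void)
    {
    	struct node a = { 300, NULL }, b = { 100, NULL }, c = { 200, NULL };
    	struct node *head = NULL, *p;

    	insert_sorted(&head, &a);
    	insert_sorted(&head, &b);
    	insert_sorted(&head, &c);
    	for (p = head; p; p = p->next)
    		printf("%u\n", p->seq); /* 100 200 300 */
    	return 0;
    }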
575 | /** | ||
576 | * mgt_thread - Handle mgt skbs in a safe context | ||
577 | */ | ||
578 | static int mgt_thread(void *context) | ||
579 | { | ||
580 | struct nes_vnic *nesvnic = context; | ||
581 | struct sk_buff *skb; | ||
582 | struct nes_rskb_cb *cb; | ||
583 | |||
584 | while (!kthread_should_stop()) { | ||
585 | wait_event_interruptible(nesvnic->mgt_wait_queue, | ||
586 | skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop()); | ||
587 | while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) { | ||
588 | skb = skb_dequeue(&nesvnic->mgt_skb_list); | ||
589 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
590 | cb->data_start = skb->data - ETH_HLEN; | ||
591 | cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start, | ||
592 | nesvnic->max_frame_size, PCI_DMA_TODEVICE); | ||
593 | queue_fpdus(skb, nesvnic, cb->nesqp); | ||
594 | } | ||
595 | } | ||
596 | |||
597 | /* Closing down so delete any entries on the queue */ | ||
598 | while (skb_queue_len(&nesvnic->mgt_skb_list)) { | ||
599 | skb = skb_dequeue(&nesvnic->mgt_skb_list); | ||
600 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
601 | nes_rem_ref_cm_node(cb->nesqp->cm_node); | ||
602 | dev_kfree_skb_any(skb); | ||
603 | } | ||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | /** | ||
608 | * nes_queue_mgt_skbs - Queue skb so it can be handled in a thread context | ||
609 | */ | ||
610 | void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp) | ||
611 | { | ||
612 | struct nes_rskb_cb *cb; | ||
613 | |||
614 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
615 | cb->nesqp = nesqp; | ||
616 | skb_queue_tail(&nesvnic->mgt_skb_list, skb); | ||
617 | wake_up_interruptible(&nesvnic->mgt_wait_queue); | ||
618 | } | ||
619 | |||
620 | void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp) | ||
621 | { | ||
622 | struct sk_buff *skb; | ||
623 | unsigned long flags; | ||
624 | atomic_inc(&pau_qps_destroyed); | ||
625 | |||
626 | /* Free packets that have not yet been forwarded */ | ||
627 | /* Lock is acquired by skb_dequeue when removing the skb */ | ||
628 | spin_lock_irqsave(&nesqp->pau_lock, flags); | ||
629 | while (skb_queue_len(&nesqp->pau_list)) { | ||
630 | skb = skb_dequeue(&nesqp->pau_list); | ||
631 | nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE); | ||
632 | nes_rem_ref_cm_node(nesqp->cm_node); | ||
633 | } | ||
634 | spin_unlock_irqrestore(&nesqp->pau_lock, flags); | ||
635 | } | ||
636 | |||
637 | static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) | ||
638 | { | ||
639 | struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer; | ||
640 | struct nes_cqp_request *new_request; | ||
641 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
642 | struct nes_adapter *nesadapter; | ||
643 | struct nes_qp *nesqp; | ||
644 | struct nes_v4_quad nes_quad; | ||
645 | u32 crc_value; | ||
646 | u64 u64temp; | ||
647 | |||
648 | nesadapter = nesdev->nesadapter; | ||
649 | nesqp = qh_chg->nesqp; | ||
650 | |||
651 | /* Should we handle the bad completion? */ | ||
652 | if (cqp_request->major_code) { | ||
653 | printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n", | ||
654 | cqp_request->major_code); | ||
655 | WARN_ON(1); | ||
656 | } | ||
657 | |||
658 | switch (nesqp->pau_state) { | ||
659 | case PAU_DEL_QH: | ||
660 | /* Old hash code deleted, now set the new one */ | ||
661 | nesqp->pau_state = PAU_ADD_LB_QH; | ||
662 | new_request = nes_get_cqp_request(nesdev); | ||
663 | if (new_request == NULL) { | ||
664 | nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n"); | ||
665 | WARN_ON(1); | ||
666 | return; | ||
667 | } | ||
668 | |||
669 | memset(&nes_quad, 0, sizeof(nes_quad)); | ||
670 | nes_quad.DstIpAdrIndex = | ||
671 | cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); | ||
672 | nes_quad.SrcIpadr = cpu_to_be32(0x7f000001); | ||
673 | nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]); | ||
674 | nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]); | ||
675 | |||
676 | /* Produce hash key */ | ||
677 | crc_value = get_crc_value(&nes_quad); | ||
678 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); | ||
679 | nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n", | ||
680 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | ||
681 | |||
682 | nesqp->hte_index &= nesadapter->hte_index_mask; | ||
683 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | ||
684 | nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001); | ||
685 | nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt); | ||
686 | |||
687 | cqp_wqe = &new_request->cqp_wqe; | ||
688 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
689 | set_wqe_32bit_value(cqp_wqe->wqe_words, | ||
690 | NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | | ||
691 | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS); | ||
692 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); | ||
693 | u64temp = (u64)nesqp->nesqp_context_pbase; | ||
694 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); | ||
695 | |||
696 | nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n"); | ||
697 | |||
698 | new_request->cqp_callback_pointer = qh_chg; | ||
699 | new_request->callback = 1; | ||
700 | new_request->cqp_callback = nes_chg_qh_handler; | ||
701 | atomic_set(&new_request->refcount, 1); | ||
702 | nes_post_cqp_request(nesdev, new_request); | ||
703 | break; | ||
704 | |||
705 | case PAU_ADD_LB_QH: | ||
706 | /* Start processing the queued FPDUs */ | ||
707 | nesqp->pau_state = PAU_READY; | ||
708 | process_fpdus(qh_chg->nesvnic, qh_chg->nesqp); | ||
709 | kfree(qh_chg); | ||
710 | break; | ||
711 | } | ||
712 | } | ||
713 | |||
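Editor's note: nes_chg_qh_handler() is a small state machine driven by CQP completions: the first completion confirms the old quad hash entry is deleted and posts the add of a loopback entry; the second flips the QP to PAU_READY and drains the queue. A condensed model of the progression (printf stands in for posting the next CQP request):

    #include <stdio.h>

    enum pau_qh_state { PAU_DEL_QH, PAU_ADD_LB_QH, PAU_READY };

    static enum pau_qh_state state = PAU_DEL_QH;

    static void on_cqp_complete(void)
    {
    	switch (state) {
    	case PAU_DEL_QH:
    		/* old entry gone; post the add-loopback request */
    		state = PAU_ADD_LB_QH;
    		printf("posting add-loopback quad hash\n");
    		break;
    	case PAU_ADD_LB_QH:
    		state = PAU_READY;
    		printf("processing queued fpdus\n");
    		break;
    	case PAU_READY:
    		break;
    	}
    }

    int main(void)
    {
    	on_cqp_complete(); /* delete completed */
    	on_cqp_complete(); /* add completed -> ready */
    	return 0;
    }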
714 | /** | ||
715 | * nes_change_quad_hash - swap the QP's quad hash for a loopback entry | ||
716 | */ | ||
717 | static int nes_change_quad_hash(struct nes_device *nesdev, | ||
718 | struct nes_vnic *nesvnic, struct nes_qp *nesqp) | ||
719 | { | ||
720 | struct nes_cqp_request *cqp_request = NULL; | ||
721 | struct pau_qh_chg *qh_chg = NULL; | ||
722 | u64 u64temp; | ||
723 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
724 | int ret = 0; | ||
725 | |||
726 | cqp_request = nes_get_cqp_request(nesdev); | ||
727 | if (cqp_request == NULL) { | ||
728 | nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n"); | ||
729 | ret = -ENOMEM; | ||
730 | goto chg_qh_err; | ||
731 | } | ||
732 | |||
733 | qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC); | ||
734 | if (qh_chg == NULL) { | ||
735 | nes_debug(NES_DBG_PAU, "Failed to allocate a qh_chg struct.\n"); | ||
736 | ret = -ENOMEM; | ||
737 | goto chg_qh_err; | ||
738 | } | ||
739 | qh_chg->nesdev = nesdev; | ||
740 | qh_chg->nesvnic = nesvnic; | ||
741 | qh_chg->nesqp = nesqp; | ||
742 | nesqp->pau_state = PAU_DEL_QH; | ||
743 | |||
744 | cqp_wqe = &cqp_request->cqp_wqe; | ||
745 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
746 | set_wqe_32bit_value(cqp_wqe->wqe_words, | ||
747 | NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE | | ||
748 | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS); | ||
749 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); | ||
750 | u64temp = (u64)nesqp->nesqp_context_pbase; | ||
751 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); | ||
752 | |||
753 | nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n"); | ||
754 | |||
755 | cqp_request->cqp_callback_pointer = qh_chg; | ||
756 | cqp_request->callback = 1; | ||
757 | cqp_request->cqp_callback = nes_chg_qh_handler; | ||
758 | atomic_set(&cqp_request->refcount, 1); | ||
759 | nes_post_cqp_request(nesdev, cqp_request); | ||
760 | |||
761 | return ret; | ||
762 | |||
763 | chg_qh_err: | ||
764 | kfree(qh_chg); | ||
765 | if (cqp_request) | ||
766 | nes_put_cqp_request(nesdev, cqp_request); | ||
767 | return ret; | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * nes_mgt_ce_handler | ||
772 | * This management code deals with any packed and unaligned (pau) FPDUs | ||
773 | * that the hardware cannot handle. | ||
774 | */ | ||
775 | static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | ||
776 | { | ||
777 | struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq); | ||
778 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
779 | u32 head; | ||
780 | u32 cq_size; | ||
781 | u32 cqe_count = 0; | ||
782 | u32 cqe_misc; | ||
783 | u32 qp_id = 0; | ||
784 | u32 skbs_needed; | ||
785 | unsigned long context; | ||
786 | struct nes_qp *nesqp; | ||
787 | struct sk_buff *rx_skb; | ||
788 | struct nes_rskb_cb *cb; | ||
789 | |||
790 | head = cq->cq_head; | ||
791 | cq_size = cq->cq_size; | ||
792 | |||
793 | while (1) { | ||
794 | cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]); | ||
795 | if (!(cqe_misc & NES_NIC_CQE_VALID)) | ||
796 | break; | ||
797 | |||
798 | nesqp = NULL; | ||
799 | if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) { | ||
800 | qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]); | ||
801 | qp_id &= 0x001fffff; | ||
802 | if (qp_id < nesadapter->max_qp) { | ||
803 | context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN]; | ||
804 | nesqp = (struct nes_qp *)context; | ||
805 | } | ||
806 | } | ||
807 | |||
808 | if (nesqp) { | ||
809 | if (nesqp->pau_mode == false) { | ||
810 | nesqp->pau_mode = true; /* First time for this qp */ | ||
811 | nesqp->pau_rcv_nxt = le32_to_cpu( | ||
812 | cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]); | ||
813 | skb_queue_head_init(&nesqp->pau_list); | ||
814 | spin_lock_init(&nesqp->pau_lock); | ||
815 | atomic_inc(&pau_qps_created); | ||
816 | nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp); | ||
817 | } | ||
818 | |||
819 | rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail]; | ||
820 | rx_skb->len = 0; | ||
821 | skb_put(rx_skb, cqe_misc & 0x0000ffff); | ||
822 | rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev); | ||
823 | cb = (struct nes_rskb_cb *)&rx_skb->cb[0]; | ||
824 | pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE); | ||
825 | cb->busaddr = 0; | ||
826 | mgtvnic->mgt.rq_tail++; | ||
827 | mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1; | ||
828 | |||
829 | nes_add_ref_cm_node(nesqp->cm_node); | ||
830 | nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp); | ||
831 | } else { | ||
832 | printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id); | ||
833 | } | ||
834 | |||
835 | cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0; | ||
836 | cqe_count++; | ||
837 | if (++head >= cq_size) | ||
838 | head = 0; | ||
839 | |||
840 | if (cqe_count == 255) { | ||
841 | /* Replenish mgt CQ */ | ||
842 | nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16)); | ||
843 | nesdev->currcq_count += cqe_count; | ||
844 | cqe_count = 0; | ||
845 | } | ||
846 | |||
847 | skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed); | ||
848 | if (skbs_needed > (mgtvnic->mgt.rq_size >> 1)) | ||
849 | nes_replenish_mgt_rq(mgtvnic); | ||
850 | } | ||
851 | |||
852 | cq->cq_head = head; | ||
853 | nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | ||
854 | cq->cq_number | (cqe_count << 16)); | ||
855 | nes_read32(nesdev->regs + NES_CQE_ALLOC); | ||
856 | nesdev->currcq_count += cqe_count; | ||
857 | } | ||
858 | |||
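Editor's note: two idioms above recur throughout this file: ring indices wrap with a mask because the queue sizes are powers of two, and completions are acknowledged to the hardware in batches of at most 255 per doorbell write. A compact model of both (RQ_SIZE here is an arbitrary power of two):

    #include <stdint.h>
    #include <stdio.h>

    #define RQ_SIZE 512u /* must be a power of two */

    int main(void)
    {
    	uint32_t tail = RQ_SIZE - 2;
    	uint32_t i, cqe_count = 0;

    	for (i = 0; i < 4; i++) {
    		tail = (tail + 1) & (RQ_SIZE - 1); /* 511, 0, 1, 2 */
    		if (++cqe_count == 255) {
    			printf("ring doorbell for %u cqes\n", cqe_count);
    			cqe_count = 0;
    		}
    	}
    	printf("tail=%u, %u cqes pending ack\n", tail, cqe_count);
    	return 0;
    }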
859 | /** | ||
860 | * nes_init_mgt_qp - allocate and initialize the mgt QPs, CQs and RQ buffers | ||
861 | */ | ||
862 | int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic) | ||
863 | { | ||
864 | struct nes_vnic_mgt *mgtvnic; | ||
865 | u32 counter; | ||
866 | void *vmem; | ||
867 | dma_addr_t pmem; | ||
868 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
869 | u32 cqp_head; | ||
870 | unsigned long flags; | ||
871 | struct nes_hw_nic_qp_context *mgt_context; | ||
872 | u64 u64temp; | ||
873 | struct nes_hw_nic_rq_wqe *mgt_rqe; | ||
874 | struct sk_buff *skb; | ||
875 | u32 wqe_count; | ||
876 | struct nes_rskb_cb *cb; | ||
877 | u32 mgt_mem_size; | ||
878 | void *mgt_vbase; | ||
879 | dma_addr_t mgt_pbase; | ||
880 | int i; | ||
881 | int ret; | ||
882 | |||
883 | /* Allocate space for all mgt QPs at once */ | ||
884 | mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL); | ||
885 | if (mgtvnic == NULL) { | ||
886 | nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n"); | ||
887 | return -ENOMEM; | ||
888 | } | ||
889 | |||
890 | /* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */ | ||
891 | /* We are not sending from this NIC so the SQ is not allocated */ | ||
892 | mgt_mem_size = 256 + | ||
893 | (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) + | ||
894 | (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) + | ||
895 | sizeof(struct nes_hw_nic_qp_context); | ||
896 | mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); | ||
897 | mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase); | ||
898 | if (!mgt_vbase) { | ||
899 | kfree(mgtvnic); | ||
900 | nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n"); | ||
901 | return -ENOMEM; | ||
902 | } | ||
903 | |||
904 | nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size; | ||
905 | nesvnic->mgt_vbase = mgt_vbase; | ||
906 | nesvnic->mgt_pbase = mgt_pbase; | ||
907 | |||
908 | skb_queue_head_init(&nesvnic->mgt_skb_list); | ||
909 | init_waitqueue_head(&nesvnic->mgt_wait_queue); | ||
910 | nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread"); | ||
911 | |||
912 | for (i = 0; i < NES_MGT_QP_COUNT; i++) { | ||
913 | mgtvnic->nesvnic = nesvnic; | ||
914 | mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i; | ||
915 | memset(mgt_vbase, 0, mgt_mem_size); | ||
916 | nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n", | ||
917 | mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size); | ||
918 | |||
919 | vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) & | ||
920 | ~(unsigned long)(256 - 1)); | ||
921 | pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) & | ||
922 | ~(unsigned long long)(256 - 1)); | ||
923 | |||
924 | spin_lock_init(&mgtvnic->mgt.rq_lock); | ||
925 | |||
926 | /* setup the RQ */ | ||
927 | mgtvnic->mgt.rq_vbase = vmem; | ||
928 | mgtvnic->mgt.rq_pbase = pmem; | ||
929 | mgtvnic->mgt.rq_head = 0; | ||
930 | mgtvnic->mgt.rq_tail = 0; | ||
931 | mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT; | ||
932 | |||
933 | /* setup the CQ */ | ||
934 | vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)); | ||
935 | pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)); | ||
936 | |||
937 | mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id; | ||
938 | mgtvnic->mgt_cq.cq_vbase = vmem; | ||
939 | mgtvnic->mgt_cq.cq_pbase = pmem; | ||
940 | mgtvnic->mgt_cq.cq_head = 0; | ||
941 | mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT; | ||
942 | |||
943 | mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler; | ||
944 | |||
945 | /* Send CreateCQ request to CQP */ | ||
946 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
947 | cqp_head = nesdev->cqp.sq_head; | ||
948 | |||
949 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
950 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
951 | |||
952 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32( | ||
953 | NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | | ||
954 | ((u32)mgtvnic->mgt_cq.cq_size << 16)); | ||
955 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32( | ||
956 | mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)); | ||
957 | u64temp = (u64)mgtvnic->mgt_cq.cq_pbase; | ||
958 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
959 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; | ||
960 | u64temp = (unsigned long)&mgtvnic->mgt_cq; | ||
961 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1)); | ||
962 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = | ||
963 | cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF); | ||
964 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; | ||
965 | |||
966 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
967 | cqp_head = 0; | ||
968 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
969 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
970 | |||
971 | /* Send CreateQP request to CQP */ | ||
972 | mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]); | ||
973 | mgt_context->context_words[NES_NIC_CTX_MISC_IDX] = | ||
974 | cpu_to_le32((u32)NES_MGT_CTX_SIZE | | ||
975 | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12)); | ||
976 | nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n", | ||
977 | nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE), | ||
978 | nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE)); | ||
979 | if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) | ||
980 | mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE); | ||
981 | |||
982 | u64temp = (u64)mgtvnic->mgt.rq_pbase; | ||
983 | mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp); | ||
984 | mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); | ||
985 | u64temp = (u64)mgtvnic->mgt.rq_pbase; | ||
986 | mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp); | ||
987 | mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); | ||
988 | |||
989 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP | | ||
990 | NES_CQP_QP_TYPE_NIC); | ||
991 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id); | ||
992 | u64temp = (u64)mgtvnic->mgt_cq.cq_pbase + | ||
993 | (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe)); | ||
994 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); | ||
995 | |||
996 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
997 | cqp_head = 0; | ||
998 | nesdev->cqp.sq_head = cqp_head; | ||
999 | |||
1000 | barrier(); | ||
1001 | |||
1002 | /* Ring doorbell (2 WQEs) */ | ||
1003 | nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); | ||
1004 | |||
1005 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1006 | nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n", | ||
1007 | mgtvnic->mgt.qp_id); | ||
1008 | |||
1009 | ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), | ||
1010 | NES_EVENT_TIMEOUT); | ||
1011 | nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n", | ||
1012 | mgtvnic->mgt.qp_id, ret); | ||
1013 | if (!ret) { | ||
1014 | nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id); | ||
1015 | if (i == 0) { | ||
1016 | pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase, | ||
1017 | nesvnic->mgt_pbase); | ||
1018 | kfree(mgtvnic); | ||
1019 | } else { | ||
1020 | nes_destroy_mgt(nesvnic); | ||
1021 | } | ||
1022 | return -EIO; | ||
1023 | } | ||
1024 | |||
1025 | /* Populate the RQ */ | ||
1026 | for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) { | ||
1027 | skb = dev_alloc_skb(nesvnic->max_frame_size); | ||
1028 | if (!skb) { | ||
1029 | nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name); | ||
1030 | return -ENOMEM; | ||
1031 | } | ||
1032 | |||
1033 | skb->dev = netdev; | ||
1034 | |||
1035 | pmem = pci_map_single(nesdev->pcidev, skb->data, | ||
1036 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
1037 | cb = (struct nes_rskb_cb *)&skb->cb[0]; | ||
1038 | cb->busaddr = pmem; | ||
1039 | cb->maplen = nesvnic->max_frame_size; | ||
1040 | |||
1041 | mgt_rqe = &mgtvnic->mgt.rq_vbase[counter]; | ||
1042 | mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size); | ||
1043 | mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; | ||
1044 | mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem); | ||
1045 | mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32)); | ||
1046 | mgtvnic->mgt.rx_skb[counter] = skb; | ||
1047 | } | ||
1048 | |||
1049 | init_timer(&mgtvnic->rq_wqes_timer); | ||
1050 | mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout; | ||
1051 | mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic; | ||
1052 | |||
1053 | wqe_count = NES_MGT_WQ_COUNT - 1; | ||
1054 | mgtvnic->mgt.rq_head = wqe_count; | ||
1055 | barrier(); | ||
1056 | do { | ||
1057 | counter = min(wqe_count, ((u32)255)); | ||
1058 | wqe_count -= counter; | ||
1059 | nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id); | ||
1060 | } while (wqe_count); | ||
1061 | |||
1062 | nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | ||
1063 | mgtvnic->mgt_cq.cq_number); | ||
1064 | nes_read32(nesdev->regs + NES_CQE_ALLOC); | ||
1065 | |||
1066 | mgt_vbase += mgt_mem_size; | ||
1067 | mgt_pbase += mgt_mem_size; | ||
1068 | nesvnic->mgtvnic[i] = mgtvnic++; | ||
1069 | } | ||
1070 | return 0; | ||
1071 | } | ||
1072 | |||
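Editor's note: the setup code carves one DMA-coherent allocation per mgt QP into a 256-byte-aligned RQ ring, then the CQ ring and QP context behind it. A sketch of the alignment math only; the base address and WQE geometry below are made-up values, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN256(x) (((x) + 255ull) & ~255ull)

    int main(void)
    {
    	uint64_t pbase = 0x1000010ull;	/* hypothetical dma handle */
    	uint64_t rq = ALIGN256(pbase);	/* 0x1000100: next 256B boundary */
    	uint64_t cq = rq + 16 * 64;	/* past 16 hypothetical 64B RQ WQEs */

    	printf("rq=0x%llx cq=0x%llx\n",
    	       (unsigned long long)rq, (unsigned long long)cq);
    	return 0;
    }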
1073 | |||
1074 | void nes_destroy_mgt(struct nes_vnic *nesvnic) | ||
1075 | { | ||
1076 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1077 | struct nes_vnic_mgt *mgtvnic; | ||
1078 | struct nes_vnic_mgt *first_mgtvnic; | ||
1079 | unsigned long flags; | ||
1080 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1081 | u32 cqp_head; | ||
1082 | struct sk_buff *rx_skb; | ||
1083 | int i; | ||
1084 | int ret; | ||
1085 | |||
1086 | kthread_stop(nesvnic->mgt_thread); | ||
1087 | |||
1088 | /* Free remaining NIC receive buffers */ | ||
1089 | first_mgtvnic = nesvnic->mgtvnic[0]; | ||
1090 | for (i = 0; i < NES_MGT_QP_COUNT; i++) { | ||
1091 | mgtvnic = nesvnic->mgtvnic[i]; | ||
1092 | if (mgtvnic == NULL) | ||
1093 | continue; | ||
1094 | |||
1095 | while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) { | ||
1096 | rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail]; | ||
1097 | nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE); | ||
1098 | mgtvnic->mgt.rq_tail++; | ||
1099 | mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1); | ||
1100 | } | ||
1101 | |||
1102 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1103 | |||
1104 | /* Destroy NIC QP */ | ||
1105 | cqp_head = nesdev->cqp.sq_head; | ||
1106 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1107 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1108 | |||
1109 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1110 | (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC)); | ||
1111 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1112 | mgtvnic->mgt.qp_id); | ||
1113 | |||
1114 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
1115 | cqp_head = 0; | ||
1116 | |||
1117 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1118 | |||
1119 | /* Destroy NIC CQ */ | ||
1120 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1121 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1122 | (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16))); | ||
1123 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1124 | (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16))); | ||
1125 | |||
1126 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
1127 | cqp_head = 0; | ||
1128 | |||
1129 | nesdev->cqp.sq_head = cqp_head; | ||
1130 | barrier(); | ||
1131 | |||
1132 | /* Ring doorbell (2 WQEs) */ | ||
1133 | nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); | ||
1134 | |||
1135 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1136 | nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u," | ||
1137 | " cqp.sq_tail=%u, cqp.sq_size=%u\n", | ||
1138 | cqp_head, nesdev->cqp.sq_head, | ||
1139 | nesdev->cqp.sq_tail, nesdev->cqp.sq_size); | ||
1140 | |||
1141 | ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), | ||
1142 | NES_EVENT_TIMEOUT); | ||
1143 | |||
1144 | nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u," | ||
1145 | " cqp.sq_head=%u, cqp.sq_tail=%u\n", | ||
1146 | ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail); | ||
1147 | if (!ret) | ||
1148 | nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n", | ||
1149 | mgtvnic->mgt.qp_id); | ||
1150 | |||
1151 | nesvnic->mgtvnic[i] = NULL; | ||
1152 | } | ||
1153 | |||
1154 | if (nesvnic->mgt_vbase) { | ||
1155 | pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase, | ||
1156 | nesvnic->mgt_pbase); | ||
1157 | nesvnic->mgt_vbase = NULL; | ||
1158 | nesvnic->mgt_pbase = 0; | ||
1159 | } | ||
1160 | |||
1161 | kfree(first_mgtvnic); | ||
1162 | } | ||
diff --git a/drivers/infiniband/hw/nes/nes_mgt.h b/drivers/infiniband/hw/nes/nes_mgt.h new file mode 100644 index 000000000000..8c8af254555a --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_mgt.h | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 Intel-NE, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef __NES_MGT_H | ||
34 | #define __NES_MGT_H | ||
35 | |||
36 | #define MPA_FRAMING 6 /* length is 2 bytes, crc is 4 bytes */ | ||
37 | |||
38 | int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic); | ||
39 | void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp); | ||
40 | void nes_destroy_mgt(struct nes_vnic *nesvnic); | ||
41 | void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp); | ||
42 | |||
43 | struct nes_hw_mgt { | ||
44 | struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */ | ||
45 | dma_addr_t rq_pbase; /* PCI memory for host rings */ | ||
46 | struct sk_buff *rx_skb[NES_NIC_WQ_SIZE]; | ||
47 | u16 qp_id; | ||
48 | u16 sq_head; | ||
49 | u16 rq_head; | ||
50 | u16 rq_tail; | ||
51 | u16 rq_size; | ||
52 | u8 replenishing_rq; | ||
53 | u8 reserved; | ||
54 | spinlock_t rq_lock; | ||
55 | }; | ||
56 | |||
57 | struct nes_vnic_mgt { | ||
58 | struct nes_vnic *nesvnic; | ||
59 | struct nes_hw_mgt mgt; | ||
60 | struct nes_hw_nic_cq mgt_cq; | ||
61 | atomic_t rx_skbs_needed; | ||
62 | struct timer_list rq_wqes_timer; | ||
63 | atomic_t rx_skb_timer_running; | ||
64 | }; | ||
65 | |||
66 | #define MAX_FPDU_FRAGS 4 | ||
67 | struct pau_fpdu_frag { | ||
68 | struct sk_buff *skb; | ||
69 | u64 physaddr; | ||
70 | u32 frag_len; | ||
71 | bool cmplt; | ||
72 | }; | ||
73 | |||
74 | struct pau_fpdu_info { | ||
75 | struct nes_qp *nesqp; | ||
76 | struct nes_cqp_request *cqp_request; | ||
77 | void *hdr_vbase; | ||
78 | dma_addr_t hdr_pbase; | ||
79 | int hdr_len; | ||
80 | u16 data_len; | ||
81 | u16 frag_cnt; | ||
82 | struct pau_fpdu_frag frags[MAX_FPDU_FRAGS]; | ||
83 | }; | ||
84 | |||
85 | enum pau_qh_state { | ||
86 | PAU_DEL_QH, | ||
87 | PAU_ADD_LB_QH, | ||
88 | PAU_READY | ||
89 | }; | ||
90 | |||
91 | struct pau_qh_chg { | ||
92 | struct nes_device *nesdev; | ||
93 | struct nes_vnic *nesvnic; | ||
94 | struct nes_qp *nesqp; | ||
95 | }; | ||
96 | |||
97 | #endif /* __NES_MGT_H */ | ||
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 47b2ee4c01e2..c00d2f3f8966 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -1091,6 +1091,8 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { | |||
1091 | "LRO aggregated", | 1091 | "LRO aggregated", |
1092 | "LRO flushed", | 1092 | "LRO flushed", |
1093 | "LRO no_desc", | 1093 | "LRO no_desc", |
1094 | "PAU CreateQPs", | ||
1095 | "PAU DestroyQPs", | ||
1094 | }; | 1096 | }; |
1095 | #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset) | 1097 | #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset) |
1096 | 1098 | ||
@@ -1306,6 +1308,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, | |||
1306 | target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; | 1308 | target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; |
1307 | target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; | 1309 | target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; |
1308 | target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; | 1310 | target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; |
1311 | target_stat_values[++index] = atomic_read(&pau_qps_created); | ||
1312 | target_stat_values[++index] = atomic_read(&pau_qps_destroyed); | ||
1309 | } | 1313 | } |
1310 | 1314 | ||
1311 | /** | 1315 | /** |
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index f9c417c6b3b3..cd10968bfa22 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
@@ -51,13 +51,34 @@ | |||
51 | 51 | ||
52 | #include "nes.h" | 52 | #include "nes.h" |
53 | 53 | ||
54 | |||
55 | |||
56 | static u16 nes_read16_eeprom(void __iomem *addr, u16 offset); | 54 | static u16 nes_read16_eeprom(void __iomem *addr, u16 offset); |
57 | 55 | ||
58 | u32 mh_detected; | 56 | u32 mh_detected; |
59 | u32 mh_pauses_sent; | 57 | u32 mh_pauses_sent; |
60 | 58 | ||
59 | u32 nes_set_pau(struct nes_device *nesdev) | ||
60 | { | ||
61 | u32 ret = 0; | ||
62 | u32 counter; | ||
63 | |||
64 | nes_write_indexed(nesdev, NES_IDX_GPR2, NES_ENABLE_PAU); | ||
65 | nes_write_indexed(nesdev, NES_IDX_GPR_TRIGGER, 1); | ||
66 | |||
67 | for (counter = 0; counter < NES_PAU_COUNTER; counter++) { | ||
68 | udelay(30); | ||
69 | if (!nes_read_indexed(nesdev, NES_IDX_GPR2)) { | ||
70 | printk(KERN_INFO PFX "PAU is supported.\n"); | ||
71 | break; | ||
72 | } | ||
73 | nes_write_indexed(nesdev, NES_IDX_GPR_TRIGGER, 1); | ||
74 | } | ||
75 | if (counter == NES_PAU_COUNTER) { | ||
76 | printk(KERN_INFO PFX "PAU is not supported.\n"); | ||
77 | return -EPERM; | ||
78 | } | ||
79 | return ret; | ||
80 | } | ||
81 | |||
61 | /** | 82 | /** |
62 | * nes_read_eeprom_values - | 83 | * nes_read_eeprom_values - |
63 | */ | 84 | */ |
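Editor's note: nes_set_pau() above is a bounded poll: write the enable request, trigger the firmware, then re-read the register with a short delay until it clears or the retry budget runs out. A user-space model with stand-in accessors (read_reg()/trigger() and the fake 3-read latency are hypothetical):

    #include <stdio.h>

    #define PAU_RETRIES 100

    static int attempts_left = 3; /* pretend fw answers on the 3rd read */

    static unsigned read_reg(void) { return --attempts_left > 0; }
    static void trigger(void) { }

    int main(void)
    {
    	unsigned i;

    	trigger();
    	for (i = 0; i < PAU_RETRIES; i++) {
    		/* udelay(30) in the driver */
    		if (!read_reg()) {
    			printf("PAU supported after %u polls\n", i + 1);
    			return 0;
    		}
    		trigger();
    	}
    	printf("PAU not supported\n");
    	return 1;
    }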
@@ -187,6 +208,11 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada | |||
187 | if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3)) | 208 | if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3)) |
188 | nesadapter->send_term_ok = 1; | 209 | nesadapter->send_term_ok = 1; |
189 | 210 | ||
211 | if (nes_drv_opt & NES_DRV_OPT_ENABLE_PAU) { | ||
212 | if (!nes_set_pau(nesdev)) | ||
213 | nesadapter->allow_unaligned_fpdus = 1; | ||
214 | } | ||
215 | |||
190 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + | 216 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + |
191 | (u32)((u8)eeprom_data); | 217 | (u32)((u8)eeprom_data); |
192 | 218 | ||
@@ -594,6 +620,7 @@ void nes_put_cqp_request(struct nes_device *nesdev, | |||
594 | nes_free_cqp_request(nesdev, cqp_request); | 620 | nes_free_cqp_request(nesdev, cqp_request); |
595 | } | 621 | } |
596 | 622 | ||
623 | |||
597 | /** | 624 | /** |
598 | * nes_post_cqp_request | 625 | * nes_post_cqp_request |
599 | */ | 626 | */ |
@@ -604,6 +631,8 @@ void nes_post_cqp_request(struct nes_device *nesdev, | |||
604 | unsigned long flags; | 631 | unsigned long flags; |
605 | u32 cqp_head; | 632 | u32 cqp_head; |
606 | u64 u64temp; | 633 | u64 u64temp; |
634 | u32 opcode; | ||
635 | int ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX; | ||
607 | 636 | ||
608 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | 637 | spin_lock_irqsave(&nesdev->cqp.lock, flags); |
609 | 638 | ||
@@ -614,17 +643,20 @@ void nes_post_cqp_request(struct nes_device *nesdev, | |||
614 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | 643 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; |
615 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | 644 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; |
616 | memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); | 645 | memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); |
646 | opcode = le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]); | ||
647 | if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT) | ||
648 | ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX; | ||
617 | barrier(); | 649 | barrier(); |
618 | u64temp = (unsigned long)cqp_request; | 650 | u64temp = (unsigned long)cqp_request; |
619 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX, | 651 | set_wqe_64bit_value(cqp_wqe->wqe_words, ctx_index, u64temp); |
620 | u64temp); | ||
621 | nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ," | 652 | nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ," |
622 | " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u," | 653 | " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u," |
623 | " waiting = %d, refcount = %d.\n", | 654 | " waiting = %d, refcount = %d.\n", |
624 | le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, | 655 | opcode & NES_CQP_OPCODE_MASK, |
625 | le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request, | 656 | le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request, |
626 | nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size, | 657 | nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size, |
627 | cqp_request->waiting, atomic_read(&cqp_request->refcount)); | 658 | cqp_request->waiting, atomic_read(&cqp_request->refcount)); |
659 | |||
628 | barrier(); | 660 | barrier(); |
629 | 661 | ||
630 | /* Ring doorbell (1 WQEs) */ | 662 | /* Ring doorbell (1 WQEs) */ |
@@ -645,7 +677,6 @@ void nes_post_cqp_request(struct nes_device *nesdev, | |||
645 | return; | 677 | return; |
646 | } | 678 | } |
647 | 679 | ||
648 | |||
649 | /** | 680 | /** |
650 | * nes_arp_table | 681 | * nes_arp_table |
651 | */ | 682 | */ |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 9f2f7d4b1197..5095bc41c6cc 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -1458,7 +1458,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) | |||
1458 | struct ib_qp_attr attr; | 1458 | struct ib_qp_attr attr; |
1459 | struct iw_cm_id *cm_id; | 1459 | struct iw_cm_id *cm_id; |
1460 | struct iw_cm_event cm_event; | 1460 | struct iw_cm_event cm_event; |
1461 | int ret; | 1461 | int ret = 0; |
1462 | 1462 | ||
1463 | atomic_inc(&sw_qps_destroyed); | 1463 | atomic_inc(&sw_qps_destroyed); |
1464 | nesqp->destroyed = 1; | 1464 | nesqp->destroyed = 1; |
@@ -1511,7 +1511,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp) | |||
1511 | if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq)) | 1511 | if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq)) |
1512 | nes_clean_cq(nesqp, nesqp->nesrcq); | 1512 | nes_clean_cq(nesqp, nesqp->nesrcq); |
1513 | } | 1513 | } |
1514 | |||
1515 | nes_rem_ref(&nesqp->ibqp); | 1514 | nes_rem_ref(&nesqp->ibqp); |
1516 | return 0; | 1515 | return 0; |
1517 | } | 1516 | } |
@@ -2338,8 +2337,10 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
2338 | 2337 | ||
2339 | skip_pages = ((u32)region->offset) >> 12; | 2338 | skip_pages = ((u32)region->offset) >> 12; |
2340 | 2339 | ||
2341 | if (ib_copy_from_udata(&req, udata, sizeof(req))) | 2340 | if (ib_copy_from_udata(&req, udata, sizeof(req))) { |
2341 | ib_umem_release(region); | ||
2342 | return ERR_PTR(-EFAULT); | 2342 | return ERR_PTR(-EFAULT); |
2343 | } | ||
2343 | nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type); | 2344 | nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type); |
2344 | 2345 | ||
2345 | switch (req.reg_type) { | 2346 | switch (req.reg_type) { |
@@ -2631,6 +2632,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
2631 | return &nesmr->ibmr; | 2632 | return &nesmr->ibmr; |
2632 | } | 2633 | } |
2633 | 2634 | ||
2635 | ib_umem_release(region); | ||
2634 | return ERR_PTR(-ENOSYS); | 2636 | return ERR_PTR(-ENOSYS); |
2635 | } | 2637 | } |
2636 | 2638 | ||
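Editor's note: the two nes_reg_user_mr() hunks above plug a leak: once the umem region is pinned, every error return must release it. A minimal model of the pattern, with hypothetical get_region()/put_region() helpers standing in for ib_umem_get()/ib_umem_release():

    #include <stdio.h>
    #include <errno.h>

    static int get_region(void)  { printf("pinned\n");   return 0; }
    static void put_region(void) { printf("released\n"); }

    static int register_mr(int bad_input)
    {
    	if (get_region())
    		return -ENOMEM;

    	if (bad_input) {
    		put_region();	/* the release the fix adds */
    		return -EFAULT;
    	}

    	/* ... build the MR; on success the region stays pinned ... */
    	return 0;
    }

    int main(void)
    {
    	printf("rc=%d\n", register_mr(1));
    	return 0;
    }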
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index 2df9993e0cac..fe6b6e92fa90 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h | |||
@@ -139,7 +139,8 @@ struct nes_qp { | |||
139 | struct nes_cq *nesrcq; | 139 | struct nes_cq *nesrcq; |
140 | struct nes_pd *nespd; | 140 | struct nes_pd *nespd; |
141 | void *cm_node; /* handle of the node this QP is associated with */ | 141 | void *cm_node; /* handle of the node this QP is associated with */ |
142 | struct ietf_mpa_frame *ietf_frame; | 142 | void *ietf_frame; |
143 | u8 ietf_frame_size; | ||
143 | dma_addr_t ietf_frame_pbase; | 144 | dma_addr_t ietf_frame_pbase; |
144 | struct ib_mr *lsmm_mr; | 145 | struct ib_mr *lsmm_mr; |
145 | struct nes_hw_qp hwqp; | 146 | struct nes_hw_qp hwqp; |
@@ -154,6 +155,7 @@ struct nes_qp { | |||
154 | u32 mmap_sq_db_index; | 155 | u32 mmap_sq_db_index; |
155 | u32 mmap_rq_db_index; | 156 | u32 mmap_rq_db_index; |
156 | spinlock_t lock; | 157 | spinlock_t lock; |
158 | spinlock_t pau_lock; | ||
157 | struct nes_qp_context *nesqp_context; | 159 | struct nes_qp_context *nesqp_context; |
158 | dma_addr_t nesqp_context_pbase; | 160 | dma_addr_t nesqp_context_pbase; |
159 | void *pbl_vbase; | 161 | void *pbl_vbase; |
@@ -161,6 +163,8 @@ struct nes_qp { | |||
161 | struct page *page; | 163 | struct page *page; |
162 | struct timer_list terminate_timer; | 164 | struct timer_list terminate_timer; |
163 | enum ib_event_type terminate_eventtype; | 165 | enum ib_event_type terminate_eventtype; |
166 | struct sk_buff_head pau_list; | ||
167 | u32 pau_rcv_nxt; | ||
164 | u16 active_conn:1; | 168 | u16 active_conn:1; |
165 | u16 skip_lsmm:1; | 169 | u16 skip_lsmm:1; |
166 | u16 user_mode:1; | 170 | u16 user_mode:1; |
@@ -168,7 +172,8 @@ struct nes_qp { | |||
168 | u16 flush_issued:1; | 172 | u16 flush_issued:1; |
169 | u16 destroyed:1; | 173 | u16 destroyed:1; |
170 | u16 sig_all:1; | 174 | u16 sig_all:1; |
171 | u16 rsvd:9; | 175 | u16 pau_mode:1; |
176 | u16 rsvd:8; | ||
172 | u16 private_data_len; | 177 | u16 private_data_len; |
173 | u16 term_sq_flush_code; | 178 | u16 term_sq_flush_code; |
174 | u16 term_rq_flush_code; | 179 | u16 term_rq_flush_code; |
@@ -176,5 +181,8 @@ struct nes_qp { | |||
176 | u8 hw_tcp_state; | 181 | u8 hw_tcp_state; |
177 | u8 term_flags; | 182 | u8 term_flags; |
178 | u8 sq_kmapped; | 183 | u8 sq_kmapped; |
184 | u8 pau_busy; | ||
185 | u8 pau_pending; | ||
186 | u8 pau_state; | ||
179 | }; | 187 | }; |
180 | #endif /* NES_VERBS_H */ | 188 | #endif /* NES_VERBS_H */ |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index c9624ea87209..b881bdc401f5 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -171,7 +171,9 @@ struct qib_ctxtdata { | |||
171 | /* how many alloc_pages() chunks in rcvegrbuf_pages */ | 171 | /* how many alloc_pages() chunks in rcvegrbuf_pages */ |
172 | u32 rcvegrbuf_chunks; | 172 | u32 rcvegrbuf_chunks; |
173 | /* how many egrbufs per chunk */ | 173 | /* how many egrbufs per chunk */ |
174 | u32 rcvegrbufs_perchunk; | 174 | u16 rcvegrbufs_perchunk; |
175 | /* ilog2 of above */ | ||
176 | u16 rcvegrbufs_perchunk_shift; | ||
175 | /* order for rcvegrbuf_pages */ | 177 | /* order for rcvegrbuf_pages */ |
176 | size_t rcvegrbuf_size; | 178 | size_t rcvegrbuf_size; |
177 | /* rcvhdrq size (for freeing) */ | 179 | /* rcvhdrq size (for freeing) */ |
@@ -221,6 +223,9 @@ struct qib_ctxtdata { | |||
221 | /* ctxt rcvhdrq head offset */ | 223 | /* ctxt rcvhdrq head offset */ |
222 | u32 head; | 224 | u32 head; |
223 | u32 pkt_count; | 225 | u32 pkt_count; |
226 | /* lookaside fields */ | ||
227 | struct qib_qp *lookaside_qp; | ||
228 | u32 lookaside_qpn; | ||
224 | /* QPs waiting for context processing */ | 229 | /* QPs waiting for context processing */ |
225 | struct list_head qp_wait_list; | 230 | struct list_head qp_wait_list; |
226 | }; | 231 | }; |
@@ -807,6 +812,10 @@ struct qib_devdata { | |||
807 | * supports, less gives more pio bufs/ctxt, etc. | 812 | * supports, less gives more pio bufs/ctxt, etc. |
808 | */ | 813 | */ |
809 | u32 cfgctxts; | 814 | u32 cfgctxts; |
815 | /* | ||
816 | * number of ctxts available for PSM open | ||
817 | */ | ||
818 | u32 freectxts; | ||
810 | 819 | ||
811 | /* | 820 | /* |
812 | * hint that we should update pioavailshadow before | 821 | * hint that we should update pioavailshadow before |
@@ -936,7 +945,9 @@ struct qib_devdata { | |||
936 | /* chip address space used by 4k pio buffers */ | 945 | /* chip address space used by 4k pio buffers */ |
937 | u32 align4k; | 946 | u32 align4k; |
938 | /* size of each rcvegrbuffer */ | 947 | /* size of each rcvegrbuffer */ |
939 | u32 rcvegrbufsize; | 948 | u16 rcvegrbufsize; |
949 | /* log2 of above */ | ||
950 | u16 rcvegrbufsize_shift; | ||
940 | /* localbus width (1, 2,4,8,16,32) from config space */ | 951 | /* localbus width (1, 2,4,8,16,32) from config space */ |
941 | u32 lbus_width; | 952 | u32 lbus_width; |
942 | /* localbus speed in MHz */ | 953 | /* localbus speed in MHz */ |
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 43a8e030194e..c90a55f4120f 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
@@ -280,10 +280,10 @@ bail: | |||
280 | */ | 280 | */ |
281 | static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) | 281 | static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) |
282 | { | 282 | { |
283 | const u32 chunk = etail / rcd->rcvegrbufs_perchunk; | 283 | const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift; |
284 | const u32 idx = etail % rcd->rcvegrbufs_perchunk; | 284 | const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1); |
285 | 285 | ||
286 | return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize; | 286 | return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift); |
287 | } | 287 | } |
288 | 288 | ||
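
The chunk/index math above collapses a divide and a modulo into a shift and a mask, which is only correct because rcvegrbufs_perchunk is now required to be a power of two (see the BUG_ON/ilog2 additions in qib_init.c further down). A minimal sketch of the identity being relied on, with illustrative sizes:

    #include <assert.h>

    int main(void)
    {
        const unsigned perchunk = 64;   /* must be a power of two */
        const unsigned shift = 6;       /* ilog2(64) */
        unsigned etail;

        for (etail = 0; etail < 4096; etail++) {
            assert(etail / perchunk == etail >> shift);
            assert(etail % perchunk == (etail & (perchunk - 1)));
        }
        return 0;
    }
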
289 | /* | 289 | /* |
@@ -311,7 +311,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, | |||
311 | u32 opcode; | 311 | u32 opcode; |
312 | u32 psn; | 312 | u32 psn; |
313 | int diff; | 313 | int diff; |
314 | unsigned long flags; | ||
315 | 314 | ||
316 | /* Sanity check packet */ | 315 | /* Sanity check packet */ |
317 | if (tlen < 24) | 316 | if (tlen < 24) |
@@ -366,7 +365,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, | |||
366 | 365 | ||
367 | switch (qp->ibqp.qp_type) { | 366 | switch (qp->ibqp.qp_type) { |
368 | case IB_QPT_RC: | 367 | case IB_QPT_RC: |
369 | spin_lock_irqsave(&qp->s_lock, flags); | ||
370 | ruc_res = | 368 | ruc_res = |
371 | qib_ruc_check_hdr( | 369 | qib_ruc_check_hdr( |
372 | ibp, hdr, | 370 | ibp, hdr, |
@@ -374,11 +372,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, | |||
374 | qp, | 372 | qp, |
375 | be32_to_cpu(ohdr->bth[0])); | 373 | be32_to_cpu(ohdr->bth[0])); |
376 | if (ruc_res) { | 374 | if (ruc_res) { |
377 | spin_unlock_irqrestore(&qp->s_lock, | ||
378 | flags); | ||
379 | goto unlock; | 375 | goto unlock; |
380 | } | 376 | } |
381 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
382 | 377 | ||
383 | /* Only deal with RDMA Writes for now */ | 378 | /* Only deal with RDMA Writes for now */ |
384 | if (opcode < | 379 | if (opcode < |
@@ -548,6 +543,15 @@ move_along: | |||
548 | updegr = 0; | 543 | updegr = 0; |
549 | } | 544 | } |
550 | } | 545 | } |
546 | /* | ||
547 | * Notify qib_destroy_qp() if it is waiting | ||
548 | * for lookaside_qp to finish. | ||
549 | */ | ||
550 | if (rcd->lookaside_qp) { | ||
551 | if (atomic_dec_and_test(&rcd->lookaside_qp->refcount)) | ||
552 | wake_up(&rcd->lookaside_qp->wait); | ||
553 | rcd->lookaside_qp = NULL; | ||
554 | } | ||
551 | 555 | ||
552 | rcd->head = l; | 556 | rcd->head = l; |
553 | rcd->pkt_count += i; | 557 | rcd->pkt_count += i; |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 6d38e24397c5..574600ef5b42 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -1285,6 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, | |||
1285 | strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); | 1285 | strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); |
1286 | ctxt_fp(fp) = rcd; | 1286 | ctxt_fp(fp) = rcd; |
1287 | qib_stats.sps_ctxts++; | 1287 | qib_stats.sps_ctxts++; |
1288 | dd->freectxts--; | ||
1288 | ret = 0; | 1289 | ret = 0; |
1289 | goto bail; | 1290 | goto bail; |
1290 | 1291 | ||
@@ -1793,6 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp) | |||
1793 | if (dd->pageshadow) | 1794 | if (dd->pageshadow) |
1794 | unlock_expected_tids(rcd); | 1795 | unlock_expected_tids(rcd); |
1795 | qib_stats.sps_ctxts--; | 1796 | qib_stats.sps_ctxts--; |
1797 | dd->freectxts++; | ||
1796 | } | 1798 | } |
1797 | 1799 | ||
1798 | mutex_unlock(&qib_mutex); | 1800 | mutex_unlock(&qib_mutex); |
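
freectxts replaces the arithmetic the sysfs handler used to redo on every read (see qib_sysfs.c below): the counter starts at the number of configured user contexts in enable_chip(), falls when a context is opened in setup_ctxt(), and rises again in qib_close(). A minimal sketch of the invariant, with illustrative context counts:

    #include <assert.h>

    int main(void)
    {
        const unsigned cfgctxts = 18, first_user_ctxt = 2;  /* illustrative */
        unsigned freectxts = cfgctxts - first_user_ctxt;    /* enable_chip() */
        unsigned sps_ctxts = 0;

        /* three opens, then one close */
        for (int i = 0; i < 3; i++) { sps_ctxts++; freectxts--; }
        sps_ctxts--; freectxts++;

        /* same value the sysfs code used to recompute on each read */
        assert(freectxts == cfgctxts - first_user_ctxt - sps_ctxts);
        return 0;
    }
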
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d8ca0a0b970d..781a802a321f 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -3273,6 +3273,8 @@ static int init_6120_variables(struct qib_devdata *dd) | |||
3273 | /* we always allocate at least 2048 bytes for eager buffers */ | 3273 | /* we always allocate at least 2048 bytes for eager buffers */ |
3274 | ret = ib_mtu_enum_to_int(qib_ibmtu); | 3274 | ret = ib_mtu_enum_to_int(qib_ibmtu); |
3275 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | 3275 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; |
3276 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); | ||
3277 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | ||
3276 | 3278 | ||
3277 | qib_6120_tidtemplate(dd); | 3279 | qib_6120_tidtemplate(dd); |
3278 | 3280 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 357234732b66..439d3c503cd5 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
@@ -4086,6 +4086,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
4086 | /* we always allocate at least 2048 bytes for eager buffers */ | 4086 | /* we always allocate at least 2048 bytes for eager buffers */ |
4087 | ret = ib_mtu_enum_to_int(qib_ibmtu); | 4087 | ret = ib_mtu_enum_to_int(qib_ibmtu); |
4088 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | 4088 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; |
4089 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); | ||
4090 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | ||
4089 | 4091 | ||
4090 | qib_7220_tidtemplate(dd); | 4092 | qib_7220_tidtemplate(dd); |
4091 | 4093 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index a9dc6935e07f..5bd2162b95dc 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -2311,12 +2311,15 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2311 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | 2311 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << |
2312 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | 2312 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); |
2313 | 2313 | ||
2314 | ppd->cpspec->ibcctrl_a = val; | ||
2314 | /* | 2315 | /* |
2315 | * Reset the PCS interface to the serdes (and also ibc, which is still | 2316 | * Reset the PCS interface to the serdes (and also ibc, which is still |
2316 | * in reset from above). Writes new value of ibcctrl_a as last step. | 2317 | * in reset from above). Writes new value of ibcctrl_a as last step. |
2317 | */ | 2318 | */ |
2318 | qib_7322_mini_pcs_reset(ppd); | 2319 | qib_7322_mini_pcs_reset(ppd); |
2319 | qib_write_kreg(dd, kr_scratch, 0ULL); | 2320 | qib_write_kreg(dd, kr_scratch, 0ULL); |
2321 | /* clear the linkinit cmds */ | ||
2322 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); | ||
2320 | 2323 | ||
2321 | if (!ppd->cpspec->ibcctrl_b) { | 2324 | if (!ppd->cpspec->ibcctrl_b) { |
2322 | unsigned lse = ppd->link_speed_enabled; | 2325 | unsigned lse = ppd->link_speed_enabled; |
@@ -2388,11 +2391,6 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2388 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | 2391 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); |
2389 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | 2392 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); |
2390 | 2393 | ||
2391 | /* Hold the link state machine for mezz boards */ | ||
2392 | if (IS_QMH(dd) || IS_QME(dd)) | ||
2393 | qib_set_ib_7322_lstate(ppd, 0, | ||
2394 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
2395 | |||
2396 | /* Also enable IBSTATUSCHG interrupt. */ | 2394 | /* Also enable IBSTATUSCHG interrupt. */ |
2397 | val = qib_read_kreg_port(ppd, krp_errmask); | 2395 | val = qib_read_kreg_port(ppd, krp_errmask); |
2398 | qib_write_kreg_port(ppd, krp_errmask, | 2396 | qib_write_kreg_port(ppd, krp_errmask, |
@@ -2854,9 +2852,8 @@ static irqreturn_t qib_7322intr(int irq, void *data) | |||
2854 | for (i = 0; i < dd->first_user_ctxt; i++) { | 2852 | for (i = 0; i < dd->first_user_ctxt; i++) { |
2855 | if (ctxtrbits & rmask) { | 2853 | if (ctxtrbits & rmask) { |
2856 | ctxtrbits &= ~rmask; | 2854 | ctxtrbits &= ~rmask; |
2857 | if (dd->rcd[i]) { | 2855 | if (dd->rcd[i]) |
2858 | qib_kreceive(dd->rcd[i], NULL, &npkts); | 2856 | qib_kreceive(dd->rcd[i], NULL, &npkts); |
2859 | } | ||
2860 | } | 2857 | } |
2861 | rmask <<= 1; | 2858 | rmask <<= 1; |
2862 | } | 2859 | } |
@@ -5231,6 +5228,8 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |||
5231 | QIBL_IB_AUTONEG_INPROG))) | 5228 | QIBL_IB_AUTONEG_INPROG))) |
5232 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | 5229 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); |
5233 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | 5230 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { |
5231 | struct qib_qsfp_data *qd = | ||
5232 | &ppd->cpspec->qsfp_data; | ||
5234 | /* unlock the Tx settings, speed may change */ | 5233 | /* unlock the Tx settings, speed may change */ |
5235 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | 5234 | qib_write_kreg_port(ppd, krp_tx_deemph_override, |
5236 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | 5235 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, |
@@ -5238,6 +5237,12 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |||
5238 | qib_cancel_sends(ppd); | 5237 | qib_cancel_sends(ppd); |
5239 | /* on link down, ensure sane pcs state */ | 5238 | /* on link down, ensure sane pcs state */ |
5240 | qib_7322_mini_pcs_reset(ppd); | 5239 | qib_7322_mini_pcs_reset(ppd); |
5240 | /* schedule the qsfp refresh which should turn the link | ||
5241 | off */ | ||
5242 | if (ppd->dd->flags & QIB_HAS_QSFP) { | ||
5243 | qd->t_insert = get_jiffies_64(); | ||
5244 | schedule_work(&qd->work); | ||
5245 | } | ||
5241 | spin_lock_irqsave(&ppd->sdma_lock, flags); | 5246 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
5242 | if (__qib_sdma_running(ppd)) | 5247 | if (__qib_sdma_running(ppd)) |
5243 | __qib_sdma_process_event(ppd, | 5248 | __qib_sdma_process_event(ppd, |
@@ -5588,43 +5593,79 @@ static void qsfp_7322_event(struct work_struct *work) | |||
5588 | struct qib_qsfp_data *qd; | 5593 | struct qib_qsfp_data *qd; |
5589 | struct qib_pportdata *ppd; | 5594 | struct qib_pportdata *ppd; |
5590 | u64 pwrup; | 5595 | u64 pwrup; |
5596 | unsigned long flags; | ||
5591 | int ret; | 5597 | int ret; |
5592 | u32 le2; | 5598 | u32 le2; |
5593 | 5599 | ||
5594 | qd = container_of(work, struct qib_qsfp_data, work); | 5600 | qd = container_of(work, struct qib_qsfp_data, work); |
5595 | ppd = qd->ppd; | 5601 | ppd = qd->ppd; |
5596 | pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC); | 5602 | pwrup = qd->t_insert + |
5603 | msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC); | ||
5597 | 5604 | ||
5598 | /* | 5605 | /* Delay for 20 msecs to allow ModPrs resistor to settle */ |
5599 | * Some QSFPs not only do not respond until the full power-up | 5606 | mdelay(QSFP_MODPRS_LAG_MSEC); |
5600 | * time, but may behave badly if we try. So hold off responding | 5607 | |
5601 | * to insertion. | 5608 | if (!qib_qsfp_mod_present(ppd)) { |
5602 | */ | 5609 | ppd->cpspec->qsfp_data.modpresent = 0; |
5603 | while (1) { | 5610 | /* Set the physical link to disabled */ |
5604 | u64 now = get_jiffies_64(); | 5611 | qib_set_ib_7322_lstate(ppd, 0, |
5605 | if (time_after64(now, pwrup)) | 5612 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); |
5606 | break; | 5613 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
5607 | msleep(20); | 5614 | ppd->lflags &= ~QIBL_LINKV; |
5608 | } | 5615 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); |
5609 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); | 5616 | } else { |
5610 | /* | 5617 | /* |
5611 | * Need to change LE2 back to defaults if we couldn't | 5618 | * Some QSFP's not only do not respond until the full power-up |
5612 | * read the cable type (to handle cable swaps), so do this | 5619 | * time, but may behave badly if we try. So hold off responding |
5613 | * even on failure to read cable information. We don't | 5620 | * to insertion. |
5614 | * get here for QME, so IS_QME check not needed here. | 5621 | */ |
5615 | */ | 5622 | while (1) { |
5616 | if (!ret && !ppd->dd->cspec->r1) { | 5623 | u64 now = get_jiffies_64(); |
5617 | if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) | 5624 | if (time_after64(now, pwrup)) |
5618 | le2 = LE2_QME; | 5625 | break; |
5619 | else if (qd->cache.atten[1] >= qib_long_atten && | 5626 | msleep(20); |
5620 | QSFP_IS_CU(qd->cache.tech)) | 5627 | } |
5621 | le2 = LE2_5m; | 5628 | |
5622 | else | 5629 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); |
5630 | |||
5631 | /* | ||
5632 | * Need to change LE2 back to defaults if we couldn't | ||
5633 | * read the cable type (to handle cable swaps), so do this | ||
5634 | * even on failure to read cable information. We don't | ||
5635 | * get here for QME, so IS_QME check not needed here. | ||
5636 | */ | ||
5637 | if (!ret && !ppd->dd->cspec->r1) { | ||
5638 | if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) | ||
5639 | le2 = LE2_QME; | ||
5640 | else if (qd->cache.atten[1] >= qib_long_atten && | ||
5641 | QSFP_IS_CU(qd->cache.tech)) | ||
5642 | le2 = LE2_5m; | ||
5643 | else | ||
5644 | le2 = LE2_DEFAULT; | ||
5645 | } else | ||
5623 | le2 = LE2_DEFAULT; | 5646 | le2 = LE2_DEFAULT; |
5624 | } else | 5647 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); |
5625 | le2 = LE2_DEFAULT; | 5648 | /* |
5626 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); | 5649 | * We always change parameters, since we can choose |
5627 | init_txdds_table(ppd, 0); | 5650 | * values for cables without eeproms, and the cable may have |
5651 | * changed from a cable with full or partial eeprom content | ||
5652 | * to one with partial or no content. | ||
5653 | */ | ||
5654 | init_txdds_table(ppd, 0); | ||
5655 | /* The physical link is being re-enabled only when the | ||
5656 | * previous state was DISABLED and the VALID bit is not | ||
5657 | * set. This should only happen when the cable has been | ||
5658 | * physically pulled. */ | ||
5659 | if (!ppd->cpspec->qsfp_data.modpresent && | ||
5660 | (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) { | ||
5661 | ppd->cpspec->qsfp_data.modpresent = 1; | ||
5662 | qib_set_ib_7322_lstate(ppd, 0, | ||
5663 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | ||
5664 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5665 | ppd->lflags |= QIBL_LINKV; | ||
5666 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5667 | } | ||
5668 | } | ||
5628 | } | 5669 | } |
5629 | 5670 | ||
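
The reworked handler now serves both removal and insertion: after a 20 ms delay for the ModPrs signal, an absent module disables the physical link and clears QIBL_LINKV, while a present module waits out the remaining power-up lag (2000 - 20 = 1980 ms from insertion), refreshes the cable cache, reprograms LE2 and the Tx DDS tables, and re-arms the link only if the module had previously been pulled. A minimal userspace model of the re-enable gate, with simplified stand-in flags:

    #include <stdio.h>

    enum { LINKV = 1, LINK_DISABLED = 2 };  /* stand-ins for QIBL_* flags */

    struct port { unsigned lflags; int modpresent; };

    static void qsfp_event(struct port *p, int module_in)
    {
        if (!module_in) {                   /* cable pulled */
            p->modpresent = 0;
            p->lflags &= ~LINKV;            /* link state no longer valid */
            p->lflags |= LINK_DISABLED;     /* physical link held down */
            printf("link disabled\n");
        } else if (!p->modpresent &&
                   (p->lflags & (LINKV | LINK_DISABLED))) {
            p->modpresent = 1;              /* module back after a pull */
            p->lflags &= ~LINK_DISABLED;
            p->lflags |= LINKV;
            printf("link re-armed\n");
        }
    }

    int main(void)
    {
        struct port p = { LINKV, 1 };
        qsfp_event(&p, 0);                  /* pull: disables */
        qsfp_event(&p, 1);                  /* reinsert: re-arms */
        qsfp_event(&p, 1);                  /* repeat event: no-op */
        return 0;
    }
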
5630 | /* | 5671 | /* |
@@ -5728,7 +5769,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
5728 | /* now change the IBC and serdes, overriding generic */ | 5769 | /* now change the IBC and serdes, overriding generic */ |
5729 | init_txdds_table(ppd, 1); | 5770 | init_txdds_table(ppd, 1); |
5730 | /* Re-enable the physical state machine on mezz boards | 5771 | /* Re-enable the physical state machine on mezz boards |
5731 | * now that the correct settings have been set. */ | 5772 | * now that the correct settings have been set. |
5773 | * QSFP boards are handled by the QSFP event handler */ |
5732 | if (IS_QMH(dd) || IS_QME(dd)) | 5774 | if (IS_QMH(dd) || IS_QME(dd)) |
5733 | qib_set_ib_7322_lstate(ppd, 0, | 5775 | qib_set_ib_7322_lstate(ppd, 0, |
5734 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | 5776 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); |
@@ -6206,6 +6248,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6206 | 6248 | ||
6207 | /* we always allocate at least 2048 bytes for eager buffers */ | 6249 | /* we always allocate at least 2048 bytes for eager buffers */ |
6208 | dd->rcvegrbufsize = max(mtu, 2048); | 6250 | dd->rcvegrbufsize = max(mtu, 2048); |
6251 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); | ||
6252 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | ||
6209 | 6253 | ||
6210 | qib_7322_tidtemplate(dd); | 6254 | qib_7322_tidtemplate(dd); |
6211 | 6255 | ||
@@ -7148,7 +7192,8 @@ static void find_best_ent(struct qib_pportdata *ppd, | |||
7148 | } | 7192 | } |
7149 | } | 7193 | } |
7150 | 7194 | ||
7151 | /* Lookup serdes setting by cable type and attenuation */ | 7195 | /* Active cables don't have attenuation so we only set SERDES |
7196 | * settings to account for the attenuation of the board traces. */ | ||
7152 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { | 7197 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { |
7153 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; | 7198 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; |
7154 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; | 7199 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; |
@@ -7465,12 +7510,6 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7465 | u32 le_val, rxcaldone; | 7510 | u32 le_val, rxcaldone; |
7466 | int chan, chan_done = (1 << SERDES_CHANS) - 1; | 7511 | int chan, chan_done = (1 << SERDES_CHANS) - 1; |
7467 | 7512 | ||
7468 | /* | ||
7469 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
7470 | * for adapters with QSFP | ||
7471 | */ | ||
7472 | init_txdds_table(ppd, 0); | ||
7473 | |||
7474 | /* Clear cmode-override, may be set from older driver */ | 7513 | /* Clear cmode-override, may be set from older driver */ |
7475 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | 7514 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); |
7476 | 7515 | ||
@@ -7656,6 +7695,12 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7656 | /* VGA output common mode */ | 7695 | /* VGA output common mode */ |
7657 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); | 7696 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); |
7658 | 7697 | ||
7698 | /* | ||
7699 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
7700 | * for adapters with QSFP | ||
7701 | */ | ||
7702 | init_txdds_table(ppd, 0); | ||
7703 | |||
7659 | return 0; | 7704 | return 0; |
7660 | } | 7705 | } |
7661 | 7706 | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index bf9c4a41a4e9..58b0f8ad4a29 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -184,6 +184,9 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) | |||
184 | rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + | 184 | rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + |
185 | rcd->rcvegrbufs_perchunk - 1) / | 185 | rcd->rcvegrbufs_perchunk - 1) / |
186 | rcd->rcvegrbufs_perchunk; | 186 | rcd->rcvegrbufs_perchunk; |
187 | BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk)); | ||
188 | rcd->rcvegrbufs_perchunk_shift = | ||
189 | ilog2(rcd->rcvegrbufs_perchunk); | ||
187 | } | 190 | } |
188 | return rcd; | 191 | return rcd; |
189 | } | 192 | } |
@@ -399,6 +402,7 @@ static void enable_chip(struct qib_devdata *dd) | |||
399 | if (rcd) | 402 | if (rcd) |
400 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); | 403 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); |
401 | } | 404 | } |
405 | dd->freectxts = dd->cfgctxts - dd->first_user_ctxt; | ||
402 | } | 406 | } |
403 | 407 | ||
404 | static void verify_interrupt(unsigned long opaque) | 408 | static void verify_interrupt(unsigned long opaque) |
@@ -582,10 +586,6 @@ int qib_init(struct qib_devdata *dd, int reinit) | |||
582 | continue; | 586 | continue; |
583 | } | 587 | } |
584 | 588 | ||
585 | /* let link come up, and enable IBC */ | ||
586 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
587 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
588 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
589 | portok++; | 589 | portok++; |
590 | } | 590 | } |
591 | 591 | ||
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index e16751f8639e..7e7e16fbee99 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <linux/err.h> | 35 | #include <linux/err.h> |
36 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
37 | #include <linux/jhash.h> | ||
37 | 38 | ||
38 | #include "qib.h" | 39 | #include "qib.h" |
39 | 40 | ||
@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) | |||
204 | clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); | 205 | clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); |
205 | } | 206 | } |
206 | 207 | ||
208 | static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn) | ||
209 | { | ||
210 | return jhash_1word(qpn, dev->qp_rnd) & | ||
211 | (dev->qp_table_size - 1); | ||
212 | } | ||
213 | |||
214 | |||
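
qpn_hash() mixes the QPN with a per-device random seed and masks with qp_table_size - 1; the mask only spreads entries evenly when the table size is a power of two, which is why the module default changes from 251 to 256 in qib_verbs.c below. A userspace sketch with a stand-in mixer (not the kernel's jhash_1word()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mix(uint32_t v, uint32_t seed) /* stand-in for jhash_1word() */
    {
        v ^= seed;
        v *= 0x9e3779b1u;                   /* multiplicative hash constant */
        return v ^ (v >> 16);
    }

    int main(void)
    {
        const uint32_t table_size = 256;    /* must be a power of two */
        const uint32_t seed = 0x12345678;   /* qp_rnd analogue, random at init */
        uint32_t qpn;

        for (qpn = 2; qpn < 10; qpn++)
            printf("qpn %u -> bucket %u\n", qpn,
                   mix(qpn, seed) & (table_size - 1));
        return 0;
    }
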
207 | /* | 215 | /* |
208 | * Put the QP into the hash table. | 216 | * Put the QP into the hash table. |
209 | * The hash table holds a reference to the QP. | 217 | * The hash table holds a reference to the QP. |
@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) | |||
211 | static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | 219 | static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) |
212 | { | 220 | { |
213 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | 221 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); |
214 | unsigned n = qp->ibqp.qp_num % dev->qp_table_size; | ||
215 | unsigned long flags; | 222 | unsigned long flags; |
223 | unsigned n = qpn_hash(dev, qp->ibqp.qp_num); | ||
216 | 224 | ||
217 | spin_lock_irqsave(&dev->qpt_lock, flags); | 225 | spin_lock_irqsave(&dev->qpt_lock, flags); |
226 | atomic_inc(&qp->refcount); | ||
218 | 227 | ||
219 | if (qp->ibqp.qp_num == 0) | 228 | if (qp->ibqp.qp_num == 0) |
220 | ibp->qp0 = qp; | 229 | rcu_assign_pointer(ibp->qp0, qp); |
221 | else if (qp->ibqp.qp_num == 1) | 230 | else if (qp->ibqp.qp_num == 1) |
222 | ibp->qp1 = qp; | 231 | rcu_assign_pointer(ibp->qp1, qp); |
223 | else { | 232 | else { |
224 | qp->next = dev->qp_table[n]; | 233 | qp->next = dev->qp_table[n]; |
225 | dev->qp_table[n] = qp; | 234 | rcu_assign_pointer(dev->qp_table[n], qp); |
226 | } | 235 | } |
227 | atomic_inc(&qp->refcount); | ||
228 | 236 | ||
229 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 237 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
238 | synchronize_rcu(); | ||
230 | } | 239 | } |
231 | 240 | ||
232 | /* | 241 | /* |
@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
236 | static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | 245 | static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) |
237 | { | 246 | { |
238 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | 247 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); |
239 | struct qib_qp *q, **qpp; | 248 | unsigned n = qpn_hash(dev, qp->ibqp.qp_num); |
240 | unsigned long flags; | 249 | unsigned long flags; |
241 | 250 | ||
242 | qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size]; | ||
243 | |||
244 | spin_lock_irqsave(&dev->qpt_lock, flags); | 251 | spin_lock_irqsave(&dev->qpt_lock, flags); |
245 | 252 | ||
246 | if (ibp->qp0 == qp) { | 253 | if (ibp->qp0 == qp) { |
247 | ibp->qp0 = NULL; | ||
248 | atomic_dec(&qp->refcount); | 254 | atomic_dec(&qp->refcount); |
255 | rcu_assign_pointer(ibp->qp0, NULL); | ||
249 | } else if (ibp->qp1 == qp) { | 256 | } else if (ibp->qp1 == qp) { |
250 | ibp->qp1 = NULL; | ||
251 | atomic_dec(&qp->refcount); | 257 | atomic_dec(&qp->refcount); |
252 | } else | 258 | rcu_assign_pointer(ibp->qp1, NULL); |
259 | } else { | ||
260 | struct qib_qp *q, **qpp; | ||
261 | |||
262 | qpp = &dev->qp_table[n]; | ||
253 | for (; (q = *qpp) != NULL; qpp = &q->next) | 263 | for (; (q = *qpp) != NULL; qpp = &q->next) |
254 | if (q == qp) { | 264 | if (q == qp) { |
255 | *qpp = qp->next; | ||
256 | qp->next = NULL; | ||
257 | atomic_dec(&qp->refcount); | 265 | atomic_dec(&qp->refcount); |
266 | rcu_assign_pointer(*qpp, qp->next); | ||
267 | qp->next = NULL; | ||
258 | break; | 268 | break; |
259 | } | 269 | } |
270 | } | ||
260 | 271 | ||
261 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 272 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
273 | synchronize_rcu(); | ||
262 | } | 274 | } |
263 | 275 | ||
264 | /** | 276 | /** |
@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) | |||
280 | 292 | ||
281 | if (!qib_mcast_tree_empty(ibp)) | 293 | if (!qib_mcast_tree_empty(ibp)) |
282 | qp_inuse++; | 294 | qp_inuse++; |
283 | if (ibp->qp0) | 295 | rcu_read_lock(); |
296 | if (rcu_dereference(ibp->qp0)) | ||
284 | qp_inuse++; | 297 | qp_inuse++; |
285 | if (ibp->qp1) | 298 | if (rcu_dereference(ibp->qp1)) |
286 | qp_inuse++; | 299 | qp_inuse++; |
300 | rcu_read_unlock(); | ||
287 | } | 301 | } |
288 | 302 | ||
289 | spin_lock_irqsave(&dev->qpt_lock, flags); | 303 | spin_lock_irqsave(&dev->qpt_lock, flags); |
290 | for (n = 0; n < dev->qp_table_size; n++) { | 304 | for (n = 0; n < dev->qp_table_size; n++) { |
291 | qp = dev->qp_table[n]; | 305 | qp = dev->qp_table[n]; |
292 | dev->qp_table[n] = NULL; | 306 | rcu_assign_pointer(dev->qp_table[n], NULL); |
293 | 307 | ||
294 | for (; qp; qp = qp->next) | 308 | for (; qp; qp = qp->next) |
295 | qp_inuse++; | 309 | qp_inuse++; |
296 | } | 310 | } |
297 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 311 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
312 | synchronize_rcu(); | ||
298 | 313 | ||
299 | return qp_inuse; | 314 | return qp_inuse; |
300 | } | 315 | } |
@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) | |||
309 | */ | 324 | */ |
310 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) | 325 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) |
311 | { | 326 | { |
312 | struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; | 327 | struct qib_qp *qp = NULL; |
313 | unsigned long flags; | ||
314 | struct qib_qp *qp; | ||
315 | 328 | ||
316 | spin_lock_irqsave(&dev->qpt_lock, flags); | 329 | if (unlikely(qpn <= 1)) { |
330 | rcu_read_lock(); | ||
331 | if (qpn == 0) | ||
332 | qp = rcu_dereference(ibp->qp0); | ||
333 | else | ||
334 | qp = rcu_dereference(ibp->qp1); | ||
335 | } else { | ||
336 | struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; | ||
337 | unsigned n = qpn_hash(dev, qpn); | ||
317 | 338 | ||
318 | if (qpn == 0) | 339 | rcu_read_lock(); |
319 | qp = ibp->qp0; | 340 | for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next) |
320 | else if (qpn == 1) | ||
321 | qp = ibp->qp1; | ||
322 | else | ||
323 | for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp; | ||
324 | qp = qp->next) | ||
325 | if (qp->ibqp.qp_num == qpn) | 341 | if (qp->ibqp.qp_num == qpn) |
326 | break; | 342 | break; |
343 | } | ||
327 | if (qp) | 344 | if (qp) |
328 | atomic_inc(&qp->refcount); | 345 | if (unlikely(!atomic_inc_not_zero(&qp->refcount))) |
346 | qp = NULL; | ||
329 | 347 | ||
330 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 348 | rcu_read_unlock(); |
331 | return qp; | 349 | return qp; |
332 | } | 350 | } |
333 | 351 | ||
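
With the table RCU-protected, qib_lookup_qpn() runs under rcu_read_lock() instead of qpt_lock; writers still serialize on the spinlock and call synchronize_rcu() before a removed QP can go away. The reader takes its reference with atomic_inc_not_zero(), so a QP whose refcount has already hit zero is treated as not found rather than resurrected. A userspace sketch of that idiom in C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { atomic_int refcount; };

    /* take a reference only if the object is not already dying */
    static int get_ref_not_zero(struct obj *o)
    {
        int old = atomic_load(&o->refcount);
        while (old != 0)
            if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                return 1;               /* reference taken */
        return 0;                       /* refcount was zero: lookup fails */
    }

    int main(void)
    {
        struct obj live = { 1 }, dying = { 0 };
        printf("live:  %d\n", get_ref_not_zero(&live));   /* 1 */
        printf("dying: %d\n", get_ref_not_zero(&dying));  /* 0 */
        return 0;
    }
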
@@ -765,8 +783,10 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
765 | } | 783 | } |
766 | } | 784 | } |
767 | 785 | ||
768 | if (attr_mask & IB_QP_PATH_MTU) | 786 | if (attr_mask & IB_QP_PATH_MTU) { |
769 | qp->path_mtu = pmtu; | 787 | qp->path_mtu = pmtu; |
788 | qp->pmtu = ib_mtu_enum_to_int(pmtu); | ||
789 | } | ||
770 | 790 | ||
771 | if (attr_mask & IB_QP_RETRY_CNT) { | 791 | if (attr_mask & IB_QP_RETRY_CNT) { |
772 | qp->s_retry_cnt = attr->retry_cnt; | 792 | qp->s_retry_cnt = attr->retry_cnt; |
@@ -781,8 +801,12 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
781 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | 801 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
782 | qp->r_min_rnr_timer = attr->min_rnr_timer; | 802 | qp->r_min_rnr_timer = attr->min_rnr_timer; |
783 | 803 | ||
784 | if (attr_mask & IB_QP_TIMEOUT) | 804 | if (attr_mask & IB_QP_TIMEOUT) { |
785 | qp->timeout = attr->timeout; | 805 | qp->timeout = attr->timeout; |
806 | qp->timeout_jiffies = | ||
807 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
808 | 1000UL); | ||
809 | } | ||
786 | 810 | ||
787 | if (attr_mask & IB_QP_QKEY) | 811 | if (attr_mask & IB_QP_QKEY) |
788 | qp->qkey = attr->qkey; | 812 | qp->qkey = attr->qkey; |
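
timeout_jiffies caches the IB local-ACK-timeout conversion that qib_rc.c previously redid every time the retransmit timer was armed: the 5-bit timeout t encodes 4.096 us * 2^t, computed here as 4096 ns * 2^t / 1000. A worked sketch of the encoding (HZ = 250 is an assumed tick rate, not a driver requirement):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long hz = 250;       /* assumed CONFIG_HZ */
        unsigned t;

        for (t = 10; t <= 16; t++) {
            unsigned long usecs = (4096UL * (1UL << t)) / 1000UL;
            unsigned long jifs = (usecs * hz + 999999UL) / 1000000UL;
            printf("timeout=%2u -> %8lu us -> %4lu jiffies\n",
                   t, usecs, jifs);
        }
        return 0;
    }
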
@@ -1013,6 +1037,10 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |||
1013 | ret = ERR_PTR(-ENOMEM); | 1037 | ret = ERR_PTR(-ENOMEM); |
1014 | goto bail_swq; | 1038 | goto bail_swq; |
1015 | } | 1039 | } |
1040 | RCU_INIT_POINTER(qp->next, NULL); | ||
1041 | qp->timeout_jiffies = | ||
1042 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
1043 | 1000UL); | ||
1016 | if (init_attr->srq) | 1044 | if (init_attr->srq) |
1017 | sz = 0; | 1045 | sz = 0; |
1018 | else { | 1046 | else { |
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index 3374a52232c1..e06c4ed383f1 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c | |||
@@ -273,18 +273,12 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) | |||
273 | int ret; | 273 | int ret; |
274 | int idx; | 274 | int idx; |
275 | u16 cks; | 275 | u16 cks; |
276 | u32 mask; | ||
277 | u8 peek[4]; | 276 | u8 peek[4]; |
278 | 277 | ||
279 | /* ensure sane contents on invalid reads, for cable swaps */ | 278 | /* ensure sane contents on invalid reads, for cable swaps */ |
280 | memset(cp, 0, sizeof(*cp)); | 279 | memset(cp, 0, sizeof(*cp)); |
281 | 280 | ||
282 | mask = QSFP_GPIO_MOD_PRS_N; | 281 | if (!qib_qsfp_mod_present(ppd)) { |
283 | if (ppd->hw_pidx) | ||
284 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
285 | |||
286 | ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0); | ||
287 | if (ret & mask) { | ||
288 | ret = -ENODEV; | 282 | ret = -ENODEV; |
289 | goto bail; | 283 | goto bail; |
290 | } | 284 | } |
@@ -444,6 +438,19 @@ const char * const qib_qsfp_devtech[16] = { | |||
444 | 438 | ||
445 | static const char *pwr_codes = "1.5W2.0W2.5W3.5W"; | 439 | static const char *pwr_codes = "1.5W2.0W2.5W3.5W"; |
446 | 440 | ||
441 | int qib_qsfp_mod_present(struct qib_pportdata *ppd) | ||
442 | { | ||
443 | u32 mask; | ||
444 | int ret; | ||
445 | |||
446 | mask = QSFP_GPIO_MOD_PRS_N << | ||
447 | (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT); | ||
448 | ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0); | ||
449 | |||
450 | return !((ret & mask) >> | ||
451 | ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3)); | ||
452 | } | ||
453 | |||
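
The new helper centralizes the active-low ModPrs test that qib_refresh_qsfp_cache() and qib_qsfp_init() used to open-code: port 2's pin sits QSFP_GPIO_PORT2_SHIFT bits above port 1's, and a low pin means a module is seated. A sketch with assumed bit positions (the real masks live in qib_qsfp.h):

    #include <assert.h>

    #define MOD_PRS_N    (1u << 3)   /* assumed: active-low present pin */
    #define PORT2_SHIFT  5           /* assumed: port 2 pins 5 bits up */

    static int mod_present(unsigned gpio, unsigned port)  /* port: 0 or 1 */
    {
        unsigned mask = MOD_PRS_N << (port * PORT2_SHIFT);

        /* pin low (0) means a module is seated */
        return !((gpio & mask) >> (port * PORT2_SHIFT + 3));
    }

    int main(void)
    {
        assert(mod_present(0, 0) == 1);                    /* low: present */
        assert(mod_present(MOD_PRS_N, 0) == 0);            /* high: absent */
        assert(mod_present(MOD_PRS_N << PORT2_SHIFT, 1) == 0);
        return 0;
    }
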
447 | /* | 454 | /* |
448 | * Initialize structures that control access to QSFP. Called once per port | 455 | * Initialize structures that control access to QSFP. Called once per port |
449 | * on cards that support QSFP. | 456 | * on cards that support QSFP. |
@@ -452,7 +459,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, | |||
452 | void (*fevent)(struct work_struct *)) | 459 | void (*fevent)(struct work_struct *)) |
453 | { | 460 | { |
454 | u32 mask, highs; | 461 | u32 mask, highs; |
455 | int pins; | ||
456 | 462 | ||
457 | struct qib_devdata *dd = qd->ppd->dd; | 463 | struct qib_devdata *dd = qd->ppd->dd; |
458 | 464 | ||
@@ -480,8 +486,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, | |||
480 | mask <<= QSFP_GPIO_PORT2_SHIFT; | 486 | mask <<= QSFP_GPIO_PORT2_SHIFT; |
481 | 487 | ||
482 | /* Do not try to wait here. Better to let event handle it */ | 488 | /* Do not try to wait here. Better to let event handle it */ |
483 | pins = dd->f_gpio_mod(dd, 0, 0, 0); | 489 | if (!qib_qsfp_mod_present(qd->ppd)) |
484 | if (pins & mask) | ||
485 | goto bail; | 490 | goto bail; |
486 | /* We see a module, but it may be unwise to look yet. Just schedule */ | 491 | /* We see a module, but it may be unwise to look yet. Just schedule */ |
487 | qd->t_insert = get_jiffies_64(); | 492 | qd->t_insert = get_jiffies_64(); |
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h index c109bbdc90ac..46002a9417c0 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.h +++ b/drivers/infiniband/hw/qib/qib_qsfp.h | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #define QSFP_DEV 0xA0 | 35 | #define QSFP_DEV 0xA0 |
36 | #define QSFP_PWR_LAG_MSEC 2000 | 36 | #define QSFP_PWR_LAG_MSEC 2000 |
37 | #define QSFP_MODPRS_LAG_MSEC 20 | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Below are masks for various QSFP signals, for Port 1. | 40 | * Below are masks for various QSFP signals, for Port 1. |
@@ -177,10 +178,12 @@ struct qib_qsfp_data { | |||
177 | struct work_struct work; | 178 | struct work_struct work; |
178 | struct qib_qsfp_cache cache; | 179 | struct qib_qsfp_cache cache; |
179 | u64 t_insert; | 180 | u64 t_insert; |
181 | u8 modpresent; | ||
180 | }; | 182 | }; |
181 | 183 | ||
182 | extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, | 184 | extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, |
183 | struct qib_qsfp_cache *cp); | 185 | struct qib_qsfp_cache *cp); |
186 | extern int qib_qsfp_mod_present(struct qib_pportdata *ppd); | ||
184 | extern void qib_qsfp_init(struct qib_qsfp_data *qd, | 187 | extern void qib_qsfp_init(struct qib_qsfp_data *qd, |
185 | void (*fevent)(struct work_struct *)); | 188 | void (*fevent)(struct work_struct *)); |
186 | extern void qib_qsfp_deinit(struct qib_qsfp_data *qd); | 189 | extern void qib_qsfp_deinit(struct qib_qsfp_data *qd); |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index eca0c41f1226..894afac26f3b 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -59,8 +59,7 @@ static void start_timer(struct qib_qp *qp) | |||
59 | qp->s_flags |= QIB_S_TIMER; | 59 | qp->s_flags |= QIB_S_TIMER; |
60 | qp->s_timer.function = rc_timeout; | 60 | qp->s_timer.function = rc_timeout; |
61 | /* 4.096 usec. * (1 << qp->timeout) */ | 61 | /* 4.096 usec. * (1 << qp->timeout) */ |
62 | qp->s_timer.expires = jiffies + | 62 | qp->s_timer.expires = jiffies + qp->timeout_jiffies; |
63 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL); | ||
64 | add_timer(&qp->s_timer); | 63 | add_timer(&qp->s_timer); |
65 | } | 64 | } |
66 | 65 | ||
@@ -239,7 +238,7 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
239 | u32 len; | 238 | u32 len; |
240 | u32 bth0; | 239 | u32 bth0; |
241 | u32 bth2; | 240 | u32 bth2; |
242 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 241 | u32 pmtu = qp->pmtu; |
243 | char newreq; | 242 | char newreq; |
244 | unsigned long flags; | 243 | unsigned long flags; |
245 | int ret = 0; | 244 | int ret = 0; |
@@ -272,13 +271,9 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
272 | goto bail; | 271 | goto bail; |
273 | } | 272 | } |
274 | wqe = get_swqe_ptr(qp, qp->s_last); | 273 | wqe = get_swqe_ptr(qp, qp->s_last); |
275 | while (qp->s_last != qp->s_acked) { | 274 | qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? |
276 | qib_send_complete(qp, wqe, IB_WC_SUCCESS); | 275 | IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); |
277 | if (++qp->s_last >= qp->s_size) | 276 | /* will get called again */ |
278 | qp->s_last = 0; | ||
279 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
280 | } | ||
281 | qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); | ||
282 | goto done; | 277 | goto done; |
283 | } | 278 | } |
284 | 279 | ||
@@ -1519,9 +1514,7 @@ read_middle: | |||
1519 | * 4.096 usec. * (1 << qp->timeout) | 1514 | * 4.096 usec. * (1 << qp->timeout) |
1520 | */ | 1515 | */ |
1521 | qp->s_flags |= QIB_S_TIMER; | 1516 | qp->s_flags |= QIB_S_TIMER; |
1522 | mod_timer(&qp->s_timer, jiffies + | 1517 | mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); |
1523 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
1524 | 1000UL)); | ||
1525 | if (qp->s_flags & QIB_S_WAIT_ACK) { | 1518 | if (qp->s_flags & QIB_S_WAIT_ACK) { |
1526 | qp->s_flags &= ~QIB_S_WAIT_ACK; | 1519 | qp->s_flags &= ~QIB_S_WAIT_ACK; |
1527 | qib_schedule_send(qp); | 1520 | qib_schedule_send(qp); |
@@ -1732,7 +1725,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, | |||
1732 | * same request. | 1725 | * same request. |
1733 | */ | 1726 | */ |
1734 | offset = ((psn - e->psn) & QIB_PSN_MASK) * | 1727 | offset = ((psn - e->psn) & QIB_PSN_MASK) * |
1735 | ib_mtu_enum_to_int(qp->path_mtu); | 1728 | qp->pmtu; |
1736 | len = be32_to_cpu(reth->length); | 1729 | len = be32_to_cpu(reth->length); |
1737 | if (unlikely(offset + len != e->rdma_sge.sge_length)) | 1730 | if (unlikely(offset + len != e->rdma_sge.sge_length)) |
1738 | goto unlock_done; | 1731 | goto unlock_done; |
@@ -1876,7 +1869,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1876 | u32 psn; | 1869 | u32 psn; |
1877 | u32 pad; | 1870 | u32 pad; |
1878 | struct ib_wc wc; | 1871 | struct ib_wc wc; |
1879 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 1872 | u32 pmtu = qp->pmtu; |
1880 | int diff; | 1873 | int diff; |
1881 | struct ib_reth *reth; | 1874 | struct ib_reth *reth; |
1882 | unsigned long flags; | 1875 | unsigned long flags; |
@@ -1892,10 +1885,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1892 | } | 1885 | } |
1893 | 1886 | ||
1894 | opcode = be32_to_cpu(ohdr->bth[0]); | 1887 | opcode = be32_to_cpu(ohdr->bth[0]); |
1895 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1896 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) | 1888 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) |
1897 | goto sunlock; | 1889 | return; |
1898 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1899 | 1890 | ||
1900 | psn = be32_to_cpu(ohdr->bth[2]); | 1891 | psn = be32_to_cpu(ohdr->bth[2]); |
1901 | opcode >>= 24; | 1892 | opcode >>= 24; |
@@ -1955,8 +1946,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1955 | break; | 1946 | break; |
1956 | } | 1947 | } |
1957 | 1948 | ||
1958 | memset(&wc, 0, sizeof wc); | ||
1959 | |||
1960 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { | 1949 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { |
1961 | qp->r_flags |= QIB_R_COMM_EST; | 1950 | qp->r_flags |= QIB_R_COMM_EST; |
1962 | if (qp->ibqp.event_handler) { | 1951 | if (qp->ibqp.event_handler) { |
@@ -2009,16 +1998,19 @@ send_middle: | |||
2009 | goto rnr_nak; | 1998 | goto rnr_nak; |
2010 | qp->r_rcv_len = 0; | 1999 | qp->r_rcv_len = 0; |
2011 | if (opcode == OP(SEND_ONLY)) | 2000 | if (opcode == OP(SEND_ONLY)) |
2012 | goto send_last; | 2001 | goto no_immediate_data; |
2013 | /* FALLTHROUGH */ | 2002 | /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */ |
2014 | case OP(SEND_LAST_WITH_IMMEDIATE): | 2003 | case OP(SEND_LAST_WITH_IMMEDIATE): |
2015 | send_last_imm: | 2004 | send_last_imm: |
2016 | wc.ex.imm_data = ohdr->u.imm_data; | 2005 | wc.ex.imm_data = ohdr->u.imm_data; |
2017 | hdrsize += 4; | 2006 | hdrsize += 4; |
2018 | wc.wc_flags = IB_WC_WITH_IMM; | 2007 | wc.wc_flags = IB_WC_WITH_IMM; |
2019 | /* FALLTHROUGH */ | 2008 | goto send_last; |
2020 | case OP(SEND_LAST): | 2009 | case OP(SEND_LAST): |
2021 | case OP(RDMA_WRITE_LAST): | 2010 | case OP(RDMA_WRITE_LAST): |
2011 | no_immediate_data: | ||
2012 | wc.wc_flags = 0; | ||
2013 | wc.ex.imm_data = 0; | ||
2022 | send_last: | 2014 | send_last: |
2023 | /* Get the number of bytes the message was padded by. */ | 2015 | /* Get the number of bytes the message was padded by. */ |
2024 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 2016 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
@@ -2051,6 +2043,12 @@ send_last: | |||
2051 | wc.src_qp = qp->remote_qpn; | 2043 | wc.src_qp = qp->remote_qpn; |
2052 | wc.slid = qp->remote_ah_attr.dlid; | 2044 | wc.slid = qp->remote_ah_attr.dlid; |
2053 | wc.sl = qp->remote_ah_attr.sl; | 2045 | wc.sl = qp->remote_ah_attr.sl; |
2046 | /* zero fields that are N/A */ | ||
2047 | wc.vendor_err = 0; | ||
2048 | wc.pkey_index = 0; | ||
2049 | wc.dlid_path_bits = 0; | ||
2050 | wc.port_num = 0; | ||
2051 | wc.csum_ok = 0; | ||
2054 | /* Signal completion event if the solicited bit is set. */ | 2052 | /* Signal completion event if the solicited bit is set. */ |
2055 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 2053 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
2056 | (ohdr->bth[0] & | 2054 | (ohdr->bth[0] & |
@@ -2089,7 +2087,7 @@ send_last: | |||
2089 | if (opcode == OP(RDMA_WRITE_FIRST)) | 2087 | if (opcode == OP(RDMA_WRITE_FIRST)) |
2090 | goto send_middle; | 2088 | goto send_middle; |
2091 | else if (opcode == OP(RDMA_WRITE_ONLY)) | 2089 | else if (opcode == OP(RDMA_WRITE_ONLY)) |
2092 | goto send_last; | 2090 | goto no_immediate_data; |
2093 | ret = qib_get_rwqe(qp, 1); | 2091 | ret = qib_get_rwqe(qp, 1); |
2094 | if (ret < 0) | 2092 | if (ret < 0) |
2095 | goto nack_op_err; | 2093 | goto nack_op_err; |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index eb78d9367f06..b4b37e47321a 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -260,12 +260,15 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) | |||
260 | 260 | ||
261 | /* | 261 | /* |
262 | * | 262 | * |
263 | * This should be called with the QP s_lock held. | 263 | * This should be called with the QP r_lock held. |
264 | * | ||
265 | * The s_lock will be acquired around the qib_migrate_qp() call. | ||
264 | */ | 266 | */ |
265 | int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, | 267 | int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, |
266 | int has_grh, struct qib_qp *qp, u32 bth0) | 268 | int has_grh, struct qib_qp *qp, u32 bth0) |
267 | { | 269 | { |
268 | __be64 guid; | 270 | __be64 guid; |
271 | unsigned long flags; | ||
269 | 272 | ||
270 | if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { | 273 | if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { |
271 | if (!has_grh) { | 274 | if (!has_grh) { |
@@ -295,7 +298,9 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
295 | if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || | 298 | if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || |
296 | ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) | 299 | ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) |
297 | goto err; | 300 | goto err; |
301 | spin_lock_irqsave(&qp->s_lock, flags); | ||
298 | qib_migrate_qp(qp); | 302 | qib_migrate_qp(qp); |
303 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
299 | } else { | 304 | } else { |
300 | if (!has_grh) { | 305 | if (!has_grh) { |
301 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | 306 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) |
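
The comment change records the new nesting: qib_ruc_check_hdr() is now entered with r_lock held (the receive paths above no longer wrap it in s_lock), and s_lock is acquired only for the brief qib_migrate_qp() window, fixing the lock order as r_lock then s_lock. A pthreads sketch of the shape:

    #include <pthread.h>

    static pthread_spinlock_t r_lock, s_lock;

    static void migrate_qp(void) { /* flip primary/alternate path */ }

    static void ruc_check_hdr(int migration_req)
    {
        /* caller already holds r_lock */
        if (migration_req) {
            pthread_spin_lock(&s_lock);  /* r_lock -> s_lock, never reversed */
            migrate_qp();
            pthread_spin_unlock(&s_lock);
        }
    }

    int main(void)
    {
        pthread_spin_init(&r_lock, 0);
        pthread_spin_init(&s_lock, 0);
        pthread_spin_lock(&r_lock);
        ruc_check_hdr(1);
        pthread_spin_unlock(&r_lock);
        return 0;
    }
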
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c index c3ec8efc2ed8..d6235931a1ba 100644 --- a/drivers/infiniband/hw/qib/qib_srq.c +++ b/drivers/infiniband/hw/qib/qib_srq.c | |||
@@ -107,6 +107,11 @@ struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | |||
107 | u32 sz; | 107 | u32 sz; |
108 | struct ib_srq *ret; | 108 | struct ib_srq *ret; |
109 | 109 | ||
110 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { | ||
111 | ret = ERR_PTR(-ENOSYS); | ||
112 | goto done; | ||
113 | } | ||
114 | |||
110 | if (srq_init_attr->attr.max_sge == 0 || | 115 | if (srq_init_attr->attr.max_sge == 0 || |
111 | srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || | 116 | srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || |
112 | srq_init_attr->attr.max_wr == 0 || | 117 | srq_init_attr->attr.max_wr == 0 || |
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 14d129de4320..78fbd56879d4 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
@@ -515,8 +515,7 @@ static ssize_t show_nfreectxts(struct device *device, | |||
515 | struct qib_devdata *dd = dd_from_dev(dev); | 515 | struct qib_devdata *dd = dd_from_dev(dev); |
516 | 516 | ||
517 | /* Return the number of free user ports (contexts) available. */ | 517 | /* Return the number of free user ports (contexts) available. */ |
518 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts - | 518 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts); |
519 | dd->first_user_ctxt - (u32)qib_stats.sps_ctxts); | ||
520 | } | 519 | } |
521 | 520 | ||
522 | static ssize_t show_serial(struct device *device, | 521 | static ssize_t show_serial(struct device *device, |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 32ccf3c824ca..847e7afdfd94 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -51,7 +51,7 @@ int qib_make_uc_req(struct qib_qp *qp) | |||
51 | u32 hwords; | 51 | u32 hwords; |
52 | u32 bth0; | 52 | u32 bth0; |
53 | u32 len; | 53 | u32 len; |
54 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 54 | u32 pmtu = qp->pmtu; |
55 | int ret = 0; | 55 | int ret = 0; |
56 | 56 | ||
57 | spin_lock_irqsave(&qp->s_lock, flags); | 57 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -243,13 +243,12 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
243 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) | 243 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) |
244 | { | 244 | { |
245 | struct qib_other_headers *ohdr; | 245 | struct qib_other_headers *ohdr; |
246 | unsigned long flags; | ||
247 | u32 opcode; | 246 | u32 opcode; |
248 | u32 hdrsize; | 247 | u32 hdrsize; |
249 | u32 psn; | 248 | u32 psn; |
250 | u32 pad; | 249 | u32 pad; |
251 | struct ib_wc wc; | 250 | struct ib_wc wc; |
252 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 251 | u32 pmtu = qp->pmtu; |
253 | struct ib_reth *reth; | 252 | struct ib_reth *reth; |
254 | int ret; | 253 | int ret; |
255 | 254 | ||
@@ -263,14 +262,11 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
263 | } | 262 | } |
264 | 263 | ||
265 | opcode = be32_to_cpu(ohdr->bth[0]); | 264 | opcode = be32_to_cpu(ohdr->bth[0]); |
266 | spin_lock_irqsave(&qp->s_lock, flags); | ||
267 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) | 265 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) |
268 | goto sunlock; | 266 | return; |
269 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
270 | 267 | ||
271 | psn = be32_to_cpu(ohdr->bth[2]); | 268 | psn = be32_to_cpu(ohdr->bth[2]); |
272 | opcode >>= 24; | 269 | opcode >>= 24; |
273 | memset(&wc, 0, sizeof wc); | ||
274 | 270 | ||
275 | /* Compare the PSN against the expected PSN. */ | 271 | /* Compare the PSN against the expected PSN. */ |
276 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { | 272 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { |
@@ -370,7 +366,7 @@ send_first: | |||
370 | } | 366 | } |
371 | qp->r_rcv_len = 0; | 367 | qp->r_rcv_len = 0; |
372 | if (opcode == OP(SEND_ONLY)) | 368 | if (opcode == OP(SEND_ONLY)) |
373 | goto send_last; | 369 | goto no_immediate_data; |
374 | else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) | 370 | else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) |
375 | goto send_last_imm; | 371 | goto send_last_imm; |
376 | /* FALLTHROUGH */ | 372 | /* FALLTHROUGH */ |
@@ -389,8 +385,11 @@ send_last_imm: | |||
389 | wc.ex.imm_data = ohdr->u.imm_data; | 385 | wc.ex.imm_data = ohdr->u.imm_data; |
390 | hdrsize += 4; | 386 | hdrsize += 4; |
391 | wc.wc_flags = IB_WC_WITH_IMM; | 387 | wc.wc_flags = IB_WC_WITH_IMM; |
392 | /* FALLTHROUGH */ | 388 | goto send_last; |
393 | case OP(SEND_LAST): | 389 | case OP(SEND_LAST): |
390 | no_immediate_data: | ||
391 | wc.ex.imm_data = 0; | ||
392 | wc.wc_flags = 0; | ||
394 | send_last: | 393 | send_last: |
395 | /* Get the number of bytes the message was padded by. */ | 394 | /* Get the number of bytes the message was padded by. */ |
396 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 395 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
@@ -418,6 +417,12 @@ last_imm: | |||
418 | wc.src_qp = qp->remote_qpn; | 417 | wc.src_qp = qp->remote_qpn; |
419 | wc.slid = qp->remote_ah_attr.dlid; | 418 | wc.slid = qp->remote_ah_attr.dlid; |
420 | wc.sl = qp->remote_ah_attr.sl; | 419 | wc.sl = qp->remote_ah_attr.sl; |
420 | /* zero fields that are N/A */ | ||
421 | wc.vendor_err = 0; | ||
422 | wc.pkey_index = 0; | ||
423 | wc.dlid_path_bits = 0; | ||
424 | wc.port_num = 0; | ||
425 | wc.csum_ok = 0; | ||
421 | /* Signal completion event if the solicited bit is set. */ | 426 | /* Signal completion event if the solicited bit is set. */ |
422 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 427 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
423 | (ohdr->bth[0] & | 428 | (ohdr->bth[0] & |
@@ -546,6 +551,4 @@ op_err: | |||
546 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | 551 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); |
547 | return; | 552 | return; |
548 | 553 | ||
549 | sunlock: | ||
550 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
551 | } | 554 | } |
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 7689e49c13c9..2bc1d2b96298 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c | |||
@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
74 | goto bail_release; | 74 | goto bail_release; |
75 | } | 75 | } |
76 | 76 | ||
77 | current->mm->locked_vm += num_pages; | 77 | current->mm->pinned_vm += num_pages; |
78 | 78 | ||
79 | ret = 0; | 79 | ret = 0; |
80 | goto bail; | 80 | goto bail; |
@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page **p, size_t num_pages) | |||
151 | __qib_release_user_pages(p, num_pages, 1); | 151 | __qib_release_user_pages(p, num_pages, 1); |
152 | 152 | ||
153 | if (current->mm) { | 153 | if (current->mm) { |
154 | current->mm->locked_vm -= num_pages; | 154 | current->mm->pinned_vm -= num_pages; |
155 | up_write(¤t->mm->mmap_sem); | 155 | up_write(¤t->mm->mmap_sem); |
156 | } | 156 | } |
157 | } | 157 | } |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 1d24652b2c39..a894762da462 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -39,11 +39,12 @@ | |||
39 | #include <linux/utsname.h> | 39 | #include <linux/utsname.h> |
40 | #include <linux/rculist.h> | 40 | #include <linux/rculist.h> |
41 | #include <linux/mm.h> | 41 | #include <linux/mm.h> |
42 | #include <linux/random.h> | ||
42 | 43 | ||
43 | #include "qib.h" | 44 | #include "qib.h" |
44 | #include "qib_common.h" | 45 | #include "qib_common.h" |
45 | 46 | ||
46 | static unsigned int ib_qib_qp_table_size = 251; | 47 | static unsigned int ib_qib_qp_table_size = 256; |
47 | module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); | 48 | module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); |
48 | MODULE_PARM_DESC(qp_table_size, "QP table size"); | 49 | MODULE_PARM_DESC(qp_table_size, "QP table size"); |
49 | 50 | ||
@@ -660,17 +661,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | |||
660 | if (atomic_dec_return(&mcast->refcount) <= 1) | 661 | if (atomic_dec_return(&mcast->refcount) <= 1) |
661 | wake_up(&mcast->wait); | 662 | wake_up(&mcast->wait); |
662 | } else { | 663 | } else { |
663 | qp = qib_lookup_qpn(ibp, qp_num); | 664 | if (rcd->lookaside_qp) { |
664 | if (!qp) | 665 | if (rcd->lookaside_qpn != qp_num) { |
665 | goto drop; | 666 | if (atomic_dec_and_test( |
667 | &rcd->lookaside_qp->refcount)) | ||
668 | wake_up( | ||
669 | &rcd->lookaside_qp->wait); | ||
670 | rcd->lookaside_qp = NULL; | ||
671 | } | ||
672 | } | ||
673 | if (!rcd->lookaside_qp) { | ||
674 | qp = qib_lookup_qpn(ibp, qp_num); | ||
675 | if (!qp) | ||
676 | goto drop; | ||
677 | rcd->lookaside_qp = qp; | ||
678 | rcd->lookaside_qpn = qp_num; | ||
679 | } else | ||
680 | qp = rcd->lookaside_qp; | ||
666 | ibp->n_unicast_rcv++; | 681 | ibp->n_unicast_rcv++; |
667 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); | 682 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); |
668 | /* | ||
669 | * Notify qib_destroy_qp() if it is waiting | ||
670 | * for us to finish. | ||
671 | */ | ||
672 | if (atomic_dec_and_test(&qp->refcount)) | ||
673 | wake_up(&qp->wait); | ||
674 | } | 683 | } |
675 | return; | 684 | return; |
676 | 685 | ||
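
The lookaside pair added to qib_ctxtdata earlier turns the per-packet hash lookup into a one-entry cache: the reference returned by qib_lookup_qpn() is parked in the context and reused while consecutive packets target the same QPN, then dropped when the QPN changes or when receive processing finishes (the wake_up in qib_driver.c above). A minimal userspace model, with a plain counter standing in for the refcount:

    #include <stdio.h>
    #include <stddef.h>

    struct qp { unsigned qpn; int refcount; };
    struct ctxt { struct qp *lookaside_qp; unsigned lookaside_qpn; };

    static struct qp table[4] = { {0, 1}, {1, 1}, {2, 1}, {3, 1} };
    static int slow_lookups;

    static struct qp *slow_lookup(unsigned qpn)  /* qib_lookup_qpn() stand-in */
    {
        slow_lookups++;
        table[qpn].refcount++;                   /* lookup returns a reference */
        return &table[qpn];
    }

    static struct qp *rcv_lookup(struct ctxt *rcd, unsigned qpn)
    {
        if (rcd->lookaside_qp && rcd->lookaside_qpn != qpn) {
            rcd->lookaside_qp->refcount--;       /* retire stale cache entry */
            rcd->lookaside_qp = NULL;
        }
        if (!rcd->lookaside_qp) {
            rcd->lookaside_qp = slow_lookup(qpn);  /* cache keeps the ref */
            rcd->lookaside_qpn = qpn;
        }
        return rcd->lookaside_qp;
    }

    int main(void)
    {
        struct ctxt rcd = { NULL, 0 };
        unsigned pkts[] = { 2, 2, 2, 3, 3, 2 };

        for (size_t i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++)
            rcv_lookup(&rcd, pkts[i]);
        printf("6 packets, %d slow lookups\n", slow_lookups);  /* prints 3 */
        return 0;
    }
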
@@ -1975,6 +1984,8 @@ static void init_ibport(struct qib_pportdata *ppd) | |||
1975 | ibp->z_excessive_buffer_overrun_errors = | 1984 | ibp->z_excessive_buffer_overrun_errors = |
1976 | cntrs.excessive_buffer_overrun_errors; | 1985 | cntrs.excessive_buffer_overrun_errors; |
1977 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | 1986 | ibp->z_vl15_dropped = cntrs.vl15_dropped; |
1987 | RCU_INIT_POINTER(ibp->qp0, NULL); | ||
1988 | RCU_INIT_POINTER(ibp->qp1, NULL); | ||
1978 | } | 1989 | } |
1979 | 1990 | ||
1980 | /** | 1991 | /** |
@@ -1991,12 +2002,15 @@ int qib_register_ib_device(struct qib_devdata *dd) | |||
1991 | int ret; | 2002 | int ret; |
1992 | 2003 | ||
1993 | dev->qp_table_size = ib_qib_qp_table_size; | 2004 | dev->qp_table_size = ib_qib_qp_table_size; |
1994 | dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table, | 2005 | get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); |
2006 | dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, | ||
1995 | GFP_KERNEL); | 2007 | GFP_KERNEL); |
1996 | if (!dev->qp_table) { | 2008 | if (!dev->qp_table) { |
1997 | ret = -ENOMEM; | 2009 | ret = -ENOMEM; |
1998 | goto err_qpt; | 2010 | goto err_qpt; |
1999 | } | 2011 | } |
2012 | for (i = 0; i < dev->qp_table_size; i++) | ||
2013 | RCU_INIT_POINTER(dev->qp_table[i], NULL); | ||
2000 | 2014 | ||
2001 | for (i = 0; i < dd->num_pports; i++) | 2015 | for (i = 0; i < dd->num_pports; i++) |
2002 | init_ibport(ppd + i); | 2016 | init_ibport(ppd + i); |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 95e5b47223b3..0c19ef0c4123 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
@@ -485,6 +485,7 @@ struct qib_qp { | |||
485 | u8 alt_timeout; /* Alternate path timeout for this QP */ | 485 | u8 alt_timeout; /* Alternate path timeout for this QP */ |
486 | u8 port_num; | 486 | u8 port_num; |
487 | enum ib_mtu path_mtu; | 487 | enum ib_mtu path_mtu; |
488 | u32 pmtu; /* decoded from path_mtu */ | ||
488 | u32 remote_qpn; | 489 | u32 remote_qpn; |
489 | u32 qkey; /* QKEY for this QP (for UD or RD) */ | 490 | u32 qkey; /* QKEY for this QP (for UD or RD) */ |
490 | u32 s_size; /* send work queue size */ | 491 | u32 s_size; /* send work queue size */ |
@@ -495,6 +496,7 @@ struct qib_qp { | |||
495 | u32 s_last; /* last completed entry */ | 496 | u32 s_last; /* last completed entry */ |
496 | u32 s_ssn; /* SSN of tail entry */ | 497 | u32 s_ssn; /* SSN of tail entry */ |
497 | u32 s_lsn; /* limit sequence number (credit) */ | 498 | u32 s_lsn; /* limit sequence number (credit) */ |
499 | unsigned long timeout_jiffies; /* computed from timeout */ | ||
498 | struct qib_swqe *s_wq; /* send work queue */ | 500 | struct qib_swqe *s_wq; /* send work queue */ |
499 | struct qib_swqe *s_wqe; | 501 | struct qib_swqe *s_wqe; |
500 | struct qib_rq r_rq; /* receive work queue */ | 502 | struct qib_rq r_rq; /* receive work queue */ |
@@ -723,7 +725,8 @@ struct qib_ibdev { | |||
723 | dma_addr_t pio_hdrs_phys; | 725 | dma_addr_t pio_hdrs_phys; |
724 | /* list of QPs waiting for RNR timer */ | 726 | /* list of QPs waiting for RNR timer */ |
725 | spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ | 727 | spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ |
726 | unsigned qp_table_size; /* size of the hash table */ | 728 | u32 qp_table_size; /* size of the hash table */ |
729 | u32 qp_rnd; /* random bytes for hash */ | ||
727 | spinlock_t qpt_lock; | 730 | spinlock_t qpt_lock; |
728 | 731 | ||
729 | u32 n_piowait; | 732 | u32 n_piowait; |
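Both new fields cache values that would otherwise be re-derived per packet: pmtu holds the byte count decoded from the path_mtu enum, and timeout_jiffies pre-computes the retransmit timer from the 5-bit IB local-ACK-timeout exponent (4.096 us * 2^timeout). A sketch of the derivation, with an illustrative helper name, applied wherever the QP attributes are modified:

#include <linux/jiffies.h>
#include <rdma/ib_verbs.h>

static void qp_cache_attrs(struct qib_qp *qp, const struct ib_qp_attr *attr)
{
        qp->path_mtu = attr->path_mtu;
        qp->pmtu = ib_mtu_enum_to_int(attr->path_mtu);  /* e.g. 2048 */

        /* 4.096 us * 2^timeout, converted to jiffies. */
        qp->timeout_jiffies =
                usecs_to_jiffies((4096UL * (1UL << attr->timeout)) / 1000UL);
}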
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index b67b8e9d9ad1..014504d8e43c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -85,7 +85,7 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, | |||
85 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); | 85 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); |
86 | 86 | ||
87 | for (i = 0; i < frags; ++i) | 87 | for (i = 0; i < frags; ++i) |
88 | ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); | 88 | ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); |
89 | } | 89 | } |
90 | 90 | ||
91 | static int ipoib_cm_post_receive_srq(struct net_device *dev, int id) | 91 | static int ipoib_cm_post_receive_srq(struct net_device *dev, int id) |
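The fix restores the DMA-API pairing rule: memory mapped with ib_dma_map_page() must be released with ib_dma_unmap_page(), and the frag pages of these receive skbs are mapped as pages on the allocation side. Mixing the _single and _page variants is exactly what CONFIG_DMA_API_DEBUG reports. A sketch of the paired helpers (names illustrative):

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Map one skb frag page for receive; mirrors the unmap in the hunk. */
static int map_rx_frag(struct ib_device *ca, struct page *page, u64 *mapping)
{
        *mapping = ib_dma_map_page(ca, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(ca, *mapping))
                return -EIO;
        return 0;
}

static void unmap_rx_frag(struct ib_device *ca, u64 mapping)
{
        /* Must be _page, not _single, to match the map call above. */
        ib_dma_unmap_page(ca, mapping, PAGE_SIZE, DMA_FROM_DEVICE);
}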
@@ -184,7 +184,7 @@ partial_error: | |||
184 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); | 184 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); |
185 | 185 | ||
186 | for (; i > 0; --i) | 186 | for (; i > 0; --i) |
187 | ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); | 187 | ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); |
188 | 188 | ||
189 | dev_kfree_skb_any(skb); | 189 | dev_kfree_skb_any(skb); |
190 | return NULL; | 190 | return NULL; |
@@ -1498,6 +1498,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge) | |||
1498 | { | 1498 | { |
1499 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 1499 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
1500 | struct ib_srq_init_attr srq_init_attr = { | 1500 | struct ib_srq_init_attr srq_init_attr = { |
1501 | .srq_type = IB_SRQT_BASIC, | ||
1501 | .attr = { | 1502 | .attr = { |
1502 | .max_wr = ipoib_recvq_size, | 1503 | .max_wr = ipoib_recvq_size, |
1503 | .max_sge = max_sge | 1504 | .max_sge = max_sge |
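srq_type must now be spelled out because ib_srq_init_attr grows a type field in this series' XRC work; IPoIB's SRQ remains an ordinary receive queue, hence IB_SRQT_BASIC. For contrast, a hedged sketch of how an XRC SRQ would be created (xrcd and cq assumed to exist; attr sizes arbitrary):

static struct ib_srq *create_xrc_srq(struct ib_pd *pd, struct ib_xrcd *xrcd,
                                     struct ib_cq *cq)
{
        struct ib_srq_init_attr attr = {
                .srq_type     = IB_SRQT_XRC,
                .ext.xrc.xrcd = xrcd,
                .ext.xrc.cq   = cq,
                .attr.max_wr  = 128,    /* arbitrary for the sketch */
                .attr.max_sge = 1,
        };

        return ib_create_srq(pd, &attr);
}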
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index a3dcf455a2f8..50061854616e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c | |||
@@ -213,16 +213,15 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr) | |||
213 | gid_buf, path.pathrec.dlid ? "yes" : "no"); | 213 | gid_buf, path.pathrec.dlid ? "yes" : "no"); |
214 | 214 | ||
215 | if (path.pathrec.dlid) { | 215 | if (path.pathrec.dlid) { |
216 | rate = ib_rate_to_mult(path.pathrec.rate) * 25; | 216 | rate = ib_rate_to_mbps(path.pathrec.rate); |
217 | 217 | ||
218 | seq_printf(file, | 218 | seq_printf(file, |
219 | " DLID: 0x%04x\n" | 219 | " DLID: 0x%04x\n" |
220 | " SL: %12d\n" | 220 | " SL: %12d\n" |
221 | " rate: %*d%s Gb/sec\n", | 221 | " rate: %8d.%d Gb/sec\n", |
222 | be16_to_cpu(path.pathrec.dlid), | 222 | be16_to_cpu(path.pathrec.dlid), |
223 | path.pathrec.sl, | 223 | path.pathrec.sl, |
224 | 10 - ((rate % 10) ? 2 : 0), | 224 | rate / 1000, rate % 1000); |
225 | rate / 10, rate % 10 ? ".5" : ""); | ||
226 | } | 225 | } |
227 | 226 | ||
228 | seq_putc(file, '\n'); | 227 | seq_putc(file, '\n'); |
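ib_rate_to_mult() counts in units of 2.5 Gb/s and so cannot express the newer rates (14.0625 Gb/s FDR lanes and friends) that this series introduces; ib_rate_to_mbps() returns Mb/s directly. One quirk worth noting: the new "%8d.%d" format does not zero-pad the fractional part, so a rate whose Mb/s value is, say, 14062 would print as "14.62" rather than "14.062"; "%03d" would avoid that. A worked example of the two helpers:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void rate_example(void)
{
        int mult = ib_rate_to_mult(IB_RATE_20_GBPS);    /* 8 x 2.5 Gb/s */
        int mbps = ib_rate_to_mbps(IB_RATE_20_GBPS);    /* 20000 Mb/s   */

        pr_info("mult %d -> %d.%03d Gb/sec\n", mult, mbps / 1000, mbps % 1000);
}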
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 526310768d9a..7e7373a700e6 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -152,7 +152,6 @@ int iser_initialize_task_headers(struct iscsi_task *task, | |||
152 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; | 152 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; |
153 | tx_desc->tx_sg[0].lkey = device->mr->lkey; | 153 | tx_desc->tx_sg[0].lkey = device->mr->lkey; |
154 | 154 | ||
155 | iser_task->headers_initialized = 1; | ||
156 | iser_task->iser_conn = iser_conn; | 155 | iser_task->iser_conn = iser_conn; |
157 | return 0; | 156 | return 0; |
158 | } | 157 | } |
@@ -167,8 +166,7 @@ iscsi_iser_task_init(struct iscsi_task *task) | |||
167 | { | 166 | { |
168 | struct iscsi_iser_task *iser_task = task->dd_data; | 167 | struct iscsi_iser_task *iser_task = task->dd_data; |
169 | 168 | ||
170 | if (!iser_task->headers_initialized) | 169 | if (iser_initialize_task_headers(task, &iser_task->desc)) |
171 | if (iser_initialize_task_headers(task, &iser_task->desc)) | ||
172 | return -ENOMEM; | 170 | return -ENOMEM; |
173 | 171 | ||
174 | /* mgmt task */ | 172 | /* mgmt task */ |
@@ -279,6 +277,13 @@ iscsi_iser_task_xmit(struct iscsi_task *task) | |||
279 | static void iscsi_iser_cleanup_task(struct iscsi_task *task) | 277 | static void iscsi_iser_cleanup_task(struct iscsi_task *task) |
280 | { | 278 | { |
281 | struct iscsi_iser_task *iser_task = task->dd_data; | 279 | struct iscsi_iser_task *iser_task = task->dd_data; |
280 | struct iser_tx_desc *tx_desc = &iser_task->desc; | ||
281 | |||
282 | struct iscsi_iser_conn *iser_conn = task->conn->dd_data; | ||
283 | struct iser_device *device = iser_conn->ib_conn->device; | ||
284 | |||
285 | ib_dma_unmap_single(device->ib_device, | ||
286 | tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); | ||
282 | 287 | ||
283 | /* mgmt tasks do not need special cleanup */ | 288 | /* mgmt tasks do not need special cleanup */ |
284 | if (!task->sc) | 289 | if (!task->sc) |
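With the headers_initialized flag gone (removed from iscsi_iser.h below), iser_initialize_task_headers() runs, and maps the descriptor headers, on every task init; the unmap added above is its counterpart in task cleanup. A sketch of the mapping this pairs with, assumed to live in iser_initialize_task_headers():

/* Hypothetical extract: map the tx descriptor headers for send. */
static int map_tx_headers(struct iser_device *device,
                          struct iser_tx_desc *tx_desc)
{
        u64 dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
                                         ISER_HEADERS_LEN, DMA_TO_DEVICE);

        if (ib_dma_mapping_error(device->ib_device, dma_addr))
                return -ENOMEM;

        tx_desc->dma_addr = dma_addr;
        return 0;
}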
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index db6f3ce9f3bf..db7ea3704da7 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -257,7 +257,8 @@ struct iser_conn { | |||
257 | struct list_head conn_list; /* entry in ig conn list */ | 257 | struct list_head conn_list; /* entry in ig conn list */ |
258 | 258 | ||
259 | char *login_buf; | 259 | char *login_buf; |
260 | u64 login_dma; | 260 | char *login_req_buf, *login_resp_buf; |
261 | u64 login_req_dma, login_resp_dma; | ||
261 | unsigned int rx_desc_head; | 262 | unsigned int rx_desc_head; |
262 | struct iser_rx_desc *rx_descs; | 263 | struct iser_rx_desc *rx_descs; |
263 | struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; | 264 | struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; |
@@ -277,7 +278,6 @@ struct iscsi_iser_task { | |||
277 | struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ | 278 | struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ |
278 | struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ | 279 | struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ |
279 | struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ | 280 | struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ |
280 | int headers_initialized; | ||
281 | }; | 281 | }; |
282 | 282 | ||
283 | struct iser_page_vec { | 283 | struct iser_page_vec { |
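The replaced login_buf bookkeeping reflects a split of the single login buffer into a send half and a receive half with different DMA directions; the setup in iser_verbs.c further down implies this layout:

/*
 * login_buf layout: one kmalloc(), two streaming mappings.
 *
 *  login_buf
 *  |<- ISCSI_DEF_MAX_RECV_SEG_LEN ->|<----- ISER_RX_LOGIN_SIZE ----->|
 *  +--------------------------------+--------------------------------+
 *  | login_req_buf, DMA_TO_DEVICE   | login_resp_buf, DMA_FROM_DEVICE|
 *  +--------------------------------+--------------------------------+
 */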
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index f299de6b419b..a607542fc796 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -221,8 +221,14 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) | |||
221 | struct iser_device *device = ib_conn->device; | 221 | struct iser_device *device = ib_conn->device; |
222 | 222 | ||
223 | if (ib_conn->login_buf) { | 223 | if (ib_conn->login_buf) { |
224 | ib_dma_unmap_single(device->ib_device, ib_conn->login_dma, | 224 | if (ib_conn->login_req_dma) |
225 | ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); | 225 | ib_dma_unmap_single(device->ib_device, |
226 | ib_conn->login_req_dma, | ||
227 | ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); | ||
228 | if (ib_conn->login_resp_dma) | ||
229 | ib_dma_unmap_single(device->ib_device, | ||
230 | ib_conn->login_resp_dma, | ||
231 | ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); | ||
226 | kfree(ib_conn->login_buf); | 232 | kfree(ib_conn->login_buf); |
227 | } | 233 | } |
228 | 234 | ||
@@ -394,6 +400,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
394 | unsigned long data_seg_len; | 400 | unsigned long data_seg_len; |
395 | int err = 0; | 401 | int err = 0; |
396 | struct iser_device *device; | 402 | struct iser_device *device; |
403 | struct iser_conn *ib_conn = iser_conn->ib_conn; | ||
397 | 404 | ||
398 | /* build the tx desc regd header and add it to the tx desc dto */ | 405 | /* build the tx desc regd header and add it to the tx desc dto */ |
399 | mdesc->type = ISCSI_TX_CONTROL; | 406 | mdesc->type = ISCSI_TX_CONTROL; |
@@ -409,9 +416,19 @@ int iser_send_control(struct iscsi_conn *conn, | |||
409 | iser_err("data present on non login task!!!\n"); | 416 | iser_err("data present on non login task!!!\n"); |
410 | goto send_control_error; | 417 | goto send_control_error; |
411 | } | 418 | } |
412 | memcpy(iser_conn->ib_conn->login_buf, task->data, | 419 | |
420 | ib_dma_sync_single_for_cpu(device->ib_device, | ||
421 | ib_conn->login_req_dma, task->data_count, | ||
422 | DMA_TO_DEVICE); | ||
423 | |||
424 | memcpy(iser_conn->ib_conn->login_req_buf, task->data, | ||
413 | task->data_count); | 425 | task->data_count); |
414 | tx_dsg->addr = iser_conn->ib_conn->login_dma; | 426 | |
427 | ib_dma_sync_single_for_device(device->ib_device, | ||
428 | ib_conn->login_req_dma, task->data_count, | ||
429 | DMA_TO_DEVICE); | ||
430 | |||
431 | tx_dsg->addr = iser_conn->ib_conn->login_req_dma; | ||
415 | tx_dsg->length = task->data_count; | 432 | tx_dsg->length = task->data_count; |
416 | tx_dsg->lkey = device->mr->lkey; | 433 | tx_dsg->lkey = device->mr->lkey; |
417 | mdesc->num_sge = 2; | 434 | mdesc->num_sge = 2; |
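The sync pair added around the memcpy() follows the streaming-DMA ownership rule: login_req_dma stays mapped DMA_TO_DEVICE for the connection's lifetime, so the CPU must take the buffer back before writing it and hand it back before posting the send. The general shape, as a standalone sketch:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Write into a long-lived DMA_TO_DEVICE mapping from the CPU. */
static void fill_dma_buf(struct ib_device *ib_dev, u64 dma_addr,
                         void *buf, const void *src, size_t len)
{
        ib_dma_sync_single_for_cpu(ib_dev, dma_addr, len, DMA_TO_DEVICE);
        memcpy(buf, src, len);                  /* CPU owns the buffer */
        ib_dma_sync_single_for_device(ib_dev, dma_addr, len, DMA_TO_DEVICE);
}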
@@ -445,8 +462,8 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, | |||
445 | int rx_buflen, outstanding, count, err; | 462 | int rx_buflen, outstanding, count, err; |
446 | 463 | ||
447 | /* differentiate between login to all other PDUs */ | 464 | /* differentiate between login to all other PDUs */ |
448 | if ((char *)rx_desc == ib_conn->login_buf) { | 465 | if ((char *)rx_desc == ib_conn->login_resp_buf) { |
449 | rx_dma = ib_conn->login_dma; | 466 | rx_dma = ib_conn->login_resp_dma; |
450 | rx_buflen = ISER_RX_LOGIN_SIZE; | 467 | rx_buflen = ISER_RX_LOGIN_SIZE; |
451 | } else { | 468 | } else { |
452 | rx_dma = rx_desc->dma_addr; | 469 | rx_dma = rx_desc->dma_addr; |
@@ -473,7 +490,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, | |||
473 | * for the posted rx bufs refcount to become zero handles everything */ | 490 | * for the posted rx bufs refcount to become zero handles everything */ |
474 | conn->ib_conn->post_recv_buf_count--; | 491 | conn->ib_conn->post_recv_buf_count--; |
475 | 492 | ||
476 | if (rx_dma == ib_conn->login_dma) | 493 | if (rx_dma == ib_conn->login_resp_dma) |
477 | return; | 494 | return; |
478 | 495 | ||
479 | outstanding = ib_conn->post_recv_buf_count; | 496 | outstanding = ib_conn->post_recv_buf_count; |
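The login-response test works because wr_id round-trips the buffer pointer: iser_post_recvl() (end of this section) stores login_resp_buf in the work request, and the completion path casts it back. A sketch of the check, assuming the usual ib_wc plumbing:

static bool is_login_resp(struct iser_conn *ib_conn, struct ib_wc *wc)
{
        /* wr_id was set to the posted buffer's kernel virtual address. */
        return (char *)(unsigned long)wc->wr_id == ib_conn->login_resp_buf;
}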
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ede1475bee09..e28877c4ce15 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -155,20 +155,39 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) | |||
155 | { | 155 | { |
156 | struct iser_device *device; | 156 | struct iser_device *device; |
157 | struct ib_qp_init_attr init_attr; | 157 | struct ib_qp_init_attr init_attr; |
158 | int ret = -ENOMEM; | 158 | int req_err, resp_err, ret = -ENOMEM; |
159 | struct ib_fmr_pool_param params; | 159 | struct ib_fmr_pool_param params; |
160 | 160 | ||
161 | BUG_ON(ib_conn->device == NULL); | 161 | BUG_ON(ib_conn->device == NULL); |
162 | 162 | ||
163 | device = ib_conn->device; | 163 | device = ib_conn->device; |
164 | 164 | ||
165 | ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); | 165 | ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + |
166 | ISER_RX_LOGIN_SIZE, GFP_KERNEL); | ||
166 | if (!ib_conn->login_buf) | 167 | if (!ib_conn->login_buf) |
167 | goto out_err; | 168 | goto out_err; |
168 | 169 | ||
169 | ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, | 170 | ib_conn->login_req_buf = ib_conn->login_buf; |
170 | (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, | 171 | ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN; |
171 | DMA_FROM_DEVICE); | 172 | |
173 | ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device, | ||
174 | (void *)ib_conn->login_req_buf, | ||
175 | ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); | ||
176 | |||
177 | ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device, | ||
178 | (void *)ib_conn->login_resp_buf, | ||
179 | ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); | ||
180 | |||
181 | req_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma); | ||
182 | resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma); | ||
183 | |||
184 | if (req_err || resp_err) { | ||
185 | if (req_err) | ||
186 | ib_conn->login_req_dma = 0; | ||
187 | if (resp_err) | ||
188 | ib_conn->login_resp_dma = 0; | ||
189 | goto out_err; | ||
190 | } | ||
172 | 191 | ||
173 | ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + | 192 | ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + |
174 | (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), | 193 | (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), |
@@ -658,11 +677,11 @@ int iser_post_recvl(struct iser_conn *ib_conn) | |||
658 | struct ib_sge sge; | 677 | struct ib_sge sge; |
659 | int ib_ret; | 678 | int ib_ret; |
660 | 679 | ||
661 | sge.addr = ib_conn->login_dma; | 680 | sge.addr = ib_conn->login_resp_dma; |
662 | sge.length = ISER_RX_LOGIN_SIZE; | 681 | sge.length = ISER_RX_LOGIN_SIZE; |
663 | sge.lkey = ib_conn->device->mr->lkey; | 682 | sge.lkey = ib_conn->device->mr->lkey; |
664 | 683 | ||
665 | rx_wr.wr_id = (unsigned long)ib_conn->login_buf; | 684 | rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf; |
666 | rx_wr.sg_list = &sge; | 685 | rx_wr.sg_list = &sge; |
667 | rx_wr.num_sge = 1; | 686 | rx_wr.num_sge = 1; |
668 | rx_wr.next = NULL; | 687 | rx_wr.next = NULL; |