author	Steve Wise <swise@opengridcomputing.com>	2007-05-14 14:27:27 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-07-09 23:12:26 -0400
commit	de3d353072f9342f04112ba0504c3e294220cb8f
tree	94c07283ea583f72544eb76795aa127bef6c5aa9
parent	149983af609e8f5c57157467baf8545d17b8a6a1
RDMA/cxgb3: Streaming -> RDMA mode transition fixes
Due to a HW issue, our current scheme to transition the connection from
streaming to rdma mode is broken on the passive side.  The firmware and
driver now support a new transition scheme for the passive side:

- driver posts rdma_init_wr (now including the initial receive seqno)
- driver posts last streaming message via TX_DATA message (MPA start
  response)
- uP atomically sends the last streaming message and transitions the
  tcb to rdma mode.
- driver waits for wr_ack indicating the last streaming message was ACKed.

NOTE: This change also bumps the required firmware version to 4.3.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
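For reference, the resulting passive-side ordering in iwch_accept_cr() (condensed from the hunks below; declarations and the error path are omitted) looks roughly like this:

	/* move the QP to RTS first; the RDMA init WR now carries the
	 * initial receive sequence number (irs) */
	err = iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/* then send the MPA start response as the last streaming message */
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err;

	/* wait for the wr_ack indicating the last streaming message was ACKed */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;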
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_hal.c	2
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_wr.h	3
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.c	82
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.h	1
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_qp.c	1
-rw-r--r--	drivers/net/cxgb3/version.h	2
6 files changed, 38 insertions, 53 deletions
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 76049afc7655..215bbe51047a 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -833,7 +833,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 	wqe->ird = cpu_to_be32(attr->ird);
 	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
 	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
-	wqe->rsvd = 0;
+	wqe->irs = cpu_to_be32(attr->irs);
 	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
 	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
 }
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index ff7290eacefb..c84d4ac49355 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -294,6 +294,7 @@ struct t3_rdma_init_attr {
 	u64 qp_dma_addr;
 	u32 qp_dma_size;
 	u32 flags;
+	u32 irs;
 };
 
 struct t3_rdma_init_wr {
@@ -314,7 +315,7 @@ struct t3_rdma_init_wr {
 	__be32 ird;
 	__be64 qp_dma_addr;	/* 7 */
 	__be32 qp_dma_size;	/* 8 */
-	u32 rsvd;
+	u32 irs;
 };
 
 struct t3_genbit {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b2faff5abce8..7b8d5aaa2204 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -515,7 +515,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 	req->len = htonl(len);
 	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
 			   V_TX_SNDBUF(snd_win>>15));
-	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
+	req->flags = htonl(F_TX_INIT);
 	req->sndseq = htonl(ep->snd_seq);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
@@ -566,7 +566,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 	req->len = htonl(mpalen);
 	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
 			   V_TX_SNDBUF(snd_win>>15));
-	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
+	req->flags = htonl(F_TX_INIT);
 	req->sndseq = htonl(ep->snd_seq);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
@@ -618,7 +618,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	req->len = htonl(len);
 	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
 			   V_TX_SNDBUF(snd_win>>15));
-	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
+	req->flags = htonl(F_TX_INIT);
 	req->sndseq = htonl(ep->snd_seq);
 	ep->mpa_skb = skb;
 	state_set(&ep->com, MPA_REP_SENT);
@@ -641,6 +641,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
 
 	ep->snd_seq = ntohl(req->snd_isn);
+	ep->rcv_seq = ntohl(req->rcv_isn);
 
 	set_emss(ep, ntohs(req->tcp_opt));
 
@@ -1023,6 +1024,9 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
 
+	ep->rcv_seq += dlen;
+	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
+
 	switch (state_read(&ep->com)) {
 	case MPA_REQ_SENT:
 		process_mpa_reply(ep, skb);
@@ -1060,7 +1064,6 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
-	enum iwch_qp_attr_mask mask;
 
 	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
 
@@ -1072,30 +1075,6 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	ep->mpa_skb = NULL;
 	dst_confirm(ep->dst);
 	if (state_read(&ep->com) == MPA_REP_SENT) {
-		struct iwch_qp_attributes attrs;
-
-		/* bind QP to EP and move to RTS */
-		attrs.mpa_attr = ep->mpa_attr;
-		attrs.max_ird = ep->ord;
-		attrs.max_ord = ep->ord;
-		attrs.llp_stream_handle = ep;
-		attrs.next_state = IWCH_QP_STATE_RTS;
-
-		/* bind QP and TID with INIT_WR */
-		mask = IWCH_QP_ATTR_NEXT_STATE |
-		       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
-		       IWCH_QP_ATTR_MPA_ATTR |
-		       IWCH_QP_ATTR_MAX_IRD |
-		       IWCH_QP_ATTR_MAX_ORD;
-
-		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
-				     ep->com.qp, mask, &attrs, 1);
-
-		if (!ep->com.rpl_err) {
-			state_set(&ep->com, FPDU_MODE);
-			established_upcall(ep);
-		}
-
 		ep->com.rpl_done = 1;
 		PDBG("waking up ep %p\n", ep);
 		wake_up(&ep->com.waitq);
@@ -1378,6 +1357,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 
 	PDBG("%s ep %p\n", __FUNCTION__, ep);
 	ep->snd_seq = ntohl(req->snd_isn);
+	ep->rcv_seq = ntohl(req->rcv_isn);
 
 	set_emss(ep, ntohs(req->tcp_opt));
 
@@ -1732,10 +1712,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
 
 	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
-	if (state_read(&ep->com) == DEAD) {
-		put_ep(&ep->com);
+	if (state_read(&ep->com) == DEAD)
 		return -ECONNRESET;
-	}
 
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	BUG_ON(!qp);
@@ -1755,17 +1733,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
 	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
+
 	get_ep(&ep->com);
-	err = send_mpa_reply(ep, conn_param->private_data,
-			     conn_param->private_data_len);
-	if (err) {
-		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
-		cm_id->rem_ref(cm_id);
-		abort_connection(ep, NULL, GFP_KERNEL);
-		put_ep(&ep->com);
-		return err;
-	}
 
 	/* bind QP to EP and move to RTS */
 	attrs.mpa_attr = ep->mpa_attr;
@@ -1783,16 +1752,29 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	err = iwch_modify_qp(ep->com.qp->rhp,
 			     ep->com.qp, mask, &attrs, 1);
+	if (err)
+		goto err;
 
-	if (err) {
-		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
-		cm_id->rem_ref(cm_id);
-		abort_connection(ep, NULL, GFP_KERNEL);
-	} else {
-		state_set(&ep->com, FPDU_MODE);
-		established_upcall(ep);
-	}
+	err = send_mpa_reply(ep, conn_param->private_data,
+			     conn_param->private_data_len);
+	if (err)
+		goto err;
+
+	/* wait for wr_ack */
+	wait_event(ep->com.waitq, ep->com.rpl_done);
+	err = ep->com.rpl_err;
+	if (err)
+		goto err;
+
+	state_set(&ep->com, FPDU_MODE);
+	established_upcall(ep);
+	put_ep(&ep->com);
+	return 0;
+err:
+	ep->com.cm_id = NULL;
+	ep->com.qp = NULL;
+	cm_id->rem_ref(cm_id);
+	abort_connection(ep, NULL, GFP_KERNEL);
 	put_ep(&ep->com);
 	return err;
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 21a388c313cf..6107e7cd9b57 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -175,6 +175,7 @@ struct iwch_ep {
 	unsigned int atid;
 	u32 hwtid;
 	u32 snd_seq;
+	u32 rcv_seq;
 	struct l2t_entry *l2t;
 	struct dst_entry *dst;
 	struct sk_buff *mpa_skb;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 714dddbc9a98..679b7c179273 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -732,6 +732,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.qp_dma_addr = qhp->wq.dma_addr;
 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
 	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
+	init_attr.irs = qhp->ep->rcv_seq;
 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
 	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
 	     init_attr.rq_addr, init_attr.rq_size,
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 8eddd23a3a51..eb508bf8022a 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
 
 /* Firmware version */
 #define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 1
+#define FW_VERSION_MINOR 3
 #define FW_VERSION_MICRO 0
 #endif /* __CHELSIO_VERSION_H */