Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
 -rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 160
 1 file changed, 80 insertions(+), 80 deletions(-)
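The change is mechanical: every register-field macro touched here moves from the older FOO(x)/FOO(1) spelling to the cxgb4-wide FOO_V(x) value-builder and FOO_F single-bit-flag spelling, with RCV_BUFSIZ_MASK becoming the RCV_BUFSIZ_M mask. A minimal sketch of that suffix convention follows; the field name and numeric values are illustrative placeholders, not the real T4 register definitions.

/* Illustrative only: the _S/_M/_V/_F suffix convention this patch adopts. */
#define EXAMPLE_FIELD_S    25                           /* bit offset of the field     */
#define EXAMPLE_FIELD_M    0x3ffU                       /* unshifted field mask        */
#define EXAMPLE_FIELD_V(x) ((x) << EXAMPLE_FIELD_S)     /* place a value in the word   */
#define EXAMPLE_FIELD_F    EXAMPLE_FIELD_V(1U)          /* shorthand for a 1-bit flag  */

/* Usage then mirrors the hunks below, e.g. opt0 |= EXAMPLE_FIELD_V(win); */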
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index fb61f6685809..83fa16fa4644 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -472,10 +472,10 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	skb = get_skb(skb, flowclen, GFP_KERNEL);
 	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
 
-	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
-					   FW_FLOWC_WR_NPARAMS(8));
-	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
-					  16)) | FW_WR_FLOWID(ep->hwtid));
+	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+					   FW_FLOWC_WR_NPARAMS_V(8));
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+					  16)) | FW_WR_FLOWID_V(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
@@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep)
 	 * remainder will be specified in the rx_data_ack.
 	 */
 	win = ep->rcv_win >> 10;
-	if (win > RCV_BUFSIZ_MASK)
-		win = RCV_BUFSIZ_MASK;
+	if (win > RCV_BUFSIZ_M)
+		win = RCV_BUFSIZ_M;
 
 	opt0 = (nocong ? NO_CONG(1) : 0) |
-	       KEEP_ALIVE(1) |
+	       KEEP_ALIVE_F |
 	       DELACK(1) |
-	       WND_SCALE(wscale) |
-	       MSS_IDX(mtu_idx) |
-	       L2T_IDX(ep->l2t->idx) |
-	       TX_CHAN(ep->tx_chan) |
-	       SMAC_SEL(ep->smac_idx) |
+	       WND_SCALE_V(wscale) |
+	       MSS_IDX_V(mtu_idx) |
+	       L2T_IDX_V(ep->l2t->idx) |
+	       TX_CHAN_V(ep->tx_chan) |
+	       SMAC_SEL_V(ep->smac_idx) |
 	       DSCP(ep->tos) |
-	       ULP_MODE(ULP_MODE_TCPDDP) |
-	       RCV_BUFSIZ(win);
-	opt2 = RX_CHANNEL(0) |
+	       ULP_MODE_V(ULP_MODE_TCPDDP) |
+	       RCV_BUFSIZ_V(win);
+	opt2 = RX_CHANNEL_V(0) |
 	       CCTRL_ECN(enable_ecn) |
-	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 	if (enable_tcp_timestamps)
 		opt2 |= TSTAMPS_EN(1);
 	if (enable_tcp_sack)
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
-		opt2 |= WND_SCALE_EN(1);
+		opt2 |= WND_SCALE_EN_F;
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
-		opt2 |= T5_OPT_2_VALID;
+		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
 		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
 	}
@@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep)
 			t5_req->local_ip = la->sin_addr.s_addr;
 			t5_req->peer_ip = ra->sin_addr.s_addr;
 			t5_req->opt0 = cpu_to_be64(opt0);
-			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+			t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
 						cxgb4_select_ntuple(
 						ep->com.dev->rdev.lldi.ports[0],
 						ep->l2t)));
@@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep)
 			t5_req6->peer_ip_lo = *((__be64 *)
 						(ra6->sin6_addr.s6_addr + 8));
 			t5_req6->opt0 = cpu_to_be64(opt0);
-			t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
+			t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
 						cxgb4_select_ntuple(
 						ep->com.dev->rdev.lldi.ports[0],
 						ep->l2t)));
@@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
-		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-		FW_WR_COMPL(1) |
-		FW_WR_IMMDLEN(mpalen));
+		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+		FW_WR_COMPL_F |
+		FW_WR_IMMDLEN_V(mpalen));
 	req->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(wrlen >> 4));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(wrlen >> 4));
 	req->plen = cpu_to_be32(mpalen);
 	req->tunnel_to_proxy = cpu_to_be32(
-		FW_OFLD_TX_DATA_WR_FLUSH(1) |
-		FW_OFLD_TX_DATA_WR_SHOVE(1));
+		FW_OFLD_TX_DATA_WR_FLUSH_F |
+		FW_OFLD_TX_DATA_WR_SHOVE_F);
 
 	mpa = (struct mpa_message *)(req + 1);
 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
@@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
-		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-		FW_WR_COMPL(1) |
-		FW_WR_IMMDLEN(mpalen));
+		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+		FW_WR_COMPL_F |
+		FW_WR_IMMDLEN_V(mpalen));
 	req->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(wrlen >> 4));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(wrlen >> 4));
 	req->plen = cpu_to_be32(mpalen);
 	req->tunnel_to_proxy = cpu_to_be32(
-		FW_OFLD_TX_DATA_WR_FLUSH(1) |
-		FW_OFLD_TX_DATA_WR_SHOVE(1));
+		FW_OFLD_TX_DATA_WR_FLUSH_F |
+		FW_OFLD_TX_DATA_WR_SHOVE_F);
 
 	mpa = (struct mpa_message *)(req + 1);
 	memset(mpa, 0, sizeof(*mpa));
@@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	req->op_to_immdlen = cpu_to_be32(
-		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-		FW_WR_COMPL(1) |
-		FW_WR_IMMDLEN(mpalen));
+		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+		FW_WR_COMPL_F |
+		FW_WR_IMMDLEN_V(mpalen));
 	req->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(ep->hwtid) |
-		FW_WR_LEN16(wrlen >> 4));
+		FW_WR_FLOWID_V(ep->hwtid) |
+		FW_WR_LEN16_V(wrlen >> 4));
 	req->plen = cpu_to_be32(mpalen);
 	req->tunnel_to_proxy = cpu_to_be32(
-		FW_OFLD_TX_DATA_WR_FLUSH(1) |
-		FW_OFLD_TX_DATA_WR_SHOVE(1));
+		FW_OFLD_TX_DATA_WR_FLUSH_F |
+		FW_OFLD_TX_DATA_WR_SHOVE_F);
 
 	mpa = (struct mpa_message *)(req + 1);
 	memset(mpa, 0, sizeof(*mpa));
@@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
 	 * then add the overage in to the credits returned.
 	 */
-	if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
-		credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
+		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
 
 	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
 						    ep->hwtid));
-	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
 				       F_RX_DACK_CHANGE |
 				       V_RX_DACK_MODE(dack_mode));
 	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
-	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
 				     ep->com.dev->rdev.lldi.ports[0],
 				     ep->l2t));
@@ -1778,34 +1778,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	 * remainder will be specified in the rx_data_ack.
 	 */
 	win = ep->rcv_win >> 10;
-	if (win > RCV_BUFSIZ_MASK)
-		win = RCV_BUFSIZ_MASK;
+	if (win > RCV_BUFSIZ_M)
+		win = RCV_BUFSIZ_M;
 
 	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
 		(nocong ? NO_CONG(1) : 0) |
-		KEEP_ALIVE(1) |
+		KEEP_ALIVE_F |
 		DELACK(1) |
-		WND_SCALE(wscale) |
-		MSS_IDX(mtu_idx) |
-		L2T_IDX(ep->l2t->idx) |
-		TX_CHAN(ep->tx_chan) |
-		SMAC_SEL(ep->smac_idx) |
+		WND_SCALE_V(wscale) |
+		MSS_IDX_V(mtu_idx) |
+		L2T_IDX_V(ep->l2t->idx) |
+		TX_CHAN_V(ep->tx_chan) |
+		SMAC_SEL_V(ep->smac_idx) |
 		DSCP(ep->tos) |
-		ULP_MODE(ULP_MODE_TCPDDP) |
-		RCV_BUFSIZ(win));
+		ULP_MODE_V(ULP_MODE_TCPDDP) |
+		RCV_BUFSIZ_V(win));
 	req->tcb.opt2 = (__force __be32) (PACE(1) |
 		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
-		RX_CHANNEL(0) |
+		RX_CHANNEL_V(0) |
 		CCTRL_ECN(enable_ecn) |
-		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
+		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
 	if (enable_tcp_timestamps)
-		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
+		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
 	if (enable_tcp_sack)
-		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
+		req->tcb.opt2 |= (__force __be32)SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
-		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
-	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
-	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
+		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
+	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
+	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
 	set_bit(ACT_OFLD_CONN, &ep->com.history);
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2178,28 +2178,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	 * remainder will be specified in the rx_data_ack.
 	 */
 	win = ep->rcv_win >> 10;
-	if (win > RCV_BUFSIZ_MASK)
-		win = RCV_BUFSIZ_MASK;
+	if (win > RCV_BUFSIZ_M)
+		win = RCV_BUFSIZ_M;
 	opt0 = (nocong ? NO_CONG(1) : 0) |
-	       KEEP_ALIVE(1) |
+	       KEEP_ALIVE_F |
 	       DELACK(1) |
-	       WND_SCALE(wscale) |
-	       MSS_IDX(mtu_idx) |
-	       L2T_IDX(ep->l2t->idx) |
-	       TX_CHAN(ep->tx_chan) |
-	       SMAC_SEL(ep->smac_idx) |
+	       WND_SCALE_V(wscale) |
+	       MSS_IDX_V(mtu_idx) |
+	       L2T_IDX_V(ep->l2t->idx) |
+	       TX_CHAN_V(ep->tx_chan) |
+	       SMAC_SEL_V(ep->smac_idx) |
 	       DSCP(ep->tos >> 2) |
-	       ULP_MODE(ULP_MODE_TCPDDP) |
-	       RCV_BUFSIZ(win);
-	opt2 = RX_CHANNEL(0) |
-	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+	       ULP_MODE_V(ULP_MODE_TCPDDP) |
+	       RCV_BUFSIZ_V(win);
+	opt2 = RX_CHANNEL_V(0) |
+	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 
 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
 		opt2 |= TSTAMPS_EN(1);
 	if (enable_tcp_sack && req->tcpopt.sack)
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
-		opt2 |= WND_SCALE_EN(1);
+		opt2 |= WND_SCALE_EN_F;
 	if (enable_ecn) {
 		const struct tcphdr *tcph;
 		u32 hlen = ntohl(req->hdr_len);
@@ -2211,7 +2211,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	}
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		u32 isn = (prandom_u32() & ~7UL) - 1;
-		opt2 |= T5_OPT_2_VALID;
+		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
 		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
 		rpl5 = (void *)rpl;
@@ -3537,8 +3537,8 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
-	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
 	req->le.filter = (__force __be32) filter;
 	req->le.lport = lport;
@@ -3557,7 +3557,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * We store the qid in opt2 which will be used by the firmware
 	 * to send us the wr response.
 	 */
-	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
 
 	/*
 	 * We initialize the MSS index in TCB to 0xF.
@@ -3565,7 +3565,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * TCB picks up the correct value. If this was 0
 	 * TP will ignore any value > 0 for MSS index.
 	 */
-	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
 	req->cookie = (unsigned long)skb;
 
 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);