aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAnish Bhatt <anish@chelsio.com>2014-11-12 20:15:57 -0500
committerDavid S. Miller <davem@davemloft.net>2014-11-13 14:36:22 -0500
commitd7990b0c34623cd54475a0562c607efbaba4899d (patch)
tree07df588a531da6bb0cbdac66633f2ce26e50263b
parent8c847d254146d32c86574a1b16923ff91bb784dd (diff)
cxgb4i/cxgb4: Refactor macros to conform to uniform standards
Refactored all macros used in cxgb4i as part of the previously started cxgb4 macro-names cleanup. Makes them more uniform and avoids namespace collisions. Minor changes were required in other drivers, as some of these macros are used by multiple drivers; the affected drivers are iw_cxgb4, cxgb4(vf) & csiostor. Signed-off-by: Anish Bhatt <anish@chelsio.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c104
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h120
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c78
11 files changed, 200 insertions, 142 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a07d8e124a80..83fa16fa4644 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep)
649 * remainder will be specified in the rx_data_ack. 649 * remainder will be specified in the rx_data_ack.
650 */ 650 */
651 win = ep->rcv_win >> 10; 651 win = ep->rcv_win >> 10;
652 if (win > RCV_BUFSIZ_MASK) 652 if (win > RCV_BUFSIZ_M)
653 win = RCV_BUFSIZ_MASK; 653 win = RCV_BUFSIZ_M;
654 654
655 opt0 = (nocong ? NO_CONG(1) : 0) | 655 opt0 = (nocong ? NO_CONG(1) : 0) |
656 KEEP_ALIVE(1) | 656 KEEP_ALIVE_F |
657 DELACK(1) | 657 DELACK(1) |
658 WND_SCALE(wscale) | 658 WND_SCALE_V(wscale) |
659 MSS_IDX(mtu_idx) | 659 MSS_IDX_V(mtu_idx) |
660 L2T_IDX(ep->l2t->idx) | 660 L2T_IDX_V(ep->l2t->idx) |
661 TX_CHAN(ep->tx_chan) | 661 TX_CHAN_V(ep->tx_chan) |
662 SMAC_SEL(ep->smac_idx) | 662 SMAC_SEL_V(ep->smac_idx) |
663 DSCP(ep->tos) | 663 DSCP(ep->tos) |
664 ULP_MODE(ULP_MODE_TCPDDP) | 664 ULP_MODE_V(ULP_MODE_TCPDDP) |
665 RCV_BUFSIZ(win); 665 RCV_BUFSIZ_V(win);
666 opt2 = RX_CHANNEL(0) | 666 opt2 = RX_CHANNEL_V(0) |
667 CCTRL_ECN(enable_ecn) | 667 CCTRL_ECN(enable_ecn) |
668 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 668 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
669 if (enable_tcp_timestamps) 669 if (enable_tcp_timestamps)
670 opt2 |= TSTAMPS_EN(1); 670 opt2 |= TSTAMPS_EN(1);
671 if (enable_tcp_sack) 671 if (enable_tcp_sack)
672 opt2 |= SACK_EN(1); 672 opt2 |= SACK_EN(1);
673 if (wscale && enable_tcp_window_scaling) 673 if (wscale && enable_tcp_window_scaling)
674 opt2 |= WND_SCALE_EN(1); 674 opt2 |= WND_SCALE_EN_F;
675 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 675 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
676 opt2 |= T5_OPT_2_VALID; 676 opt2 |= T5_OPT_2_VALID_F;
677 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 677 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
678 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 678 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
679 } 679 }
@@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep)
736 t5_req->local_ip = la->sin_addr.s_addr; 736 t5_req->local_ip = la->sin_addr.s_addr;
737 t5_req->peer_ip = ra->sin_addr.s_addr; 737 t5_req->peer_ip = ra->sin_addr.s_addr;
738 t5_req->opt0 = cpu_to_be64(opt0); 738 t5_req->opt0 = cpu_to_be64(opt0);
739 t5_req->params = cpu_to_be64(V_FILTER_TUPLE( 739 t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
740 cxgb4_select_ntuple( 740 cxgb4_select_ntuple(
741 ep->com.dev->rdev.lldi.ports[0], 741 ep->com.dev->rdev.lldi.ports[0],
742 ep->l2t))); 742 ep->l2t)));
@@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep)
762 t5_req6->peer_ip_lo = *((__be64 *) 762 t5_req6->peer_ip_lo = *((__be64 *)
763 (ra6->sin6_addr.s6_addr + 8)); 763 (ra6->sin6_addr.s6_addr + 8));
764 t5_req6->opt0 = cpu_to_be64(opt0); 764 t5_req6->opt0 = cpu_to_be64(opt0);
765 t5_req6->params = cpu_to_be64(V_FILTER_TUPLE( 765 t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
766 cxgb4_select_ntuple( 766 cxgb4_select_ntuple(
767 ep->com.dev->rdev.lldi.ports[0], 767 ep->com.dev->rdev.lldi.ports[0],
768 ep->l2t))); 768 ep->l2t)));
@@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1249 * due to the limit in the number of bits in the RCV_BUFSIZ field, 1249 * due to the limit in the number of bits in the RCV_BUFSIZ field,
1250 * then add the overage in to the credits returned. 1250 * then add the overage in to the credits returned.
1251 */ 1251 */
1252 if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024) 1252 if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
1253 credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024; 1253 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
1254 1254
1255 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1255 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
1256 memset(req, 0, wrlen); 1256 memset(req, 0, wrlen);
1257 INIT_TP_WR(req, ep->hwtid); 1257 INIT_TP_WR(req, ep->hwtid);
1258 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 1258 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
1259 ep->hwtid)); 1259 ep->hwtid));
1260 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | 1260 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
1261 F_RX_DACK_CHANGE | 1261 F_RX_DACK_CHANGE |
1262 V_RX_DACK_MODE(dack_mode)); 1262 V_RX_DACK_MODE(dack_mode));
1263 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 1263 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
@@ -1778,34 +1778,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1778 * remainder will be specified in the rx_data_ack. 1778 * remainder will be specified in the rx_data_ack.
1779 */ 1779 */
1780 win = ep->rcv_win >> 10; 1780 win = ep->rcv_win >> 10;
1781 if (win > RCV_BUFSIZ_MASK) 1781 if (win > RCV_BUFSIZ_M)
1782 win = RCV_BUFSIZ_MASK; 1782 win = RCV_BUFSIZ_M;
1783 1783
1784 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | 1784 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
1785 (nocong ? NO_CONG(1) : 0) | 1785 (nocong ? NO_CONG(1) : 0) |
1786 KEEP_ALIVE(1) | 1786 KEEP_ALIVE_F |
1787 DELACK(1) | 1787 DELACK(1) |
1788 WND_SCALE(wscale) | 1788 WND_SCALE_V(wscale) |
1789 MSS_IDX(mtu_idx) | 1789 MSS_IDX_V(mtu_idx) |
1790 L2T_IDX(ep->l2t->idx) | 1790 L2T_IDX_V(ep->l2t->idx) |
1791 TX_CHAN(ep->tx_chan) | 1791 TX_CHAN_V(ep->tx_chan) |
1792 SMAC_SEL(ep->smac_idx) | 1792 SMAC_SEL_V(ep->smac_idx) |
1793 DSCP(ep->tos) | 1793 DSCP(ep->tos) |
1794 ULP_MODE(ULP_MODE_TCPDDP) | 1794 ULP_MODE_V(ULP_MODE_TCPDDP) |
1795 RCV_BUFSIZ(win)); 1795 RCV_BUFSIZ_V(win));
1796 req->tcb.opt2 = (__force __be32) (PACE(1) | 1796 req->tcb.opt2 = (__force __be32) (PACE(1) |
1797 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1797 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1798 RX_CHANNEL(0) | 1798 RX_CHANNEL_V(0) |
1799 CCTRL_ECN(enable_ecn) | 1799 CCTRL_ECN(enable_ecn) |
1800 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid)); 1800 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
1801 if (enable_tcp_timestamps) 1801 if (enable_tcp_timestamps)
1802 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1); 1802 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
1803 if (enable_tcp_sack) 1803 if (enable_tcp_sack)
1804 req->tcb.opt2 |= (__force __be32) SACK_EN(1); 1804 req->tcb.opt2 |= (__force __be32)SACK_EN(1);
1805 if (wscale && enable_tcp_window_scaling) 1805 if (wscale && enable_tcp_window_scaling)
1806 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1); 1806 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
1807 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0); 1807 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
1808 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2); 1808 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
1809 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1809 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1810 set_bit(ACT_OFLD_CONN, &ep->com.history); 1810 set_bit(ACT_OFLD_CONN, &ep->com.history);
1811 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1811 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2178,28 +2178,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2178 * remainder will be specified in the rx_data_ack. 2178 * remainder will be specified in the rx_data_ack.
2179 */ 2179 */
2180 win = ep->rcv_win >> 10; 2180 win = ep->rcv_win >> 10;
2181 if (win > RCV_BUFSIZ_MASK) 2181 if (win > RCV_BUFSIZ_M)
2182 win = RCV_BUFSIZ_MASK; 2182 win = RCV_BUFSIZ_M;
2183 opt0 = (nocong ? NO_CONG(1) : 0) | 2183 opt0 = (nocong ? NO_CONG(1) : 0) |
2184 KEEP_ALIVE(1) | 2184 KEEP_ALIVE_F |
2185 DELACK(1) | 2185 DELACK(1) |
2186 WND_SCALE(wscale) | 2186 WND_SCALE_V(wscale) |
2187 MSS_IDX(mtu_idx) | 2187 MSS_IDX_V(mtu_idx) |
2188 L2T_IDX(ep->l2t->idx) | 2188 L2T_IDX_V(ep->l2t->idx) |
2189 TX_CHAN(ep->tx_chan) | 2189 TX_CHAN_V(ep->tx_chan) |
2190 SMAC_SEL(ep->smac_idx) | 2190 SMAC_SEL_V(ep->smac_idx) |
2191 DSCP(ep->tos >> 2) | 2191 DSCP(ep->tos >> 2) |
2192 ULP_MODE(ULP_MODE_TCPDDP) | 2192 ULP_MODE_V(ULP_MODE_TCPDDP) |
2193 RCV_BUFSIZ(win); 2193 RCV_BUFSIZ_V(win);
2194 opt2 = RX_CHANNEL(0) | 2194 opt2 = RX_CHANNEL_V(0) |
2195 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 2195 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2196 2196
2197 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2197 if (enable_tcp_timestamps && req->tcpopt.tstamp)
2198 opt2 |= TSTAMPS_EN(1); 2198 opt2 |= TSTAMPS_EN(1);
2199 if (enable_tcp_sack && req->tcpopt.sack) 2199 if (enable_tcp_sack && req->tcpopt.sack)
2200 opt2 |= SACK_EN(1); 2200 opt2 |= SACK_EN(1);
2201 if (wscale && enable_tcp_window_scaling) 2201 if (wscale && enable_tcp_window_scaling)
2202 opt2 |= WND_SCALE_EN(1); 2202 opt2 |= WND_SCALE_EN_F;
2203 if (enable_ecn) { 2203 if (enable_ecn) {
2204 const struct tcphdr *tcph; 2204 const struct tcphdr *tcph;
2205 u32 hlen = ntohl(req->hdr_len); 2205 u32 hlen = ntohl(req->hdr_len);
@@ -2211,7 +2211,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2211 } 2211 }
2212 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2212 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
2213 u32 isn = (prandom_u32() & ~7UL) - 1; 2213 u32 isn = (prandom_u32() & ~7UL) - 1;
2214 opt2 |= T5_OPT_2_VALID; 2214 opt2 |= T5_OPT_2_VALID_F;
2215 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 2215 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
2216 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 2216 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
2217 rpl5 = (void *)rpl; 2217 rpl5 = (void *)rpl;
@@ -3557,7 +3557,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3557 * We store the qid in opt2 which will be used by the firmware 3557 * We store the qid in opt2 which will be used by the firmware
3558 * to send us the wr response. 3558 * to send us the wr response.
3559 */ 3559 */
3560 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); 3560 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
3561 3561
3562 /* 3562 /*
3563 * We initialize the MSS index in TCB to 0xF. 3563 * We initialize the MSS index in TCB to 0xF.
@@ -3565,7 +3565,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3565 * TCB picks up the correct value. If this was 0 3565 * TCB picks up the correct value. If this was 0
3566 * TP will ignore any value > 0 for MSS index. 3566 * TP will ignore any value > 0 for MSS index.
3567 */ 3567 */
3568 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); 3568 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
3569 req->cookie = (unsigned long)skb; 3569 req->cookie = (unsigned long)skb;
3570 3570
3571 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3571 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 9335148c1ad9..0744455cd88b 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -78,14 +78,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
78 (wait ? FW_WR_COMPL_F : 0)); 78 (wait ? FW_WR_COMPL_F : 0));
79 req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L; 79 req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
80 req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); 80 req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
81 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); 81 req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
82 req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1)); 82 req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
83 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5)); 83 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
84 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); 84 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr)); 85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
86 86
87 sgl = (struct ulptx_sgl *)(req + 1); 87 sgl = (struct ulptx_sgl *)(req + 1);
88 sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) | 88 sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
89 ULPTX_NSGE(1)); 89 ULPTX_NSGE(1));
90 sgl->len0 = cpu_to_be32(len); 90 sgl->len0 = cpu_to_be32(len);
91 sgl->addr0 = cpu_to_be64(data); 91 sgl->addr0 = cpu_to_be64(data);
@@ -107,12 +107,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
107 u8 wr_len, *to_dp, *from_dp; 107 u8 wr_len, *to_dp, *from_dp;
108 int copy_len, num_wqe, i, ret = 0; 108 int copy_len, num_wqe, i, ret = 0;
109 struct c4iw_wr_wait wr_wait; 109 struct c4iw_wr_wait wr_wait;
110 __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); 110 __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
111 111
112 if (is_t4(rdev->lldi.adapter_type)) 112 if (is_t4(rdev->lldi.adapter_type))
113 cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1)); 113 cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
114 else 114 else
115 cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1)); 115 cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
116 116
117 addr &= 0x7FFFFFF; 117 addr &= 0x7FFFFFF;
118 PDBG("%s addr 0x%x len %u\n", __func__, addr, len); 118 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -144,14 +144,14 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
144 FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); 144 FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
145 145
146 req->cmd = cmd; 146 req->cmd = cmd;
147 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN( 147 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
148 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); 148 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
149 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 149 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
150 16)); 150 16));
151 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3)); 151 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));
152 152
153 sc = (struct ulptx_idata *)(req + 1); 153 sc = (struct ulptx_idata *)(req + 1);
154 sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM)); 154 sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
155 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO)); 155 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
156 156
157 to_dp = (u8 *)(sc + 1); 157 to_dp = (u8 *)(sc + 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 660bf0f79ac5..19ffe9bc1933 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3476,7 +3476,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3476 req->local_ip = sip; 3476 req->local_ip = sip;
3477 req->peer_ip = htonl(0); 3477 req->peer_ip = htonl(0);
3478 chan = rxq_to_chan(&adap->sge, queue); 3478 chan = rxq_to_chan(&adap->sge, queue);
3479 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 3479 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3480 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3480 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3481 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3481 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3482 ret = t4_mgmt_tx(adap, skb); 3482 ret = t4_mgmt_tx(adap, skb);
@@ -3519,7 +3519,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3519 req->peer_ip_hi = cpu_to_be64(0); 3519 req->peer_ip_hi = cpu_to_be64(0);
3520 req->peer_ip_lo = cpu_to_be64(0); 3520 req->peer_ip_lo = cpu_to_be64(0);
3521 chan = rxq_to_chan(&adap->sge, queue); 3521 chan = rxq_to_chan(&adap->sge, queue);
3522 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 3522 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3523 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3523 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3524 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3524 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3525 ret = t4_mgmt_tx(adap, skb); 3525 ret = t4_mgmt_tx(adap, skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 96041397ee15..1eca0e21f738 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -436,7 +436,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
436 if (tp->vnic_shift >= 0) { 436 if (tp->vnic_shift >= 0) {
437 u32 viid = cxgb4_port_viid(dev); 437 u32 viid = cxgb4_port_viid(dev);
438 u32 vf = FW_VIID_VIN_GET(viid); 438 u32 vf = FW_VIID_VIN_GET(viid);
439 u32 pf = FW_VIID_PFN_GET(viid); 439 u32 pf = FW_VIID_PFN_G(viid);
440 u32 vld = FW_VIID_VIVLD_GET(viid); 440 u32 vld = FW_VIID_VIVLD_GET(viid);
441 441
442 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | 442 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index dacd95008333..91dbf98036cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -816,7 +816,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
816 sgl->addr0 = cpu_to_be64(addr[1]); 816 sgl->addr0 = cpu_to_be64(addr[1]);
817 } 817 }
818 818
819 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); 819 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
820 if (likely(--nfrags == 0)) 820 if (likely(--nfrags == 0))
821 return; 821 return;
822 /* 822 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 5f4db2398c71..0f89f68948ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -205,16 +205,62 @@ struct work_request_hdr {
205#define WR_HDR struct work_request_hdr wr 205#define WR_HDR struct work_request_hdr wr
206 206
207/* option 0 fields */ 207/* option 0 fields */
208#define S_MSS_IDX 60 208#define TX_CHAN_S 2
209#define M_MSS_IDX 0xF 209#define TX_CHAN_V(x) ((x) << TX_CHAN_S)
210#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX) 210
211#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) 211#define ULP_MODE_S 8
212#define ULP_MODE_V(x) ((x) << ULP_MODE_S)
213
214#define RCV_BUFSIZ_S 12
215#define RCV_BUFSIZ_M 0x3FFU
216#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S)
217
218#define SMAC_SEL_S 28
219#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S)
220
221#define L2T_IDX_S 36
222#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S)
223
224#define WND_SCALE_S 50
225#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S)
226
227#define KEEP_ALIVE_S 54
228#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S)
229#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL)
230
231#define MSS_IDX_S 60
232#define MSS_IDX_M 0xF
233#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S)
234#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M)
212 235
213/* option 2 fields */ 236/* option 2 fields */
214#define S_RSS_QUEUE 0 237#define RSS_QUEUE_S 0
215#define M_RSS_QUEUE 0x3FF 238#define RSS_QUEUE_M 0x3FF
216#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) 239#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S)
217#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE) 240#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M)
241
242#define RSS_QUEUE_VALID_S 10
243#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S)
244#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U)
245
246#define RX_FC_DISABLE_S 20
247#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S)
248#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U)
249
250#define RX_FC_VALID_S 22
251#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S)
252#define RX_FC_VALID_F RX_FC_VALID_V(1U)
253
254#define RX_CHANNEL_S 26
255#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S)
256
257#define WND_SCALE_EN_S 28
258#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S)
259#define WND_SCALE_EN_F WND_SCALE_EN_V(1U)
260
261#define T5_OPT_2_VALID_S 31
262#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S)
263#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U)
218 264
219struct cpl_pass_open_req { 265struct cpl_pass_open_req {
220 WR_HDR; 266 WR_HDR;
@@ -224,20 +270,11 @@ struct cpl_pass_open_req {
224 __be32 local_ip; 270 __be32 local_ip;
225 __be32 peer_ip; 271 __be32 peer_ip;
226 __be64 opt0; 272 __be64 opt0;
227#define TX_CHAN(x) ((x) << 2)
228#define NO_CONG(x) ((x) << 4) 273#define NO_CONG(x) ((x) << 4)
229#define DELACK(x) ((x) << 5) 274#define DELACK(x) ((x) << 5)
230#define ULP_MODE(x) ((x) << 8)
231#define RCV_BUFSIZ(x) ((x) << 12)
232#define RCV_BUFSIZ_MASK 0x3FFU
233#define DSCP(x) ((x) << 22) 275#define DSCP(x) ((x) << 22)
234#define SMAC_SEL(x) ((u64)(x) << 28)
235#define L2T_IDX(x) ((u64)(x) << 36)
236#define TCAM_BYPASS(x) ((u64)(x) << 48) 276#define TCAM_BYPASS(x) ((u64)(x) << 48)
237#define NAGLE(x) ((u64)(x) << 49) 277#define NAGLE(x) ((u64)(x) << 49)
238#define WND_SCALE(x) ((u64)(x) << 50)
239#define KEEP_ALIVE(x) ((u64)(x) << 54)
240#define MSS_IDX(x) ((u64)(x) << 60)
241 __be64 opt1; 278 __be64 opt1;
242#define SYN_RSS_ENABLE (1 << 0) 279#define SYN_RSS_ENABLE (1 << 0)
243#define SYN_RSS_QUEUE(x) ((x) << 2) 280#define SYN_RSS_QUEUE(x) ((x) << 2)
@@ -267,20 +304,13 @@ struct cpl_pass_accept_rpl {
267 WR_HDR; 304 WR_HDR;
268 union opcode_tid ot; 305 union opcode_tid ot;
269 __be32 opt2; 306 __be32 opt2;
270#define RSS_QUEUE(x) ((x) << 0)
271#define RSS_QUEUE_VALID (1 << 10)
272#define RX_COALESCE_VALID(x) ((x) << 11) 307#define RX_COALESCE_VALID(x) ((x) << 11)
273#define RX_COALESCE(x) ((x) << 12) 308#define RX_COALESCE(x) ((x) << 12)
274#define PACE(x) ((x) << 16) 309#define PACE(x) ((x) << 16)
275#define RX_FC_VALID ((1U) << 19)
276#define RX_FC_DISABLE ((1U) << 20)
277#define TX_QUEUE(x) ((x) << 23) 310#define TX_QUEUE(x) ((x) << 23)
278#define RX_CHANNEL(x) ((x) << 26)
279#define CCTRL_ECN(x) ((x) << 27) 311#define CCTRL_ECN(x) ((x) << 27)
280#define WND_SCALE_EN(x) ((x) << 28)
281#define TSTAMPS_EN(x) ((x) << 29) 312#define TSTAMPS_EN(x) ((x) << 29)
282#define SACK_EN(x) ((x) << 30) 313#define SACK_EN(x) ((x) << 30)
283#define T5_OPT_2_VALID ((1U) << 31)
284 __be64 opt0; 314 __be64 opt0;
285}; 315};
286 316
@@ -305,10 +335,10 @@ struct cpl_act_open_req {
305 __be32 opt2; 335 __be32 opt2;
306}; 336};
307 337
308#define S_FILTER_TUPLE 24 338#define FILTER_TUPLE_S 24
309#define M_FILTER_TUPLE 0xFFFFFFFFFF 339#define FILTER_TUPLE_M 0xFFFFFFFFFF
310#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) 340#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S)
311#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE) 341#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M)
312struct cpl_t5_act_open_req { 342struct cpl_t5_act_open_req {
313 WR_HDR; 343 WR_HDR;
314 union opcode_tid ot; 344 union opcode_tid ot;
@@ -579,10 +609,16 @@ struct cpl_rx_data_ack {
579 WR_HDR; 609 WR_HDR;
580 union opcode_tid ot; 610 union opcode_tid ot;
581 __be32 credit_dack; 611 __be32 credit_dack;
582#define RX_CREDITS(x) ((x) << 0)
583#define RX_FORCE_ACK(x) ((x) << 28)
584}; 612};
585 613
614/* cpl_rx_data_ack.ack_seq fields */
615#define RX_CREDITS_S 0
616#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S)
617
618#define RX_FORCE_ACK_S 28
619#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
620#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
621
586struct cpl_rx_pkt { 622struct cpl_rx_pkt {
587 struct rss_header rsshdr; 623 struct rss_header rsshdr;
588 u8 opcode; 624 u8 opcode;
@@ -803,6 +839,9 @@ enum {
803 ULP_TX_SC_ISGL = 0x83 839 ULP_TX_SC_ISGL = 0x83
804}; 840};
805 841
842#define ULPTX_CMD_S 24
843#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
844
806struct ulptx_sge_pair { 845struct ulptx_sge_pair {
807 __be32 len[2]; 846 __be32 len[2];
808 __be64 addr[2]; 847 __be64 addr[2];
@@ -810,7 +849,6 @@ struct ulptx_sge_pair {
810 849
811struct ulptx_sgl { 850struct ulptx_sgl {
812 __be32 cmd_nsge; 851 __be32 cmd_nsge;
813#define ULPTX_CMD(x) ((x) << 24)
814#define ULPTX_NSGE(x) ((x) << 0) 852#define ULPTX_NSGE(x) ((x) << 0)
815#define ULPTX_MORE (1U << 23) 853#define ULPTX_MORE (1U << 23)
816 __be32 len0; 854 __be32 len0;
@@ -821,15 +859,21 @@ struct ulptx_sgl {
821struct ulp_mem_io { 859struct ulp_mem_io {
822 WR_HDR; 860 WR_HDR;
823 __be32 cmd; 861 __be32 cmd;
824#define ULP_MEMIO_ORDER(x) ((x) << 23)
825 __be32 len16; /* command length */ 862 __be32 len16; /* command length */
826 __be32 dlen; /* data length in 32-byte units */ 863 __be32 dlen; /* data length in 32-byte units */
827#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
828 __be32 lock_addr; 864 __be32 lock_addr;
829#define ULP_MEMIO_ADDR(x) ((x) << 0)
830#define ULP_MEMIO_LOCK(x) ((x) << 31) 865#define ULP_MEMIO_LOCK(x) ((x) << 31)
831}; 866};
832 867
868/* additional ulp_mem_io.cmd fields */
869#define ULP_MEMIO_ORDER_S 23
870#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
871#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U)
872
873#define T5_ULP_MEMIO_IMM_S 23
874#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
875#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
876
833#define S_T5_ULP_MEMIO_IMM 23 877#define S_T5_ULP_MEMIO_IMM 23
834#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) 878#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
835#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) 879#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
@@ -838,4 +882,12 @@ struct ulp_mem_io {
838#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) 882#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
839#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) 883#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
840 884
885/* ulp_mem_io.lock_addr fields */
886#define ULP_MEMIO_ADDR_S 0
887#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
888
889/* ulp_mem_io.dlen fields */
890#define ULP_MEMIO_DATA_LEN_S 0
891#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
892
841#endif /* __T4_MSG_H */ 893#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7cca67fde4f4..6fc46dc11988 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1395,7 +1395,11 @@ struct fw_eq_ofld_cmd {
1395 * Macros for VIID parsing: 1395 * Macros for VIID parsing:
1396 * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number 1396 * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
1397 */ 1397 */
1398#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) 1398
1399#define FW_VIID_PFN_S 8
1400#define FW_VIID_PFN_M 0x7
1401#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M)
1402
1399#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) 1403#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
1400#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) 1404#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
1401 1405
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index cd538afa40dd..aff6d37f2676 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -907,7 +907,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
907 sgl->addr0 = cpu_to_be64(addr[1]); 907 sgl->addr0 = cpu_to_be64(addr[1]);
908 } 908 }
909 909
910 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | 910 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
911 ULPTX_NSGE(nfrags)); 911 ULPTX_NSGE(nfrags));
912 if (likely(--nfrags == 0)) 912 if (likely(--nfrags == 0))
913 return; 913 return;
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 48e45b1ea4e5..87f9280d9b43 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1757,7 +1757,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
1757 csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); 1757 csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
1758 else { 1758 else {
1759 /* Program DSGL to dma payload */ 1759 /* Program DSGL to dma payload */
1760 dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | 1760 dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
1761 ULPTX_MORE | ULPTX_NSGE(1)); 1761 ULPTX_MORE | ULPTX_NSGE(1));
1762 dsgl.len0 = cpu_to_be32(pld_len); 1762 dsgl.len0 = cpu_to_be32(pld_len);
1763 dsgl.addr0 = cpu_to_be64(pld->paddr); 1763 dsgl.addr0 = cpu_to_be64(pld->paddr);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index b37c69a2772a..b9c012ba34f8 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -322,7 +322,7 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
322 struct csio_dma_buf *dma_buf; 322 struct csio_dma_buf *dma_buf;
323 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); 323 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
324 324
325 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE | 325 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
326 ULPTX_NSGE(req->nsge)); 326 ULPTX_NSGE(req->nsge));
327 /* Now add the data SGLs */ 327 /* Now add the data SGLs */
328 if (likely(!req->dcopy)) { 328 if (likely(!req->dcopy)) {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ccacf09c2c16..ed0e16866dc7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -188,18 +188,18 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
188 unsigned int qid_atid = ((unsigned int)csk->atid) | 188 unsigned int qid_atid = ((unsigned int)csk->atid) |
189 (((unsigned int)csk->rss_qid) << 14); 189 (((unsigned int)csk->rss_qid) << 14);
190 190
191 opt0 = KEEP_ALIVE(1) | 191 opt0 = KEEP_ALIVE_F |
192 WND_SCALE(wscale) | 192 WND_SCALE_V(wscale) |
193 MSS_IDX(csk->mss_idx) | 193 MSS_IDX_V(csk->mss_idx) |
194 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | 194 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
195 TX_CHAN(csk->tx_chan) | 195 TX_CHAN_V(csk->tx_chan) |
196 SMAC_SEL(csk->smac_idx) | 196 SMAC_SEL_V(csk->smac_idx) |
197 ULP_MODE(ULP_MODE_ISCSI) | 197 ULP_MODE_V(ULP_MODE_ISCSI) |
198 RCV_BUFSIZ(cxgb4i_rcv_win >> 10); 198 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
199 opt2 = RX_CHANNEL(0) | 199 opt2 = RX_CHANNEL_V(0) |
200 RSS_QUEUE_VALID | 200 RSS_QUEUE_VALID_F |
201 (1 << 20) | 201 (RX_FC_DISABLE_F) |
202 RSS_QUEUE(csk->rss_qid); 202 RSS_QUEUE_V(csk->rss_qid);
203 203
204 if (is_t4(lldi->adapter_type)) { 204 if (is_t4(lldi->adapter_type)) {
205 struct cpl_act_open_req *req = 205 struct cpl_act_open_req *req =
@@ -216,7 +216,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
216 req->params = cpu_to_be32(cxgb4_select_ntuple( 216 req->params = cpu_to_be32(cxgb4_select_ntuple(
217 csk->cdev->ports[csk->port_id], 217 csk->cdev->ports[csk->port_id],
218 csk->l2t)); 218 csk->l2t));
219 opt2 |= 1 << 22; 219 opt2 |= RX_FC_VALID_F;
220 req->opt2 = cpu_to_be32(opt2); 220 req->opt2 = cpu_to_be32(opt2);
221 221
222 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 222 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -236,7 +236,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
236 req->local_ip = csk->saddr.sin_addr.s_addr; 236 req->local_ip = csk->saddr.sin_addr.s_addr;
237 req->peer_ip = csk->daddr.sin_addr.s_addr; 237 req->peer_ip = csk->daddr.sin_addr.s_addr;
238 req->opt0 = cpu_to_be64(opt0); 238 req->opt0 = cpu_to_be64(opt0);
239 req->params = cpu_to_be64(V_FILTER_TUPLE( 239 req->params = cpu_to_be64(FILTER_TUPLE_V(
240 cxgb4_select_ntuple( 240 cxgb4_select_ntuple(
241 csk->cdev->ports[csk->port_id], 241 csk->cdev->ports[csk->port_id],
242 csk->l2t))); 242 csk->l2t)));
@@ -271,19 +271,19 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
271 unsigned int qid_atid = ((unsigned int)csk->atid) | 271 unsigned int qid_atid = ((unsigned int)csk->atid) |
272 (((unsigned int)csk->rss_qid) << 14); 272 (((unsigned int)csk->rss_qid) << 14);
273 273
274 opt0 = KEEP_ALIVE(1) | 274 opt0 = KEEP_ALIVE_F |
275 WND_SCALE(wscale) | 275 WND_SCALE_V(wscale) |
276 MSS_IDX(csk->mss_idx) | 276 MSS_IDX_V(csk->mss_idx) |
277 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | 277 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
278 TX_CHAN(csk->tx_chan) | 278 TX_CHAN_V(csk->tx_chan) |
279 SMAC_SEL(csk->smac_idx) | 279 SMAC_SEL_V(csk->smac_idx) |
280 ULP_MODE(ULP_MODE_ISCSI) | 280 ULP_MODE_V(ULP_MODE_ISCSI) |
281 RCV_BUFSIZ(cxgb4i_rcv_win >> 10); 281 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
282 282
283 opt2 = RX_CHANNEL(0) | 283 opt2 = RX_CHANNEL_V(0) |
284 RSS_QUEUE_VALID | 284 RSS_QUEUE_VALID_F |
285 RX_FC_DISABLE | 285 RX_FC_DISABLE_F |
286 RSS_QUEUE(csk->rss_qid); 286 RSS_QUEUE_V(csk->rss_qid);
287 287
288 if (t4) { 288 if (t4) {
289 struct cpl_act_open_req6 *req = 289 struct cpl_act_open_req6 *req =
@@ -304,7 +304,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
304 304
305 req->opt0 = cpu_to_be64(opt0); 305 req->opt0 = cpu_to_be64(opt0);
306 306
307 opt2 |= RX_FC_VALID; 307 opt2 |= RX_FC_VALID_F;
308 req->opt2 = cpu_to_be32(opt2); 308 req->opt2 = cpu_to_be32(opt2);
309 309
310 req->params = cpu_to_be32(cxgb4_select_ntuple( 310 req->params = cpu_to_be32(cxgb4_select_ntuple(
@@ -327,10 +327,10 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
327 8); 327 8);
328 req->opt0 = cpu_to_be64(opt0); 328 req->opt0 = cpu_to_be64(opt0);
329 329
330 opt2 |= T5_OPT_2_VALID; 330 opt2 |= T5_OPT_2_VALID_F;
331 req->opt2 = cpu_to_be32(opt2); 331 req->opt2 = cpu_to_be32(opt2);
332 332
333 req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple( 333 req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
334 csk->cdev->ports[csk->port_id], 334 csk->cdev->ports[csk->port_id],
335 csk->l2t))); 335 csk->l2t)));
336 } 336 }
@@ -451,7 +451,8 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
451 INIT_TP_WR(req, csk->tid); 451 INIT_TP_WR(req, csk->tid);
452 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 452 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
453 csk->tid)); 453 csk->tid));
454 req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1)); 454 req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
455 | RX_FORCE_ACK_F);
455 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 456 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
456 return credits; 457 return credits;
457} 458}
@@ -1440,16 +1441,16 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
1440 1441
1441 INIT_ULPTX_WR(req, wr_len, 0, 0); 1442 INIT_ULPTX_WR(req, wr_len, 0, 0);
1442 if (is_t4(lldi->adapter_type)) 1443 if (is_t4(lldi->adapter_type))
1443 req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | 1444 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1444 (ULP_MEMIO_ORDER(1))); 1445 (ULP_MEMIO_ORDER_F));
1445 else 1446 else
1446 req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | 1447 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1447 (V_T5_ULP_MEMIO_IMM(1))); 1448 (T5_ULP_MEMIO_IMM_F));
1448 req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); 1449 req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1449 req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); 1450 req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
1450 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); 1451 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1451 1452
1452 idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM)); 1453 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
1453 idata->len = htonl(dlen); 1454 idata->len = htonl(dlen);
1454} 1455}
1455 1456
@@ -1673,7 +1674,8 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1673 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); 1674 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1674 cdev->itp = &cxgb4i_iscsi_transport; 1675 cdev->itp = &cxgb4i_iscsi_transport;
1675 1676
1676 cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8; 1677 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
1678 << FW_VIID_PFN_S;
1677 pr_info("cdev 0x%p,%s, pfvf %u.\n", 1679 pr_info("cdev 0x%p,%s, pfvf %u.\n",
1678 cdev, lldi->ports[0]->name, cdev->pfvf); 1680 cdev, lldi->ports[0]->name, cdev->pfvf);
1679 1681